Index: vendor/compiler-rt/dist/lib/asan/asan_mapping.h =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_mapping.h (revision 337140) +++ vendor/compiler-rt/dist/lib/asan/asan_mapping.h (revision 337141) @@ -1,401 +1,401 @@ //===-- asan_mapping.h ------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Defines ASan memory mapping. //===----------------------------------------------------------------------===// #ifndef ASAN_MAPPING_H #define ASAN_MAPPING_H #include "asan_internal.h" // The full explanation of the memory mapping could be found here: // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm // // Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000: // || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || // || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || // || `[0x00008fff7000, 0x02008fff6fff]` || ShadowGap || // || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || // || `[0x000000000000, 0x00007fff7fff]` || LowMem || // // When SHADOW_OFFSET is zero (-pie): // || `[0x100000000000, 0x7fffffffffff]` || HighMem || // || `[0x020000000000, 0x0fffffffffff]` || HighShadow || // || `[0x000000040000, 0x01ffffffffff]` || ShadowGap || // // Special case when something is already mapped between // 0x003000000000 and 0x005000000000 (e.g. when prelink is installed): // || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || // || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || // || `[0x005000000000, 0x02008fff6fff]` || ShadowGap3 || // || `[0x003000000000, 0x004fffffffff]` || MidMem || // || `[0x000a7fff8000, 0x002fffffffff]` || ShadowGap2 || // || `[0x00067fff8000, 0x000a7fff7fff]` || MidShadow || // || `[0x00008fff7000, 0x00067fff7fff]` || ShadowGap || // || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || // || `[0x000000000000, 0x00007fff7fff]` || LowMem || // // Default Linux/i386 mapping on x86_64 machine: // || `[0x40000000, 0xffffffff]` || HighMem || // || `[0x28000000, 0x3fffffff]` || HighShadow || // || `[0x24000000, 0x27ffffff]` || ShadowGap || // || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x00000000, 0x1fffffff]` || LowMem || // // Default Linux/i386 mapping on i386 machine // (addresses starting with 0xc0000000 are reserved // for kernel and thus not sanitized): // || `[0x38000000, 0xbfffffff]` || HighMem || // || `[0x27000000, 0x37ffffff]` || HighShadow || // || `[0x24000000, 0x26ffffff]` || ShadowGap || // || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x00000000, 0x1fffffff]` || LowMem || // // Default Linux/MIPS32 mapping: // || `[0x2aaa0000, 0xffffffff]` || HighMem || // || `[0x0fff4000, 0x2aa9ffff]` || HighShadow || // || `[0x0bff4000, 0x0fff3fff]` || ShadowGap || // || `[0x0aaa0000, 0x0bff3fff]` || LowShadow || // || `[0x00000000, 0x0aa9ffff]` || LowMem || // // Default Linux/MIPS64 mapping: // || `[0x4000000000, 0xffffffffff]` || HighMem || // || `[0x2800000000, 0x3fffffffff]` || HighShadow || // || `[0x2400000000, 0x27ffffffff]` || ShadowGap || // || `[0x2000000000, 0x23ffffffff]` || LowShadow || // || `[0x0000000000, 0x1fffffffff]` || LowMem || // // Default Linux/AArch64 (39-bit VMA) mapping: // || `[0x2000000000, 0x7fffffffff]` 
|| highmem || // || `[0x1400000000, 0x1fffffffff]` || highshadow || // || `[0x1200000000, 0x13ffffffff]` || shadowgap || // || `[0x1000000000, 0x11ffffffff]` || lowshadow || // || `[0x0000000000, 0x0fffffffff]` || lowmem || // // Default Linux/AArch64 (42-bit VMA) mapping: // || `[0x10000000000, 0x3ffffffffff]` || highmem || // || `[0x0a000000000, 0x0ffffffffff]` || highshadow || // || `[0x09000000000, 0x09fffffffff]` || shadowgap || // || `[0x08000000000, 0x08fffffffff]` || lowshadow || // || `[0x00000000000, 0x07fffffffff]` || lowmem || // // Default Linux/S390 mapping: // || `[0x30000000, 0x7fffffff]` || HighMem || // || `[0x26000000, 0x2fffffff]` || HighShadow || // || `[0x24000000, 0x25ffffff]` || ShadowGap || // || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x00000000, 0x1fffffff]` || LowMem || // // Default Linux/SystemZ mapping: // || `[0x14000000000000, 0x1fffffffffffff]` || HighMem || // || `[0x12800000000000, 0x13ffffffffffff]` || HighShadow || // || `[0x12000000000000, 0x127fffffffffff]` || ShadowGap || // || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow || // || `[0x00000000000000, 0x0fffffffffffff]` || LowMem || // // Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: // || `[0x500000000000, 0x7fffffffffff]` || HighMem || // || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow || // || `[0x480000000000, 0x49ffffffffff]` || ShadowGap || // || `[0x400000000000, 0x47ffffffffff]` || LowShadow || // || `[0x000000000000, 0x3fffffffffff]` || LowMem || // // Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000: // || `[0x60000000, 0xffffffff]` || HighMem || // || `[0x4c000000, 0x5fffffff]` || HighShadow || // || `[0x48000000, 0x4bffffff]` || ShadowGap || // || `[0x40000000, 0x47ffffff]` || LowShadow || // || `[0x00000000, 0x3fffffff]` || LowMem || // // Shadow mapping on NetBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: // || `[0x4feffffffe01, 0x7f7ffffff000]` || HighMem || // || `[0x49fdffffffc0, 0x4feffffffe00]` || HighShadow || // || `[0x480000000000, 0x49fdffffffbf]` || ShadowGap || // || `[0x400000000000, 0x47ffffffffff]` || LowShadow || // || `[0x000000000000, 0x3fffffffffff]` || LowMem || // -// Shadow mapping on NerBSD/i386 with SHADOW_OFFSET == 0x40000000: +// Shadow mapping on NetBSD/i386 with SHADOW_OFFSET == 0x40000000: // || `[0x60000000, 0xfffff000]` || HighMem || // || `[0x4c000000, 0x5fffffff]` || HighShadow || // || `[0x48000000, 0x4bffffff]` || ShadowGap || // || `[0x40000000, 0x47ffffff]` || LowShadow || // || `[0x00000000, 0x3fffffff]` || LowMem || // // Default Windows/i386 mapping: // (the exact location of HighShadow/HighMem may vary depending // on WoW64, /LARGEADDRESSAWARE, etc). // || `[0x50000000, 0xffffffff]` || HighMem || // || `[0x3a000000, 0x4fffffff]` || HighShadow || // || `[0x36000000, 0x39ffffff]` || ShadowGap || // || `[0x30000000, 0x35ffffff]` || LowShadow || // || `[0x00000000, 0x2fffffff]` || LowMem || // // Shadow mapping on Myriad2 (for shadow scale 5): // || `[0x9ff80000, 0x9fffffff]` || ShadowGap || // || `[0x9f000000, 0x9ff7ffff]` || LowShadow || // || `[0x80000000, 0x9effffff]` || LowMem || // || `[0x00000000, 0x7fffffff]` || Ignored || #if defined(ASAN_SHADOW_SCALE) static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE; #else static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 
5 : 3; #endif static const u64 kDefaultShadowSentinel = ~(uptr)0; static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 static const u64 kDefaultShadowOffset64 = 1ULL << 44; static const u64 kDefaultShort64bitShadowOffset = 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G. static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kIosShadowOffset64 = 0x120200000; static const u64 kIosSimShadowOffset32 = 1ULL << 30; static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64; static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; static const u64 kPPC64_ShadowOffset64 = 1ULL << 44; static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 static const u64 kMyriadMemoryOffset32 = 0x80000000ULL; static const u64 kMyriadMemorySize32 = 0x20000000ULL; static const u64 kMyriadMemoryEnd32 = kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1; static const u64 kMyriadShadowOffset32 = (kMyriadMemoryOffset32 + kMyriadMemorySize32 - (kMyriadMemorySize32 >> kDefaultShadowScale)); static const u64 kMyriadCacheBitMask32 = 0x40000000ULL; #define SHADOW_SCALE kDefaultShadowScale #if SANITIZER_FUCHSIA # define SHADOW_OFFSET (0) #elif SANITIZER_WORDSIZE == 32 # if SANITIZER_ANDROID # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address # elif defined(__mips__) # define SHADOW_OFFSET kMIPS32_ShadowOffset32 # elif SANITIZER_FREEBSD # define SHADOW_OFFSET kFreeBSD_ShadowOffset32 # elif SANITIZER_NETBSD # define SHADOW_OFFSET kNetBSD_ShadowOffset32 # elif SANITIZER_WINDOWS # define SHADOW_OFFSET kWindowsShadowOffset32 # elif SANITIZER_IOS # if SANITIZER_IOSSIM # define SHADOW_OFFSET kIosSimShadowOffset32 # else # define SHADOW_OFFSET kIosShadowOffset32 # endif # elif SANITIZER_MYRIAD2 # define SHADOW_OFFSET kMyriadShadowOffset32 # else # define SHADOW_OFFSET kDefaultShadowOffset32 # endif #else # if SANITIZER_IOS # if SANITIZER_IOSSIM # define SHADOW_OFFSET kIosSimShadowOffset64 # else # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address # endif # elif defined(__aarch64__) # define SHADOW_OFFSET kAArch64_ShadowOffset64 # elif defined(__powerpc64__) # define SHADOW_OFFSET kPPC64_ShadowOffset64 # elif defined(__s390x__) # define SHADOW_OFFSET kSystemZ_ShadowOffset64 # elif SANITIZER_FREEBSD # define SHADOW_OFFSET kFreeBSD_ShadowOffset64 # elif SANITIZER_NETBSD # define SHADOW_OFFSET kNetBSD_ShadowOffset64 # elif SANITIZER_MAC # define SHADOW_OFFSET kDefaultShadowOffset64 # elif defined(__mips64) # define SHADOW_OFFSET kMIPS64_ShadowOffset64 # elif SANITIZER_WINDOWS64 # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address # else # define SHADOW_OFFSET kDefaultShort64bitShadowOffset # endif #endif #if SANITIZER_ANDROID && defined(__arm__) # define ASAN_PREMAP_SHADOW 1 #else # define ASAN_PREMAP_SHADOW 0 #endif #define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) #define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. 
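To make the mapping tables above concrete: with the typical Linux/x86_64 values (SHADOW_SCALE == 3, SHADOW_OFFSET == 0x00007fff8000), the shadow computation is just a shift plus an offset. The following stand-alone sketch is illustrative only and not part of this header; MemToShadowExample is a made-up name and the constants are hard-coded from the comment above.

#include <cstdint>
#include <cstdio>

// shadow = (mem >> SHADOW_SCALE) + SHADOW_OFFSET, assuming the typical
// Linux/x86_64 values (scale 3, offset 0x00007fff8000) from the tables above.
static uint64_t MemToShadowExample(uint64_t mem) {
  return (mem >> 3) + 0x00007fff8000ULL;
}

int main() {
  // A LowMem address lands in LowShadow:
  //   0x400000 >> 3 == 0x80000, plus the offset == 0x000080078000,
  //   inside LowShadow [0x00007fff8000, 0x00008fff6fff].
  std::printf("%#llx\n", (unsigned long long)MemToShadowExample(0x400000ULL));
  // A HighMem address lands in HighShadow:
  //   MemToShadowExample(0x7fffffffe000) == 0x10007fff7c00,
  //   inside HighShadow [0x02008fff7000, 0x10007fff7fff].
  std::printf("%#llx\n",
              (unsigned long long)MemToShadowExample(0x7fffffffe000ULL));
  return 0;
}

Because one shadow byte describes SHADOW_GRANULARITY (here 8) bytes of application memory, each shadow region is one eighth the size of the range it covers, which is exactly the ratio visible in the tables above.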
#if DO_ASAN_MAPPING_PROFILE # define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; #else # define PROFILE_ASAN_MAPPING() #endif // If 1, all shadow boundaries are constants. // Don't set to 1 other than for testing. #define ASAN_FIXED_MAPPING 0 namespace __asan { extern uptr AsanMappingProfile[]; #if ASAN_FIXED_MAPPING // Fixed mapping for 64-bit Linux. Mostly used for performance comparison // with non-fixed mapping. As of r175253 (Feb 2013) the performance // difference between fixed and non-fixed mapping is below the noise level. static uptr kHighMemEnd = 0x7fffffffffffULL; static uptr kMidMemBeg = 0x3000000000ULL; static uptr kMidMemEnd = 0x4fffffffffULL; #else extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init. #endif } // namespace __asan #if SANITIZER_MYRIAD2 #include "asan_mapping_myriad.h" #else #define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET)) #define kLowMemBeg 0 #define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0) #define kLowShadowBeg SHADOW_OFFSET #define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) #define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1) #define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg) #define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd) # define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg) # define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd) // With the zero shadow base we can not actually map pages starting from 0. // This constant is somewhat arbitrary. #define kZeroBaseShadowStart 0 #define kZeroBaseMaxShadowStart (1 << 18) #define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \ : kZeroBaseShadowStart) #define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1) #define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0) #define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0) #define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) #define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) namespace __asan { static inline bool AddrIsInLowMem(uptr a) { PROFILE_ASAN_MAPPING(); return a <= kLowMemEnd; } static inline bool AddrIsInLowShadow(uptr a) { PROFILE_ASAN_MAPPING(); return a >= kLowShadowBeg && a <= kLowShadowEnd; } static inline bool AddrIsInMidMem(uptr a) { PROFILE_ASAN_MAPPING(); return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd; } static inline bool AddrIsInMidShadow(uptr a) { PROFILE_ASAN_MAPPING(); return kMidMemBeg && a >= kMidShadowBeg && a <= kMidShadowEnd; } static inline bool AddrIsInHighMem(uptr a) { PROFILE_ASAN_MAPPING(); return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd; } static inline bool AddrIsInHighShadow(uptr a) { PROFILE_ASAN_MAPPING(); return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd; } static inline bool AddrIsInShadowGap(uptr a) { PROFILE_ASAN_MAPPING(); if (kMidMemBeg) { if (a <= kShadowGapEnd) return SHADOW_OFFSET == 0 || a >= kShadowGapBeg; return (a >= kShadowGap2Beg && a <= kShadowGap2End) || (a >= kShadowGap3Beg && a <= kShadowGap3End); } // In zero-based shadow mode we treat addresses near zero as addresses // in shadow gap as well. 
if (SHADOW_OFFSET == 0) return a <= kShadowGapEnd; return a >= kShadowGapBeg && a <= kShadowGapEnd; } } // namespace __asan #endif // SANITIZER_MYRIAD2 namespace __asan { static inline bool AddrIsInMem(uptr a) { PROFILE_ASAN_MAPPING(); return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) || (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a)); } static inline uptr MemToShadow(uptr p) { PROFILE_ASAN_MAPPING(); CHECK(AddrIsInMem(p)); return MEM_TO_SHADOW(p); } static inline bool AddrIsInShadow(uptr a) { PROFILE_ASAN_MAPPING(); return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a); } static inline bool AddrIsAlignedByGranularity(uptr a) { PROFILE_ASAN_MAPPING(); return (a & (SHADOW_GRANULARITY - 1)) == 0; } static inline bool AddressIsPoisoned(uptr a) { PROFILE_ASAN_MAPPING(); if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a)) return false; const uptr kAccessSize = 1; u8 *shadow_address = (u8*)MEM_TO_SHADOW(a); s8 shadow_value = *shadow_address; if (shadow_value) { u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1)) + kAccessSize - 1; return (last_accessed_byte >= shadow_value); } return false; } // Must be after all calls to PROFILE_ASAN_MAPPING(). static const uptr kAsanMappingProfileSize = __LINE__; } // namespace __asan #endif // ASAN_MAPPING_H Index: vendor/compiler-rt/dist/lib/asan/tests/asan_test.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/tests/asan_test.cc (revision 337140) +++ vendor/compiler-rt/dist/lib/asan/tests/asan_test.cc (revision 337141) @@ -1,1361 +1,1361 @@ //===-- asan_test.cc ------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. 
// //===----------------------------------------------------------------------===// #include "asan_test_utils.h" #include <errno.h> #include <stdarg.h> #ifdef _LIBCPP_GET_C_LOCALE #define SANITIZER_GET_C_LOCALE _LIBCPP_GET_C_LOCALE #else #if defined(__FreeBSD__) #define SANITIZER_GET_C_LOCALE 0 #elif defined(__NetBSD__) #define SANITIZER_GET_C_LOCALE LC_C_LOCALE #endif #endif #if defined(__sun__) && defined(__svr4__) using std::_setjmp; using std::_longjmp; #endif NOINLINE void *malloc_fff(size_t size) { void *res = malloc/**/(size); break_optimization(0); return res;} NOINLINE void *malloc_eee(size_t size) { void *res = malloc_fff(size); break_optimization(0); return res;} NOINLINE void *malloc_ddd(size_t size) { void *res = malloc_eee(size); break_optimization(0); return res;} NOINLINE void *malloc_ccc(size_t size) { void *res = malloc_ddd(size); break_optimization(0); return res;} NOINLINE void *malloc_bbb(size_t size) { void *res = malloc_ccc(size); break_optimization(0); return res;} NOINLINE void *malloc_aaa(size_t size) { void *res = malloc_bbb(size); break_optimization(0); return res;} NOINLINE void free_ccc(void *p) { free(p); break_optimization(0);} NOINLINE void free_bbb(void *p) { free_ccc(p); break_optimization(0);} NOINLINE void free_aaa(void *p) { free_bbb(p); break_optimization(0);} template<typename T> NOINLINE void uaf_test(int size, int off) { void *p = malloc_aaa(size); free_aaa(p); for (int i = 1; i < 100; i++) free_aaa(malloc_aaa(i)); fprintf(stderr, "writing %ld byte(s) at %p with offset %d\n", (long)sizeof(T), p, off); asan_write((T *)((char *)p + off)); } TEST(AddressSanitizer, HasFeatureAddressSanitizerTest) { #if defined(__has_feature) && __has_feature(address_sanitizer) bool asan = 1; #elif defined(__SANITIZE_ADDRESS__) bool asan = 1; #else bool asan = 0; #endif EXPECT_EQ(true, asan); } TEST(AddressSanitizer, SimpleDeathTest) { EXPECT_DEATH(exit(1), ""); } TEST(AddressSanitizer, VariousMallocsTest) { int *a = (int*)malloc(100 * sizeof(int)); a[50] = 0; free(a); int *r = (int*)malloc(10); r = (int*)realloc(r, 2000 * sizeof(int)); r[1000] = 0; free(r); int *b = new int[100]; b[50] = 0; delete [] b; int *c = new int; *c = 0; delete c; #if SANITIZER_TEST_HAS_POSIX_MEMALIGN void *pm = 0; // Valid allocation. int pm_res = posix_memalign(&pm, kPageSize, kPageSize); EXPECT_EQ(0, pm_res); EXPECT_NE(nullptr, pm); free(pm); #endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN #if SANITIZER_TEST_HAS_MEMALIGN int *ma = (int*)memalign(kPageSize, kPageSize); EXPECT_EQ(0U, (uintptr_t)ma % kPageSize); ma[123] = 0; free(ma); #endif // SANITIZER_TEST_HAS_MEMALIGN } TEST(AddressSanitizer, CallocTest) { int *a = (int*)calloc(100, sizeof(int)); EXPECT_EQ(0, a[10]); free(a); } TEST(AddressSanitizer, CallocReturnsZeroMem) { size_t sizes[] = {16, 1000, 10000, 100000, 2100000}; for (size_t s = 0; s < sizeof(sizes)/sizeof(sizes[0]); s++) { size_t size = sizes[s]; for (size_t iter = 0; iter < 5; iter++) { char *x = Ident((char*)calloc(1, size)); EXPECT_EQ(x[0], 0); EXPECT_EQ(x[size - 1], 0); EXPECT_EQ(x[size / 2], 0); EXPECT_EQ(x[size / 3], 0); EXPECT_EQ(x[size / 4], 0); memset(x, 0x42, size); free(Ident(x)); #if !defined(_WIN32) // FIXME: OOM on Windows. We should just make this a lit test // with quarantine size set to 1. free(Ident(malloc(Ident(1 << 27)))); // Try to drain the quarantine. #endif } } } // No valloc on Windows or Android.
#if !defined(_WIN32) && !defined(__ANDROID__) TEST(AddressSanitizer, VallocTest) { void *a = valloc(100); EXPECT_EQ(0U, (uintptr_t)a % kPageSize); free(a); } #endif #if SANITIZER_TEST_HAS_PVALLOC TEST(AddressSanitizer, PvallocTest) { char *a = (char*)pvalloc(kPageSize + 100); EXPECT_EQ(0U, (uintptr_t)a % kPageSize); a[kPageSize + 101] = 1; // we should not report an error here. free(a); a = (char*)pvalloc(0); // pvalloc(0) should allocate at least one page. EXPECT_EQ(0U, (uintptr_t)a % kPageSize); a[101] = 1; // we should not report an error here. free(a); } #endif // SANITIZER_TEST_HAS_PVALLOC #if !defined(_WIN32) // FIXME: Use an equivalent of pthread_setspecific on Windows. void *TSDWorker(void *test_key) { if (test_key) { pthread_setspecific(*(pthread_key_t*)test_key, (void*)0xfeedface); } return NULL; } void TSDDestructor(void *tsd) { // Spawning a thread will check that the current thread id is not -1. pthread_t th; PTHREAD_CREATE(&th, NULL, TSDWorker, NULL); PTHREAD_JOIN(th, NULL); } // This test triggers the thread-specific data destruction fiasco which occurs // if we don't manage the TSD destructors ourselves. We create a new pthread // key with a non-NULL destructor which is likely to be put after the destructor // of AsanThread in the list of destructors. // In this case the TSD for AsanThread will be destroyed before TSDDestructor // is called for the child thread, and a CHECK will fail when we call // pthread_create() to spawn the grandchild. TEST(AddressSanitizer, DISABLED_TSDTest) { pthread_t th; pthread_key_t test_key; pthread_key_create(&test_key, TSDDestructor); PTHREAD_CREATE(&th, NULL, TSDWorker, &test_key); PTHREAD_JOIN(th, NULL); pthread_key_delete(test_key); } #endif TEST(AddressSanitizer, UAF_char) { const char *uaf_string = "AddressSanitizer:.*heap-use-after-free"; EXPECT_DEATH(uaf_test<U1>(1, 0), uaf_string); EXPECT_DEATH(uaf_test<U1>(10, 0), uaf_string); EXPECT_DEATH(uaf_test<U1>(10, 10), uaf_string); EXPECT_DEATH(uaf_test<U1>(kLargeMalloc, 0), uaf_string); EXPECT_DEATH(uaf_test<U1>(kLargeMalloc, kLargeMalloc / 2), uaf_string); } TEST(AddressSanitizer, UAF_long_double) { if (sizeof(long double) == sizeof(double)) return; long double *p = Ident(new long double[10]); EXPECT_DEATH(Ident(p)[12] = 0, "WRITE of size 1[026]"); EXPECT_DEATH(Ident(p)[0] = Ident(p)[12], "READ of size 1[026]"); delete [] Ident(p); } #if !defined(_WIN32) struct Packed5 { int x; char c; } __attribute__((packed)); #else # pragma pack(push, 1) struct Packed5 { int x; char c; }; # pragma pack(pop) #endif TEST(AddressSanitizer, UAF_Packed5) { static_assert(sizeof(Packed5) == 5, "Please check the keywords used"); Packed5 *p = Ident(new Packed5[2]); EXPECT_DEATH(p[0] = p[3], "READ of size 5"); EXPECT_DEATH(p[3] = p[0], "WRITE of size 5"); delete [] Ident(p); } #if ASAN_HAS_BLACKLIST TEST(AddressSanitizer, IgnoreTest) { int *x = Ident(new int); delete Ident(x); *x = 0; } #endif // ASAN_HAS_BLACKLIST struct StructWithBitField { int bf1:1; int bf2:1; int bf3:1; int bf4:29; }; TEST(AddressSanitizer, BitFieldPositiveTest) { StructWithBitField *x = new StructWithBitField; delete Ident(x); EXPECT_DEATH(x->bf1 = 0, "use-after-free"); EXPECT_DEATH(x->bf2 = 0, "use-after-free"); EXPECT_DEATH(x->bf3 = 0, "use-after-free"); EXPECT_DEATH(x->bf4 = 0, "use-after-free"); } struct StructWithBitFields_8_24 { int a:8; int b:24; }; TEST(AddressSanitizer, BitFieldNegativeTest) { StructWithBitFields_8_24 *x = Ident(new StructWithBitFields_8_24); x->a = 0; x->b = 0; delete Ident(x); } #if ASAN_NEEDS_SEGV namespace { const char
kSEGVCrash[] = "AddressSanitizer: SEGV on unknown address"; const char kOverriddenSigactionHandler[] = "Test sigaction handler\n"; const char kOverriddenSignalHandler[] = "Test signal handler\n"; TEST(AddressSanitizer, WildAddressTest) { char *c = (char*)0x123; EXPECT_DEATH(*c = 0, kSEGVCrash); } void my_sigaction_sighandler(int, siginfo_t*, void*) { fprintf(stderr, kOverriddenSigactionHandler); exit(1); } void my_signal_sighandler(int signum) { fprintf(stderr, kOverriddenSignalHandler); exit(1); } TEST(AddressSanitizer, SignalTest) { struct sigaction sigact; memset(&sigact, 0, sizeof(sigact)); sigact.sa_sigaction = my_sigaction_sighandler; sigact.sa_flags = SA_SIGINFO; char *c = (char *)0x123; EXPECT_DEATH(*c = 0, kSEGVCrash); // ASan should allow to set sigaction()... EXPECT_EQ(0, sigaction(SIGSEGV, &sigact, 0)); #ifdef __APPLE__ EXPECT_EQ(0, sigaction(SIGBUS, &sigact, 0)); #endif EXPECT_DEATH(*c = 0, kOverriddenSigactionHandler); // ... and signal(). EXPECT_NE(SIG_ERR, signal(SIGSEGV, my_signal_sighandler)); EXPECT_DEATH(*c = 0, kOverriddenSignalHandler); } } // namespace #endif static void TestLargeMalloc(size_t size) { char buff[1024]; sprintf(buff, "is located 1 bytes to the left of %lu-byte", (long)size); EXPECT_DEATH(Ident((char*)malloc(size))[-1] = 0, buff); } TEST(AddressSanitizer, LargeMallocTest) { const int max_size = (SANITIZER_WORDSIZE == 32) ? 1 << 26 : 1 << 28; for (int i = 113; i < max_size; i = i * 2 + 13) { TestLargeMalloc(i); } } #if !GTEST_USES_SIMPLE_RE TEST(AddressSanitizer, HugeMallocTest) { if (SANITIZER_WORDSIZE != 64 || ASAN_AVOID_EXPENSIVE_TESTS) return; size_t n_megs = 4100; EXPECT_DEATH(Ident((char*)malloc(n_megs << 20))[-1] = 0, "is located 1 bytes to the left|" "AddressSanitizer failed to allocate"); } #endif #if SANITIZER_TEST_HAS_MEMALIGN void MemalignRun(size_t align, size_t size, int idx) { char *p = (char *)memalign(align, size); Ident(p)[idx] = 0; free(p); } TEST(AddressSanitizer, memalign) { for (int align = 16; align <= (1 << 23); align *= 2) { size_t size = align * 5; EXPECT_DEATH(MemalignRun(align, size, -1), "is located 1 bytes to the left"); EXPECT_DEATH(MemalignRun(align, size, size + 1), "is located 1 bytes to the right"); } } #endif // SANITIZER_TEST_HAS_MEMALIGN void *ManyThreadsWorker(void *a) { for (int iter = 0; iter < 100; iter++) { for (size_t size = 100; size < 2000; size *= 2) { free(Ident(malloc(size))); } } return 0; } #if !defined(__aarch64__) && !defined(__powerpc64__) // FIXME: Infinite loop in AArch64 (PR24389). // FIXME: Also occasional hang on powerpc. Maybe same problem as on AArch64? TEST(AddressSanitizer, ManyThreadsTest) { const size_t kNumThreads = (SANITIZER_WORDSIZE == 32 || ASAN_AVOID_EXPENSIVE_TESTS) ? 30 : 1000; pthread_t t[kNumThreads]; for (size_t i = 0; i < kNumThreads; i++) { PTHREAD_CREATE(&t[i], 0, ManyThreadsWorker, (void*)i); } for (size_t i = 0; i < kNumThreads; i++) { PTHREAD_JOIN(t[i], 0); } } #endif TEST(AddressSanitizer, ReallocTest) { const int kMinElem = 5; int *ptr = (int*)malloc(sizeof(int) * kMinElem); ptr[3] = 3; for (int i = 0; i < 10000; i++) { ptr = (int*)realloc(ptr, (my_rand() % 1000 + kMinElem) * sizeof(int)); EXPECT_EQ(3, ptr[3]); } free(ptr); // Realloc pointer returned by malloc(0). 
int *ptr2 = Ident((int*)malloc(0)); ptr2 = Ident((int*)realloc(ptr2, sizeof(*ptr2))); *ptr2 = 42; EXPECT_EQ(42, *ptr2); free(ptr2); } TEST(AddressSanitizer, ReallocFreedPointerTest) { void *ptr = Ident(malloc(42)); ASSERT_TRUE(NULL != ptr); free(ptr); EXPECT_DEATH(ptr = realloc(ptr, 77), "attempting double-free"); } TEST(AddressSanitizer, ReallocInvalidPointerTest) { void *ptr = Ident(malloc(42)); EXPECT_DEATH(ptr = realloc((int*)ptr + 1, 77), "attempting free.*not malloc"); free(ptr); } TEST(AddressSanitizer, ZeroSizeMallocTest) { // Test that malloc(0) and similar functions don't return NULL. void *ptr = Ident(malloc(0)); EXPECT_TRUE(NULL != ptr); free(ptr); #if SANITIZER_TEST_HAS_POSIX_MEMALIGN int pm_res = posix_memalign(&ptr, 1<<20, 0); EXPECT_EQ(0, pm_res); EXPECT_TRUE(NULL != ptr); free(ptr); #endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN int *int_ptr = new int[0]; int *int_ptr2 = new int[0]; EXPECT_TRUE(NULL != int_ptr); EXPECT_TRUE(NULL != int_ptr2); EXPECT_NE(int_ptr, int_ptr2); delete[] int_ptr; delete[] int_ptr2; } #if SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE static const char *kMallocUsableSizeErrorMsg = "AddressSanitizer: attempting to call malloc_usable_size()"; TEST(AddressSanitizer, MallocUsableSizeTest) { const size_t kArraySize = 100; char *array = Ident((char*)malloc(kArraySize)); int *int_ptr = Ident(new int); EXPECT_EQ(0U, malloc_usable_size(NULL)); EXPECT_EQ(kArraySize, malloc_usable_size(array)); EXPECT_EQ(sizeof(int), malloc_usable_size(int_ptr)); EXPECT_DEATH(malloc_usable_size((void*)0x123), kMallocUsableSizeErrorMsg); EXPECT_DEATH(malloc_usable_size(array + kArraySize / 2), kMallocUsableSizeErrorMsg); free(array); EXPECT_DEATH(malloc_usable_size(array), kMallocUsableSizeErrorMsg); delete int_ptr; } #endif // SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE void WrongFree() { int *x = (int*)malloc(100 * sizeof(int)); // Use the allocated memory, otherwise Clang will optimize it out. Ident(x); free(x + 1); } #if !defined(_WIN32) // FIXME: This should be a lit test. TEST(AddressSanitizer, WrongFreeTest) { EXPECT_DEATH(WrongFree(), ASAN_PCRE_DOTALL "ERROR: AddressSanitizer: attempting free.*not malloc" ".*is located 4 bytes inside of 400-byte region" ".*allocated by thread"); } #endif void DoubleFree() { int *x = (int*)malloc(100 * sizeof(int)); fprintf(stderr, "DoubleFree: x=%p\n", (void *)x); free(x); free(x); fprintf(stderr, "should have failed in the second free(%p)\n", (void *)x); abort(); } #if !defined(_WIN32) // FIXME: This should be a lit test. 
TEST(AddressSanitizer, DoubleFreeTest) { EXPECT_DEATH(DoubleFree(), ASAN_PCRE_DOTALL "ERROR: AddressSanitizer: attempting double-free" ".*is located 0 bytes inside of 400-byte region" ".*freed by thread T0 here" ".*previously allocated by thread T0 here"); } #endif template<size_t kSize> NOINLINE void SizedStackTest() { char a[kSize]; char *A = Ident((char*)&a); const char *expected_death = "AddressSanitizer: stack-buffer-"; for (size_t i = 0; i < kSize; i++) A[i] = i; EXPECT_DEATH(A[-1] = 0, expected_death); EXPECT_DEATH(A[-5] = 0, expected_death); EXPECT_DEATH(A[kSize] = 0, expected_death); EXPECT_DEATH(A[kSize + 1] = 0, expected_death); EXPECT_DEATH(A[kSize + 5] = 0, expected_death); if (kSize > 16) EXPECT_DEATH(A[kSize + 31] = 0, expected_death); } TEST(AddressSanitizer, SimpleStackTest) { SizedStackTest<1>(); SizedStackTest<2>(); SizedStackTest<3>(); SizedStackTest<4>(); SizedStackTest<5>(); SizedStackTest<6>(); SizedStackTest<7>(); SizedStackTest<16>(); SizedStackTest<25>(); SizedStackTest<34>(); SizedStackTest<43>(); SizedStackTest<51>(); SizedStackTest<62>(); SizedStackTest<64>(); SizedStackTest<128>(); } #if !defined(_WIN32) // FIXME: It's a bit hard to write multi-line death test expectations // in a portable way. Anyways, this should just be turned into a lit test. TEST(AddressSanitizer, ManyStackObjectsTest) { char XXX[10]; char YYY[20]; char ZZZ[30]; Ident(XXX); Ident(YYY); EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ"); } #endif #if 0 // This test requires online symbolizer. // Moved to lit_tests/stack-oob-frames.cc. // Reenable here once we have online symbolizer by default. NOINLINE static void Frame0(int frame, char *a, char *b, char *c) { char d[4] = {0}; char *D = Ident(d); switch (frame) { case 3: a[5]++; break; case 2: b[5]++; break; case 1: c[5]++; break; case 0: D[5]++; break; } } NOINLINE static void Frame1(int frame, char *a, char *b) { char c[4] = {0}; Frame0(frame, a, b, c); break_optimization(0); } NOINLINE static void Frame2(int frame, char *a) { char b[4] = {0}; Frame1(frame, a, b); break_optimization(0); } NOINLINE static void Frame3(int frame) { char a[4] = {0}; Frame2(frame, a); break_optimization(0); } TEST(AddressSanitizer, GuiltyStackFrame0Test) { EXPECT_DEATH(Frame3(0), "located .*in frame <.*Frame0"); } TEST(AddressSanitizer, GuiltyStackFrame1Test) { EXPECT_DEATH(Frame3(1), "located .*in frame <.*Frame1"); } TEST(AddressSanitizer, GuiltyStackFrame2Test) { EXPECT_DEATH(Frame3(2), "located .*in frame <.*Frame2"); } TEST(AddressSanitizer, GuiltyStackFrame3Test) { EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3"); } #endif NOINLINE void LongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; longjmp(buf, 1); } NOINLINE void TouchStackFunc() { int a[100]; // long array will intersect with redzones from LongJmpFunc1. int *A = Ident(a); for (int i = 0; i < 100; i++) A[i] = i*i; } // Test that we handle longjmp and do not report false positives on stack. TEST(AddressSanitizer, LongJmpTest) { static jmp_buf buf; if (!setjmp(buf)) { LongJmpFunc1(buf); } else { TouchStackFunc(); } } #if !defined(_WIN32) // Only basic longjmp is available on Windows. NOINLINE void UnderscopeLongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; _longjmp(buf, 1); } NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) { // create three red zones for these two stack objects.
int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; siglongjmp(buf, 1); } #if !defined(__ANDROID__) && !defined(__arm__) && \ !defined(__aarch64__) && !defined(__mips__) && \ !defined(__mips64) && !defined(__s390__) NOINLINE void BuiltinLongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; __builtin_longjmp((void**)buf, 1); } // Does not work on ARM: // https://github.com/google/sanitizers/issues/185 TEST(AddressSanitizer, BuiltinLongJmpTest) { static jmp_buf buf; if (!__builtin_setjmp((void**)buf)) { BuiltinLongJmpFunc1(buf); } else { TouchStackFunc(); } } #endif // !defined(__ANDROID__) && !defined(__arm__) && // !defined(__aarch64__) && !defined(__mips__) // !defined(__mips64) && !defined(__s390__) TEST(AddressSanitizer, UnderscopeLongJmpTest) { static jmp_buf buf; if (!_setjmp(buf)) { UnderscopeLongJmpFunc1(buf); } else { TouchStackFunc(); } } TEST(AddressSanitizer, SigLongJmpTest) { static sigjmp_buf buf; if (!sigsetjmp(buf, 1)) { SigLongJmpFunc1(buf); } else { TouchStackFunc(); } } #endif // FIXME: Why does clang-cl define __EXCEPTIONS? #if defined(__EXCEPTIONS) && !defined(_WIN32) NOINLINE void ThrowFunc() { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; ASAN_THROW(1); } TEST(AddressSanitizer, CxxExceptionTest) { if (ASAN_UAR) return; // TODO(kcc): this test crashes on 32-bit for some reason... if (SANITIZER_WORDSIZE == 32) return; try { ThrowFunc(); } catch(...) {} TouchStackFunc(); } #endif void *ThreadStackReuseFunc1(void *unused) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; pthread_exit(0); return 0; } void *ThreadStackReuseFunc2(void *unused) { TouchStackFunc(); return 0; } #if !defined(__thumb__) TEST(AddressSanitizer, ThreadStackReuseTest) { pthread_t t; PTHREAD_CREATE(&t, 0, ThreadStackReuseFunc1, 0); PTHREAD_JOIN(t, 0); PTHREAD_CREATE(&t, 0, ThreadStackReuseFunc2, 0); PTHREAD_JOIN(t, 0); } #endif #if defined(__SSE2__) #include <emmintrin.h> TEST(AddressSanitizer, Store128Test) { char *a = Ident((char*)malloc(Ident(12))); char *p = a; if (((uintptr_t)a % 16) != 0) p = a + 8; assert(((uintptr_t)p % 16) == 0); __m128i value_wide = _mm_set1_epi16(0x1234); EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), "AddressSanitizer: heap-buffer-overflow"); EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), "WRITE of size 16"); EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), "located 0 bytes to the right of 12-byte"); free(a); } #endif // FIXME: All tests that use this function should be turned into lit tests. string RightOOBErrorMessage(int oob_distance, bool is_write) { assert(oob_distance >= 0); char expected_str[100]; sprintf(expected_str, ASAN_PCRE_DOTALL #if !GTEST_USES_SIMPLE_RE "buffer-overflow.*%s.*" #endif "located %d bytes to the right", #if !GTEST_USES_SIMPLE_RE is_write ? "WRITE" : "READ", #endif oob_distance); return string(expected_str); } string RightOOBWriteMessage(int oob_distance) { return RightOOBErrorMessage(oob_distance, /*is_write*/true); } string RightOOBReadMessage(int oob_distance) { return RightOOBErrorMessage(oob_distance, /*is_write*/false); } // FIXME: All tests that use this function should be turned into lit tests.
string LeftOOBErrorMessage(int oob_distance, bool is_write) { assert(oob_distance > 0); char expected_str[100]; sprintf(expected_str, #if !GTEST_USES_SIMPLE_RE ASAN_PCRE_DOTALL "%s.*" #endif "located %d bytes to the left", #if !GTEST_USES_SIMPLE_RE is_write ? "WRITE" : "READ", #endif oob_distance); return string(expected_str); } string LeftOOBWriteMessage(int oob_distance) { return LeftOOBErrorMessage(oob_distance, /*is_write*/true); } string LeftOOBReadMessage(int oob_distance) { return LeftOOBErrorMessage(oob_distance, /*is_write*/false); } string LeftOOBAccessMessage(int oob_distance) { assert(oob_distance > 0); char expected_str[100]; sprintf(expected_str, "located %d bytes to the left", oob_distance); return string(expected_str); } char* MallocAndMemsetString(size_t size, char ch) { char *s = Ident((char*)malloc(size)); memset(s, ch, size); return s; } char* MallocAndMemsetString(size_t size) { return MallocAndMemsetString(size, 'z'); } #if defined(__linux__) && !defined(__ANDROID__) #define READ_TEST(READ_N_BYTES) \ char *x = new char[10]; \ int fd = open("/proc/self/stat", O_RDONLY); \ ASSERT_GT(fd, 0); \ EXPECT_DEATH(READ_N_BYTES, \ ASAN_PCRE_DOTALL \ "AddressSanitizer: heap-buffer-overflow" \ ".* is located 0 bytes to the right of 10-byte region"); \ close(fd); \ delete [] x; \ TEST(AddressSanitizer, pread) { READ_TEST(pread(fd, x, 15, 0)); } TEST(AddressSanitizer, pread64) { READ_TEST(pread64(fd, x, 15, 0)); } TEST(AddressSanitizer, read) { READ_TEST(read(fd, x, 15)); } #endif // defined(__linux__) && !defined(__ANDROID__) // This test case fails // Clang optimizes memcpy/memset calls which lead to unaligned access TEST(AddressSanitizer, DISABLED_MemIntrinsicUnalignedAccessTest) { int size = Ident(4096); char *s = Ident((char*)malloc(size)); EXPECT_DEATH(memset(s + size - 1, 0, 2), RightOOBWriteMessage(0)); free(s); } NOINLINE static int LargeFunction(bool do_bad_access) { int *x = new int[100]; x[0]++; x[1]++; x[2]++; x[3]++; x[4]++; x[5]++; x[6]++; x[7]++; x[8]++; x[9]++; x[do_bad_access ? 100 : 0]++; int res = __LINE__; x[10]++; x[11]++; x[12]++; x[13]++; x[14]++; x[15]++; x[16]++; x[17]++; x[18]++; x[19]++; delete[] x; return res; } // Test the we have correct debug info for the failing instruction. // This test requires the in-process symbolizer to be enabled by default. TEST(AddressSanitizer, DISABLED_LargeFunctionSymbolizeTest) { int failing_line = LargeFunction(false); char expected_warning[128]; sprintf(expected_warning, "LargeFunction.*asan_test.*:%d", failing_line); EXPECT_DEATH(LargeFunction(true), expected_warning); } // Check that we unwind and symbolize correctly. 
TEST(AddressSanitizer, DISABLED_MallocFreeUnwindAndSymbolizeTest) { int *a = (int*)malloc_aaa(sizeof(int)); *a = 1; free_aaa(a); EXPECT_DEATH(*a = 1, "free_ccc.*free_bbb.*free_aaa.*" "malloc_fff.*malloc_eee.*malloc_ddd"); } static bool TryToSetThreadName(const char *name) { #if defined(__linux__) && defined(PR_SET_NAME) return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); #else return false; #endif } void *ThreadedTestAlloc(void *a) { EXPECT_EQ(true, TryToSetThreadName("AllocThr")); int **p = (int**)a; *p = new int; return 0; } void *ThreadedTestFree(void *a) { EXPECT_EQ(true, TryToSetThreadName("FreeThr")); int **p = (int**)a; delete *p; return 0; } void *ThreadedTestUse(void *a) { EXPECT_EQ(true, TryToSetThreadName("UseThr")); int **p = (int**)a; **p = 1; return 0; } void ThreadedTestSpawn() { pthread_t t; int *x; PTHREAD_CREATE(&t, 0, ThreadedTestAlloc, &x); PTHREAD_JOIN(t, 0); PTHREAD_CREATE(&t, 0, ThreadedTestFree, &x); PTHREAD_JOIN(t, 0); PTHREAD_CREATE(&t, 0, ThreadedTestUse, &x); PTHREAD_JOIN(t, 0); } #if !defined(_WIN32) // FIXME: This should be a lit test. TEST(AddressSanitizer, ThreadedTest) { EXPECT_DEATH(ThreadedTestSpawn(), ASAN_PCRE_DOTALL "Thread T.*created" ".*Thread T.*created" ".*Thread T.*created"); } #endif void *ThreadedTestFunc(void *unused) { // Check if prctl(PR_SET_NAME) is supported. Return if not. if (!TryToSetThreadName("TestFunc")) return 0; EXPECT_DEATH(ThreadedTestSpawn(), ASAN_PCRE_DOTALL "WRITE .*thread T. .UseThr." ".*freed by thread T. .FreeThr. here:" ".*previously allocated by thread T. .AllocThr. here:" ".*Thread T. .UseThr. created by T.*TestFunc" ".*Thread T. .FreeThr. created by T" ".*Thread T. .AllocThr. created by T" ""); return 0; } TEST(AddressSanitizer, ThreadNamesTest) { // Run ThreadedTestFunc in a separate thread because it tries to set a // thread name and we don't want to change the main thread's name. pthread_t t; PTHREAD_CREATE(&t, 0, ThreadedTestFunc, 0); PTHREAD_JOIN(t, 0); } #if ASAN_NEEDS_SEGV TEST(AddressSanitizer, ShadowGapTest) { #if SANITIZER_WORDSIZE == 32 - char *addr = (char*)0x22000000; + char *addr = (char*)0x23000000; #else # if defined(__powerpc64__) char *addr = (char*)0x024000800000; # elif defined(__s390x__) char *addr = (char*)0x11000000000000; # else char *addr = (char*)0x0000100000080000; # endif #endif EXPECT_DEATH(*addr = 1, "AddressSanitizer: (SEGV|BUS) on unknown"); } #endif // ASAN_NEEDS_SEGV extern "C" { NOINLINE static void UseThenFreeThenUse() { char *x = Ident((char*)malloc(8)); *x = 1; free_aaa(x); *x = 2; } } TEST(AddressSanitizer, UseThenFreeThenUseTest) { EXPECT_DEATH(UseThenFreeThenUse(), "freed by thread"); } TEST(AddressSanitizer, StrDupTest) { free(strdup(Ident("123"))); } // Currently we create and poison redzone at right of global variables. 
static char static110[110]; const char ConstGlob[7] = {1, 2, 3, 4, 5, 6, 7}; static const char StaticConstGlob[3] = {9, 8, 7}; TEST(AddressSanitizer, GlobalTest) { static char func_static15[15]; static char fs1[10]; static char fs2[10]; static char fs3[10]; glob5[Ident(0)] = 0; glob5[Ident(1)] = 0; glob5[Ident(2)] = 0; glob5[Ident(3)] = 0; glob5[Ident(4)] = 0; EXPECT_DEATH(glob5[Ident(5)] = 0, "0 bytes to the right of global variable.*glob5.* size 5"); EXPECT_DEATH(glob5[Ident(5+6)] = 0, "6 bytes to the right of global variable.*glob5.* size 5"); Ident(static110); // avoid optimizations static110[Ident(0)] = 0; static110[Ident(109)] = 0; EXPECT_DEATH(static110[Ident(110)] = 0, "0 bytes to the right of global variable"); EXPECT_DEATH(static110[Ident(110+7)] = 0, "7 bytes to the right of global variable"); Ident(func_static15); // avoid optimizations func_static15[Ident(0)] = 0; EXPECT_DEATH(func_static15[Ident(15)] = 0, "0 bytes to the right of global variable"); EXPECT_DEATH(func_static15[Ident(15 + 9)] = 0, "9 bytes to the right of global variable"); Ident(fs1); Ident(fs2); Ident(fs3); // We don't create left redzones, so this is not 100% guaranteed to fail. // But most likely will. EXPECT_DEATH(fs2[Ident(-1)] = 0, "is located.*of global variable"); EXPECT_DEATH(Ident(Ident(ConstGlob)[8]), "is located 1 bytes to the right of .*ConstGlob"); EXPECT_DEATH(Ident(Ident(StaticConstGlob)[5]), "is located 2 bytes to the right of .*StaticConstGlob"); // call stuff from another file. GlobalsTest(0); } TEST(AddressSanitizer, GlobalStringConstTest) { static const char *zoo = "FOOBAR123"; const char *p = Ident(zoo); EXPECT_DEATH(Ident(p[15]), "is ascii string 'FOOBAR123'"); } TEST(AddressSanitizer, FileNameInGlobalReportTest) { static char zoo[10]; const char *p = Ident(zoo); // The file name should be present in the report. EXPECT_DEATH(Ident(p[15]), "zoo.*asan_test."); } int *ReturnsPointerToALocalObject() { int a = 0; return Ident(&a); } #if ASAN_UAR == 1 TEST(AddressSanitizer, LocalReferenceReturnTest) { int *(*f)() = Ident(ReturnsPointerToALocalObject); int *p = f(); // Call 'f' a few more times, 'p' should still be poisoned. for (int i = 0; i < 32; i++) f(); EXPECT_DEATH(*p = 1, "AddressSanitizer: stack-use-after-return"); EXPECT_DEATH(*p = 1, "is located.*in frame .*ReturnsPointerToALocal"); } #endif template<int kSize> NOINLINE static void FuncWithStack() { char x[kSize]; Ident(x)[0] = 0; Ident(x)[kSize-1] = 0; } static void LotsOfStackReuse() { int LargeStack[10000]; Ident(LargeStack)[0] = 0; for (int i = 0; i < 10000; i++) { FuncWithStack<128 * 1>(); FuncWithStack<128 * 2>(); FuncWithStack<128 * 4>(); FuncWithStack<128 * 8>(); FuncWithStack<128 * 16>(); FuncWithStack<128 * 32>(); FuncWithStack<128 * 64>(); FuncWithStack<128 * 128>(); FuncWithStack<128 * 256>(); FuncWithStack<128 * 512>(); Ident(LargeStack)[0] = 0; } } TEST(AddressSanitizer, StressStackReuseTest) { LotsOfStackReuse(); } TEST(AddressSanitizer, ThreadedStressStackReuseTest) { const int kNumThreads = 20; pthread_t t[kNumThreads]; for (int i = 0; i < kNumThreads; i++) { PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))LotsOfStackReuse, 0); } for (int i = 0; i < kNumThreads; i++) { PTHREAD_JOIN(t[i], 0); } } // pthread_exit tries to perform unwinding stuff that leads to dlopen'ing // libgcc_s.so. dlopen in its turn calls malloc to store "libgcc_s.so" string // that confuses LSan on Thumb because it fails to understand that this // allocation happens in dynamic linker and should be ignored.
#if !defined(__thumb__) static void *PthreadExit(void *a) { pthread_exit(0); return 0; } TEST(AddressSanitizer, PthreadExitTest) { pthread_t t; for (int i = 0; i < 1000; i++) { PTHREAD_CREATE(&t, 0, PthreadExit, 0); PTHREAD_JOIN(t, 0); } } #endif // FIXME: Why does clang-cl define __EXCEPTIONS? #if defined(__EXCEPTIONS) && !defined(_WIN32) NOINLINE static void StackReuseAndException() { int large_stack[1000]; Ident(large_stack); ASAN_THROW(1); } // TODO(kcc): support exceptions with use-after-return. TEST(AddressSanitizer, DISABLED_StressStackReuseAndExceptionsTest) { for (int i = 0; i < 10000; i++) { try { StackReuseAndException(); } catch(...) { } } } #endif #if !defined(_WIN32) TEST(AddressSanitizer, MlockTest) { EXPECT_EQ(0, mlockall(MCL_CURRENT)); EXPECT_EQ(0, mlock((void*)0x12345, 0x5678)); EXPECT_EQ(0, munlockall()); EXPECT_EQ(0, munlock((void*)0x987, 0x654)); } #endif struct LargeStruct { int foo[100]; }; // Test for bug http://llvm.org/bugs/show_bug.cgi?id=11763. // Struct copy should not cause asan warning even if lhs == rhs. TEST(AddressSanitizer, LargeStructCopyTest) { LargeStruct a; *Ident(&a) = *Ident(&a); } ATTRIBUTE_NO_SANITIZE_ADDRESS static void NoSanitizeAddress() { char *foo = new char[10]; Ident(foo)[10] = 0; delete [] foo; } TEST(AddressSanitizer, AttributeNoSanitizeAddressTest) { Ident(NoSanitizeAddress)(); } // The new/delete/etc mismatch checks don't work on Android, // as calls to new/delete go through malloc/free. // OS X support is tracked here: // https://github.com/google/sanitizers/issues/131 // Windows support is tracked here: // https://github.com/google/sanitizers/issues/309 #if !defined(__ANDROID__) && \ !defined(__APPLE__) && \ !defined(_WIN32) static string MismatchStr(const string &str) { return string("AddressSanitizer: alloc-dealloc-mismatch \\(") + str; } static string MismatchOrNewDeleteTypeStr(const string &mismatch_str) { return "(" + MismatchStr(mismatch_str) + ")|(AddressSanitizer: new-delete-type-mismatch)"; } TEST(AddressSanitizer, AllocDeallocMismatch) { EXPECT_DEATH(free(Ident(new int)), MismatchStr("operator new vs free")); EXPECT_DEATH(free(Ident(new int[2])), MismatchStr("operator new \\[\\] vs free")); EXPECT_DEATH( delete (Ident(new int[2])), MismatchOrNewDeleteTypeStr("operator new \\[\\] vs operator delete")); EXPECT_DEATH(delete (Ident((int *)malloc(2 * sizeof(int)))), MismatchOrNewDeleteTypeStr("malloc vs operator delete")); EXPECT_DEATH(delete [] (Ident(new int)), MismatchStr("operator new vs operator delete \\[\\]")); EXPECT_DEATH(delete [] (Ident((int*)malloc(2 * sizeof(int)))), MismatchStr("malloc vs operator delete \\[\\]")); } #endif // ------------------ demo tests; run each one-by-one ------------- // e.g. --gtest_filter=*DemoOOBLeftHigh --gtest_also_run_disabled_tests TEST(AddressSanitizer, DISABLED_DemoThreadedTest) { ThreadedTestSpawn(); } void *SimpleBugOnSTack(void *x = 0) { char a[20]; Ident(a)[20] = 0; return 0; } TEST(AddressSanitizer, DISABLED_DemoStackTest) { SimpleBugOnSTack(); } TEST(AddressSanitizer, DISABLED_DemoThreadStackTest) { pthread_t t; PTHREAD_CREATE(&t, 0, SimpleBugOnSTack, 0); PTHREAD_JOIN(t, 0); } TEST(AddressSanitizer, DISABLED_DemoUAFLowIn) { uaf_test<U1>(10, 0); } TEST(AddressSanitizer, DISABLED_DemoUAFLowLeft) { uaf_test<U1>(10, -2); } TEST(AddressSanitizer, DISABLED_DemoUAFLowRight) { uaf_test<U1>(10, 10); } TEST(AddressSanitizer, DISABLED_DemoUAFHigh) { uaf_test<U1>(kLargeMalloc, 0); } TEST(AddressSanitizer, DISABLED_DemoOOM) { size_t size = SANITIZER_WORDSIZE == 64 ?
(size_t)(1ULL << 40) : (0xf0000000); printf("%p\n", malloc(size)); } TEST(AddressSanitizer, DISABLED_DemoDoubleFreeTest) { DoubleFree(); } TEST(AddressSanitizer, DISABLED_DemoNullDerefTest) { int *a = 0; Ident(a)[10] = 0; } TEST(AddressSanitizer, DISABLED_DemoFunctionStaticTest) { static char a[100]; static char b[100]; static char c[100]; Ident(a); Ident(b); Ident(c); Ident(a)[5] = 0; Ident(b)[105] = 0; Ident(a)[5] = 0; } TEST(AddressSanitizer, DISABLED_DemoTooMuchMemoryTest) { const size_t kAllocSize = (1 << 28) - 1024; size_t total_size = 0; while (true) { void *x = malloc(kAllocSize); memset(x, 0, kAllocSize); total_size += kAllocSize; fprintf(stderr, "total: %ldM %p\n", (long)total_size >> 20, x); } } // https://github.com/google/sanitizers/issues/66 TEST(AddressSanitizer, BufferOverflowAfterManyFrees) { for (int i = 0; i < 1000000; i++) { delete [] (Ident(new char [8644])); } char *x = new char[8192]; EXPECT_DEATH(x[Ident(8192)] = 0, "AddressSanitizer: heap-buffer-overflow"); delete [] Ident(x); } // Test that instrumentation of stack allocations takes into account // AllocSize of a type, and not its StoreSize (16 vs 10 bytes for long double). // See http://llvm.org/bugs/show_bug.cgi?id=12047 for more details. TEST(AddressSanitizer, LongDoubleNegativeTest) { long double a, b; static long double c; memcpy(Ident(&a), Ident(&b), sizeof(long double)); memcpy(Ident(&c), Ident(&b), sizeof(long double)); } #if !defined(_WIN32) TEST(AddressSanitizer, pthread_getschedparam) { int policy; struct sched_param param; EXPECT_DEATH( pthread_getschedparam(pthread_self(), &policy, Ident(&param) + 2), "AddressSanitizer: stack-buffer-.*flow"); EXPECT_DEATH( pthread_getschedparam(pthread_self(), Ident(&policy) - 1, &param), "AddressSanitizer: stack-buffer-.*flow"); int res = pthread_getschedparam(pthread_self(), &policy, &param); ASSERT_EQ(0, res); } #endif #if SANITIZER_TEST_HAS_PRINTF_L static int vsnprintf_l_wrapper(char *s, size_t n, locale_t l, const char *format, ...) { va_list va; va_start(va, format); int res = vsnprintf_l(s, n , l, format, va); va_end(va); return res; } TEST(AddressSanitizer, snprintf_l) { char buff[5]; // Check that snprintf_l() works fine with Asan. int res = snprintf_l(buff, 5, SANITIZER_GET_C_LOCALE, "%s", "snprintf_l()"); EXPECT_EQ(12, res); // Check that vsnprintf_l() works fine with Asan. res = vsnprintf_l_wrapper(buff, 5, SANITIZER_GET_C_LOCALE, "%s", "vsnprintf_l()"); EXPECT_EQ(13, res); EXPECT_DEATH( snprintf_l(buff, 10, SANITIZER_GET_C_LOCALE, "%s", "snprintf_l()"), "AddressSanitizer: stack-buffer-overflow"); EXPECT_DEATH(vsnprintf_l_wrapper(buff, 10, SANITIZER_GET_C_LOCALE, "%s", "vsnprintf_l()"), "AddressSanitizer: stack-buffer-overflow"); } #endif Index: vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt (revision 337140) +++ vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt (revision 337141) @@ -1,604 +1,613 @@ # This directory contains a large amount of C code which provides # generic implementations of the core runtime library along with optimized # architecture-specific code in various subdirectories.
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) cmake_minimum_required(VERSION 3.4.3) project(CompilerRTBuiltins C ASM) set(COMPILER_RT_STANDALONE_BUILD TRUE) set(COMPILER_RT_BUILTINS_STANDALONE_BUILD TRUE) list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_SOURCE_DIR}/../../cmake" "${CMAKE_SOURCE_DIR}/../../cmake/Modules") include(base-config-ix) include(CompilerRTUtils) load_llvm_config() construct_compiler_rt_default_triple() if(APPLE) include(CompilerRTDarwinUtils) endif() include(AddCompilerRT) endif() include(builtin-config-ix) # TODO: Need to add a mechanism for logging errors when builtin source files are # added to a sub-directory and not this CMakeLists file. set(GENERIC_SOURCES absvdi2.c absvsi2.c absvti2.c adddf3.c addsf3.c addtf3.c addvdi3.c addvsi3.c addvti3.c apple_versioning.c ashldi3.c ashlti3.c ashrdi3.c ashrti3.c bswapdi2.c bswapsi2.c clzdi2.c clzsi2.c clzti2.c cmpdi2.c cmpti2.c comparedf2.c comparesf2.c ctzdi2.c ctzsi2.c ctzti2.c divdc3.c divdf3.c divdi3.c divmoddi4.c divmodsi4.c divsc3.c divsf3.c divsi3.c divtc3.c divti3.c divtf3.c extendsfdf2.c extendhfsf2.c ffsdi2.c ffssi2.c ffsti2.c fixdfdi.c fixdfsi.c fixdfti.c fixsfdi.c fixsfsi.c fixsfti.c fixunsdfdi.c fixunsdfsi.c fixunsdfti.c fixunssfdi.c fixunssfsi.c fixunssfti.c floatdidf.c floatdisf.c floatsidf.c floatsisf.c floattidf.c floattisf.c floatundidf.c floatundisf.c floatunsidf.c floatunsisf.c floatuntidf.c floatuntisf.c int_util.c lshrdi3.c lshrti3.c moddi3.c modsi3.c modti3.c muldc3.c muldf3.c muldi3.c mulodi4.c mulosi4.c muloti4.c mulsc3.c mulsf3.c multi3.c multf3.c mulvdi3.c mulvsi3.c mulvti3.c negdf2.c negdi2.c negsf2.c negti2.c negvdi2.c negvsi2.c negvti2.c os_version_check.c paritydi2.c paritysi2.c parityti2.c popcountdi2.c popcountsi2.c popcountti2.c powidf2.c powisf2.c powitf2.c subdf3.c subsf3.c subvdi3.c subvsi3.c subvti3.c subtf3.c trampoline_setup.c truncdfhf2.c truncdfsf2.c truncsfhf2.c ucmpdi2.c ucmpti2.c udivdi3.c udivmoddi4.c udivmodsi4.c udivmodti4.c udivsi3.c udivti3.c umoddi3.c umodsi3.c umodti3.c) set(GENERIC_TF_SOURCES comparetf2.c extenddftf2.c extendsftf2.c fixtfdi.c fixtfsi.c fixtfti.c fixunstfdi.c fixunstfsi.c fixunstfti.c floatditf.c floatsitf.c floattitf.c floatunditf.c floatunsitf.c floatuntitf.c multc3.c trunctfdf2.c trunctfsf2.c) option(COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN "Skip the atomic builtin (these should normally be provided by a shared library)" On) if(NOT FUCHSIA AND NOT COMPILER_RT_BAREMETAL_BUILD) set(GENERIC_SOURCES ${GENERIC_SOURCES} emutls.c enable_execute_stack.c eprintf.c) endif() if(COMPILER_RT_HAS_ATOMIC_KEYWORD AND NOT COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN) set(GENERIC_SOURCES ${GENERIC_SOURCES} atomic.c) endif() if(APPLE) set(GENERIC_SOURCES ${GENERIC_SOURCES} atomic_flag_clear.c atomic_flag_clear_explicit.c atomic_flag_test_and_set.c atomic_flag_test_and_set_explicit.c atomic_signal_fence.c atomic_thread_fence.c) endif() if (HAVE_UNWIND_H) set(GENERIC_SOURCES ${GENERIC_SOURCES} gcc_personality_v0.c) endif () if (NOT FUCHSIA) set(GENERIC_SOURCES ${GENERIC_SOURCES} clear_cache.c) endif() # These sources work on all x86 variants, but only x86 variants. 
set(x86_ARCH_SOURCES cpu_model.c divxc3.c fixxfdi.c fixxfti.c fixunsxfdi.c fixunsxfsi.c fixunsxfti.c floatdixf.c floattixf.c floatundixf.c floatuntixf.c mulxc3.c powixf2.c ) if (NOT MSVC) set(x86_64_SOURCES x86_64/floatdidf.c x86_64/floatdisf.c x86_64/floatdixf.c x86_64/floatundidf.S x86_64/floatundisf.S x86_64/floatundixf.S) filter_builtin_sources(x86_64_SOURCES EXCLUDE x86_64_SOURCES "${x86_64_SOURCES};${GENERIC_SOURCES}") set(x86_64h_SOURCES ${x86_64_SOURCES}) if (WIN32) set(x86_64_SOURCES ${x86_64_SOURCES} x86_64/chkstk.S x86_64/chkstk2.S) endif() set(i386_SOURCES i386/ashldi3.S i386/ashrdi3.S i386/divdi3.S i386/floatdidf.S i386/floatdisf.S i386/floatdixf.S i386/floatundidf.S i386/floatundisf.S i386/floatundixf.S i386/lshrdi3.S i386/moddi3.S i386/muldi3.S i386/udivdi3.S i386/umoddi3.S) filter_builtin_sources(i386_SOURCES EXCLUDE i386_SOURCES "${i386_SOURCES};${GENERIC_SOURCES}") if (WIN32) set(i386_SOURCES ${i386_SOURCES} i386/chkstk.S i386/chkstk2.S) endif() else () # MSVC # Use C versions of functions when building on MSVC # MSVC's assembler takes Intel syntax, not AT&T syntax. # Also use only MSVC compilable builtin implementations. set(x86_64_SOURCES x86_64/floatdidf.c x86_64/floatdisf.c x86_64/floatdixf.c ${GENERIC_SOURCES}) set(x86_64h_SOURCES ${x86_64_SOURCES}) set(i386_SOURCES ${GENERIC_SOURCES}) endif () # if (NOT MSVC) set(x86_64h_SOURCES ${x86_64h_SOURCES} ${x86_ARCH_SOURCES}) set(x86_64_SOURCES ${x86_64_SOURCES} ${x86_ARCH_SOURCES}) set(i386_SOURCES ${i386_SOURCES} ${x86_ARCH_SOURCES}) set(i686_SOURCES ${i686_SOURCES} ${x86_ARCH_SOURCES}) set(arm_SOURCES arm/bswapdi2.S arm/bswapsi2.S arm/clzdi2.S arm/clzsi2.S arm/comparesf2.S arm/divmodsi4.S arm/divsi3.S arm/modsi3.S arm/sync_fetch_and_add_4.S arm/sync_fetch_and_add_8.S arm/sync_fetch_and_and_4.S arm/sync_fetch_and_and_8.S arm/sync_fetch_and_max_4.S arm/sync_fetch_and_max_8.S arm/sync_fetch_and_min_4.S arm/sync_fetch_and_min_8.S arm/sync_fetch_and_nand_4.S arm/sync_fetch_and_nand_8.S arm/sync_fetch_and_or_4.S arm/sync_fetch_and_or_8.S arm/sync_fetch_and_sub_4.S arm/sync_fetch_and_sub_8.S arm/sync_fetch_and_umax_4.S arm/sync_fetch_and_umax_8.S arm/sync_fetch_and_umin_4.S arm/sync_fetch_and_umin_8.S arm/sync_fetch_and_xor_4.S arm/sync_fetch_and_xor_8.S arm/udivmodsi4.S arm/udivsi3.S arm/umodsi3.S) filter_builtin_sources(arm_SOURCES EXCLUDE arm_SOURCES "${arm_SOURCES};${GENERIC_SOURCES}") set(thumb1_SOURCES arm/divsi3.S arm/udivsi3.S arm/comparesf2.S arm/addsf3.S ${GENERIC_SOURCES}) set(arm_EABI_SOURCES arm/aeabi_cdcmp.S arm/aeabi_cdcmpeq_check_nan.c arm/aeabi_cfcmp.S arm/aeabi_cfcmpeq_check_nan.c arm/aeabi_dcmp.S arm/aeabi_div0.c arm/aeabi_drsub.c arm/aeabi_fcmp.S arm/aeabi_frsub.c arm/aeabi_idivmod.S arm/aeabi_ldivmod.S arm/aeabi_memcmp.S arm/aeabi_memcpy.S arm/aeabi_memmove.S arm/aeabi_memset.S arm/aeabi_uidivmod.S arm/aeabi_uldivmod.S) set(arm_Thumb1_JT_SOURCES arm/switch16.S arm/switch32.S arm/switch8.S arm/switchu8.S) set(arm_Thumb1_SjLj_EH_SOURCES arm/restore_vfp_d8_d15_regs.S arm/save_vfp_d8_d15_regs.S) set(arm_Thumb1_VFPv2_SOURCES arm/adddf3vfp.S arm/addsf3vfp.S arm/divdf3vfp.S arm/divsf3vfp.S arm/eqdf2vfp.S arm/eqsf2vfp.S arm/extendsfdf2vfp.S arm/fixdfsivfp.S arm/fixsfsivfp.S arm/fixunsdfsivfp.S arm/fixunssfsivfp.S arm/floatsidfvfp.S arm/floatsisfvfp.S arm/floatunssidfvfp.S arm/floatunssisfvfp.S arm/gedf2vfp.S arm/gesf2vfp.S arm/gtdf2vfp.S arm/gtsf2vfp.S arm/ledf2vfp.S arm/lesf2vfp.S arm/ltdf2vfp.S arm/ltsf2vfp.S arm/muldf3vfp.S arm/mulsf3vfp.S arm/nedf2vfp.S arm/negdf2vfp.S arm/negsf2vfp.S arm/nesf2vfp.S 
arm/subdf3vfp.S arm/subsf3vfp.S arm/truncdfsf2vfp.S arm/unorddf2vfp.S arm/unordsf2vfp.S) set(arm_Thumb1_icache_SOURCES arm/sync_synchronize.S) set(arm_Thumb1_SOURCES ${arm_Thumb1_JT_SOURCES} ${arm_Thumb1_SjLj_EH_SOURCES} ${arm_Thumb1_VFPv2_SOURCES} ${arm_Thumb1_icache_SOURCES}) if(MINGW) set(arm_SOURCES arm/aeabi_idivmod.S arm/aeabi_ldivmod.S arm/aeabi_uidivmod.S arm/aeabi_uldivmod.S arm/chkstk.S divmoddi4.c divmodsi4.c divdi3.c divsi3.c fixdfdi.c fixsfdi.c fixunsdfdi.c fixunssfdi.c floatdidf.c floatdisf.c floatundidf.c floatundisf.c mingw_fixfloat.c moddi3.c udivmoddi4.c udivmodsi4.c udivsi3.c umoddi3.c emutls.c) filter_builtin_sources(arm_SOURCES EXCLUDE arm_SOURCES "${arm_SOURCES};${GENERIC_SOURCES}") elseif(NOT WIN32) # TODO the EABI sources should only be added to EABI targets set(arm_SOURCES ${arm_SOURCES} ${arm_EABI_SOURCES} ${arm_Thumb1_SOURCES}) set(thumb1_SOURCES ${thumb1_SOURCES} ${arm_EABI_SOURCES}) endif() set(aarch64_SOURCES ${GENERIC_TF_SOURCES} ${GENERIC_SOURCES}) if (MINGW) set(aarch64_SOURCES ${aarch64_SOURCES} aarch64/chkstk.S) endif() set(armhf_SOURCES ${arm_SOURCES}) set(armv7_SOURCES ${arm_SOURCES}) set(armv7s_SOURCES ${arm_SOURCES}) set(armv7k_SOURCES ${arm_SOURCES}) set(arm64_SOURCES ${aarch64_SOURCES}) # macho_embedded archs set(armv6m_SOURCES ${thumb1_SOURCES}) set(armv7m_SOURCES ${arm_SOURCES}) set(armv7em_SOURCES ${arm_SOURCES}) # hexagon arch set(hexagon_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) set(hexagon_SOURCES hexagon/common_entry_exit_abi1.S hexagon/common_entry_exit_abi2.S hexagon/common_entry_exit_legacy.S hexagon/dfaddsub.S hexagon/dfdiv.S hexagon/dffma.S hexagon/dfminmax.S hexagon/dfmul.S hexagon/dfsqrt.S hexagon/divdi3.S hexagon/divsi3.S hexagon/fabs_opt.S hexagon/fastmath2_dlib_asm.S hexagon/fastmath2_ldlib_asm.S hexagon/fastmath_dlib_asm.S hexagon/fma_opt.S hexagon/fmax_opt.S hexagon/fmin_opt.S hexagon/memcpy_forward_vp4cp4n2.S hexagon/memcpy_likely_aligned.S hexagon/moddi3.S hexagon/modsi3.S hexagon/sfdiv_opt.S hexagon/sfsqrt_opt.S hexagon/udivdi3.S hexagon/udivmoddi4.S hexagon/udivmodsi4.S hexagon/udivsi3.S hexagon/umoddi3.S hexagon/umodsi3.S) set(mips_SOURCES ${GENERIC_SOURCES}) set(mipsel_SOURCES ${mips_SOURCES}) set(mips64_SOURCES ${GENERIC_TF_SOURCES} ${mips_SOURCES}) set(mips64el_SOURCES ${GENERIC_TF_SOURCES} ${mips_SOURCES}) set(powerpc64_SOURCES ppc/divtc3.c ppc/fixtfdi.c ppc/fixunstfdi.c ppc/floatditf.c ppc/floatunditf.c ppc/gcc_qadd.c ppc/gcc_qdiv.c ppc/gcc_qmul.c ppc/gcc_qsub.c ppc/multc3.c ${GENERIC_SOURCES}) set(powerpc64le_SOURCES ${powerpc64_SOURCES}) set(riscv_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) set(riscv32_SOURCES riscv/mulsi3.S ${riscv_SOURCES}) set(riscv64_SOURCES ${riscv_SOURCES}) set(wasm32_SOURCES ${GENERIC_TF_SOURCES} ${GENERIC_SOURCES}) set(wasm64_SOURCES ${GENERIC_TF_SOURCES} ${GENERIC_SOURCES}) add_custom_target(builtins) set_target_properties(builtins PROPERTIES FOLDER "Compiler-RT Misc") if (APPLE) add_subdirectory(Darwin-excludes) add_subdirectory(macho_embedded) darwin_add_builtin_libraries(${BUILTIN_SUPPORTED_OS}) else () set(BUILTIN_CFLAGS "") append_list_if(COMPILER_RT_HAS_STD_C11_FLAG -std=c11 BUILTIN_CFLAGS) # These flags would normally be added to CMAKE_C_FLAGS by the llvm # cmake step. Add them manually if this is a standalone build. 
if(COMPILER_RT_STANDALONE_BUILD) append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC BUILTIN_CFLAGS) append_list_if(COMPILER_RT_HAS_FNO_BUILTIN_FLAG -fno-builtin BUILTIN_CFLAGS) append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG -fvisibility=hidden BUILTIN_CFLAGS) if(NOT COMPILER_RT_DEBUG) append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fomit-frame-pointer BUILTIN_CFLAGS) endif() endif() set(BUILTIN_DEFS "") append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG VISIBILITY_HIDDEN BUILTIN_DEFS) foreach (arch ${BUILTIN_SUPPORTED_ARCH}) if (CAN_TARGET_${arch}) # NOTE: some architectures (e.g. i386) have multiple names. Ensure that # we catch them all. set(_arch ${arch}) if("${arch}" STREQUAL "armv6m") set(_arch "arm|armv6m") elseif("${arch}" MATCHES "^(armhf|armv7|armv7s|armv7k|armv7m|armv7em)$") set(_arch "arm") endif() + # For ARM archs, exclude any VFP builtins if VFP is not supported + if (${arch} MATCHES "^(arm|armhf|armv7|armv7s|armv7k|armv7m|armv7em)$") + string(REPLACE ";" " " _TARGET_${arch}_CFLAGS "${TARGET_${arch}_CFLAGS}") + check_compile_definition(__VFP_FP__ "${CMAKE_C_FLAGS} ${_TARGET_${arch}_CFLAGS}" COMPILER_RT_HAS_${arch}_VFP) + if(NOT COMPILER_RT_HAS_${arch}_VFP) + list(REMOVE_ITEM ${arch}_SOURCES ${arm_Thumb1_VFPv2_SOURCES} ${arm_Thumb1_SjLj_EH_SOURCES}) + endif() + endif() + # Filter out generic versions of routines that are re-implemented in # architecture specific manner. This prevents multiple definitions of the # same symbols, making the symbol selection non-deterministic. foreach (_file ${${arch}_SOURCES}) if (${_file} MATCHES ${_arch}/*) get_filename_component(_name ${_file} NAME) string(REPLACE ".S" ".c" _cname "${_name}") list(REMOVE_ITEM ${arch}_SOURCES ${_cname}) endif () endforeach () # Needed for clear_cache on debug mode, due to r7's usage in inline asm. # Release mode already sets it via -O2/3, Debug mode doesn't. if (${arch} STREQUAL "armhf") list(APPEND BUILTIN_CFLAGS -fomit-frame-pointer -DCOMPILER_RT_ARMHF_TARGET) endif() # For RISCV32, we must force enable int128 for compiling long # double routines. if("${arch}" STREQUAL "riscv32") list(APPEND BUILTIN_CFLAGS -fforce-enable-int128) endif() add_compiler_rt_runtime(clang_rt.builtins STATIC ARCHS ${arch} SOURCES ${${arch}_SOURCES} DEFS ${BUILTIN_DEFS} CFLAGS ${BUILTIN_CFLAGS} PARENT_TARGET builtins) endif () endforeach () endif () add_dependencies(compiler-rt builtins) Index: vendor/compiler-rt/dist/lib/profile/InstrProfilingUtil.c =================================================================== --- vendor/compiler-rt/dist/lib/profile/InstrProfilingUtil.c (revision 337140) +++ vendor/compiler-rt/dist/lib/profile/InstrProfilingUtil.c (revision 337141) @@ -1,281 +1,290 @@ /*===- InstrProfilingUtil.c - Support library for PGO instrumentation -----===*\ |* |* The LLVM Compiler Infrastructure |* |* This file is distributed under the University of Illinois Open Source |* License. See LICENSE.TXT for details. 
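The ARM VFP hunk added above drives source selection from check_compile_definition(__VFP_FP__ ...). Presumably (an assumption about that helper's mechanism, not a quote of it) this boils down to test-compiling a translation unit like the following with the target's CFLAGS and treating a failed compile as "no VFP", so the VFPv2 and SjLj assembly files get dropped:

// Probe: compiles only when the compiler predefines __VFP_FP__ for the
// target, i.e. when VFP floating point is available.
#ifndef __VFP_FP__
#error "__VFP_FP__ not defined; VFP builtins must be excluded"
#endif
int main() { return 0; }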
|* \*===----------------------------------------------------------------------===*/ #ifdef _WIN32 #include #include #include #include "WindowsMMap.h" #else #include #include #include #include #include #endif #ifdef COMPILER_RT_HAS_UNAME #include #endif #include #include #if defined(__linux__) #include #include #endif #include "InstrProfiling.h" #include "InstrProfilingUtil.h" +COMPILER_RT_WEAK unsigned lprofDirMode = 0755; + COMPILER_RT_VISIBILITY void __llvm_profile_recursive_mkdir(char *path) { int i; for (i = 1; path[i] != '\0'; ++i) { char save = path[i]; if (!IS_DIR_SEPARATOR(path[i])) continue; path[i] = '\0'; #ifdef _WIN32 _mkdir(path); #else - mkdir(path, 0755); /* Some of these will fail, ignore it. */ + /* Some of these will fail, ignore it. */ + mkdir(path, __llvm_profile_get_dir_mode()); #endif path[i] = save; } } + +COMPILER_RT_VISIBILITY +void __llvm_profile_set_dir_mode(unsigned Mode) { lprofDirMode = Mode; } + +COMPILER_RT_VISIBILITY +unsigned __llvm_profile_get_dir_mode(void) { return lprofDirMode; } #if COMPILER_RT_HAS_ATOMICS != 1 COMPILER_RT_VISIBILITY uint32_t lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV) { void *R = *Ptr; if (R == OldV) { *Ptr = NewV; return 1; } return 0; } COMPILER_RT_VISIBILITY void *lprofPtrFetchAdd(void **Mem, long ByteIncr) { void *Old = *Mem; *((char **)Mem) += ByteIncr; return Old; } #endif #ifdef _MSC_VER COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) { WCHAR Buffer[COMPILER_RT_MAX_HOSTLEN]; DWORD BufferSize = sizeof(Buffer); BOOL Result = GetComputerNameExW(ComputerNameDnsFullyQualified, Buffer, &BufferSize); if (!Result) return -1; if (WideCharToMultiByte(CP_UTF8, 0, Buffer, -1, Name, Len, NULL, NULL) == 0) return -1; return 0; } #elif defined(COMPILER_RT_HAS_UNAME) COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) { struct utsname N; int R = uname(&N); if (R >= 0) { strncpy(Name, N.nodename, Len); return 0; } return R; } #endif COMPILER_RT_VISIBILITY int lprofLockFd(int fd) { #ifdef COMPILER_RT_HAS_FCNTL_LCK struct flock s_flock; s_flock.l_whence = SEEK_SET; s_flock.l_start = 0; s_flock.l_len = 0; /* Until EOF. */ s_flock.l_pid = getpid(); s_flock.l_type = F_WRLCK; while (fcntl(fd, F_SETLKW, &s_flock) == -1) { if (errno != EINTR) { if (errno == ENOLCK) { return -1; } break; } } return 0; #else flock(fd, LOCK_EX); return 0; #endif } COMPILER_RT_VISIBILITY int lprofUnlockFd(int fd) { #ifdef COMPILER_RT_HAS_FCNTL_LCK struct flock s_flock; s_flock.l_whence = SEEK_SET; s_flock.l_start = 0; s_flock.l_len = 0; /* Until EOF. */ s_flock.l_pid = getpid(); s_flock.l_type = F_UNLCK; while (fcntl(fd, F_SETLKW, &s_flock) == -1) { if (errno != EINTR) { if (errno == ENOLCK) { return -1; } break; } } return 0; #else flock(fd, LOCK_UN); return 0; #endif } COMPILER_RT_VISIBILITY FILE *lprofOpenFileEx(const char *ProfileName) { FILE *f; int fd; #ifdef COMPILER_RT_HAS_FCNTL_LCK fd = open(ProfileName, O_RDWR | O_CREAT, 0666); if (fd < 0) return NULL; if (lprofLockFd(fd) != 0) PROF_WARN("Data may be corrupted during profile merging : %s\n", "Fail to obtain file lock due to system limit."); f = fdopen(fd, "r+b"); #elif defined(_WIN32) // FIXME: Use the wide variants to handle Unicode filenames. 
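The hooks introduced above let an instrumented program choose the permissions that __llvm_profile_recursive_mkdir passes to mkdir() in place of the weak 0755 default in lprofDirMode. A minimal usage sketch (hypothetical caller code; only the two declarations come from this patch):

// Tighten profile-directory permissions before any profile is written.
extern "C" void __llvm_profile_set_dir_mode(unsigned Mode);
extern "C" unsigned __llvm_profile_get_dir_mode(void);

void restrict_profile_dirs(void) {
  __llvm_profile_set_dir_mode(0700);  // owner-only instead of 0755
  // Every path component created by __llvm_profile_recursive_mkdir()
  // now reads this mode back via __llvm_profile_get_dir_mode().
}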
HANDLE h = CreateFileA(ProfileName, GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0); if (h == INVALID_HANDLE_VALUE) return NULL; fd = _open_osfhandle((intptr_t)h, 0); if (fd == -1) { CloseHandle(h); return NULL; } f = _fdopen(fd, "r+b"); if (f == 0) { CloseHandle(h); return NULL; } #else /* Worst case no locking applied. */ PROF_WARN("Concurrent file access is not supported : %s\n", "lack file locking"); fd = open(ProfileName, O_RDWR | O_CREAT, 0666); if (fd < 0) return NULL; f = fdopen(fd, "r+b"); #endif return f; } COMPILER_RT_VISIBILITY const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen) { const char *Prefix = getenv("GCOV_PREFIX"); const char *PrefixStripStr = getenv("GCOV_PREFIX_STRIP"); *PrefixLen = 0; *PrefixStrip = 0; if (Prefix == NULL || Prefix[0] == '\0') return NULL; if (PrefixStripStr) { *PrefixStrip = atoi(PrefixStripStr); /* Negative GCOV_PREFIX_STRIP values are ignored */ if (*PrefixStrip < 0) *PrefixStrip = 0; } else { *PrefixStrip = 0; } *PrefixLen = strlen(Prefix); return Prefix; } COMPILER_RT_VISIBILITY void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix, size_t PrefixLen, int PrefixStrip) { const char *Ptr; int Level; const char *StrippedPathStr = PathStr; for (Level = 0, Ptr = PathStr + 1; Level < PrefixStrip; ++Ptr) { if (*Ptr == '\0') break; if (!IS_DIR_SEPARATOR(*Ptr)) continue; StrippedPathStr = Ptr; ++Level; } memcpy(Dest, Prefix, PrefixLen); if (!IS_DIR_SEPARATOR(Prefix[PrefixLen - 1])) Dest[PrefixLen++] = DIR_SEPARATOR; memcpy(Dest + PrefixLen, StrippedPathStr, strlen(StrippedPathStr) + 1); } COMPILER_RT_VISIBILITY const char * lprofFindFirstDirSeparator(const char *Path) { const char *Sep = strchr(Path, DIR_SEPARATOR); #if defined(DIR_SEPARATOR_2) const char *Sep2 = strchr(Path, DIR_SEPARATOR_2); if (Sep2 && (!Sep || Sep2 < Sep)) Sep = Sep2; #endif return Sep; } COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) { const char *Sep = strrchr(Path, DIR_SEPARATOR); #if defined(DIR_SEPARATOR_2) const char *Sep2 = strrchr(Path, DIR_SEPARATOR_2); if (Sep2 && (!Sep || Sep2 > Sep)) Sep = Sep2; #endif return Sep; } COMPILER_RT_VISIBILITY int lprofSuspendSigKill() { #if defined(__linux__) int PDeachSig = 0; /* Temporarily suspend getting SIGKILL upon exit of the parent process. */ if (prctl(PR_GET_PDEATHSIG, &PDeachSig) == 0 && PDeachSig == SIGKILL) prctl(PR_SET_PDEATHSIG, 0); return (PDeachSig == SIGKILL); #else return 0; #endif } COMPILER_RT_VISIBILITY void lprofRestoreSigKill() { #if defined(__linux__) prctl(PR_SET_PDEATHSIG, SIGKILL); #endif } Index: vendor/compiler-rt/dist/lib/profile/InstrProfilingUtil.h =================================================================== --- vendor/compiler-rt/dist/lib/profile/InstrProfilingUtil.h (revision 337140) +++ vendor/compiler-rt/dist/lib/profile/InstrProfilingUtil.h (revision 337141) @@ -1,65 +1,71 @@ /*===- InstrProfilingUtil.h - Support library for PGO instrumentation -----===*\ |* |* The LLVM Compiler Infrastructure |* |* This file is distributed under the University of Illinois Open Source |* License. See LICENSE.TXT for details. |* \*===----------------------------------------------------------------------===*/ #ifndef PROFILE_INSTRPROFILINGUTIL_H #define PROFILE_INSTRPROFILINGUTIL_H #include #include /*! \brief Create a directory tree. */ void __llvm_profile_recursive_mkdir(char *Pathname); +/*! Set the mode used when creating profile directories. */ +void __llvm_profile_set_dir_mode(unsigned Mode); + +/*! 
Return the directory creation mode. */ +unsigned __llvm_profile_get_dir_mode(void); + int lprofLockFd(int fd); int lprofUnlockFd(int fd); /*! Open file \c Filename for read+write with write * lock for exclusive access. The caller will block * if the lock is already held by another process. */ FILE *lprofOpenFileEx(const char *Filename); /* PS4 doesn't have getenv. Define a shim. */ #if __ORBIS__ static inline char *getenv(const char *name) { return NULL; } #endif /* #if __ORBIS__ */ /* GCOV_PREFIX and GCOV_PREFIX_STRIP support */ /* Return the path prefix specified by GCOV_PREFIX environment variable. * If GCOV_PREFIX_STRIP is also specified, the strip level (integer value) * is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen. */ const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen); /* Apply the path prefix specified in \c Prefix to path string in \c PathStr, * and store the result to buffer pointed to by \c Buffer. If \c PrefixStrip * is not zero, path prefixes are stripped from \c PathStr (the level of * stripping is specified by \c PrefixStrip) before \c Prefix is added. */ void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix, size_t PrefixLen, int PrefixStrip); /* Returns a pointer to the first occurrence of \c DIR_SEPARATOR char in * the string \c Path, or NULL if the char is not found. */ const char *lprofFindFirstDirSeparator(const char *Path); /* Returns a pointer to the last occurrence of \c DIR_SEPARATOR char in * the string \c Path, or NULL if the char is not found. */ const char *lprofFindLastDirSeparator(const char *Path); int lprofGetHostName(char *Name, int Len); unsigned lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV); void *lprofPtrFetchAdd(void **Mem, long ByteIncr); /* Temporarily suspend SIGKILL. Return value of 1 means a restore is needed. * Other return values mean no restore is needed. */ int lprofSuspendSigKill(); /* Restore previously suspended SIGKILL. */ void lprofRestoreSigKill(); #endif /* PROFILE_INSTRPROFILINGUTIL_H */ Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mutex.h =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mutex.h (revision 337140) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mutex.h (revision 337141) @@ -1,229 +1,224 @@ //===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer/AddressSanitizer runtime. 
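A worked example of the GCOV_PREFIX / GCOV_PREFIX_STRIP rewriting implemented by lprofGetPathPrefix and lprofApplyPathPrefix above, with illustrative values:

// GCOV_PREFIX=/data/profiles, GCOV_PREFIX_STRIP=2:
//   /home/user/proj/default.profraw
//   -> strip two components -> /proj/default.profraw
//   -> apply prefix         -> /data/profiles/proj/default.profraw
// A negative or unset GCOV_PREFIX_STRIP strips nothing.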
// //===----------------------------------------------------------------------===// #ifndef SANITIZER_MUTEX_H #define SANITIZER_MUTEX_H #include "sanitizer_atomic.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" namespace __sanitizer { class StaticSpinMutex { public: void Init() { atomic_store(&state_, 0, memory_order_relaxed); } void Lock() { if (TryLock()) return; LockSlow(); } bool TryLock() { return atomic_exchange(&state_, 1, memory_order_acquire) == 0; } void Unlock() { atomic_store(&state_, 0, memory_order_release); } void CheckLocked() { CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1); } private: atomic_uint8_t state_; void NOINLINE LockSlow() { for (int i = 0;; i++) { if (i < 10) proc_yield(10); else internal_sched_yield(); if (atomic_load(&state_, memory_order_relaxed) == 0 && atomic_exchange(&state_, 1, memory_order_acquire) == 0) return; } } }; class SpinMutex : public StaticSpinMutex { public: SpinMutex() { Init(); } private: SpinMutex(const SpinMutex&); void operator=(const SpinMutex&); }; class BlockingMutex { public: -#if SANITIZER_WINDOWS - // Windows does not currently support LinkerInitialized - explicit BlockingMutex(LinkerInitialized); -#else explicit constexpr BlockingMutex(LinkerInitialized) - : opaque_storage_ {0, }, owner_(0) {} -#endif + : opaque_storage_ {0, }, owner_ {0} {} BlockingMutex(); void Lock(); void Unlock(); // This function does not guarantee an explicit check that the calling thread // is the thread which owns the mutex. This behavior, while more strictly // correct, causes problems in cases like StopTheWorld, where a parent thread // owns the mutex but a child checks that it is locked. Rather than // maintaining complex state to work around those situations, the check only // checks that the mutex is owned, and assumes callers to be generally // well-behaved. void CheckLocked(); private: // Solaris mutex_t has a member that requires 64-bit alignment. ALIGNED(8) uptr opaque_storage_[10]; uptr owner_; // for debugging }; // Reader-writer spin mutex. 
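StaticSpinMutex above is the minimal exchange-based lock: TryLock is an acquire exchange, Unlock a release store, and LockSlow spins with yields until the exchange succeeds. A self-contained translation to std::atomic (an illustrative sketch, not the runtime's own code):

#include <atomic>

class SpinLockSketch {
  std::atomic<unsigned char> state_{0};

 public:
  bool try_lock() {
    // True iff we flipped state_ from 0 to 1.
    return state_.exchange(1, std::memory_order_acquire) == 0;
  }
  void lock() {
    while (!try_lock())
      while (state_.load(std::memory_order_relaxed) != 0) {
        // Spin; the real slow path calls proc_yield(10) a few times and
        // then internal_sched_yield().
      }
  }
  void unlock() { state_.store(0, std::memory_order_release); }
};

The reader-writer variant follows.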
class RWMutex { public: RWMutex() { atomic_store(&state_, kUnlocked, memory_order_relaxed); } ~RWMutex() { CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked); } void Lock() { u32 cmp = kUnlocked; if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, memory_order_acquire)) return; LockSlow(); } void Unlock() { u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); DCHECK_NE(prev & kWriteLock, 0); (void)prev; } void ReadLock() { u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); if ((prev & kWriteLock) == 0) return; ReadLockSlow(); } void ReadUnlock() { u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release); DCHECK_EQ(prev & kWriteLock, 0); DCHECK_GT(prev & ~kWriteLock, 0); (void)prev; } void CheckLocked() { CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked); } private: atomic_uint32_t state_; enum { kUnlocked = 0, kWriteLock = 1, kReadLock = 2 }; void NOINLINE LockSlow() { for (int i = 0;; i++) { if (i < 10) proc_yield(10); else internal_sched_yield(); u32 cmp = atomic_load(&state_, memory_order_relaxed); if (cmp == kUnlocked && atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, memory_order_acquire)) return; } } void NOINLINE ReadLockSlow() { for (int i = 0;; i++) { if (i < 10) proc_yield(10); else internal_sched_yield(); u32 prev = atomic_load(&state_, memory_order_acquire); if ((prev & kWriteLock) == 0) return; } } RWMutex(const RWMutex&); void operator = (const RWMutex&); }; template class GenericScopedLock { public: explicit GenericScopedLock(MutexType *mu) : mu_(mu) { mu_->Lock(); } ~GenericScopedLock() { mu_->Unlock(); } private: MutexType *mu_; GenericScopedLock(const GenericScopedLock&); void operator=(const GenericScopedLock&); }; template class GenericScopedReadLock { public: explicit GenericScopedReadLock(MutexType *mu) : mu_(mu) { mu_->ReadLock(); } ~GenericScopedReadLock() { mu_->ReadUnlock(); } private: MutexType *mu_; GenericScopedReadLock(const GenericScopedReadLock&); void operator=(const GenericScopedReadLock&); }; typedef GenericScopedLock SpinMutexLock; typedef GenericScopedLock BlockingMutexLock; typedef GenericScopedLock RWMutexLock; typedef GenericScopedReadLock RWMutexReadLock; } // namespace __sanitizer #endif // SANITIZER_MUTEX_H Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_win.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_win.cc (revision 337140) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_win.cc (revision 337141) @@ -1,1077 +1,1056 @@ //===-- sanitizer_win.cc --------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries and implements windows-specific functions from // sanitizer_libc.h. 
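// A note on the RWMutex state encoding above: the whole lock is one u32
// in which bit 0 is the writer bit (kWriteLock = 1) and each reader adds
// kReadLock = 2. So state == 6 means three readers and no writer, while
// any odd value means a writer holds the lock. That is why ReadLock()
// can optimistically fetch_add(2) and only take ReadLockSlow() when bit
// 0 was set in the previous value, and why ReadUnlock() can DCHECK that
// the writer bit is clear.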
//===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_WINDOWS #define WIN32_LEAN_AND_MEAN #define NOGDI #include #include #include #include #include "sanitizer_common.h" #include "sanitizer_file.h" #include "sanitizer_libc.h" #include "sanitizer_mutex.h" #include "sanitizer_placement_new.h" #include "sanitizer_win_defs.h" // A macro to tell the compiler that this part of the code cannot be reached, // if the compiler supports this feature. Since we're using this in // code that is called when terminating the process, the expansion of the // macro should not terminate the process to avoid infinite recursion. #if defined(__clang__) # define BUILTIN_UNREACHABLE() __builtin_unreachable() #elif defined(__GNUC__) && \ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)) # define BUILTIN_UNREACHABLE() __builtin_unreachable() #elif defined(_MSC_VER) # define BUILTIN_UNREACHABLE() __assume(0) #else # define BUILTIN_UNREACHABLE() #endif namespace __sanitizer { #include "sanitizer_syscall_generic.inc" // --------------------- sanitizer_common.h uptr GetPageSize() { SYSTEM_INFO si; GetSystemInfo(&si); return si.dwPageSize; } uptr GetMmapGranularity() { SYSTEM_INFO si; GetSystemInfo(&si); return si.dwAllocationGranularity; } uptr GetMaxUserVirtualAddress() { SYSTEM_INFO si; GetSystemInfo(&si); return (uptr)si.lpMaximumApplicationAddress; } uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); } bool FileExists(const char *filename) { return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES; } uptr internal_getpid() { return GetProcessId(GetCurrentProcess()); } // In contrast to POSIX, on Windows GetCurrentThreadId() // returns a system-unique identifier. tid_t GetTid() { return GetCurrentThreadId(); } uptr GetThreadSelf() { return GetTid(); } #if !SANITIZER_GO void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom) { CHECK(stack_top); CHECK(stack_bottom); MEMORY_BASIC_INFORMATION mbi; CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0); // FIXME: is it possible for the stack to not be a single allocation? // Are these values what ASan expects to get (reserved, not committed; // including stack guard page) ? *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize; *stack_bottom = (uptr)mbi.AllocationBase; } #endif // #if !SANITIZER_GO void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) { void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (rv == 0) ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError(), raw_report); return rv; } void UnmapOrDie(void *addr, uptr size) { if (!size || !addr) return; MEMORY_BASIC_INFORMATION mbi; CHECK(VirtualQuery(addr, &mbi, sizeof(mbi))); // MEM_RELEASE can only be used to unmap whole regions previously mapped with // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that // fails try MEM_DECOMMIT. 
if (VirtualFree(addr, 0, MEM_RELEASE) == 0) { if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) { Report("ERROR: %s failed to " "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n", SanitizerToolName, size, size, addr, GetLastError()); CHECK("unable to unmap" && 0); } } } static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type, const char *mmap_type) { error_t last_error = GetLastError(); if (last_error == ERROR_NOT_ENOUGH_MEMORY) return nullptr; ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error); } void *MmapOrDieOnFatalError(uptr size, const char *mem_type) { void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (rv == 0) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate"); return rv; } // We want to map a chunk of address space aligned to 'alignment'. void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, const char *mem_type) { CHECK(IsPowerOfTwo(size)); CHECK(IsPowerOfTwo(alignment)); // Windows will align our allocations to at least 64K. alignment = Max(alignment, GetMmapGranularity()); uptr mapped_addr = (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (!mapped_addr) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); // If we got it right on the first try, return. Otherwise, unmap it and go to // the slow path. if (IsAligned(mapped_addr, alignment)) return (void*)mapped_addr; if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0) ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError()); // If we didn't get an aligned address, overallocate, find an aligned address, // unmap, and try to allocate at that aligned address. int retries = 0; const int kMaxRetries = 10; for (; retries < kMaxRetries && (mapped_addr == 0 || !IsAligned(mapped_addr, alignment)); retries++) { // Overallocate size + alignment bytes. mapped_addr = (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS); if (!mapped_addr) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); // Find the aligned address. uptr aligned_addr = RoundUpTo(mapped_addr, alignment); // Free the overallocation. if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0) ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError()); // Attempt to allocate exactly the number of bytes we need at the aligned // address. This may fail for a number of reasons, in which case we continue // the loop. mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); } // Fail if we can't make this work quickly. if (retries == kMaxRetries && mapped_addr == 0) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); return (void *)mapped_addr; } bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { // FIXME: is this really "NoReserve"? On Win32 this does not matter much, // but on Win64 it does. (void)name; // unsupported #if !SANITIZER_GO && SANITIZER_WINDOWS64 // On ASan/Windows64, using MEM_COMMIT would result in error // 1455 (ERROR_COMMITMENT_LIMIT). // ASan uses an exception handler to commit pages on demand.
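The aligned-mapping fallback in MmapAlignedOrDieOnFatalError above rests on one arithmetic fact: over-reserving size + alignment bytes guarantees an aligned address inside the reservation with at least size bytes after it. The round-up itself is the usual power-of-two mask trick (the CHECKs above enforce that alignment is a power of two); a standalone sketch of what RoundUpTo computes:

#include <cstdint>

// Round addr up to the next multiple of a power-of-two alignment.
inline uintptr_t round_up(uintptr_t addr, uintptr_t alignment) {
  return (addr + alignment - 1) & ~(alignment - 1);
}
// e.g. round_up(0x1234567, 0x10000) == 0x1240000: the first 64K-aligned
// address at or above the over-reserved base.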
void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE); #else void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); #endif if (p == 0) { Report("ERROR: %s failed to " "allocate %p (%zd) bytes at %p (error code: %d)\n", SanitizerToolName, size, size, fixed_addr, GetLastError()); return false; } return true; } // Memory space mapped by 'MmapFixedOrDie' must have been reserved by // 'MmapFixedNoAccess'. void *MmapFixedOrDie(uptr fixed_addr, uptr size) { void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_COMMIT, PAGE_READWRITE); if (p == 0) { char mem_type[30]; internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx", fixed_addr); ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError()); } return p; } // Uses fixed_addr for now. // Will use offset instead once we've implemented this function for real. uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size) { return reinterpret_cast(MmapFixedOrDieOnFatalError(fixed_addr, size)); } uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size) { return reinterpret_cast(MmapFixedOrDie(fixed_addr, size)); } void ReservedAddressRange::Unmap(uptr addr, uptr size) { // Only unmap if it covers the entire range. CHECK((addr == reinterpret_cast(base_)) && (size == size_)); // We unmap the whole range, just null out the base. base_ = nullptr; size_ = 0; UnmapOrDie(reinterpret_cast(addr), size); } void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) { void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_COMMIT, PAGE_READWRITE); if (p == 0) { char mem_type[30]; internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx", fixed_addr); return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate"); } return p; } void *MmapNoReserveOrDie(uptr size, const char *mem_type) { // FIXME: make this really NoReserve? return MmapOrDie(size, mem_type); } uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) { base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size); size_ = size; name_ = name; (void)os_handle_; // unsupported return reinterpret_cast(base_); } void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) { (void)name; // unsupported void *res = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_NOACCESS); if (res == 0) Report("WARNING: %s failed to " "mprotect %p (%zd) bytes at %p (error code: %d)\n", SanitizerToolName, size, size, fixed_addr, GetLastError()); return res; } void *MmapNoAccess(uptr size) { void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS); if (res == 0) Report("WARNING: %s failed to " "mprotect %p (%zd) bytes (error code: %d)\n", SanitizerToolName, size, size, GetLastError()); return res; } bool MprotectNoAccess(uptr addr, uptr size) { DWORD old_protection; return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection); } void ReleaseMemoryPagesToOS(uptr beg, uptr end) { // This is almost useless on 32-bits. // FIXME: add madvise-analog when we move to 64-bits. } bool NoHugePagesInRegion(uptr addr, uptr size) { // FIXME: probably similar to ReleaseMemoryToOS. return true; } bool DontDumpShadowMemory(uptr addr, uptr length) { // This is almost useless on 32-bits. // FIXME: add madvise-analog when we move to 64-bits. 
return true; } uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, uptr *largest_gap_found, uptr *max_occupied_addr) { uptr address = 0; while (true) { MEMORY_BASIC_INFORMATION info; if (!::VirtualQuery((void*)address, &info, sizeof(info))) return 0; if (info.State == MEM_FREE) { uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding, alignment); if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize) return shadow_address; } // Move to the next region. address = (uptr)info.BaseAddress + info.RegionSize; } return 0; } bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { MEMORY_BASIC_INFORMATION mbi; CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi))); return mbi.Protect == PAGE_NOACCESS && (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end; } void *MapFileToMemory(const char *file_name, uptr *buff_size) { UNIMPLEMENTED(); } void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) { UNIMPLEMENTED(); } static const int kMaxEnvNameLength = 128; static const DWORD kMaxEnvValueLength = 32767; namespace { struct EnvVariable { char name[kMaxEnvNameLength]; char value[kMaxEnvValueLength]; }; } // namespace static const int kEnvVariables = 5; static EnvVariable env_vars[kEnvVariables]; static int num_env_vars; const char *GetEnv(const char *name) { // Note: this implementation caches the values of the environment variables // and limits their quantity. for (int i = 0; i < num_env_vars; i++) { if (0 == internal_strcmp(name, env_vars[i].name)) return env_vars[i].value; } CHECK_LT(num_env_vars, kEnvVariables); DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value, kMaxEnvValueLength); if (rv > 0 && rv < kMaxEnvValueLength) { CHECK_LT(internal_strlen(name), kMaxEnvNameLength); internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength); num_env_vars++; return env_vars[num_env_vars - 1].value; } return 0; } const char *GetPwd() { UNIMPLEMENTED(); } u32 GetUid() { UNIMPLEMENTED(); } namespace { struct ModuleInfo { const char *filepath; uptr base_address; uptr end_address; }; #if !SANITIZER_GO int CompareModulesBase(const void *pl, const void *pr) { const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr; if (l->base_address < r->base_address) return -1; return l->base_address > r->base_address; } #endif } // namespace #if !SANITIZER_GO void DumpProcessMap() { Report("Dumping process modules:\n"); ListOfModules modules; modules.init(); uptr num_modules = modules.size(); InternalMmapVector module_infos(num_modules); for (size_t i = 0; i < num_modules; ++i) { module_infos[i].filepath = modules[i].full_name(); module_infos[i].base_address = modules[i].ranges().front()->beg; module_infos[i].end_address = modules[i].ranges().back()->end; } qsort(module_infos.data(), num_modules, sizeof(ModuleInfo), CompareModulesBase); for (size_t i = 0; i < num_modules; ++i) { const ModuleInfo &mi = module_infos[i]; if (mi.end_address != 0) { Printf("\t%p-%p %s\n", mi.base_address, mi.end_address, mi.filepath[0] ? mi.filepath : "[no name]"); } else if (mi.filepath[0]) { Printf("\t??\?-??? %s\n", mi.filepath); } else { Printf("\t???\n"); } } } #endif void PrintModuleMap() { } void DisableCoreDumperIfNecessary() { // Do nothing. 
} void ReExec() { UNIMPLEMENTED(); } void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {} bool StackSizeIsUnlimited() { UNIMPLEMENTED(); } void SetStackSizeLimitInBytes(uptr limit) { UNIMPLEMENTED(); } bool AddressSpaceIsUnlimited() { UNIMPLEMENTED(); } void SetAddressSpaceUnlimited() { UNIMPLEMENTED(); } bool IsPathSeparator(const char c) { return c == '\\' || c == '/'; } bool IsAbsolutePath(const char *path) { UNIMPLEMENTED(); } void SleepForSeconds(int seconds) { Sleep(seconds * 1000); } void SleepForMillis(int millis) { Sleep(millis); } u64 NanoTime() { static LARGE_INTEGER frequency = {}; LARGE_INTEGER counter; if (UNLIKELY(frequency.QuadPart == 0)) { QueryPerformanceFrequency(&frequency); CHECK_NE(frequency.QuadPart, 0); } QueryPerformanceCounter(&counter); counter.QuadPart *= 1000ULL * 1000000ULL; counter.QuadPart /= frequency.QuadPart; return counter.QuadPart; } u64 MonotonicNanoTime() { return NanoTime(); } void Abort() { internal__exit(3); } #if !SANITIZER_GO // Read the file to extract the ImageBase field from the PE header. If ASLR is // disabled and this virtual address is available, the loader will typically // load the image at this address. Therefore, we call it the preferred base. Any // addresses in the DWARF typically assume that the object has been loaded at // this address. static uptr GetPreferredBase(const char *modname) { fd_t fd = OpenFile(modname, RdOnly, nullptr); if (fd == kInvalidFd) return 0; FileCloser closer(fd); // Read just the DOS header. IMAGE_DOS_HEADER dos_header; uptr bytes_read; if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) || bytes_read != sizeof(dos_header)) return 0; // The file should start with the right signature. if (dos_header.e_magic != IMAGE_DOS_SIGNATURE) return 0; // The layout at e_lfanew is: // "PE\0\0" // IMAGE_FILE_HEADER // IMAGE_OPTIONAL_HEADER // Seek to e_lfanew and read all that data. char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)]; if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) == INVALID_SET_FILE_POINTER) return 0; if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) || bytes_read != sizeof(buf)) return 0; // Check for "PE\0\0" before the PE header. char *pe_sig = &buf[0]; if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0) return 0; // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted. IMAGE_OPTIONAL_HEADER *pe_header = (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER)); // Check for more magic in the PE header. if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC) return 0; // Finally, return the ImageBase. return (uptr)pe_header->ImageBase; } void ListOfModules::init() { clearOrInit(); HANDLE cur_process = GetCurrentProcess(); // Query the list of modules. Start by assuming there are no more than 256 // modules and retry if that's not sufficient. HMODULE *hmodules = 0; uptr modules_buffer_size = sizeof(HMODULE) * 256; DWORD bytes_required; while (!hmodules) { hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__); CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size, &bytes_required)); if (bytes_required > modules_buffer_size) { // Either there turned out to be more than 256 hmodules, or new hmodules // could have loaded since the last try. Retry. 
UnmapOrDie(hmodules, modules_buffer_size); hmodules = 0; modules_buffer_size = bytes_required; } } // |num_modules| is the number of modules actually present. size_t num_modules = bytes_required / sizeof(HMODULE); for (size_t i = 0; i < num_modules; ++i) { HMODULE handle = hmodules[i]; MODULEINFO mi; if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi))) continue; // Get the UTF-16 path and convert to UTF-8. wchar_t modname_utf16[kMaxPathLength]; int modname_utf16_len = GetModuleFileNameW(handle, modname_utf16, kMaxPathLength); if (modname_utf16_len == 0) modname_utf16[0] = '\0'; char module_name[kMaxPathLength]; int module_name_len = ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1, &module_name[0], kMaxPathLength, NULL, NULL); module_name[module_name_len] = '\0'; uptr base_address = (uptr)mi.lpBaseOfDll; uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage; // Adjust the base address of the module so that we get a VA instead of an // RVA when computing the module offset. This helps llvm-symbolizer find the // right DWARF CU. In the common case that the image is loaded at its // preferred address, we will now print normal virtual addresses. uptr preferred_base = GetPreferredBase(&module_name[0]); uptr adjusted_base = base_address - preferred_base; LoadedModule cur_module; cur_module.set(module_name, adjusted_base); // We add the whole module as a single address range. cur_module.addAddressRange(base_address, end_address, /*executable*/ true, /*writable*/ true); modules_.push_back(cur_module); } UnmapOrDie(hmodules, modules_buffer_size); } void ListOfModules::fallbackInit() { clear(); } // We can't use atexit() directly at __asan_init time as the CRT is not fully // initialized at this point. Place the functions into a vector and use // atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers). InternalMmapVectorNoCtor<void (*)(void)> atexit_functions; int Atexit(void (*function)(void)) { atexit_functions.push_back(function); return 0; } static int RunAtexit() { int ret = 0; for (uptr i = 0; i < atexit_functions.size(); ++i) { ret |= atexit(atexit_functions[i]); } return ret; } #pragma section(".CRT$XID", long, read) // NOLINT __declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit; #endif // ------------------ sanitizer_libc.h fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) { // FIXME: Use the wide variants to handle Unicode filenames. fd_t res; if (mode == RdOnly) { res = CreateFileA(filename, GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr); } else if (mode == WrOnly) { res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr); } else { UNIMPLEMENTED(); } CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd); CHECK(res != kStderrFd || kStderrFd == kInvalidFd); if (res == kInvalidFd && last_error) *last_error = GetLastError(); return res; } void CloseFile(fd_t fd) { CloseHandle(fd); } bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read, error_t *error_p) { CHECK(fd != kInvalidFd); // bytes_read can't be passed directly to ReadFile: // uptr is unsigned long long on 64-bit Windows. unsigned long num_read_long; bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr); if (!success && error_p) *error_p = GetLastError(); if (bytes_read) *bytes_read = num_read_long; return success; } bool SupportsColoredOutput(fd_t fd) { // FIXME: support colored output.
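GetPreferredBase above is a small on-disk PE walk: DOS header, e_lfanew, the "PE\0\0" signature, then the optional header's ImageBase. The offsets involved are fixed by the PE format; a simplified in-memory version of just the signature part (illustrative, with hand-rolled reads instead of the <windows.h> structures):

#include <cstddef>
#include <cstdint>
#include <cstring>

// True iff the buffer starts with a DOS header ("MZ") whose e_lfanew
// field (at offset 0x3c) points at a "PE\0\0" signature.
bool pe_signature_ok(const uint8_t *img, size_t len) {
  if (len < 0x40 || img[0] != 'M' || img[1] != 'Z') return false;
  uint32_t e_lfanew;
  std::memcpy(&e_lfanew, img + 0x3c, sizeof(e_lfanew));
  if (e_lfanew > len - 4) return false;
  return std::memcmp(img + e_lfanew, "PE\0\0", 4) == 0;
}

The payoff is the adjustment in ListOfModules::init above: actual base minus preferred base is the ASLR slide, so module offsets handed to llvm-symbolizer remain plain virtual addresses whenever the image loads at its preferred base.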
return false; } bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written, error_t *error_p) { CHECK(fd != kInvalidFd); // Handle null optional parameters. error_t dummy_error; error_p = error_p ? error_p : &dummy_error; uptr dummy_bytes_written; bytes_written = bytes_written ? bytes_written : &dummy_bytes_written; // Initialize output parameters in case we fail. *error_p = 0; *bytes_written = 0; // Map the conventional Unix fds 1 and 2 to Windows handles. They might be // closed, in which case this will fail. if (fd == kStdoutFd || fd == kStderrFd) { fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE); if (fd == 0) { *error_p = ERROR_INVALID_HANDLE; return false; } } DWORD bytes_written_32; if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) { *error_p = GetLastError(); return false; } else { *bytes_written = bytes_written_32; return true; } } bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) { UNIMPLEMENTED(); } uptr internal_sched_yield() { Sleep(0); return 0; } void internal__exit(int exitcode) { // ExitProcess runs some finalizers, so use TerminateProcess to avoid that. // The debugger doesn't stop on TerminateProcess like it does on ExitProcess, // so add our own breakpoint here. if (::IsDebuggerPresent()) __debugbreak(); TerminateProcess(GetCurrentProcess(), exitcode); BUILTIN_UNREACHABLE(); } uptr internal_ftruncate(fd_t fd, uptr size) { UNIMPLEMENTED(); } uptr GetRSS() { PROCESS_MEMORY_COUNTERS counters; if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters))) return 0; return counters.WorkingSetSize; } void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; } void internal_join_thread(void *th) { } // ---------------------- BlockingMutex ---------------- {{{1 -const uptr LOCK_UNINITIALIZED = 0; -const uptr LOCK_READY = (uptr)-1; -BlockingMutex::BlockingMutex(LinkerInitialized li) { - // FIXME: see comments in BlockingMutex::Lock() for the details. - CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED); - - CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_)); - InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_); - owner_ = LOCK_READY; -} - BlockingMutex::BlockingMutex() { - CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_)); - InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_); - owner_ = LOCK_READY; + CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_)); + internal_memset(this, 0, sizeof(*this)); } void BlockingMutex::Lock() { - if (owner_ == LOCK_UNINITIALIZED) { - // FIXME: hm, global BlockingMutex objects are not initialized?!? - // This might be a side effect of the clang+cl+link Frankenbuild... - new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1)); - - // FIXME: If it turns out the linker doesn't invoke our - // constructors, we should probably manually Lock/Unlock all the global - // locks while we're starting in one thread to avoid double-init races. 
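The hunk just below replaces the CRITICAL_SECTION in BlockingMutex with an SRWLOCK, which is what makes the constexpr constructor in sanitizer_mutex.h above possible: SRWLOCK's static initializer SRWLOCK_INIT is all zeros, so zero-filled storage is already a valid lock and no InitializeCriticalSection call is required. A minimal Windows-only sketch of the same idiom (assumes <windows.h>; not the runtime's code):

#ifdef _WIN32
#include <windows.h>

static SRWLOCK g_lock = SRWLOCK_INIT;  // zero-initialized, usable as-is

void with_lock(void (*fn)(void)) {
  AcquireSRWLockExclusive(&g_lock);
  fn();  // exclusive section
  ReleaseSRWLockExclusive(&g_lock);
}
#endif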
- } - EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_); - CHECK_EQ(owner_, LOCK_READY); + AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_); + CHECK_EQ(owner_, 0); owner_ = GetThreadSelf(); } void BlockingMutex::Unlock() { - CHECK_EQ(owner_, GetThreadSelf()); - owner_ = LOCK_READY; - LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_); + CheckLocked(); + owner_ = 0; + ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_); } void BlockingMutex::CheckLocked() { CHECK_EQ(owner_, GetThreadSelf()); } uptr GetTlsSize() { return 0; } void InitTlsSize() { } void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size) { #if SANITIZER_GO *stk_addr = 0; *stk_size = 0; *tls_addr = 0; *tls_size = 0; #else uptr stack_top, stack_bottom; GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); *stk_addr = stack_bottom; *stk_size = stack_top - stack_bottom; *tls_addr = 0; *tls_size = 0; #endif } void ReportFile::Write(const char *buffer, uptr length) { SpinMutexLock l(mu); ReopenIfNecessary(); if (!WriteToFile(fd, buffer, length)) { // stderr may be closed, but we may be able to print to the debugger // instead. This is the case when launching a program from Visual Studio, // and the following routine should write to its console. OutputDebugStringA(buffer); } } void SetAlternateSignalStack() { // FIXME: Decide what to do on Windows. } void UnsetAlternateSignalStack() { // FIXME: Decide what to do on Windows. } void InstallDeadlySignalHandlers(SignalHandlerType handler) { (void)handler; // FIXME: Decide what to do on Windows. } HandleSignalMode GetHandleSignalMode(int signum) { // FIXME: Decide what to do on Windows. return kHandleSignalNo; } // Check based on flags if we should handle this exception. bool IsHandledDeadlyException(DWORD exceptionCode) { switch (exceptionCode) { case EXCEPTION_ACCESS_VIOLATION: case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: case EXCEPTION_STACK_OVERFLOW: case EXCEPTION_DATATYPE_MISALIGNMENT: case EXCEPTION_IN_PAGE_ERROR: return common_flags()->handle_segv; case EXCEPTION_ILLEGAL_INSTRUCTION: case EXCEPTION_PRIV_INSTRUCTION: case EXCEPTION_BREAKPOINT: return common_flags()->handle_sigill; case EXCEPTION_FLT_DENORMAL_OPERAND: case EXCEPTION_FLT_DIVIDE_BY_ZERO: case EXCEPTION_FLT_INEXACT_RESULT: case EXCEPTION_FLT_INVALID_OPERATION: case EXCEPTION_FLT_OVERFLOW: case EXCEPTION_FLT_STACK_CHECK: case EXCEPTION_FLT_UNDERFLOW: case EXCEPTION_INT_DIVIDE_BY_ZERO: case EXCEPTION_INT_OVERFLOW: return common_flags()->handle_sigfpe; } return false; } bool IsAccessibleMemoryRange(uptr beg, uptr size) { SYSTEM_INFO si; GetNativeSystemInfo(&si); uptr page_size = si.dwPageSize; uptr page_mask = ~(page_size - 1); for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask; page <= end;) { MEMORY_BASIC_INFORMATION info; if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info)) return false; if (info.Protect == 0 || info.Protect == PAGE_NOACCESS || info.Protect == PAGE_EXECUTE) return false; if (info.RegionSize == 0) return false; page += info.RegionSize; } return true; } bool SignalContext::IsStackOverflow() const { return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW; } void SignalContext::InitPcSpBp() { EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; CONTEXT *context_record = (CONTEXT *)context; pc = (uptr)exception_record->ExceptionAddress; #ifdef _WIN64 bp = (uptr)context_record->Rbp; sp = (uptr)context_record->Rsp; #else bp = (uptr)context_record->Ebp; sp = (uptr)context_record->Esp; #endif } uptr 
SignalContext::GetAddress() const { EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; return exception_record->ExceptionInformation[1]; } bool SignalContext::IsMemoryAccess() const { return GetWriteFlag() != SignalContext::UNKNOWN; } SignalContext::WriteFlag SignalContext::GetWriteFlag() const { EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; // The contents of this array are documented at // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx // The first element indicates read as 0, write as 1, or execute as 8. The // second element is the faulting address. switch (exception_record->ExceptionInformation[0]) { case 0: return SignalContext::READ; case 1: return SignalContext::WRITE; case 8: return SignalContext::UNKNOWN; } return SignalContext::UNKNOWN; } void SignalContext::DumpAllRegisters(void *context) { // FIXME: Implement this. } int SignalContext::GetType() const { return static_cast(siginfo)->ExceptionCode; } const char *SignalContext::Describe() const { unsigned code = GetType(); // Get the string description of the exception if this is a known deadly // exception. switch (code) { case EXCEPTION_ACCESS_VIOLATION: return "access-violation"; case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: return "array-bounds-exceeded"; case EXCEPTION_STACK_OVERFLOW: return "stack-overflow"; case EXCEPTION_DATATYPE_MISALIGNMENT: return "datatype-misalignment"; case EXCEPTION_IN_PAGE_ERROR: return "in-page-error"; case EXCEPTION_ILLEGAL_INSTRUCTION: return "illegal-instruction"; case EXCEPTION_PRIV_INSTRUCTION: return "priv-instruction"; case EXCEPTION_BREAKPOINT: return "breakpoint"; case EXCEPTION_FLT_DENORMAL_OPERAND: return "flt-denormal-operand"; case EXCEPTION_FLT_DIVIDE_BY_ZERO: return "flt-divide-by-zero"; case EXCEPTION_FLT_INEXACT_RESULT: return "flt-inexact-result"; case EXCEPTION_FLT_INVALID_OPERATION: return "flt-invalid-operation"; case EXCEPTION_FLT_OVERFLOW: return "flt-overflow"; case EXCEPTION_FLT_STACK_CHECK: return "flt-stack-check"; case EXCEPTION_FLT_UNDERFLOW: return "flt-underflow"; case EXCEPTION_INT_DIVIDE_BY_ZERO: return "int-divide-by-zero"; case EXCEPTION_INT_OVERFLOW: return "int-overflow"; } return "unknown exception"; } uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { // FIXME: Actually implement this function. CHECK_GT(buf_len, 0); buf[0] = 0; return 0; } uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) { return ReadBinaryName(buf, buf_len); } void CheckVMASize() { // Do nothing. } void MaybeReexec() { // No need to re-exec on Windows. } void CheckASLR() { // Do nothing } char **GetArgv() { // FIXME: Actually implement this function. return 0; } pid_t StartSubprocess(const char *program, const char *const argv[], fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) { // FIXME: implement on this platform // Should be implemented based on // SymbolizerProcess::StarAtSymbolizerSubprocess // from lib/sanitizer_common/sanitizer_symbolizer_win.cc. return -1; } bool IsProcessRunning(pid_t pid) { // FIXME: implement on this platform. return false; } int WaitForProcess(pid_t pid) { return -1; } // FIXME implement on this platform. void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { } void CheckNoDeepBind(const char *filename, int flag) { // Do nothing. } // FIXME: implement on this platform. 
bool GetRandom(void *buffer, uptr length, bool blocking) { UNIMPLEMENTED(); } u32 GetNumberOfCPUs() { SYSTEM_INFO sysinfo = {}; GetNativeSystemInfo(&sysinfo); return sysinfo.dwNumberOfProcessors; } } // namespace __sanitizer #endif // _WIN32 Index: vendor/compiler-rt/dist/lib/ubsan/ubsan_checks.inc =================================================================== --- vendor/compiler-rt/dist/lib/ubsan/ubsan_checks.inc (revision 337140) +++ vendor/compiler-rt/dist/lib/ubsan/ubsan_checks.inc (revision 337141) @@ -1,47 +1,49 @@ //===-- ubsan_checks.inc ----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // List of checks handled by UBSan runtime. // //===----------------------------------------------------------------------===// #ifndef UBSAN_CHECK # error "Define UBSAN_CHECK prior to including this file!" #endif // UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) // SummaryKind and FSanitizeFlagName should be string literals. UBSAN_CHECK(GenericUB, "undefined-behavior", "undefined") UBSAN_CHECK(NullPointerUse, "null-pointer-use", "null") UBSAN_CHECK(PointerOverflow, "pointer-overflow", "pointer-overflow") UBSAN_CHECK(MisalignedPointerUse, "misaligned-pointer-use", "alignment") UBSAN_CHECK(InsufficientObjectSize, "insufficient-object-size", "object-size") UBSAN_CHECK(SignedIntegerOverflow, "signed-integer-overflow", "signed-integer-overflow") UBSAN_CHECK(UnsignedIntegerOverflow, "unsigned-integer-overflow", "unsigned-integer-overflow") UBSAN_CHECK(IntegerDivideByZero, "integer-divide-by-zero", "integer-divide-by-zero") UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero", "float-divide-by-zero") UBSAN_CHECK(InvalidBuiltin, "invalid-builtin-use", "invalid-builtin-use") +UBSAN_CHECK(ImplicitIntegerTruncation, "implicit-integer-truncation", + "implicit-integer-truncation") UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "shift-base") UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent", "shift-exponent") UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "bounds") UBSAN_CHECK(UnreachableCall, "unreachable-call", "unreachable") UBSAN_CHECK(MissingReturn, "missing-return", "return") UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index", "vla-bound") UBSAN_CHECK(FloatCastOverflow, "float-cast-overflow", "float-cast-overflow") UBSAN_CHECK(InvalidBoolLoad, "invalid-bool-load", "bool") UBSAN_CHECK(InvalidEnumLoad, "invalid-enum-load", "enum") UBSAN_CHECK(FunctionTypeMismatch, "function-type-mismatch", "function") UBSAN_CHECK(InvalidNullReturn, "invalid-null-return", "returns-nonnull-attribute") UBSAN_CHECK(InvalidNullArgument, "invalid-null-argument", "nonnull-attribute") UBSAN_CHECK(DynamicTypeMismatch, "dynamic-type-mismatch", "vptr") UBSAN_CHECK(CFIBadType, "cfi-bad-type", "cfi") Index: vendor/compiler-rt/dist/lib/ubsan/ubsan_diag.h =================================================================== --- vendor/compiler-rt/dist/lib/ubsan/ubsan_diag.h (revision 337140) +++ vendor/compiler-rt/dist/lib/ubsan/ubsan_diag.h (revision 337141) @@ -1,270 +1,270 @@ //===-- ubsan_diag.h --------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. 
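The ImplicitIntegerTruncation entry added above registers an ErrorType and an -fsanitize flag name for lossy implicit integer conversions; the handler plumbing lands elsewhere in this import. The kind of code the check is meant to flag (illustrative):

// Built with -fsanitize=implicit-integer-truncation (the flag name from
// the table above), this conversion is reported when data is lost:
unsigned char narrow(unsigned int x) {
  return x;  // keeps only the low 8 bits: 0x12345678 becomes 0x78
}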
// //===----------------------------------------------------------------------===// // // Diagnostics emission for Clang's undefined behavior sanitizer. // //===----------------------------------------------------------------------===// #ifndef UBSAN_DIAG_H #define UBSAN_DIAG_H #include "ubsan_value.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_symbolizer.h" namespace __ubsan { class SymbolizedStackHolder { SymbolizedStack *Stack; void clear() { if (Stack) Stack->ClearAll(); } public: explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr) : Stack(Stack) {} ~SymbolizedStackHolder() { clear(); } void reset(SymbolizedStack *S) { if (Stack != S) clear(); Stack = S; } const SymbolizedStack *get() const { return Stack; } }; SymbolizedStack *getSymbolizedLocation(uptr PC); inline SymbolizedStack *getCallerLocation(uptr CallerPC) { CHECK(CallerPC); uptr PC = StackTrace::GetPreviousInstructionPc(CallerPC); return getSymbolizedLocation(PC); } /// A location of some data within the program's address space. typedef uptr MemoryLocation; /// \brief Location at which a diagnostic can be emitted. Either a /// SourceLocation, a MemoryLocation, or a SymbolizedStack. class Location { public: enum LocationKind { LK_Null, LK_Source, LK_Memory, LK_Symbolized }; private: LocationKind Kind; // FIXME: In C++11, wrap these in an anonymous union. SourceLocation SourceLoc; MemoryLocation MemoryLoc; const SymbolizedStack *SymbolizedLoc; // Not owned. public: Location() : Kind(LK_Null) {} Location(SourceLocation Loc) : Kind(LK_Source), SourceLoc(Loc) {} Location(MemoryLocation Loc) : Kind(LK_Memory), MemoryLoc(Loc) {} // SymbolizedStackHolder must outlive Location object. Location(const SymbolizedStackHolder &Stack) : Kind(LK_Symbolized), SymbolizedLoc(Stack.get()) {} LocationKind getKind() const { return Kind; } bool isSourceLocation() const { return Kind == LK_Source; } bool isMemoryLocation() const { return Kind == LK_Memory; } bool isSymbolizedStack() const { return Kind == LK_Symbolized; } SourceLocation getSourceLocation() const { CHECK(isSourceLocation()); return SourceLoc; } MemoryLocation getMemoryLocation() const { CHECK(isMemoryLocation()); return MemoryLoc; } const SymbolizedStack *getSymbolizedStack() const { CHECK(isSymbolizedStack()); return SymbolizedLoc; } }; /// A diagnostic severity level. enum DiagLevel { DL_Error, ///< An error. DL_Note ///< A note, attached to a prior diagnostic. }; /// \brief Annotation for a range of locations in a diagnostic. class Range { Location Start, End; const char *Text; public: Range() : Start(), End(), Text() {} Range(MemoryLocation Start, MemoryLocation End, const char *Text) : Start(Start), End(End), Text(Text) {} Location getStart() const { return Start; } Location getEnd() const { return End; } const char *getText() const { return Text; } }; /// \brief A C++ type name. Really just a strong typedef for 'const char*'. class TypeName { const char *Name; public: TypeName(const char *Name) : Name(Name) {} const char *getName() const { return Name; } }; enum class ErrorType { #define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) Name, #include "ubsan_checks.inc" #undef UBSAN_CHECK }; /// \brief Representation of an in-flight diagnostic. /// /// Temporary \c Diag instances are created by the handler routines to /// accumulate arguments for a diagnostic. The destructor emits the diagnostic /// message. class Diag { /// The location at which the problem occurred. Location Loc; /// The diagnostic level. 
DiagLevel Level; /// The error type. ErrorType ET; /// The message which will be emitted, with %0, %1, ... placeholders for /// arguments. const char *Message; public: /// Kinds of arguments, corresponding to members of \c Arg's union. enum ArgKind { AK_String, ///< A string argument, displayed as-is. AK_TypeName,///< A C++ type name, possibly demangled before display. AK_UInt, ///< An unsigned integer argument. AK_SInt, ///< A signed integer argument. AK_Float, ///< A floating-point argument. AK_Pointer ///< A pointer argument, displayed in hexadecimal. }; /// An individual diagnostic message argument. struct Arg { Arg() {} Arg(const char *String) : Kind(AK_String), String(String) {} Arg(TypeName TN) : Kind(AK_TypeName), String(TN.getName()) {} Arg(UIntMax UInt) : Kind(AK_UInt), UInt(UInt) {} Arg(SIntMax SInt) : Kind(AK_SInt), SInt(SInt) {} Arg(FloatMax Float) : Kind(AK_Float), Float(Float) {} Arg(const void *Pointer) : Kind(AK_Pointer), Pointer(Pointer) {} ArgKind Kind; union { const char *String; UIntMax UInt; SIntMax SInt; FloatMax Float; const void *Pointer; }; }; private: - static const unsigned MaxArgs = 5; + static const unsigned MaxArgs = 8; static const unsigned MaxRanges = 1; /// The arguments which have been added to this diagnostic so far. Arg Args[MaxArgs]; unsigned NumArgs; /// The ranges which have been added to this diagnostic so far. Range Ranges[MaxRanges]; unsigned NumRanges; Diag &AddArg(Arg A) { CHECK(NumArgs != MaxArgs); Args[NumArgs++] = A; return *this; } Diag &AddRange(Range A) { CHECK(NumRanges != MaxRanges); Ranges[NumRanges++] = A; return *this; } /// \c Diag objects are not copyable. Diag(const Diag &); // NOT IMPLEMENTED Diag &operator=(const Diag &); public: Diag(Location Loc, DiagLevel Level, ErrorType ET, const char *Message) : Loc(Loc), Level(Level), ET(ET), Message(Message), NumArgs(0), NumRanges(0) {} ~Diag(); Diag &operator<<(const char *Str) { return AddArg(Str); } Diag &operator<<(TypeName TN) { return AddArg(TN); } Diag &operator<<(unsigned long long V) { return AddArg(UIntMax(V)); } Diag &operator<<(const void *V) { return AddArg(V); } Diag &operator<<(const TypeDescriptor &V); Diag &operator<<(const Value &V); Diag &operator<<(const Range &R) { return AddRange(R); } }; struct ReportOptions { // If FromUnrecoverableHandler is specified, UBSan runtime handler is not // expected to return. bool FromUnrecoverableHandler; /// pc/bp are used to unwind the stack trace. uptr pc; uptr bp; }; bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET); #define GET_REPORT_OPTIONS(unrecoverable_handler) \ GET_CALLER_PC_BP; \ ReportOptions Opts = {unrecoverable_handler, pc, bp} void GetStackTrace(BufferedStackTrace *stack, uptr max_depth, uptr pc, uptr bp, void *context, bool fast); /// \brief Instantiate this class before printing diagnostics in the error /// report. This class ensures that reports from different threads and from /// different sanitizers won't be mixed. class ScopedReport { struct Initializer { Initializer(); }; Initializer initializer_; ScopedErrorReportLock report_lock_; ReportOptions Opts; Location SummaryLoc; ErrorType Type; public: ScopedReport(ReportOptions Opts, Location SummaryLoc, ErrorType Type); ~ScopedReport(); static void CheckLocked() { ScopedErrorReportLock::CheckLocked(); } }; void InitializeSuppressions(); bool IsVptrCheckSuppressed(const char *TypeName); // Sometimes UBSan runtime can know filename from handlers arguments, even if // debug info is missing. 
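// For illustration, assuming the usual sanitizer suppressions format of
// "<check>:<source pattern>", a suppressions file passed via
// UBSAN_OPTIONS=suppressions=... could contain entries such as:
//   signed-integer-overflow:third_party/*
//   alignment:legacy_io.cc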
bool IsPCSuppressed(ErrorType ET, uptr PC, const char *Filename); } // namespace __ubsan #endif // UBSAN_DIAG_H Index: vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.cc =================================================================== --- vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.cc (revision 337140) +++ vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.cc (revision 337141) @@ -1,737 +1,780 @@ //===-- ubsan_handlers.cc -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Error logging entry points for the UBSan runtime. // //===----------------------------------------------------------------------===// #include "ubsan_platform.h" #if CAN_SANITIZE_UB #include "ubsan_handlers.h" #include "ubsan_diag.h" #include "ubsan_flags.h" #include "ubsan_monitor.h" #include "sanitizer_common/sanitizer_common.h" using namespace __sanitizer; using namespace __ubsan; namespace __ubsan { bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET) { // We are not allowed to skip the error report: if we are in an unrecoverable // handler, we have to terminate the program right now, and therefore // have to print some diagnostic. // // Even if the source location is disabled, it doesn't mean that we have // already reported an error to the user: some concurrently running // thread could have acquired it, but not yet printed the report. if (Opts.FromUnrecoverableHandler) return false; return SLoc.isDisabled() || IsPCSuppressed(ET, Opts.pc, SLoc.getFilename()); } const char *TypeCheckKinds[] = { "load of", "store to", "reference binding to", "member access within", "member call on", "constructor call on", "downcast of", "downcast of", "upcast of", "cast to virtual base of", "_Nonnull binding to", "dynamic operation on"}; } static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, ReportOptions Opts) { Location Loc = Data->Loc.acquire(); uptr Alignment = (uptr)1 << Data->LogAlignment; ErrorType ET; if (!Pointer) ET = ErrorType::NullPointerUse; else if (Pointer & (Alignment - 1)) ET = ErrorType::MisalignedPointerUse; else ET = ErrorType::InsufficientObjectSize; // Use the SourceLocation from Data to track deduplication, even if it's // invalid.
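// (For the classification above, with illustrative values: if
// Data->LogAlignment == 3 the required Alignment is 1 << 3 == 8, so
// Pointer == 0 classifies as NullPointerUse, Pointer == 0x7ffc1234567c as
// MisalignedPointerUse since 0x...7c & 0x7 == 0x4, and any non-null,
// 8-byte-aligned pointer falls through to InsufficientObjectSize.)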
if (ignoreReport(Loc.getSourceLocation(), Opts, ET)) return; SymbolizedStackHolder FallbackLoc; if (Data->Loc.isInvalid()) { FallbackLoc.reset(getCallerLocation(Opts.pc)); Loc = FallbackLoc; } ScopedReport R(Opts, Loc, ET); switch (ET) { case ErrorType::NullPointerUse: Diag(Loc, DL_Error, ET, "%0 null pointer of type %1") << TypeCheckKinds[Data->TypeCheckKind] << Data->Type; break; case ErrorType::MisalignedPointerUse: Diag(Loc, DL_Error, ET, "%0 misaligned address %1 for type %3, " "which requires %2 byte alignment") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Alignment << Data->Type; break; case ErrorType::InsufficientObjectSize: Diag(Loc, DL_Error, ET, "%0 address %1 with insufficient space " "for an object of type %2") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Data->Type; break; default: UNREACHABLE("unexpected error type!"); } if (Pointer) Diag(Pointer, DL_Note, ET, "pointer points here"); } void __ubsan::__ubsan_handle_type_mismatch_v1(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(false); handleTypeMismatchImpl(Data, Pointer, Opts); } void __ubsan::__ubsan_handle_type_mismatch_v1_abort(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(true); handleTypeMismatchImpl(Data, Pointer, Opts); Die(); } /// \brief Common diagnostic emission for various forms of integer overflow. template <typename T> static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS, const char *Operator, T RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); bool IsSigned = Data->Type.isSignedIntegerTy(); ErrorType ET = IsSigned ?
ErrorType::SignedIntegerOverflow : ErrorType::UnsignedIntegerOverflow; if (ignoreReport(Loc, Opts, ET)) return; if (!IsSigned && flags()->silence_unsigned_overflow) return; ScopedReport R(Opts, Loc, ET); if (IsSigned) Diag(Loc, DL_Error, ET, "negation of %0 cannot be represented in type %1; " "cast to an unsigned type to negate this value to itself") << Value(Data->Type, OldVal) << Data->Type; else Diag(Loc, DL_Error, ET, "negation of %0 cannot be represented in type %1") << Value(Data->Type, OldVal) << Data->Type; } void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data, ValueHandle OldVal) { GET_REPORT_OPTIONS(false); handleNegateOverflowImpl(Data, OldVal, Opts); } void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data, ValueHandle OldVal) { GET_REPORT_OPTIONS(true); handleNegateOverflowImpl(Data, OldVal, Opts); Die(); } static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS, ValueHandle RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); Value LHSVal(Data->Type, LHS); Value RHSVal(Data->Type, RHS); ErrorType ET; if (RHSVal.isMinusOne()) ET = ErrorType::SignedIntegerOverflow; else if (Data->Type.isIntegerTy()) ET = ErrorType::IntegerDivideByZero; else ET = ErrorType::FloatDivideByZero; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); switch (ET) { case ErrorType::SignedIntegerOverflow: Diag(Loc, DL_Error, ET, "division of %0 by -1 cannot be represented in type %1") << LHSVal << Data->Type; break; default: Diag(Loc, DL_Error, ET, "division by zero"); break; } } void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(false); handleDivremOverflowImpl(Data, LHS, RHS, Opts); } void __ubsan::__ubsan_handle_divrem_overflow_abort(OverflowData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(true); handleDivremOverflowImpl(Data, LHS, RHS, Opts); Die(); } static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); Value LHSVal(Data->LHSType, LHS); Value RHSVal(Data->RHSType, RHS); ErrorType ET; if (RHSVal.isNegative() || RHSVal.getPositiveIntValue() >= Data->LHSType.getIntegerBitWidth()) ET = ErrorType::InvalidShiftExponent; else ET = ErrorType::InvalidShiftBase; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); if (ET == ErrorType::InvalidShiftExponent) { if (RHSVal.isNegative()) Diag(Loc, DL_Error, ET, "shift exponent %0 is negative") << RHSVal; else Diag(Loc, DL_Error, ET, "shift exponent %0 is too large for %1-bit type %2") << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType; } else { if (LHSVal.isNegative()) Diag(Loc, DL_Error, ET, "left shift of negative value %0") << LHSVal; else Diag(Loc, DL_Error, ET, "left shift of %0 by %1 places cannot be represented in type %2") << LHSVal << RHSVal << Data->LHSType; } } void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(false); handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts); } void __ubsan::__ubsan_handle_shift_out_of_bounds_abort( ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(true); handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts); Die(); } static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::OutOfBoundsIndex; if 
(ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Value IndexVal(Data->IndexType, Index); Diag(Loc, DL_Error, ET, "index %0 out of bounds for type %1") << IndexVal << Data->ArrayType; } void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data, ValueHandle Index) { GET_REPORT_OPTIONS(false); handleOutOfBoundsImpl(Data, Index, Opts); } void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data, ValueHandle Index) { GET_REPORT_OPTIONS(true); handleOutOfBoundsImpl(Data, Index, Opts); Die(); } static void handleBuiltinUnreachableImpl(UnreachableData *Data, ReportOptions Opts) { ErrorType ET = ErrorType::UnreachableCall; ScopedReport R(Opts, Data->Loc, ET); Diag(Data->Loc, DL_Error, ET, "execution reached an unreachable program point"); } void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) { GET_REPORT_OPTIONS(true); handleBuiltinUnreachableImpl(Data, Opts); Die(); } static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) { ErrorType ET = ErrorType::MissingReturn; ScopedReport R(Opts, Data->Loc, ET); Diag(Data->Loc, DL_Error, ET, "execution reached the end of a value-returning function " "without returning a value"); } void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) { GET_REPORT_OPTIONS(true); handleMissingReturnImpl(Data, Opts); Die(); } static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::NonPositiveVLAIndex; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "variable length array bound evaluates to " "non-positive value %0") << Value(Data->Type, Bound); } void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data, ValueHandle Bound) { GET_REPORT_OPTIONS(false); handleVLABoundNotPositive(Data, Bound, Opts); } void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data, ValueHandle Bound) { GET_REPORT_OPTIONS(true); handleVLABoundNotPositive(Data, Bound, Opts); Die(); } static bool looksLikeFloatCastOverflowDataV1(void *Data) { // First field is either a pointer to filename or a pointer to a // TypeDescriptor. u8 *FilenameOrTypeDescriptor; internal_memcpy(&FilenameOrTypeDescriptor, Data, sizeof(FilenameOrTypeDescriptor)); // Heuristic: For float_cast_overflow, the TypeKind will be either TK_Integer // (0x0), TK_Float (0x1) or TK_Unknown (0xff). If both types are known, // adding both bytes will be 0 or 1 (for BE or LE). If it were a filename, // adding two printable characters will not yield such a value. Otherwise, // if one of them is 0xff, this is most likely TK_Unknown type descriptor. 
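// For instance, a TypeDescriptor for TK_Integer starts with the bytes
// {0x00, 0x00} (sum 0), and one for TK_Float with {0x01, 0x00} or
// {0x00, 0x01} (sum 1), while a filename such as "abc.cc" starts with
// 'a' + 'b' == 0x61 + 0x62 == 0xc3, which matches none of the three
// conditions below and is therefore treated as V2 data.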
u16 MaybeFromTypeKind = FilenameOrTypeDescriptor[0] + FilenameOrTypeDescriptor[1]; return MaybeFromTypeKind < 2 || FilenameOrTypeDescriptor[0] == 0xff || FilenameOrTypeDescriptor[1] == 0xff; } static void handleFloatCastOverflow(void *DataPtr, ValueHandle From, ReportOptions Opts) { SymbolizedStackHolder CallerLoc; Location Loc; const TypeDescriptor *FromType, *ToType; ErrorType ET = ErrorType::FloatCastOverflow; if (looksLikeFloatCastOverflowDataV1(DataPtr)) { auto Data = reinterpret_cast<FloatCastOverflowData *>(DataPtr); CallerLoc.reset(getCallerLocation(Opts.pc)); Loc = CallerLoc; FromType = &Data->FromType; ToType = &Data->ToType; } else { auto Data = reinterpret_cast<FloatCastOverflowDataV2 *>(DataPtr); SourceLocation SLoc = Data->Loc.acquire(); if (ignoreReport(SLoc, Opts, ET)) return; Loc = SLoc; FromType = &Data->FromType; ToType = &Data->ToType; } ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "%0 is outside the range of representable values of type %2") << Value(*FromType, From) << *FromType << *ToType; } void __ubsan::__ubsan_handle_float_cast_overflow(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(false); handleFloatCastOverflow(Data, From, Opts); } void __ubsan::__ubsan_handle_float_cast_overflow_abort(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(true); handleFloatCastOverflow(Data, From, Opts); Die(); } static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); // This check could be more precise if we used different handlers for // -fsanitize=bool and -fsanitize=enum. bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")) || (0 == internal_strncmp(Data->Type.getTypeName(), "'BOOL'", 6)); ErrorType ET = IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "load of value %0, which is not a valid value for type %1") << Value(Data->Type, Val) << Data->Type; } void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data, ValueHandle Val) { GET_REPORT_OPTIONS(false); handleLoadInvalidValue(Data, Val, Opts); } void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data, ValueHandle Val) { GET_REPORT_OPTIONS(true); handleLoadInvalidValue(Data, Val, Opts); Die(); } +static void handleImplicitConversion(ImplicitConversionData *Data, + ReportOptions Opts, ValueHandle Src, + ValueHandle Dst) { + SourceLocation Loc = Data->Loc.acquire(); + ErrorType ET = ErrorType::GenericUB; + + switch (Data->Kind) { + case ICCK_IntegerTruncation: + ET = ErrorType::ImplicitIntegerTruncation; + break; + } + + if (ignoreReport(Loc, Opts, ET)) + return; + + const TypeDescriptor &SrcTy = Data->FromType; + const TypeDescriptor &DstTy = Data->ToType; + + ScopedReport R(Opts, Loc, ET); + + // FIXME: is it possible to dump the values as hex with fixed width? + + Diag(Loc, DL_Error, ET, + "implicit conversion from type %0 of value %1 (%2-bit, %3signed) to " + "type %4 changed the value to %5 (%6-bit, %7signed)") + << SrcTy << Value(SrcTy, Src) << SrcTy.getIntegerBitWidth() + << (SrcTy.isSignedIntegerTy() ? "" : "un") << DstTy << Value(DstTy, Dst) + << DstTy.getIntegerBitWidth() << (DstTy.isSignedIntegerTy() ?
"" : "un"); +} + +void __ubsan::__ubsan_handle_implicit_conversion(ImplicitConversionData *Data, + ValueHandle Src, + ValueHandle Dst) { + GET_REPORT_OPTIONS(false); + handleImplicitConversion(Data, Opts, Src, Dst); +} +void __ubsan::__ubsan_handle_implicit_conversion_abort( + ImplicitConversionData *Data, ValueHandle Src, ValueHandle Dst) { + GET_REPORT_OPTIONS(true); + handleImplicitConversion(Data, Opts, Src, Dst); + Die(); +} + static void handleInvalidBuiltin(InvalidBuiltinData *Data, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::InvalidBuiltin; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "passing zero to %0, which is not a valid argument") << ((Data->Kind == BCK_CTZPassedZero) ? "ctz()" : "clz()"); } void __ubsan::__ubsan_handle_invalid_builtin(InvalidBuiltinData *Data) { GET_REPORT_OPTIONS(true); handleInvalidBuiltin(Data, Opts); } void __ubsan::__ubsan_handle_invalid_builtin_abort(InvalidBuiltinData *Data) { GET_REPORT_OPTIONS(true); handleInvalidBuiltin(Data, Opts); Die(); } static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data, ValueHandle Function, ReportOptions Opts) { SourceLocation CallLoc = Data->Loc.acquire(); ErrorType ET = ErrorType::FunctionTypeMismatch; if (ignoreReport(CallLoc, Opts, ET)) return; ScopedReport R(Opts, CallLoc, ET); SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); const char *FName = FLoc.get()->info.function; if (!FName) FName = "(unknown)"; Diag(CallLoc, DL_Error, ET, "call to function %0 through pointer to incorrect function type %1") << FName << Data->Type; Diag(FLoc, DL_Note, ET, "%0 defined here") << FName; } void __ubsan::__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data, ValueHandle Function) { GET_REPORT_OPTIONS(false); handleFunctionTypeMismatch(Data, Function, Opts); } void __ubsan::__ubsan_handle_function_type_mismatch_abort( FunctionTypeMismatchData *Data, ValueHandle Function) { GET_REPORT_OPTIONS(true); handleFunctionTypeMismatch(Data, Function, Opts); Die(); } static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr, ReportOptions Opts, bool IsAttr) { if (!LocPtr) UNREACHABLE("source location pointer is null!"); SourceLocation Loc = LocPtr->acquire(); ErrorType ET = ErrorType::InvalidNullReturn; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "null pointer returned from function declared to never return null"); if (!Data->AttrLoc.isInvalid()) Diag(Data->AttrLoc, DL_Note, ET, "%0 specified here") << (IsAttr ? 
"returns_nonnull attribute" : "_Nonnull return type annotation"); } void __ubsan::__ubsan_handle_nonnull_return_v1(NonNullReturnData *Data, SourceLocation *LocPtr) { GET_REPORT_OPTIONS(false); handleNonNullReturn(Data, LocPtr, Opts, true); } void __ubsan::__ubsan_handle_nonnull_return_v1_abort(NonNullReturnData *Data, SourceLocation *LocPtr) { GET_REPORT_OPTIONS(true); handleNonNullReturn(Data, LocPtr, Opts, true); Die(); } void __ubsan::__ubsan_handle_nullability_return_v1(NonNullReturnData *Data, SourceLocation *LocPtr) { GET_REPORT_OPTIONS(false); handleNonNullReturn(Data, LocPtr, Opts, false); } void __ubsan::__ubsan_handle_nullability_return_v1_abort( NonNullReturnData *Data, SourceLocation *LocPtr) { GET_REPORT_OPTIONS(true); handleNonNullReturn(Data, LocPtr, Opts, false); Die(); } static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts, bool IsAttr) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::InvalidNullArgument; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "null pointer passed as argument %0, which is declared to " "never be null") << Data->ArgIndex; if (!Data->AttrLoc.isInvalid()) Diag(Data->AttrLoc, DL_Note, ET, "%0 specified here") << (IsAttr ? "nonnull attribute" : "_Nonnull type annotation"); } void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) { GET_REPORT_OPTIONS(false); handleNonNullArg(Data, Opts, true); } void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) { GET_REPORT_OPTIONS(true); handleNonNullArg(Data, Opts, true); Die(); } void __ubsan::__ubsan_handle_nullability_arg(NonNullArgData *Data) { GET_REPORT_OPTIONS(false); handleNonNullArg(Data, Opts, false); } void __ubsan::__ubsan_handle_nullability_arg_abort(NonNullArgData *Data) { GET_REPORT_OPTIONS(true); handleNonNullArg(Data, Opts, false); Die(); } static void handlePointerOverflowImpl(PointerOverflowData *Data, ValueHandle Base, ValueHandle Result, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::PointerOverflow; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); if ((sptr(Base) >= 0) == (sptr(Result) >= 0)) { if (Base > Result) Diag(Loc, DL_Error, ET, "addition of unsigned offset to %0 overflowed to %1") << (void *)Base << (void *)Result; else Diag(Loc, DL_Error, ET, "subtraction of unsigned offset from %0 overflowed to %1") << (void *)Base << (void *)Result; } else { Diag(Loc, DL_Error, ET, "pointer index expression with base %0 overflowed to %1") << (void *)Base << (void *)Result; } } void __ubsan::__ubsan_handle_pointer_overflow(PointerOverflowData *Data, ValueHandle Base, ValueHandle Result) { GET_REPORT_OPTIONS(false); handlePointerOverflowImpl(Data, Base, Result, Opts); } void __ubsan::__ubsan_handle_pointer_overflow_abort(PointerOverflowData *Data, ValueHandle Base, ValueHandle Result) { GET_REPORT_OPTIONS(true); handlePointerOverflowImpl(Data, Base, Result, Opts); Die(); } static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function, ReportOptions Opts) { if (Data->CheckKind != CFITCK_ICall && Data->CheckKind != CFITCK_NVMFCall) Die(); SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::CFIBadType; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); const char *CheckKindStr = Data->CheckKind == CFITCK_NVMFCall ? 
"non-virtual pointer to member function call" : "indirect function call"; Diag(Loc, DL_Error, ET, "control flow integrity check for type %0 failed during %1") << Data->Type << CheckKindStr; SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); const char *FName = FLoc.get()->info.function; if (!FName) FName = "(unknown)"; Diag(FLoc, DL_Note, ET, "%0 defined here") << FName; // If the failure involved different DSOs for the check location and icall // target, report the DSO names. const char *DstModule = FLoc.get()->info.module; if (!DstModule) DstModule = "(unknown)"; const char *SrcModule = Symbolizer::GetOrInit()->GetModuleNameForPc(Opts.pc); if (!SrcModule) SrcModule = "(unknown)"; if (internal_strcmp(SrcModule, DstModule)) Diag(Loc, DL_Note, ET, "check failed in %0, destination function located in %1") << SrcModule << DstModule; } namespace __ubsan { #ifdef UBSAN_CAN_USE_CXXABI #ifdef _WIN32 extern "C" void __ubsan_handle_cfi_bad_type_default(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts) { Die(); } WIN_WEAK_ALIAS(__ubsan_handle_cfi_bad_type, __ubsan_handle_cfi_bad_type_default) #else SANITIZER_WEAK_ATTRIBUTE #endif void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts); #else void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts) { Die(); } #endif } // namespace __ubsan void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data, ValueHandle Value, uptr ValidVtable) { GET_REPORT_OPTIONS(false); if (Data->CheckKind == CFITCK_ICall || Data->CheckKind == CFITCK_NVMFCall) handleCFIBadIcall(Data, Value, Opts); else __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts); } void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data, ValueHandle Value, uptr ValidVtable) { GET_REPORT_OPTIONS(true); if (Data->CheckKind == CFITCK_ICall || Data->CheckKind == CFITCK_NVMFCall) handleCFIBadIcall(Data, Value, Opts); else __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts); Die(); } #endif // CAN_SANITIZE_UB Index: vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.h =================================================================== --- vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.h (revision 337140) +++ vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.h (revision 337141) @@ -1,206 +1,223 @@ //===-- ubsan_handlers.h ----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Entry points to the runtime library for Clang's undefined behavior sanitizer. // //===----------------------------------------------------------------------===// #ifndef UBSAN_HANDLERS_H #define UBSAN_HANDLERS_H #include "ubsan_value.h" namespace __ubsan { struct TypeMismatchData { SourceLocation Loc; const TypeDescriptor &Type; unsigned char LogAlignment; unsigned char TypeCheckKind; }; #define UNRECOVERABLE(checkname, ...) \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \ void __ubsan_handle_ ## checkname( __VA_ARGS__ ); #define RECOVERABLE(checkname, ...) 
\ extern "C" SANITIZER_INTERFACE_ATTRIBUTE \ void __ubsan_handle_ ## checkname( __VA_ARGS__ ); \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \ void __ubsan_handle_ ## checkname ## _abort( __VA_ARGS__ ); /// \brief Handle a runtime type check failure, caused by either a misaligned /// pointer, a null pointer, or a pointer to insufficient storage for the /// type. RECOVERABLE(type_mismatch_v1, TypeMismatchData *Data, ValueHandle Pointer) struct OverflowData { SourceLocation Loc; const TypeDescriptor &Type; }; /// \brief Handle an integer addition overflow. RECOVERABLE(add_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS) /// \brief Handle an integer subtraction overflow. RECOVERABLE(sub_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS) /// \brief Handle an integer multiplication overflow. RECOVERABLE(mul_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS) /// \brief Handle a signed integer overflow for a unary negate operator. RECOVERABLE(negate_overflow, OverflowData *Data, ValueHandle OldVal) /// \brief Handle an INT_MIN/-1 overflow or division by zero. RECOVERABLE(divrem_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS) struct ShiftOutOfBoundsData { SourceLocation Loc; const TypeDescriptor &LHSType; const TypeDescriptor &RHSType; }; /// \brief Handle a shift where the RHS is out of bounds or a left shift where /// the LHS is negative or overflows. RECOVERABLE(shift_out_of_bounds, ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) struct OutOfBoundsData { SourceLocation Loc; const TypeDescriptor &ArrayType; const TypeDescriptor &IndexType; }; /// \brief Handle an array index out of bounds error. RECOVERABLE(out_of_bounds, OutOfBoundsData *Data, ValueHandle Index) struct UnreachableData { SourceLocation Loc; }; /// \brief Handle a __builtin_unreachable which is reached. UNRECOVERABLE(builtin_unreachable, UnreachableData *Data) /// \brief Handle reaching the end of a value-returning function. UNRECOVERABLE(missing_return, UnreachableData *Data) struct VLABoundData { SourceLocation Loc; const TypeDescriptor &Type; }; /// \brief Handle a VLA with a non-positive bound. RECOVERABLE(vla_bound_not_positive, VLABoundData *Data, ValueHandle Bound) // Keeping this around for binary compatibility with (sanitized) programs // compiled with older compilers. struct FloatCastOverflowData { const TypeDescriptor &FromType; const TypeDescriptor &ToType; }; struct FloatCastOverflowDataV2 { SourceLocation Loc; const TypeDescriptor &FromType; const TypeDescriptor &ToType; }; /// Handle overflow in a conversion to or from a floating-point type. /// void *Data is one of FloatCastOverflowData* or FloatCastOverflowDataV2* RECOVERABLE(float_cast_overflow, void *Data, ValueHandle From) struct InvalidValueData { SourceLocation Loc; const TypeDescriptor &Type; }; /// \brief Handle a load of an invalid value for the type. RECOVERABLE(load_invalid_value, InvalidValueData *Data, ValueHandle Val) +/// Known implicit conversion check kinds. +/// Keep in sync with the enum of the same name in CGExprScalar.cpp +enum ImplicitConversionCheckKind : unsigned char { + ICCK_IntegerTruncation = 0, +}; + +struct ImplicitConversionData { + SourceLocation Loc; + const TypeDescriptor &FromType; + const TypeDescriptor &ToType; + /* ImplicitConversionCheckKind */ unsigned char Kind; +}; + +/// \brief Implicit conversion that changed the value.
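+/// For example (illustrative), with -fsanitize=implicit-integer-truncation:
+///   uint8_t u = x; // int x == 300
+/// reaches the handler with Src == 300 (32-bit, signed) and Dst == 44
+/// (8-bit, unsigned), since 300 & 0xff == 44.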
+RECOVERABLE(implicit_conversion, ImplicitConversionData *Data, ValueHandle Src, + ValueHandle Dst) + /// Known builtin check kinds. /// Keep in sync with the enum of the same name in CodeGenFunction.h enum BuiltinCheckKind : unsigned char { BCK_CTZPassedZero, BCK_CLZPassedZero, }; struct InvalidBuiltinData { SourceLocation Loc; unsigned char Kind; }; /// Handle a builtin called in an invalid way. RECOVERABLE(invalid_builtin, InvalidBuiltinData *Data) struct FunctionTypeMismatchData { SourceLocation Loc; const TypeDescriptor &Type; }; RECOVERABLE(function_type_mismatch, FunctionTypeMismatchData *Data, ValueHandle Val) struct NonNullReturnData { SourceLocation AttrLoc; }; /// \brief Handle returning null from function with the returns_nonnull /// attribute, or a return type annotated with _Nonnull. RECOVERABLE(nonnull_return_v1, NonNullReturnData *Data, SourceLocation *Loc) RECOVERABLE(nullability_return_v1, NonNullReturnData *Data, SourceLocation *Loc) struct NonNullArgData { SourceLocation Loc; SourceLocation AttrLoc; int ArgIndex; }; /// \brief Handle passing null pointer to a function parameter with the nonnull /// attribute, or a _Nonnull type annotation. RECOVERABLE(nonnull_arg, NonNullArgData *Data) RECOVERABLE(nullability_arg, NonNullArgData *Data) struct PointerOverflowData { SourceLocation Loc; }; RECOVERABLE(pointer_overflow, PointerOverflowData *Data, ValueHandle Base, ValueHandle Result) /// \brief Known CFI check kinds. /// Keep in sync with the enum of the same name in CodeGenFunction.h enum CFITypeCheckKind : unsigned char { CFITCK_VCall, CFITCK_NVCall, CFITCK_DerivedCast, CFITCK_UnrelatedCast, CFITCK_ICall, CFITCK_NVMFCall, CFITCK_VMFCall, }; struct CFICheckFailData { CFITypeCheckKind CheckKind; SourceLocation Loc; const TypeDescriptor &Type; }; /// \brief Handle control flow integrity failures. RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function, uptr VtableIsValid) struct ReportOptions; extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_handle_cfi_bad_type( CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts); } #endif // UBSAN_HANDLERS_H Index: vendor/compiler-rt/dist/lib/ubsan/ubsan_interface.inc =================================================================== --- vendor/compiler-rt/dist/lib/ubsan/ubsan_interface.inc (revision 337140) +++ vendor/compiler-rt/dist/lib/ubsan/ubsan_interface.inc (revision 337141) @@ -1,56 +1,58 @@ //===-- ubsan_interface.inc -----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Ubsan interface list. 
//===----------------------------------------------------------------------===// INTERFACE_FUNCTION(__ubsan_handle_add_overflow) INTERFACE_FUNCTION(__ubsan_handle_add_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_builtin_unreachable) INTERFACE_FUNCTION(__ubsan_handle_cfi_bad_type) INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail) INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail_abort) INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow) INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss) INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss_abort) INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow) INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch) INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_abort) +INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion) +INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion_abort) INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin) INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin_abort) INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value) INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value_abort) INTERFACE_FUNCTION(__ubsan_handle_missing_return) INTERFACE_FUNCTION(__ubsan_handle_mul_overflow) INTERFACE_FUNCTION(__ubsan_handle_mul_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_negate_overflow) INTERFACE_FUNCTION(__ubsan_handle_negate_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg) INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg_abort) INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1) INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_nullability_arg) INTERFACE_FUNCTION(__ubsan_handle_nullability_arg_abort) INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1) INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds) INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort) INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow) INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds) INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds_abort) INTERFACE_FUNCTION(__ubsan_handle_sub_overflow) INTERFACE_FUNCTION(__ubsan_handle_sub_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1) INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive) INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive_abort) INTERFACE_WEAK_FUNCTION(__ubsan_default_options) INTERFACE_FUNCTION(__ubsan_on_report) INTERFACE_FUNCTION(__ubsan_get_current_report_data) Index: vendor/compiler-rt/dist/lib/ubsan_minimal/ubsan_minimal_handlers.cc =================================================================== --- vendor/compiler-rt/dist/lib/ubsan_minimal/ubsan_minimal_handlers.cc (revision 337140) +++ vendor/compiler-rt/dist/lib/ubsan_minimal/ubsan_minimal_handlers.cc (revision 337141) @@ -1,117 +1,118 @@ #include "sanitizer_common/sanitizer_atomic.h" #include <stdint.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #ifdef KERNEL_USE extern "C" void ubsan_message(const char *msg); static void message(const char *msg) { ubsan_message(msg); } #else static void message(const char *msg) { write(2, msg, strlen(msg)); } #endif static const int kMaxCallerPcs = 20; static __sanitizer::atomic_uintptr_t caller_pcs[kMaxCallerPcs]; // Number of elements in caller_pcs.
A special value of kMaxCallerPcs + 1 means // that "too many errors" has already been reported. static __sanitizer::atomic_uint32_t caller_pcs_sz; __attribute__((noinline)) static bool report_this_error(void *caller_p) { uintptr_t caller = reinterpret_cast<uintptr_t>(caller_p); if (caller == 0) return false; while (true) { unsigned sz = __sanitizer::atomic_load_relaxed(&caller_pcs_sz); if (sz > kMaxCallerPcs) return false; // early exit // when sz==kMaxCallerPcs print "too many errors", but only when cmpxchg // succeeds in order to not print it multiple times. if (sz > 0 && sz < kMaxCallerPcs) { uintptr_t p; for (unsigned i = 0; i < sz; ++i) { p = __sanitizer::atomic_load_relaxed(&caller_pcs[i]); if (p == 0) break; // Concurrent update. if (p == caller) return false; } if (p == 0) continue; // FIXME: yield? } if (!__sanitizer::atomic_compare_exchange_strong( &caller_pcs_sz, &sz, sz + 1, __sanitizer::memory_order_seq_cst)) continue; // Concurrent update! Try again from the start. if (sz == kMaxCallerPcs) { message("ubsan: too many errors\n"); return false; } __sanitizer::atomic_store_relaxed(&caller_pcs[sz], caller); return true; } } #if defined(__ANDROID__) extern "C" __attribute__((weak)) void android_set_abort_message(const char *); static void abort_with_message(const char *msg) { if (&android_set_abort_message) android_set_abort_message(msg); abort(); } #else static void abort_with_message(const char *) { abort(); } #endif #if SANITIZER_DEBUG namespace __sanitizer { // The DCHECK macro needs this symbol to be defined. void NORETURN CheckFailed(const char *file, int, const char *cond, u64, u64) { message("Sanitizer CHECK failed: "); message(file); message(":?? : "); // FIXME: Show line number. message(cond); abort(); } } // namespace __sanitizer #endif #define INTERFACE extern "C" __attribute__((visibility("default"))) // FIXME: add caller pc to the error message (possibly as "ubsan: error-type // @1234ABCD").
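// For reference, each HANDLER(name, msg) invocation below expands to a pair
// of entry points mirroring the recoverable/unrecoverable split of the full
// runtime; e.g. HANDLER(add_overflow, "add-overflow") defines
//   __ubsan_handle_add_overflow_minimal()        // reports and returns
//   __ubsan_handle_add_overflow_minimal_abort()  // reports, then aborts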
#define HANDLER_RECOVER(name, msg) \ INTERFACE void __ubsan_handle_##name##_minimal() { \ if (!report_this_error(__builtin_return_address(0))) return; \ message("ubsan: " msg "\n"); \ } #define HANDLER_NORECOVER(name, msg) \ INTERFACE void __ubsan_handle_##name##_minimal_abort() { \ message("ubsan: " msg "\n"); \ abort_with_message("ubsan: " msg); \ } #define HANDLER(name, msg) \ HANDLER_RECOVER(name, msg) \ HANDLER_NORECOVER(name, msg) HANDLER(type_mismatch, "type-mismatch") HANDLER(add_overflow, "add-overflow") HANDLER(sub_overflow, "sub-overflow") HANDLER(mul_overflow, "mul-overflow") HANDLER(negate_overflow, "negate-overflow") HANDLER(divrem_overflow, "divrem-overflow") HANDLER(shift_out_of_bounds, "shift-out-of-bounds") HANDLER(out_of_bounds, "out-of-bounds") HANDLER_RECOVER(builtin_unreachable, "builtin-unreachable") HANDLER_RECOVER(missing_return, "missing-return") HANDLER(vla_bound_not_positive, "vla-bound-not-positive") HANDLER(float_cast_overflow, "float-cast-overflow") HANDLER(load_invalid_value, "load-invalid-value") HANDLER(invalid_builtin, "invalid-builtin") HANDLER(function_type_mismatch, "function-type-mismatch") +HANDLER(implicit_conversion, "implicit-conversion") HANDLER(nonnull_arg, "nonnull-arg") HANDLER(nonnull_return, "nonnull-return") HANDLER(nullability_arg, "nullability-arg") HANDLER(nullability_return, "nullability-return") HANDLER(pointer_overflow, "pointer-overflow") HANDLER(cfi_check_fail, "cfi-check-fail") Index: vendor/compiler-rt/dist/lib/xray/tests/unit/profile_collector_test.cc =================================================================== --- vendor/compiler-rt/dist/lib/xray/tests/unit/profile_collector_test.cc (revision 337140) +++ vendor/compiler-rt/dist/lib/xray/tests/unit/profile_collector_test.cc (revision 337141) @@ -1,179 +1,211 @@ //===-- profile_collector_test.cc -----------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of XRay, a function call tracing system. // //===----------------------------------------------------------------------===// #include "gtest/gtest.h" #include "xray_profile_collector.h" #include "xray_profiling_flags.h" #include <cstdint> +#include <cstring> +#include <type_traits> #include <thread> #include <utility> #include <vector> namespace __xray { namespace { static constexpr auto kHeaderSize = 16u; +constexpr uptr ExpectedProfilingVersion = 0x20180424; + +struct ExpectedProfilingFileHeader { + const u64 MagicBytes = 0x7872617970726f66; // Identifier for XRay profiling + // files 'xrayprof' in hex.
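+ // (Byte by byte, most significant first: 0x78 0x72 0x61 0x79 0x70 0x72
+ // 0x6f 0x66 == 'x' 'r' 'a' 'y' 'p' 'r' 'o' 'f'.)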
+ const u64 Version = ExpectedProfilingVersion; + u64 Timestamp = 0; + u64 PID = 0; +}; + +void ValidateFileHeaderBlock(XRayBuffer B) { + ASSERT_NE(static_cast<const void *>(B.Data), nullptr); + ASSERT_EQ(B.Size, sizeof(ExpectedProfilingFileHeader)); + typename std::aligned_storage<sizeof(ExpectedProfilingFileHeader), alignof(ExpectedProfilingFileHeader)>::type + FileHeaderStorage; + ExpectedProfilingFileHeader ExpectedHeader; + std::memcpy(&FileHeaderStorage, B.Data, B.Size); + auto &FileHeader = + *reinterpret_cast<ExpectedProfilingFileHeader *>(&FileHeaderStorage); + ASSERT_EQ(ExpectedHeader.MagicBytes, FileHeader.MagicBytes); + ASSERT_EQ(ExpectedHeader.Version, FileHeader.Version); +} + void ValidateBlock(XRayBuffer B) { profilingFlags()->setDefaults(); ASSERT_NE(static_cast<const void *>(B.Data), nullptr); ASSERT_NE(B.Size, 0u); ASSERT_GE(B.Size, kHeaderSize); // We look at the block size, the block number, and the thread ID to ensure // that none of them are zero (or that the header data is laid out as we // expect). char LocalBuffer[kHeaderSize] = {}; internal_memcpy(LocalBuffer, B.Data, kHeaderSize); u32 BlockSize = 0; u32 BlockNumber = 0; u64 ThreadId = 0; internal_memcpy(&BlockSize, LocalBuffer, sizeof(u32)); internal_memcpy(&BlockNumber, LocalBuffer + sizeof(u32), sizeof(u32)); internal_memcpy(&ThreadId, LocalBuffer + (2 * sizeof(u32)), sizeof(u64)); ASSERT_NE(BlockSize, 0u); ASSERT_GE(BlockNumber, 0u); ASSERT_NE(ThreadId, 0u); } std::tuple<u32, u32, u64> ParseBlockHeader(XRayBuffer B) { char LocalBuffer[kHeaderSize] = {}; internal_memcpy(LocalBuffer, B.Data, kHeaderSize); u32 BlockSize = 0; u32 BlockNumber = 0; u64 ThreadId = 0; internal_memcpy(&BlockSize, LocalBuffer, sizeof(u32)); internal_memcpy(&BlockNumber, LocalBuffer + sizeof(u32), sizeof(u32)); internal_memcpy(&ThreadId, LocalBuffer + (2 * sizeof(u32)), sizeof(u64)); return std::make_tuple(BlockSize, BlockNumber, ThreadId); } struct Profile { int64_t CallCount; int64_t CumulativeLocalTime; std::vector<int32_t> Path; }; std::tuple<Profile, const char *> ParseProfile(const char *P) { Profile Result; // Read the path first, until we find a sentinel 0. int32_t F; do { internal_memcpy(&F, P, sizeof(int32_t)); P += sizeof(int32_t); Result.Path.push_back(F); } while (F != 0); // Then read the CallCount. internal_memcpy(&Result.CallCount, P, sizeof(int64_t)); P += sizeof(int64_t); // Then read the CumulativeLocalTime. internal_memcpy(&Result.CumulativeLocalTime, P, sizeof(int64_t)); P += sizeof(int64_t); return std::make_tuple(std::move(Result), P); } TEST(profileCollectorServiceTest, PostSerializeCollect) { profilingFlags()->setDefaults(); // The most basic use-case (and the only one we actually care about) is the // one where we ensure that we can post FunctionCallTrie instances, which are // then destroyed but serialized properly. // // First, we initialise a set of allocators in the local scope. This ensures // that we're able to copy the contents of the FunctionCallTrie that uses // the local allocators. auto Allocators = FunctionCallTrie::InitAllocators(); FunctionCallTrie T(Allocators); // Then, we populate the trie with some data. T.enterFunction(1, 1); T.enterFunction(2, 2); T.exitFunction(2, 3); T.exitFunction(1, 4); // Then we post the data to the global profile collector service. profileCollectorService::post(T, 1); // Then we serialize the data. profileCollectorService::serialize(); - // Then we go through a single buffer to see whether we're getting the data we - // expect. + // Then we go through two buffers to see whether we're getting the data we + // expect. The first block must always be as large as a file header, which + // will have a fixed size.
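+ //
+ // A sketch of the consumption protocol (names illustrative): passing a
+ // zeroed XRayBuffer yields the file header block, passing the previous
+ // block yields the next one, and a {nullptr, 0} result marks the end:
+ //
+ //   auto Buf = profileCollectorService::nextBuffer({nullptr, 0});
+ //   while (Buf.Data != nullptr) {
+ //     consume(Buf); // hypothetical sink
+ //     Buf = profileCollectorService::nextBuffer(Buf);
+ //   }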
auto B = profileCollectorService::nextBuffer({nullptr, 0}); + ValidateFileHeaderBlock(B); + + B = profileCollectorService::nextBuffer(B); ValidateBlock(B); u32 BlockSize; u32 BlockNum; u64 ThreadId; std::tie(BlockSize, BlockNum, ThreadId) = ParseBlockHeader(B); // We look at the serialized buffer to see whether the Trie we're expecting // to see is there. auto DStart = static_cast<const char *>(B.Data) + kHeaderSize; std::vector<char> D(DStart, DStart + BlockSize); B = profileCollectorService::nextBuffer(B); ASSERT_EQ(B.Data, nullptr); ASSERT_EQ(B.Size, 0u); Profile Profile1, Profile2; auto P = static_cast<const char *>(D.data()); std::tie(Profile1, P) = ParseProfile(P); std::tie(Profile2, P) = ParseProfile(P); ASSERT_NE(Profile1.Path.size(), Profile2.Path.size()); auto &P1 = Profile1.Path.size() < Profile2.Path.size() ? Profile2 : Profile1; auto &P2 = Profile1.Path.size() < Profile2.Path.size() ? Profile1 : Profile2; std::vector<int32_t> P1Expected = {2, 1, 0}; std::vector<int32_t> P2Expected = {1, 0}; ASSERT_EQ(P1.Path.size(), P1Expected.size()); ASSERT_EQ(P2.Path.size(), P2Expected.size()); ASSERT_EQ(P1.Path, P1Expected); ASSERT_EQ(P2.Path, P2Expected); } // We break out a function that will be run in multiple threads, one that will // use a thread local allocator, and will post the FunctionCallTrie to the // profileCollectorService. This simulates what the threads being profiled would // be doing anyway, but through the XRay logging implementation. void threadProcessing() { thread_local auto Allocators = FunctionCallTrie::InitAllocators(); FunctionCallTrie T(Allocators); T.enterFunction(1, 1); T.enterFunction(2, 2); T.exitFunction(2, 3); T.exitFunction(1, 4); profileCollectorService::post(T, GetTid()); } TEST(profileCollectorServiceTest, PostSerializeCollectMultipleThread) { profilingFlags()->setDefaults(); std::thread t1(threadProcessing); std::thread t2(threadProcessing); t1.join(); t2.join(); // At this point, t1 and t2 are already done with what they were doing. profileCollectorService::serialize(); // Ensure that we see two buffers. auto B = profileCollectorService::nextBuffer({nullptr, 0}); + ValidateFileHeaderBlock(B); + + B = profileCollectorService::nextBuffer(B); ValidateBlock(B); B = profileCollectorService::nextBuffer(B); ValidateBlock(B); } } // namespace } // namespace __xray Index: vendor/compiler-rt/dist/lib/xray/xray_buffer_queue.cc =================================================================== --- vendor/compiler-rt/dist/lib/xray/xray_buffer_queue.cc (revision 337140) +++ vendor/compiler-rt/dist/lib/xray/xray_buffer_queue.cc (revision 337141) @@ -1,138 +1,171 @@ //===-- xray_buffer_queue.cc -----------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of XRay, a dynamic runtime instrumentation system. // // Defines the interface for a buffer queue implementation.
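// A minimal usage sketch of the queue (illustrative only; error handling
// elided):
//
//   bool Success = false;
//   BufferQueue BQ(/*B=*/4096, /*N=*/8, Success);
//   BufferQueue::Buffer Buf;
//   if (Success && BQ.getBuffer(Buf) == BufferQueue::ErrorCode::Ok) {
//     // ... write up to Buf.Size bytes into Buf.Data ...
//     BQ.releaseBuffer(Buf);
//   }
//   BQ.finalize(); // subsequent getBuffer() calls return QueueFinalizing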
// //===----------------------------------------------------------------------===// #include "xray_buffer_queue.h" -#include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_posix.h" #include <memory> +#include <sys/mman.h> +#ifndef MAP_NORESERVE +// no-op on NetBSD (at least), unsupported flag on FreeBSD +#define MAP_NORESERVE 0 +#endif + using namespace __xray; using namespace __sanitizer; +template <class T> static T *allocRaw(size_t N) { + // TODO: Report errors? + // We use MAP_NORESERVE on platforms where it's supported to ensure that the + // pages we're allocating for XRay never end up in pages that can be swapped + // in/out. We're doing this because for FDR mode, we want to ensure that + // writes to the buffers stay resident in memory to prevent XRay itself from + // causing swapping/thrashing. + // + // In the case when XRay pages cannot be swapped in/out or there's not enough + // RAM to back these pages, we're willing to cause a segmentation fault + // instead of introducing latency in the measurement. We assume here that + // there are enough pages that are swappable in/out outside of the buffers + // being used by FDR mode (which are bounded and configurable anyway) to allow + // us to keep using always-resident memory. + // + // TODO: Make this configurable? + void *A = reinterpret_cast<void *>( + internal_mmap(NULL, N * sizeof(T), PROT_WRITE | PROT_READ, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0)); + return (A == MAP_FAILED) ? nullptr : reinterpret_cast<T *>(A); +} + +template <class T> static void deallocRaw(T *ptr, size_t N) { + // TODO: Report errors? + if (ptr != nullptr) + internal_munmap(ptr, N); +} + template <class T> static T *initArray(size_t N) { - auto A = reinterpret_cast<T *>( - InternalAlloc(N * sizeof(T), nullptr, kCacheLineSize)); + auto A = allocRaw<T>(N); if (A != nullptr) while (N > 0) new (A + (--N)) T(); return A; } BufferQueue::BufferQueue(size_t B, size_t N, bool &Success) : BufferSize(B), Buffers(initArray<BufferRep>(N)), BufferCount(N), Finalizing{0}, OwnedBuffers(initArray<void *>(N)), Next(Buffers), First(Buffers), LiveBuffers(0) { if (Buffers == nullptr) { Success = false; return; } if (OwnedBuffers == nullptr) { // Clean up the buffers we've already allocated. for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B) B->~BufferRep(); - InternalFree(Buffers); + deallocRaw(Buffers, N); Success = false; return; }; for (size_t i = 0; i < N; ++i) { auto &T = Buffers[i]; - void *Tmp = InternalAlloc(BufferSize, nullptr, 64); + void *Tmp = allocRaw<char>(BufferSize); if (Tmp == nullptr) { Success = false; return; } - void *Extents = InternalAlloc(sizeof(BufferExtents), nullptr, 64); + auto *Extents = allocRaw<BufferExtents>(1); if (Extents == nullptr) { Success = false; return; } auto &Buf = T.Buff; Buf.Data = Tmp; Buf.Size = B; - Buf.Extents = reinterpret_cast<BufferExtents *>(Extents); + Buf.Extents = Extents; OwnedBuffers[i] = Tmp; } Success = true; } BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) { if (atomic_load(&Finalizing, memory_order_acquire)) return ErrorCode::QueueFinalizing; SpinMutexLock Guard(&Mutex); if (LiveBuffers == BufferCount) return ErrorCode::NotEnoughMemory; auto &T = *Next; auto &B = T.Buff; Buf = B; T.Used = true; ++LiveBuffers; if (++Next == (Buffers + BufferCount)) Next = Buffers; return ErrorCode::Ok; } BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) { // Blitz through the buffers array to find the buffer.
bool Found = false; for (auto I = OwnedBuffers, E = OwnedBuffers + BufferCount; I != E; ++I) { if (*I == Buf.Data) { Found = true; break; } } if (!Found) return ErrorCode::UnrecognizedBuffer; SpinMutexLock Guard(&Mutex); // This points to a semantic bug, we really ought to not be releasing more // buffers than we actually get. if (LiveBuffers == 0) return ErrorCode::NotEnoughMemory; // Now that the buffer has been released, we mark it as "used". First->Buff = Buf; First->Used = true; Buf.Data = nullptr; Buf.Size = 0; --LiveBuffers; if (++First == (Buffers + BufferCount)) First = Buffers; return ErrorCode::Ok; } BufferQueue::ErrorCode BufferQueue::finalize() { if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel)) return ErrorCode::QueueFinalizing; return ErrorCode::Ok; } BufferQueue::~BufferQueue() { for (auto I = Buffers, E = Buffers + BufferCount; I != E; ++I) { auto &T = *I; auto &Buf = T.Buff; - InternalFree(Buf.Data); - InternalFree(Buf.Extents); + deallocRaw(Buf.Data, Buf.Size); + deallocRaw(Buf.Extents, 1); } for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B) B->~BufferRep(); - InternalFree(Buffers); - InternalFree(OwnedBuffers); + deallocRaw(Buffers, BufferCount); + deallocRaw(OwnedBuffers, BufferCount); } Index: vendor/compiler-rt/dist/lib/xray/xray_profile_collector.cc =================================================================== --- vendor/compiler-rt/dist/lib/xray/xray_profile_collector.cc (revision 337140) +++ vendor/compiler-rt/dist/lib/xray/xray_profile_collector.cc (revision 337141) @@ -1,318 +1,346 @@ //===-- xray_profile_collector.cc ------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of XRay, a dynamic runtime instrumentation system. // // This implements the interface for the profileCollectorService. // //===----------------------------------------------------------------------===// #include "xray_profile_collector.h" #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_vector.h" #include "xray_profiling_flags.h" #include <memory> #include <pthread.h> #include <utility> namespace __xray { namespace profileCollectorService { namespace { SpinMutex GlobalMutex; struct ThreadTrie { tid_t TId; FunctionCallTrie *Trie; }; struct ProfileBuffer { void *Data; size_t Size; }; +// Current version of the profile format. +constexpr u64 XRayProfilingVersion = 0x20180424; + +// Identifier for XRay profiling files 'xrayprof' in hex. +constexpr u64 XRayMagicBytes = 0x7872617970726f66; + +struct XRayProfilingFileHeader { + const u64 MagicBytes = XRayMagicBytes; + const u64 Version = XRayProfilingVersion; + u64 Timestamp = 0; // System time in nanoseconds. + u64 PID = 0; // Process ID. +}; + struct BlockHeader { u32 BlockSize; u32 BlockNum; u64 ThreadId; }; // These need to be pointers that point to heap/internal-allocator-allocated // objects because these are accessed even at program exit.
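// (Keeping them heap-allocated rather than as globals with static storage
// duration sidesteps destruction-order problems: a thread can still post()
// or serialize() while static destructors run, so these objects are only
// ever torn down explicitly, in reset().)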
Vector<ThreadTrie> *ThreadTries = nullptr; Vector<ProfileBuffer> *ProfileBuffers = nullptr; FunctionCallTrie::Allocators *GlobalAllocators = nullptr; } // namespace void post(const FunctionCallTrie &T, tid_t TId) { static pthread_once_t Once = PTHREAD_ONCE_INIT; pthread_once(&Once, +[] { SpinMutexLock Lock(&GlobalMutex); GlobalAllocators = reinterpret_cast<FunctionCallTrie::Allocators *>( InternalAlloc(sizeof(FunctionCallTrie::Allocators))); new (GlobalAllocators) FunctionCallTrie::Allocators(); *GlobalAllocators = FunctionCallTrie::InitAllocatorsCustom( profilingFlags()->global_allocator_max); ThreadTries = reinterpret_cast<Vector<ThreadTrie> *>( InternalAlloc(sizeof(Vector<ThreadTrie>))); new (ThreadTries) Vector<ThreadTrie>(); ProfileBuffers = reinterpret_cast<Vector<ProfileBuffer> *>( InternalAlloc(sizeof(Vector<ProfileBuffer>))); new (ProfileBuffers) Vector<ProfileBuffer>(); }); DCHECK_NE(GlobalAllocators, nullptr); DCHECK_NE(ThreadTries, nullptr); DCHECK_NE(ProfileBuffers, nullptr); ThreadTrie *Item = nullptr; { SpinMutexLock Lock(&GlobalMutex); if (GlobalAllocators == nullptr) return; Item = ThreadTries->PushBack(); Item->TId = TId; // Here we're using the internal allocator instead of the managed allocator // because: // // 1) We're not using the segmented array data structure to host // FunctionCallTrie objects. We're using a Vector (from sanitizer_common) // which works like a std::vector<...> keeping elements contiguous in // memory. The segmented array data structure assumes that elements are // trivially destructible, where FunctionCallTrie isn't. // // 2) Using a managed allocator means we need to manage that separately, // which complicates the nature of this code. To get around that, we're // using the internal allocator instead, which has its own global state // and is decoupled from the lifetime management required by the managed // allocator we have in XRay. // Item->Trie = reinterpret_cast<FunctionCallTrie *>(InternalAlloc( sizeof(FunctionCallTrie), nullptr, alignof(FunctionCallTrie))); DCHECK_NE(Item->Trie, nullptr); new (Item->Trie) FunctionCallTrie(*GlobalAllocators); } T.deepCopyInto(*Item->Trie); } // A PathArray represents the function id's representing a stack trace. In this // context a path is almost always represented from the leaf function in a call // stack to a root of the call trie. using PathArray = Array<int32_t>; struct ProfileRecord { using PathAllocator = typename PathArray::AllocatorType; // The Path in this record is the function id's from the leaf to the root of // the function call stack as represented from a FunctionCallTrie. PathArray *Path = nullptr; const FunctionCallTrie::Node *Node = nullptr; // Constructor for in-place construction. ProfileRecord(PathAllocator &A, const FunctionCallTrie::Node *N) : Path([&] { auto P = reinterpret_cast<PathArray *>(InternalAlloc(sizeof(PathArray))); new (P) PathArray(A); return P; }()), Node(N) {} }; namespace { using ProfileRecordArray = Array<ProfileRecord>; // Walk a depth-first traversal of each root of the FunctionCallTrie to generate // the path(s) and the data associated with the path. static void populateRecords(ProfileRecordArray &PRs, ProfileRecord::PathAllocator &PA, const FunctionCallTrie &Trie) { using StackArray = Array<const FunctionCallTrie::Node *>; using StackAllocator = typename StackArray::AllocatorType; StackAllocator StackAlloc(profilingFlags()->stack_allocator_max); StackArray DFSStack(StackAlloc); for (const auto R : Trie.getRoots()) { DFSStack.Append(R); while (!DFSStack.empty()) { auto Node = DFSStack.back(); DFSStack.trim(1); auto Record = PRs.AppendEmplace(PA, Node); if (Record == nullptr) return; DCHECK_NE(Record, nullptr); // Traverse the Node's parents and as we're doing so, get the FIds in // the order they appear.
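// For example, if function 1 called function 2, the node for function 2
// yields Path == [2, 1] (leaf first, root last); the 0 sentinel that
// terminates a path on disk is only appended later, in serializeRecords().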
// A PathArray represents the function id's representing a stack trace. In
// this context a path is almost always represented from the leaf function in
// a call stack to a root of the call trie.
using PathArray = Array<int32_t>;

struct ProfileRecord {
  using PathAllocator = typename PathArray::AllocatorType;

  // The Path in this record is the function id's from the leaf to the root of
  // the function call stack as represented from a FunctionCallTrie.
  PathArray *Path = nullptr;
  const FunctionCallTrie::Node *Node = nullptr;

  // Constructor for in-place construction.
  ProfileRecord(PathAllocator &A, const FunctionCallTrie::Node *N)
      : Path([&] {
          auto P =
              reinterpret_cast<PathArray *>(InternalAlloc(sizeof(PathArray)));
          new (P) PathArray(A);
          return P;
        }()),
        Node(N) {}
};

namespace {

using ProfileRecordArray = Array<ProfileRecord>;

// Walk a depth-first traversal of each root of the FunctionCallTrie to
// generate the path(s) and the data associated with the path.
static void populateRecords(ProfileRecordArray &PRs,
                            ProfileRecord::PathAllocator &PA,
                            const FunctionCallTrie &Trie) {
  using StackArray = Array<const FunctionCallTrie::Node *>;
  using StackAllocator = typename StackArray::AllocatorType;
  StackAllocator StackAlloc(profilingFlags()->stack_allocator_max);
  StackArray DFSStack(StackAlloc);
  for (const auto R : Trie.getRoots()) {
    DFSStack.Append(R);
    while (!DFSStack.empty()) {
      auto Node = DFSStack.back();
      DFSStack.trim(1);
      auto Record = PRs.AppendEmplace(PA, Node);
      if (Record == nullptr)
        return;
      DCHECK_NE(Record, nullptr);

      // Traverse the Node's parents and as we're doing so, get the FIds in
      // the order they appear.
      for (auto N = Node; N != nullptr; N = N->Parent)
        Record->Path->Append(N->FId);
      DCHECK(!Record->Path->empty());

      for (const auto C : Node->Callees)
        DFSStack.Append(C.NodePtr);
    }
  }
}

static void serializeRecords(ProfileBuffer *Buffer, const BlockHeader &Header,
                             const ProfileRecordArray &ProfileRecords) {
  auto NextPtr = static_cast<char *>(
                     internal_memcpy(Buffer->Data, &Header, sizeof(Header))) +
                 sizeof(Header);
  for (const auto &Record : ProfileRecords) {
    // List of IDs follow:
    for (const auto FId : *Record.Path)
      NextPtr =
          static_cast<char *>(internal_memcpy(NextPtr, &FId, sizeof(FId))) +
          sizeof(FId);

    // Add the sentinel here.
    constexpr int32_t SentinelFId = 0;
    NextPtr = static_cast<char *>(
                  internal_memset(NextPtr, SentinelFId, sizeof(SentinelFId))) +
              sizeof(SentinelFId);

    // Add the node data here.
    NextPtr = static_cast<char *>(
                  internal_memcpy(NextPtr, &Record.Node->CallCount,
                                  sizeof(Record.Node->CallCount))) +
              sizeof(Record.Node->CallCount);
    NextPtr = static_cast<char *>(
                  internal_memcpy(NextPtr, &Record.Node->CumulativeLocalTime,
                                  sizeof(Record.Node->CumulativeLocalTime))) +
              sizeof(Record.Node->CumulativeLocalTime);
  }

  DCHECK_EQ(NextPtr - static_cast<char *>(Buffer->Data), Buffer->Size);
}

} // namespace

void serialize() {
  SpinMutexLock Lock(&GlobalMutex);

  // Clear out the global ProfileBuffers.
  for (uptr I = 0; I < ProfileBuffers->Size(); ++I)
    InternalFree((*ProfileBuffers)[I].Data);
  ProfileBuffers->Reset();

  if (ThreadTries->Size() == 0)
    return;

  // Then repopulate the global ProfileBuffers.
  for (u32 I = 0; I < ThreadTries->Size(); ++I) {
    using ProfileRecordAllocator = typename ProfileRecordArray::AllocatorType;
    ProfileRecordAllocator PRAlloc(profilingFlags()->global_allocator_max);
    ProfileRecord::PathAllocator PathAlloc(
        profilingFlags()->global_allocator_max);
    ProfileRecordArray ProfileRecords(PRAlloc);

    // First, we want to compute the amount of space we're going to need. We'll
    // use a local allocator and an __xray::Array<...> to store the
    // intermediary data, then compute the size as we're going along. Then
    // we'll allocate the contiguous space to contain the thread buffer data.
    const auto &Trie = *(*ThreadTries)[I].Trie;
    if (Trie.getRoots().empty())
      continue;
    populateRecords(ProfileRecords, PathAlloc, Trie);
    DCHECK(!Trie.getRoots().empty());
    DCHECK(!ProfileRecords.empty());

    // Go through each record, to compute the sizes.
    //
    // header size = block size (4 bytes)
    //             + block number (4 bytes)
    //             + thread id (8 bytes)
    // record size = path ids (4 bytes * number of ids + sentinel 4 bytes)
    //             + call count (8 bytes)
    //             + local time (8 bytes)
    u32 CumulativeSizes = 0;
    for (const auto &Record : ProfileRecords)
      CumulativeSizes += 20 + (4 * Record.Path->size());

    BlockHeader Header{16 + CumulativeSizes, I, (*ThreadTries)[I].TId};
    auto Buffer = ProfileBuffers->PushBack();
    Buffer->Size = sizeof(Header) + CumulativeSizes;
    Buffer->Data = InternalAlloc(Buffer->Size, nullptr, 64);
    DCHECK_NE(Buffer->Data, nullptr);
    serializeRecords(Buffer, Header, ProfileRecords);

    // Now clean up the ProfileRecords array, one at a time.
    for (auto &Record : ProfileRecords) {
      Record.Path->~PathArray();
      InternalFree(Record.Path);
    }
  }
}
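The size computation above can be cross-checked against serializeRecords(): each record contributes its path ids, a 4-byte zero sentinel, and two 8-byte counters. A hypothetical helper (the name is illustrative, not from this patch) making the arithmetic explicit:

    // Sketch: the per-block size serialize() computes, spelled out.
    // BlockHeader is 16 bytes: BlockSize (4) + BlockNum (4) + ThreadId (8).
    // Each record is 4 * |path| (ids) + 4 (sentinel) + 8 (call count)
    //              + 8 (cumulative local time) = 20 + 4 * |path| bytes.
    static u32 expectedBlockSize(const ProfileRecordArray &Records) {
      u32 Size = 16;
      for (const auto &R : Records)
        Size += 20 + (4 * R.Path->size());
      return Size; // two records with path lengths 3 and 5: 16 + 32 + 40 = 88
    }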
void reset() {
  SpinMutexLock Lock(&GlobalMutex);

  if (ProfileBuffers != nullptr) {
    // Clear out the profile buffers that have been serialized.
    for (uptr I = 0; I < ProfileBuffers->Size(); ++I)
      InternalFree((*ProfileBuffers)[I].Data);
    ProfileBuffers->Reset();
    InternalFree(ProfileBuffers);
    ProfileBuffers = nullptr;
  }

  if (ThreadTries != nullptr) {
    // Clear out the function call tries per thread.
    for (uptr I = 0; I < ThreadTries->Size(); ++I) {
      auto &T = (*ThreadTries)[I];
      T.Trie->~FunctionCallTrie();
      InternalFree(T.Trie);
    }
    ThreadTries->Reset();
    InternalFree(ThreadTries);
    ThreadTries = nullptr;
  }

  // Reset the global allocators.
  if (GlobalAllocators != nullptr) {
    GlobalAllocators->~Allocators();
    InternalFree(GlobalAllocators);
    GlobalAllocators = nullptr;
  }
  GlobalAllocators = reinterpret_cast<FunctionCallTrie::Allocators *>(
      InternalAlloc(sizeof(FunctionCallTrie::Allocators)));
  new (GlobalAllocators) FunctionCallTrie::Allocators();
  *GlobalAllocators = FunctionCallTrie::InitAllocators();
  ThreadTries = reinterpret_cast<Vector<ThreadTrie> *>(
      InternalAlloc(sizeof(Vector<ThreadTrie>)));
  new (ThreadTries) Vector<ThreadTrie>();
  ProfileBuffers = reinterpret_cast<Vector<ProfileBuffer> *>(
      InternalAlloc(sizeof(Vector<ProfileBuffer>)));
  new (ProfileBuffers) Vector<ProfileBuffer>();
}

XRayBuffer nextBuffer(XRayBuffer B) {
  SpinMutexLock Lock(&GlobalMutex);

  if (ProfileBuffers == nullptr || ProfileBuffers->Size() == 0)
    return {nullptr, 0};

-  if (B.Data == nullptr)
+  static pthread_once_t Once = PTHREAD_ONCE_INIT;
+  static typename std::aligned_storage<sizeof(XRayProfilingFileHeader),
+                                       alignof(XRayProfilingFileHeader)>::type
+      FileHeaderStorage;
+  pthread_once(&Once,
+               +[] { new (&FileHeaderStorage) XRayProfilingFileHeader{}; });
+
+  if (UNLIKELY(B.Data == nullptr)) {
+    // The first buffer should always contain the file header information.
+    auto &FileHeader =
+        *reinterpret_cast<XRayProfilingFileHeader *>(&FileHeaderStorage);
+    FileHeader.Timestamp = NanoTime();
+    FileHeader.PID = internal_getpid();
+    return {&FileHeaderStorage, sizeof(XRayProfilingFileHeader)};
+  }
+
+  if (UNLIKELY(B.Data == &FileHeaderStorage))
    return {(*ProfileBuffers)[0].Data, (*ProfileBuffers)[0].Size};

  BlockHeader Header;
  internal_memcpy(&Header, B.Data, sizeof(BlockHeader));
  auto NextBlock = Header.BlockNum + 1;
  if (NextBlock < ProfileBuffers->Size())
    return {(*ProfileBuffers)[NextBlock].Data,
            (*ProfileBuffers)[NextBlock].Size};
  return {nullptr, 0};
}

} // namespace profileCollectorService
} // namespace __xray
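With this change, nextBuffer() behaves as a three-stage iterator: the seed {nullptr, 0} yields the file header, the header buffer yields block 0, and each block yields its successor. A minimal consumer sketch of that contract (consumeBuffer is a hypothetical sink, not an API from this patch):

    // Sketch: draining the collector through the iterator protocol above.
    XRayBuffer B = profileCollectorService::nextBuffer({nullptr, 0});
    while (B.Data != nullptr && B.Size != 0) {
      consumeBuffer(B.Data, B.Size); // hypothetical: write/inspect the bytes
      B = profileCollectorService::nextBuffer(B);
    }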
Index: vendor/compiler-rt/dist/lib/xray/xray_profiling.cc
===================================================================
--- vendor/compiler-rt/dist/lib/xray/xray_profiling.cc (revision 337140)
+++ vendor/compiler-rt/dist/lib/xray/xray_profiling.cc (revision 337141)
@@ -1,372 +1,355 @@
//===-- xray_profiling.cc ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This is the implementation of a profiling handler.
//
//===----------------------------------------------------------------------===//
#include
#include

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "xray/xray_interface.h"
#include "xray/xray_log_interface.h"
#include "xray_flags.h"
#include "xray_profile_collector.h"
#include "xray_profiling_flags.h"
#include "xray_recursion_guard.h"
#include "xray_tsc.h"
#include "xray_utils.h"
#include

namespace __xray {

namespace {

-constexpr uptr XRayProfilingVersion = 0x20180424;
-
-struct XRayProfilingFileHeader {
-  const u64 MagicBytes = 0x7872617970726f66; // Identifier for XRay profiling
-                                             // files 'xrayprof' in hex.
-  const uptr Version = XRayProfilingVersion;
-  uptr Timestamp = 0; // System time in nanoseconds.
-  uptr PID = 0;       // Process ID.
-};
-
atomic_sint32_t ProfilerLogFlushStatus = {
    XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};

atomic_sint32_t ProfilerLogStatus = {XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};

SpinMutex ProfilerOptionsMutex;

struct alignas(64) ProfilingData {
  FunctionCallTrie::Allocators *Allocators = nullptr;
  FunctionCallTrie *FCT = nullptr;
};

static pthread_key_t ProfilingKey;

thread_local std::aligned_storage<sizeof(ProfilingData),
                                  alignof(ProfilingData)>::type ThreadStorage{};

static ProfilingData &getThreadLocalData() XRAY_NEVER_INSTRUMENT {
  thread_local auto ThreadOnce = [] {
    new (&ThreadStorage) ProfilingData{};
    pthread_setspecific(ProfilingKey, &ThreadStorage);
    return false;
  }();
  (void)ThreadOnce;

  auto &TLD = *reinterpret_cast<ProfilingData *>(&ThreadStorage);

  // We need to check whether the global flag to finalizing/finalized has been
  // switched. If it is, then we ought to not actually initialise the data.
  auto Status = atomic_load(&ProfilerLogStatus, memory_order_acquire);
  if (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING ||
      Status == XRayLogInitStatus::XRAY_LOG_FINALIZED)
    return TLD;

  // If we're live, then we re-initialize TLD if the pointers are not null.
  if (UNLIKELY(TLD.Allocators == nullptr && TLD.FCT == nullptr)) {
    TLD.Allocators = reinterpret_cast<FunctionCallTrie::Allocators *>(
        InternalAlloc(sizeof(FunctionCallTrie::Allocators)));
    new (TLD.Allocators) FunctionCallTrie::Allocators();
    *TLD.Allocators = FunctionCallTrie::InitAllocators();
    TLD.FCT = reinterpret_cast<FunctionCallTrie *>(
        InternalAlloc(sizeof(FunctionCallTrie)));
    new (TLD.FCT) FunctionCallTrie(*TLD.Allocators);
  }

  return TLD;
}

static void cleanupTLD() XRAY_NEVER_INSTRUMENT {
  auto &TLD = *reinterpret_cast<ProfilingData *>(&ThreadStorage);
  if (TLD.Allocators != nullptr && TLD.FCT != nullptr) {
    TLD.FCT->~FunctionCallTrie();
    TLD.Allocators->~Allocators();
    InternalFree(TLD.FCT);
    InternalFree(TLD.Allocators);
    TLD.FCT = nullptr;
    TLD.Allocators = nullptr;
  }
}

} // namespace
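getThreadLocalData() pairs a thread_local once-initializer with a pthread key so each thread gets both lazy construction and an exit-time destructor hook. The same idiom reduced to placeholder names (TLS, Key, and Dtor are illustrative, not from this patch):

    #include <pthread.h>
    #include <new>
    #include <type_traits>

    // Sketch: lazy per-thread init plus a thread-exit hook, as above.
    struct TLS { /* per-thread state */ };
    static pthread_key_t Key; // created once via pthread_key_create(&Key, Dtor)
    thread_local std::aligned_storage<sizeof(TLS), alignof(TLS)>::type Storage;

    static TLS &getTLS() {
      thread_local bool Once = [] {
        new (&Storage) TLS{};               // construct in place, once per thread
        pthread_setspecific(Key, &Storage); // so Dtor(&Storage) runs at exit
        return true;
      }();
      (void)Once;
      return *reinterpret_cast<TLS *>(&Storage);
    }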
const char *profilingCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT {
#ifdef XRAY_PROFILER_DEFAULT_OPTIONS
  return SANITIZER_STRINGIFY(XRAY_PROFILER_DEFAULT_OPTIONS);
#else
  return "";
#endif
}

atomic_sint32_t ProfileFlushStatus = {
    XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};

XRayLogFlushStatus profilingFlush() XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&ProfilerLogStatus, memory_order_acquire) !=
      XRayLogInitStatus::XRAY_LOG_FINALIZED) {
    if (Verbosity())
      Report("Not flushing profiles, profiling has not been finalized.\n");
    return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
  }

  s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
  if (!atomic_compare_exchange_strong(&ProfilerLogFlushStatus, &Result,
                                      XRayLogFlushStatus::XRAY_LOG_FLUSHING,
                                      memory_order_acq_rel)) {
    if (Verbosity())
      Report("Not flushing profiles, implementation still finalizing.\n");
  }

  // At this point, we'll create the file that will contain the profile, but
  // only if the options say so.
  if (!profilingFlags()->no_flush) {
    // First check whether we have data in the profile collector service
    // before we try and write anything down.
    XRayBuffer B = profileCollectorService::nextBuffer({nullptr, 0});
    if (B.Data == nullptr) {
      if (Verbosity())
        Report("profiling: No data to flush.\n");
    } else {
      int Fd = getLogFD();
      if (Fd == -1) {
        if (Verbosity())
          Report("profiling: Failed to flush to file, dropping data.\n");
      } else {
-        XRayProfilingFileHeader Header;
-        Header.Timestamp = NanoTime();
-        Header.PID = internal_getpid();
-        retryingWriteAll(Fd, reinterpret_cast<char *>(&Header),
-                         reinterpret_cast<char *>(&Header) + sizeof(Header));
-
-        // Now for each of the threads, write out the profile data as we would
+        // Now for each of the buffers, write out the profile data as we would
        // see it in memory, verbatim.
        while (B.Data != nullptr && B.Size != 0) {
          retryingWriteAll(Fd, reinterpret_cast<const char *>(B.Data),
                           reinterpret_cast<const char *>(B.Data) + B.Size);
          B = profileCollectorService::nextBuffer(B);
        }

        // Then we close out the file.
        internal_close(Fd);
      }
    }
  }

  profileCollectorService::reset();

  // Flush the current thread's local data structures as well.
  cleanupTLD();

  atomic_store(&ProfilerLogStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED,
               memory_order_release);

  return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
}

namespace {

thread_local atomic_uint8_t ReentranceGuard{0};

static void postCurrentThreadFCT(ProfilingData &TLD) {
  if (TLD.Allocators == nullptr || TLD.FCT == nullptr)
    return;

  profileCollectorService::post(*TLD.FCT, GetTid());
  cleanupTLD();
}

} // namespace

void profilingHandleArg0(int32_t FuncId,
                         XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
  unsigned char CPU;
  auto TSC = readTSC(CPU);
  RecursionGuard G(ReentranceGuard);
  if (!G)
    return;

  auto Status = atomic_load(&ProfilerLogStatus, memory_order_acquire);
  auto &TLD = getThreadLocalData();
  if (UNLIKELY(Status == XRayLogInitStatus::XRAY_LOG_FINALIZED ||
               Status == XRayLogInitStatus::XRAY_LOG_FINALIZING)) {
    postCurrentThreadFCT(TLD);
    return;
  }

  switch (Entry) {
  case XRayEntryType::ENTRY:
  case XRayEntryType::LOG_ARGS_ENTRY:
    TLD.FCT->enterFunction(FuncId, TSC);
    break;
  case XRayEntryType::EXIT:
  case XRayEntryType::TAIL:
    TLD.FCT->exitFunction(FuncId, TSC);
    break;
  default:
    // FIXME: Handle bugs.
    break;
  }
}

void profilingHandleArg1(int32_t FuncId, XRayEntryType Entry,
                         uint64_t) XRAY_NEVER_INSTRUMENT {
  return profilingHandleArg0(FuncId, Entry);
}

XRayLogInitStatus profilingFinalize() XRAY_NEVER_INSTRUMENT {
  s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
  if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus,
                                      XRayLogInitStatus::XRAY_LOG_FINALIZING,
                                      memory_order_release)) {
    if (Verbosity())
      Report("Cannot finalize profile, the profiling is not initialized.\n");
    return static_cast<XRayLogInitStatus>(CurrentStatus);
  }

  // Wait a grace period to allow threads to see that we're finalizing.
  SleepForMillis(profilingFlags()->grace_period_ms);

  // We also want to make sure that the current thread's data is cleaned up,
  // if we have any.
  auto &TLD = getThreadLocalData();
  postCurrentThreadFCT(TLD);

  // Then we force serialize the log data.
  profileCollectorService::serialize();

  atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_FINALIZED,
               memory_order_release);
  return XRayLogInitStatus::XRAY_LOG_FINALIZED;
}
Bailing out."); return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; } s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZING, memory_order_release)) { if (Verbosity()) Report("Cannot initialize already initialised profiling " "implementation.\n"); return static_cast(CurrentStatus); } { SpinMutexLock Lock(&ProfilerOptionsMutex); FlagParser ConfigParser; ProfilerFlags Flags; Flags.setDefaults(); registerProfilerFlags(&ConfigParser, &Flags); ConfigParser.ParseString(profilingCompilerDefinedFlags()); const char *Env = GetEnv("XRAY_PROFILING_OPTIONS"); if (Env == nullptr) Env = ""; ConfigParser.ParseString(Env); // Then parse the configuration string provided. ConfigParser.ParseString(static_cast(Options)); if (Verbosity()) ReportUnrecognizedFlags(); *profilingFlags() = Flags; } // We need to reset the profile data collection implementation now. profileCollectorService::reset(); // We need to set up the exit handlers. static pthread_once_t Once = PTHREAD_ONCE_INIT; pthread_once(&Once, +[] { pthread_key_create(&ProfilingKey, +[](void *P) { // This is the thread-exit handler. auto &TLD = *reinterpret_cast(P); if (TLD.Allocators == nullptr && TLD.FCT == nullptr) return; postCurrentThreadFCT(TLD); }); // We also need to set up an exit handler, so that we can get the profile // information at exit time. We use the C API to do this, to not rely on C++ // ABI functions for registering exit handlers. Atexit(+[] { // Finalize and flush. if (profilingFinalize() != XRAY_LOG_FINALIZED) { cleanupTLD(); return; } if (profilingFlush() != XRAY_LOG_FLUSHED) { cleanupTLD(); return; } if (Verbosity()) Report("XRay Profile flushed at exit."); }); }); __xray_log_set_buffer_iterator(profileCollectorService::nextBuffer); __xray_set_handler(profilingHandleArg0); __xray_set_handler_arg1(profilingHandleArg1); atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZED, memory_order_release); if (Verbosity()) Report("XRay Profiling init successful.\n"); return XRayLogInitStatus::XRAY_LOG_INITIALIZED; } bool profilingDynamicInitializer() XRAY_NEVER_INSTRUMENT { // Set up the flag defaults from the static defaults and the // compiler-provided defaults. 
bool profilingDynamicInitializer() XRAY_NEVER_INSTRUMENT {
  // Set up the flag defaults from the static defaults and the
  // compiler-provided defaults.
  {
    SpinMutexLock Lock(&ProfilerOptionsMutex);
    auto *F = profilingFlags();
    F->setDefaults();
    FlagParser ProfilingParser;
    registerProfilerFlags(&ProfilingParser, F);
    ProfilingParser.ParseString(profilingCompilerDefinedFlags());
  }

  XRayLogImpl Impl{
      profilingLoggingInit,
      profilingFinalize,
      profilingHandleArg0,
      profilingFlush,
  };
  auto RegistrationResult = __xray_log_register_mode("xray-profiling", Impl);
  if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) {
    if (Verbosity())
      Report("Cannot register XRay Profiling mode to 'xray-profiling'; "
             "error = %d\n",
             RegistrationResult);
    return false;
  }

  if (!internal_strcmp(flags()->xray_mode, "xray-profiling"))
    __xray_log_select_mode("xray-profiling");
  return true;
}

} // namespace __xray

static auto UNUSED Unused = __xray::profilingDynamicInitializer();
Index: vendor/compiler-rt/dist/test/asan/TestCases/intercept-rethrow-exception.cc
===================================================================
--- vendor/compiler-rt/dist/test/asan/TestCases/intercept-rethrow-exception.cc (revision 337140)
+++ vendor/compiler-rt/dist/test/asan/TestCases/intercept-rethrow-exception.cc (revision 337141)
@@ -1,64 +1,64 @@
// Regression test for
// https://bugs.llvm.org/show_bug.cgi?id=32434
-// RUN: %clangxx_asan -O0 %s -o %t
+// RUN: %clangxx_asan -fexceptions -O0 %s -o %t
// RUN: %run %t

#include <assert.h>
#include <exception>
#include <sanitizer/asan_interface.h>

namespace {

// Not instrumented because std::rethrow_exception is a [[noreturn]] function,
// for which the compiler would emit a call to __asan_handle_no_return which
// unpoisons the stack.
// We emulate here some code not compiled with asan. This function is not
// [[noreturn]] because the scenario we're emulating doesn't always throw. If
// it were [[noreturn]], the calling code would emit a call to
// __asan_handle_no_return.
void __attribute__((no_sanitize("address")))
uninstrumented_rethrow_exception(std::exception_ptr const &exc_ptr) {
  std::rethrow_exception(exc_ptr);
}

char *poisoned1;
char *poisoned2;

// Create redzones for stack variables in shadow memory and call
// std::rethrow_exception which should unpoison the entire stack.
void create_redzones_and_throw(std::exception_ptr const &exc_ptr) {
  char a[100];
  poisoned1 = a - 1;
  poisoned2 = a + sizeof(a);
  assert(__asan_address_is_poisoned(poisoned1));
  assert(__asan_address_is_poisoned(poisoned2));
  uninstrumented_rethrow_exception(exc_ptr);
}

} // namespace

// Check that std::rethrow_exception is intercepted by asan and the
// interception unpoisons the stack.
// If std::rethrow_exception is NOT intercepted, then calls to this function
// from instrumented code will still unpoison the stack because
// std::rethrow_exception is a [[noreturn]] function and any [[noreturn]]
// function call will be instrumented with __asan_handle_no_return.
// However, calls to std::rethrow_exception from UNinstrumented code will not
// unpoison the stack, so we need to intercept std::rethrow_exception to
// unpoison the stack.
int main() {
  // In some implementations of std::make_exception_ptr, e.g. libstdc++ prior
  // to gcc 7, this function calls __cxa_throw. The __cxa_throw is intercepted
  // by asan to unpoison the entire stack; since this test essentially tests
  // that the stack is unpoisoned by a call to std::rethrow_exception, we need
  // to generate the exception_ptr BEFORE we have the local variables poison
  // the stack.
  std::exception_ptr my_exception_ptr = std::make_exception_ptr("up");
  try {
    create_redzones_and_throw(my_exception_ptr);
  } catch(char const *) {
    assert(!__asan_region_is_poisoned(poisoned1, poisoned2 - poisoned1 + 1));
  }
}
Index: vendor/compiler-rt/dist/test/fuzzer/ImplicitIntegerTruncationTest.cpp
===================================================================
--- vendor/compiler-rt/dist/test/fuzzer/ImplicitIntegerTruncationTest.cpp (nonexistent)
+++ vendor/compiler-rt/dist/test/fuzzer/ImplicitIntegerTruncationTest.cpp (revision 337141)
@@ -0,0 +1,27 @@
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+
+// Test for implicit-integer-truncation.
+#include
+#include
+#include
+#include
+#include
+#include
+
+static volatile int Sink;
+static unsigned char Large = UINT8_MAX;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+  assert(Data);
+  if (Size > 0 && Data[0] == 'H') {
+    Sink = 1;
+    if (Size > 1 && Data[1] == 'i') {
+      Sink = 2;
+      if (Size > 2 && Data[2] == '!') {
+        Large = Large + 1; // 'char overflow'.
+      }
+    }
+  }
+  return 0;
+}

Property changes on: vendor/compiler-rt/dist/test/fuzzer/ImplicitIntegerTruncationTest.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/dist/test/fuzzer/fuzzer-implicit-integer-truncation.test
===================================================================
--- vendor/compiler-rt/dist/test/fuzzer/fuzzer-implicit-integer-truncation.test (nonexistent)
+++ vendor/compiler-rt/dist/test/fuzzer/fuzzer-implicit-integer-truncation.test (revision 337141)
@@ -0,0 +1,5 @@
+RUN: rm -f %t-ImplicitIntegerTruncationTest-Ubsan
+RUN: %cpp_compiler -fsanitize=implicit-integer-truncation -fno-sanitize-recover=all %S/ImplicitIntegerTruncationTest.cpp -o %t-ImplicitIntegerTruncationTest-Ubsan
+RUN: not %run %t-ImplicitIntegerTruncationTest-Ubsan 2>&1 | FileCheck %s
+CHECK: runtime error: implicit conversion from type 'int' of value 256 (32-bit, signed) to type 'unsigned char' changed the value to 0 (8-bit, unsigned)
+CHECK: Test unit written to ./crash-
Index: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-fork.c
===================================================================
--- vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-fork.c (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-fork.c (revision 337141)
@@ -0,0 +1,15 @@
+#include <unistd.h>
+
+void func1() {}
+void func2() {}
+
+int main(void)
+{
+  func1();
+
+  fork();
+
+  func2();
+
+  return 0;
+}

Property changes on: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-fork.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-fork.c.gcov
===================================================================
--- vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-fork.c.gcov (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-fork.c.gcov (revision 337141)
@@ -0,0 +1,23 @@
+// CHECK: -: 0:Source:{{.*}}Inputs/instrprof-gcov-fork.c
+// CHECK-NEXT: -: 0:Graph:instrprof-gcov-fork.gcno
+// CHECK-NEXT: -: 0:Data:instrprof-gcov-fork.gcda
+// CHECK-NEXT: -: 0:Runs:1
+// CHECK-NEXT: -: 0:Programs:1
+// CHECK-NEXT: -: 1:#include <unistd.h>
+// CHECK-NEXT: -: 2:
+// CHECK-NEXT:function func1 called 1 returned 100% blocks executed 100%
+// CHECK-NEXT: 1: 3:void func1() {}
+// CHECK-NEXT:function func2 called 2 returned 100% blocks executed 100%
+// CHECK-NEXT: 2: 4:void func2() {}
+// CHECK-NEXT: -: 5:
+// CHECK-NEXT:function main called 1 returned 100% blocks executed 100%
+// CHECK-NEXT: -: 6:int main(void)
+// CHECK-NEXT: -: 7:{
+// CHECK-NEXT: 1: 8: func1();
+// CHECK-NEXT: -: 9:
+// CHECK-NEXT: 1: 10: fork();
+// CHECK-NEXT: -: 11:
+// CHECK-NEXT: 2: 12: func2();
+// CHECK-NEXT: -: 13:
+// CHECK-NEXT: 2: 14: return 0;
+// CHECK-NEXT: -: 15:}
Index: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch1.c
===================================================================
--- vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch1.c (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch1.c (revision 337141)
@@ -0,0 +1,18 @@
+int main(void)
+{
+  int i = 22;
+
+  switch (i) {
+  case 7:
+    break;
+
+  case 22:
+    i = 7;
+    break;
+
+  case 42:
+    break;
+  }
+
+  return 0;
+}

Property changes on: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch1.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch1.c.gcov
===================================================================
--- vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch1.c.gcov (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch1.c.gcov (revision 337141)
@@ -0,0 +1,23 @@
+// CHECK: -: 0:Source:{{.*}}Inputs/instrprof-gcov-switch1.c
+// CHECK-NEXT: -: 0:Graph:instrprof-gcov-switch1.gcno
+// CHECK-NEXT: -: 0:Data:instrprof-gcov-switch1.gcda
+// CHECK-NEXT: -: 0:Runs:1
+// CHECK-NEXT: -: 0:Programs:1
+// CHECK-NEXT: -: 1:int main(void)
+// CHECK-NEXT: -: 2:{
+// CHECK-NEXT: 2: 3: int i = 22;
+// CHECK-NEXT: -: 4:
+// CHECK-NEXT: 2: 5: switch (i) {
+// CHECK-NEXT: -: 6: case 7:
+// CHECK-NEXT: #####: 7: break;
+// CHECK-NEXT: -: 8:
+// CHECK-NEXT: -: 9: case 22:
+// CHECK-NEXT: 1: 10: i = 7;
+// CHECK-NEXT: 1: 11: break;
+// CHECK-NEXT: -: 12:
+// CHECK-NEXT: -: 13: case 42:
+// CHECK-NEXT: #####: 14: break;
+// CHECK-NEXT: -: 15: }
+// CHECK-NEXT: -: 16:
+// CHECK-NEXT: 1: 17: return 0;
+// CHECK-NEXT: -: 18:}
Index: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch2.c
===================================================================
--- vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch2.c (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch2.c (revision 337141)
@@ -0,0 +1,18 @@
+int main(void)
+{
+  int i = 22;
+
+  switch (i) {
+  case 7:
+    break;
+
+  case 22:
+    i = 7;
+
+  case 42:
+    i = 22;
+    break;
+  }
+
+  return 0;
+}
Property changes on: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch2.c.gcov
===================================================================
--- vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch2.c.gcov (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/Inputs/instrprof-gcov-switch2.c.gcov (revision 337141)
@@ -0,0 +1,23 @@
+// CHECK: -: 0:Source:{{.*}}Inputs/instrprof-gcov-switch2.c
+// CHECK-NEXT: -: 0:Graph:instrprof-gcov-switch2.gcno
+// CHECK-NEXT: -: 0:Data:instrprof-gcov-switch2.gcda
+// CHECK-NEXT: -: 0:Runs:1
+// CHECK-NEXT: -: 0:Programs:1
+// CHECK-NEXT: -: 1:int main(void)
+// CHECK-NEXT: -: 2:{
+// CHECK-NEXT: 3: 3: int i = 22;
+// CHECK-NEXT: -: 4:
+// CHECK-NEXT: 3: 5: switch (i) {
+// CHECK-NEXT: -: 6: case 7:
+// CHECK-NEXT: #####: 7: break;
+// CHECK-NEXT: -: 8:
+// CHECK-NEXT: -: 9: case 22:
+// CHECK-NEXT: 1: 10: i = 7;
+// CHECK-NEXT: -: 11:
+// CHECK-NEXT: -: 12: case 42:
+// CHECK-NEXT: 1: 13: i = 22;
+// CHECK-NEXT: 1: 14: break;
+// CHECK-NEXT: -: 15: }
+// CHECK-NEXT: -: 16:
+// CHECK-NEXT: 1: 17: return 0;
+// CHECK-NEXT: -: 18:}
Index: vendor/compiler-rt/dist/test/profile/Posix/instrprof-gcov-fork.test
===================================================================
--- vendor/compiler-rt/dist/test/profile/Posix/instrprof-gcov-fork.test (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/Posix/instrprof-gcov-fork.test (revision 337141)
@@ -0,0 +1,12 @@
+XFAIL: *
+
+RUN: mkdir -p %t.d
+RUN: cd %t.d
+
+RUN: %clang --coverage -o %t %S/../Inputs/instrprof-gcov-fork.c
+RUN: test -f instrprof-gcov-fork.gcno
+
+RUN: rm -f instrprof-gcov-fork.gcda
+RUN: %run %t
+RUN: llvm-cov gcov -b -c instrprof-gcov-fork.gcda
+RUN: FileCheck --match-full-lines --strict-whitespace --input-file instrprof-gcov-fork.c.gcov %S/../Inputs/instrprof-gcov-fork.c.gcov
Index: vendor/compiler-rt/dist/test/profile/instrprof-gcov-switch.test
===================================================================
--- vendor/compiler-rt/dist/test/profile/instrprof-gcov-switch.test (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/instrprof-gcov-switch.test (revision 337141)
@@ -0,0 +1,16 @@
+RUN: mkdir -p %t.d
+RUN: cd %t.d
+
+RUN: %clang --coverage -o %t %S/Inputs/instrprof-gcov-switch1.c
+RUN: test -f instrprof-gcov-switch1.gcno
+RUN: rm -f instrprof-gcov-switch1.gcda
+RUN: %run %t
+RUN: llvm-cov gcov instrprof-gcov-switch1.gcda
+RUN: FileCheck --match-full-lines --strict-whitespace --input-file instrprof-gcov-switch1.c.gcov %S/Inputs/instrprof-gcov-switch1.c.gcov
+
+RUN: %clang --coverage -o %t %S/Inputs/instrprof-gcov-switch2.c
+RUN: test -f instrprof-gcov-switch2.gcno
+RUN: rm -f instrprof-gcov-switch2.gcda
+RUN: %run %t
+RUN: llvm-cov gcov instrprof-gcov-switch2.gcda
+RUN: FileCheck --match-full-lines --strict-whitespace --input-file instrprof-gcov-switch2.c.gcov %S/Inputs/instrprof-gcov-switch2.c.gcov
Index: vendor/compiler-rt/dist/test/profile/instrprof-set-dir-mode.c
===================================================================
--- vendor/compiler-rt/dist/test/profile/instrprof-set-dir-mode.c (nonexistent)
+++ vendor/compiler-rt/dist/test/profile/instrprof-set-dir-mode.c (revision 337141)
@@ -0,0 +1,48 @@
+// UNSUPPORTED: windows
+// RUN: %clang_pgogen -o %t.bin %s -DTESTPATH=\"%t.dir\"
+// RUN: rm -rf %t.dir
+// RUN: %run %t.bin
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+void __llvm_profile_set_dir_mode(unsigned Mode);
+unsigned __llvm_profile_get_dir_mode(void);
+void __llvm_profile_recursive_mkdir(char *Path);
+
+static int test(unsigned Mode, const char *TestDir) {
+  int Ret = 0;
+
+  /* Create a dir and set the mode accordingly. */
+  char *Dir = strdup(TestDir);
+  if (!Dir)
+    return -1;
+  __llvm_profile_set_dir_mode(Mode);
+  __llvm_profile_recursive_mkdir(Dir);
+
+  if (Mode != __llvm_profile_get_dir_mode())
+    Ret = -1;
+  else {
+    const unsigned Expected = ~umask(0) & Mode;
+    struct stat DirSt;
+    if (stat(Dir, &DirSt) == -1)
+      Ret = -1;
+    else if (DirSt.st_mode != Expected) {
+      printf("Modes do not match: Expected %o but found %o (%s)\n", Expected,
+             DirSt.st_mode, Dir);
+      Ret = -1;
+    }
+  }
+
+  free(Dir);
+  return Ret;
+}
+
+int main(void) {
+  if (test(S_IFDIR | 0777, TESTPATH "/foo/bar/baz/") ||
+      test(S_IFDIR | 0666, TESTPATH "/foo/bar/qux/"))
+    return -1;
+  return 0;
+}

Property changes on: vendor/compiler-rt/dist/test/profile/instrprof-set-dir-mode.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
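The Expected computation in the test above folds the process umask into the requested mode before comparing against stat(). A short worked example, assuming an illustrative umask of 022:

    #include <sys/stat.h>

    /* Sketch: the masking test() relies on, under an assumed umask of 022. */
    mode_t Prev = umask(0);                       /* returns 022, clears the mask */
    unsigned Expected = ~Prev & (S_IFDIR | 0777); /* == S_IFDIR | 0755, exactly   */
                                                  /* the st_mode mkdir produces   */
                                                  /* after the kernel strips the  */
                                                  /* group/other write bits.      */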
Index: vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation-blacklist.c
===================================================================
--- vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation-blacklist.c (nonexistent)
+++ vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation-blacklist.c (revision 337141)
@@ -0,0 +1,20 @@
+// FIXME: https://code.google.com/p/address-sanitizer/issues/detail?id=316
+// I'm not sure this is actually *that* issue, but this seems oddly similar to the other XFAIL'ed cases.
+// XFAIL: android
+// UNSUPPORTED: ios
+
+// RUN: rm -f %tmp
+// RUN: echo "[implicit-integer-truncation]" >> %tmp
+// RUN: echo "fun:*implicitTruncation*" >> %tmp
+// RUN: %clang -fsanitize=implicit-integer-truncation -fno-sanitize-recover=implicit-integer-truncation -fsanitize-blacklist=%tmp -O0 %s -o %t && not %run %t 2>&1
+// RUN: %clang -fsanitize=implicit-integer-truncation -fno-sanitize-recover=implicit-integer-truncation -fsanitize-blacklist=%tmp -O1 %s -o %t && not %run %t 2>&1
+// RUN: %clang -fsanitize=implicit-integer-truncation -fno-sanitize-recover=implicit-integer-truncation -fsanitize-blacklist=%tmp -O2 %s -o %t && not %run %t 2>&1
+// RUN: %clang -fsanitize=implicit-integer-truncation -fno-sanitize-recover=implicit-integer-truncation -fsanitize-blacklist=%tmp -O3 %s -o %t && not %run %t 2>&1
+
+unsigned char implicitTruncation(unsigned int argc) {
+  return argc; // BOOM
+}
+
+int main(int argc, char **argv) {
+  return implicitTruncation(~0U);
+}

Property changes on: vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation-blacklist.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation-summary.cpp
===================================================================
--- vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation-summary.cpp (nonexistent)
+++ vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation-summary.cpp (revision 337141)
@@ -0,0 +1,13 @@
+// RUN: %clangxx -fsanitize=implicit-integer-truncation %s -o %t
+// RUN: %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-NOTYPE
+// RUN: %env_ubsan_opts=report_error_type=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-TYPE
+// REQUIRES: !ubsan-standalone && !ubsan-standalone-static
+
+#include <stdint.h>
+
+int main() {
+  uint8_t t0 = (~(uint32_t(0)));
+  // CHECK-NOTYPE: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior {{.*}}summary.cpp:[[@LINE-1]]:16
+  // CHECK-TYPE: SUMMARY: UndefinedBehaviorSanitizer: implicit-integer-truncation {{.*}}summary.cpp:[[@LINE-2]]:16
+  return 0;
+}

Property changes on: vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation-summary.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation.c
===================================================================
--- vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation.c (nonexistent)
+++ vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation.c (revision 337141)
@@ -0,0 +1,63 @@
+// RUN: %clang -x c -fsanitize=implicit-integer-truncation %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
+// RUN: %clangxx -x c++ -fsanitize=implicit-integer-truncation %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
+
+#include <stdint.h>
+
+#if !defined(__cplusplus)
+#define bool _Bool
+#endif
+
+int main() {
+// CHECK-NOT: integer-truncation.c
+
+  // Negative tests. Even if they produce unexpected results, this sanitizer
+  // does not care.
+  int8_t n0 = (~((uint32_t)0)); // ~0 -> -1, but do not warn.
+  uint8_t n2 = 128;
+  uint8_t n3 = 255;
+  // Bools do not count
+  bool b0 = (~((uint32_t)0));
+  bool b1 = 255;
+
+  // Explicit and-ing of bits will silence it.
+  uint8_t nc0 = (~((uint32_t)0)) & 255;
+
+  // Explicit casts
+  uint8_t i0 = (uint8_t)(~((uint32_t)0));
+
+#if defined(__cplusplus)
+  uint8_t i1 = uint8_t(~(uint32_t(0)));
+  uint8_t i2 = static_cast<uint8_t>(~(uint32_t(0)));
+#endif
+
+  // Positive tests.
+
+  uint8_t t_b0 = (~((uint16_t)(0)));
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:18: runtime error: implicit conversion from type 'int' of value -1 (32-bit, signed) to type 'uint8_t' (aka 'unsigned char') changed the value to 255 (8-bit, unsigned)
+
+  uint8_t t_b1 = (~((uint32_t)0));
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:18: runtime error: implicit conversion from type 'uint32_t' (aka 'unsigned int') of value 4294967295 (32-bit, unsigned) to type 'uint8_t' (aka 'unsigned char') changed the value to 255 (8-bit, unsigned)
+  uint16_t t_b2 = (~((uint32_t)0));
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:19: runtime error: implicit conversion from type 'uint32_t' (aka 'unsigned int') of value 4294967295 (32-bit, unsigned) to type 'uint16_t' (aka 'unsigned short') changed the value to 65535 (16-bit, unsigned)
+
+  uint8_t t_b3 = ~((uint64_t)0);
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:18: runtime error: implicit conversion from type 'uint64_t' (aka 'unsigned long{{[^']*}}') of value 18446744073709551615 (64-bit, unsigned) to type 'uint8_t' (aka 'unsigned char') changed the value to 255 (8-bit, unsigned)
+  uint16_t t_b4 = ~((uint64_t)0);
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:19: runtime error: implicit conversion from type 'uint64_t' (aka 'unsigned long{{[^']*}}') of value 18446744073709551615 (64-bit, unsigned) to type 'uint16_t' (aka 'unsigned short') changed the value to 65535 (16-bit, unsigned)
+  uint32_t t_b5 = ~((uint64_t)0);
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:19: runtime error: implicit conversion from type 'uint64_t' (aka 'unsigned long{{[^']*}}') of value 18446744073709551615 (64-bit, unsigned) to type 'uint32_t' (aka 'unsigned int') changed the value to 4294967295 (32-bit, unsigned)
+
+  int8_t t1 = 255;
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:15: runtime error: implicit conversion from type 'int' of value 255 (32-bit, signed) to type 'int8_t' (aka 'signed char') changed the value to -1 (8-bit, signed)
+  uint8_t t2 = 256;
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:16: runtime error: implicit conversion from type 'int' of value 256 (32-bit, signed) to type 'uint8_t' (aka 'unsigned char') changed the value to 0 (8-bit, unsigned)
+  int8_t t3 = 256;
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:15: runtime error: implicit conversion from type 'int' of value 256 (32-bit, signed) to type 'int8_t' (aka 'signed char') changed the value to 0 (8-bit, signed)
+  uint8_t t4 = 257;
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:16: runtime error: implicit conversion from type 'int' of value 257 (32-bit, signed) to type 'uint8_t' (aka 'unsigned char') changed the value to 1 (8-bit, unsigned)
+  int8_t t5 = 257;
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:15: runtime error: implicit conversion from type 'int' of value 257 (32-bit, signed) to type 'int8_t' (aka 'signed char') changed the value to 1 (8-bit, signed)
+  int8_t t6 = 128;
+// CHECK: {{.*}}integer-truncation.c:[[@LINE-1]]:15: runtime error: implicit conversion from type 'int' of value 128 (32-bit, signed) to type 'int8_t' (aka 'signed char') changed the value to -128 (8-bit, signed)
+
+  return 0;
+}

Property changes on: vendor/compiler-rt/dist/test/ubsan/TestCases/ImplicitConversion/integer-truncation.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/dist/test/ubsan_minimal/TestCases/implicit-integer-truncation.c
===================================================================
--- vendor/compiler-rt/dist/test/ubsan_minimal/TestCases/implicit-integer-truncation.c (nonexistent)
+++ vendor/compiler-rt/dist/test/ubsan_minimal/TestCases/implicit-integer-truncation.c (revision 337141)
@@ -0,0 +1,24 @@
+// RUN: %clang -fsanitize=implicit-integer-truncation %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
+
+#include <stdint.h>
+
+int main() {
+// CHECK-NOT: integer-truncation.c
+
+  // Negative tests. Even if they produce unexpected results, this sanitizer
+  // does not care.
+  int8_t n0 = (~((uint32_t)(0))); // ~0 -> -1, but do not warn.
+  uint8_t n2 = 128;
+  uint8_t n3 = 255;
+  // Bools do not count
+  _Bool b0 = (~((uint32_t)(0)));
+  _Bool b1 = 255;
+
+  // Explicit and-ing of bits will silence it.
+  uint8_t nc0 = ((~((uint32_t)(0))) & 255);
+
+  // Positive tests.
+  uint8_t t0 = (~((uint32_t)(0)));
+// CHECK: implicit-conversion
+
+  return 0;
+}

Property changes on: vendor/compiler-rt/dist/test/ubsan_minimal/TestCases/implicit-integer-truncation.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/dist/test/xray/TestCases/Posix/profiling-multi-threaded.cc
===================================================================
--- vendor/compiler-rt/dist/test/xray/TestCases/Posix/profiling-multi-threaded.cc (revision 337140)
+++ vendor/compiler-rt/dist/test/xray/TestCases/Posix/profiling-multi-threaded.cc (revision 337141)
@@ -1,57 +1,58 @@
// Check that we can get a profile from a multi-threaded application, on
// demand through the XRay logging implementation API.
//
// FIXME: Make -fxray-modes=xray-profiling part of the default?
// RUN: %clangxx_xray -std=c++11 %s -o %t -fxray-modes=xray-profiling
// RUN: rm -f xray-log.profiling-multi-*
// RUN: XRAY_OPTIONS=verbosity=1 \
// RUN:     XRAY_PROFILING_OPTIONS=no_flush=1 %run %t
// RUN: XRAY_OPTIONS=verbosity=1 %run %t
// RUN: PROFILES=`ls xray-log.profiling-multi-* | wc -l`
// RUN: [ $PROFILES -eq 1 ]
// RUN: rm -f xray-log.profiling-multi-*
//
// REQUIRES: x86_64-target-arch
// REQUIRES: built-in-llvm-tree

#include "xray/xray_interface.h"
#include "xray/xray_log_interface.h"
#include
#include
#include
#include

[[clang::xray_always_instrument]] void f2() { return; }
[[clang::xray_always_instrument]] void f1() { f2(); }
[[clang::xray_always_instrument]] void f0() { f1(); }

using namespace std;

volatile int buffer_counter = 0;

[[clang::xray_never_instrument]] void process_buffer(const char *, XRayBuffer) {
  // FIXME: Actually assert the contents of the buffer.
  ++buffer_counter;
}

[[clang::xray_always_instrument]] int main(int, char **) {
  assert(__xray_log_select_mode("xray-profiling") ==
         XRayLogRegisterStatus::XRAY_REGISTRATION_OK);
  assert(__xray_log_get_current_mode() != nullptr);
  std::string current_mode = __xray_log_get_current_mode();
  assert(current_mode == "xray-profiling");
  assert(__xray_patch() == XRayPatchingStatus::SUCCESS);
  assert(__xray_log_init(0, 0, nullptr, 0) ==
         XRayLogInitStatus::XRAY_LOG_INITIALIZED);

  std::thread t0([] { f0(); });
  std::thread t1([] { f0(); });
  f0();
  t0.join();
  t1.join();

  assert(__xray_log_finalize() == XRayLogInitStatus::XRAY_LOG_FINALIZED);
  assert(__xray_log_process_buffers(process_buffer) ==
         XRayLogFlushStatus::XRAY_LOG_FLUSHED);
-  // We're running three threds, so we expect three buffers.
-  assert(buffer_counter == 3);
+  // We're running three threads, so we expect four buffers (including the file
+  // header buffer).
+  assert(buffer_counter == 4);
  assert(__xray_log_flushLog() == XRayLogFlushStatus::XRAY_LOG_FLUSHED);
}
Index: vendor/compiler-rt/dist/test/xray/TestCases/Posix/profiling-single-threaded.cc
===================================================================
--- vendor/compiler-rt/dist/test/xray/TestCases/Posix/profiling-single-threaded.cc (revision 337140)
+++ vendor/compiler-rt/dist/test/xray/TestCases/Posix/profiling-single-threaded.cc (revision 337141)
@@ -1,65 +1,68 @@
// Check that we can get a profile from a single-threaded application, on
// demand through the XRay logging implementation API.
//
// FIXME: Make -fxray-modes=xray-profiling part of the default?
// RUN: %clangxx_xray -std=c++11 %s -o %t -fxray-modes=xray-profiling
// RUN: rm -f xray-log.profiling-single-*
// RUN: XRAY_OPTIONS=verbosity=1 \
// RUN:     XRAY_PROFILING_OPTIONS=no_flush=true %run %t
// RUN: XRAY_OPTIONS=verbosity=1 %run %t
// RUN: PROFILES=`ls xray-log.profiling-single-* | wc -l`
// RUN: [ $PROFILES -eq 2 ]
// RUN: rm -f xray-log.profiling-single-*
//
// REQUIRES: x86_64-target-arch
// REQUIRES: built-in-llvm-tree

#include "xray/xray_interface.h"
#include "xray/xray_log_interface.h"
#include
#include
#include

[[clang::xray_always_instrument]] void f2() { return; }
[[clang::xray_always_instrument]] void f1() { f2(); }
[[clang::xray_always_instrument]] void f0() { f1(); }

using namespace std;

volatile int buffer_counter = 0;

[[clang::xray_never_instrument]] void process_buffer(const char *, XRayBuffer) {
  // FIXME: Actually assert the contents of the buffer.
  ++buffer_counter;
}

[[clang::xray_always_instrument]] int main(int, char **) {
  assert(__xray_log_select_mode("xray-profiling") ==
         XRayLogRegisterStatus::XRAY_REGISTRATION_OK);
  assert(__xray_log_get_current_mode() != nullptr);
  std::string current_mode = __xray_log_get_current_mode();
  assert(current_mode == "xray-profiling");
  assert(__xray_patch() == XRayPatchingStatus::SUCCESS);
  assert(__xray_log_init_mode("xray-profiling", "") ==
         XRayLogInitStatus::XRAY_LOG_INITIALIZED);
  f0();
  assert(__xray_log_finalize() == XRayLogInitStatus::XRAY_LOG_FINALIZED);
  f0();
  assert(__xray_log_process_buffers(process_buffer) ==
         XRayLogFlushStatus::XRAY_LOG_FLUSHED);
-  assert(buffer_counter == 1);
+  // There's always at least one buffer, containing the profile file header. We
+  // assert that we have two, to indicate that we're expecting exactly one
+  // thread's worth of data.
+  assert(buffer_counter == 2);
  assert(__xray_log_flushLog() == XRayLogFlushStatus::XRAY_LOG_FLUSHED);

  // Let's reset the counter.
  buffer_counter = 0;
  assert(__xray_log_init_mode("xray-profiling", "") ==
         XRayLogInitStatus::XRAY_LOG_INITIALIZED);
  f0();
  assert(__xray_log_finalize() == XRayLogInitStatus::XRAY_LOG_FINALIZED);
  f0();
  assert(__xray_log_process_buffers(process_buffer) ==
         XRayLogFlushStatus::XRAY_LOG_FLUSHED);
-  assert(buffer_counter == 1);
+  assert(buffer_counter == 2);
  assert(__xray_log_flushLog() == XRayLogFlushStatus::XRAY_LOG_FLUSHED);
}