Index: projects/clang700-import/contrib/compiler-rt/lib/asan/asan_mapping.h =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/asan/asan_mapping.h (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/asan/asan_mapping.h (revision 337154) @@ -1,401 +1,401 @@ //===-- asan_mapping.h ------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Defines ASan memory mapping. //===----------------------------------------------------------------------===// #ifndef ASAN_MAPPING_H #define ASAN_MAPPING_H #include "asan_internal.h" // The full explanation of the memory mapping could be found here: // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm // // Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000: // || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || // || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || // || `[0x00008fff7000, 0x02008fff6fff]` || ShadowGap || // || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || // || `[0x000000000000, 0x00007fff7fff]` || LowMem || // // When SHADOW_OFFSET is zero (-pie): // || `[0x100000000000, 0x7fffffffffff]` || HighMem || // || `[0x020000000000, 0x0fffffffffff]` || HighShadow || // || `[0x000000040000, 0x01ffffffffff]` || ShadowGap || // // Special case when something is already mapped between // 0x003000000000 and 0x005000000000 (e.g. when prelink is installed): // || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || // || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || // || `[0x005000000000, 0x02008fff6fff]` || ShadowGap3 || // || `[0x003000000000, 0x004fffffffff]` || MidMem || // || `[0x000a7fff8000, 0x002fffffffff]` || ShadowGap2 || // || `[0x00067fff8000, 0x000a7fff7fff]` || MidShadow || // || `[0x00008fff7000, 0x00067fff7fff]` || ShadowGap || // || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || // || `[0x000000000000, 0x00007fff7fff]` || LowMem || // // Default Linux/i386 mapping on x86_64 machine: // || `[0x40000000, 0xffffffff]` || HighMem || // || `[0x28000000, 0x3fffffff]` || HighShadow || // || `[0x24000000, 0x27ffffff]` || ShadowGap || // || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x00000000, 0x1fffffff]` || LowMem || // // Default Linux/i386 mapping on i386 machine // (addresses starting with 0xc0000000 are reserved // for kernel and thus not sanitized): // || `[0x38000000, 0xbfffffff]` || HighMem || // || `[0x27000000, 0x37ffffff]` || HighShadow || // || `[0x24000000, 0x26ffffff]` || ShadowGap || // || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x00000000, 0x1fffffff]` || LowMem || // // Default Linux/MIPS32 mapping: // || `[0x2aaa0000, 0xffffffff]` || HighMem || // || `[0x0fff4000, 0x2aa9ffff]` || HighShadow || // || `[0x0bff4000, 0x0fff3fff]` || ShadowGap || // || `[0x0aaa0000, 0x0bff3fff]` || LowShadow || // || `[0x00000000, 0x0aa9ffff]` || LowMem || // // Default Linux/MIPS64 mapping: // || `[0x4000000000, 0xffffffffff]` || HighMem || // || `[0x2800000000, 0x3fffffffff]` || HighShadow || // || `[0x2400000000, 0x27ffffffff]` || ShadowGap || // || `[0x2000000000, 0x23ffffffff]` || LowShadow || // || `[0x0000000000, 0x1fffffffff]` || LowMem || // // Default 
Linux/AArch64 (39-bit VMA) mapping: // || `[0x2000000000, 0x7fffffffff]` || highmem || // || `[0x1400000000, 0x1fffffffff]` || highshadow || // || `[0x1200000000, 0x13ffffffff]` || shadowgap || // || `[0x1000000000, 0x11ffffffff]` || lowshadow || // || `[0x0000000000, 0x0fffffffff]` || lowmem || // // Default Linux/AArch64 (42-bit VMA) mapping: // || `[0x10000000000, 0x3ffffffffff]` || highmem || // || `[0x0a000000000, 0x0ffffffffff]` || highshadow || // || `[0x09000000000, 0x09fffffffff]` || shadowgap || // || `[0x08000000000, 0x08fffffffff]` || lowshadow || // || `[0x00000000000, 0x07fffffffff]` || lowmem || // // Default Linux/S390 mapping: // || `[0x30000000, 0x7fffffff]` || HighMem || // || `[0x26000000, 0x2fffffff]` || HighShadow || // || `[0x24000000, 0x25ffffff]` || ShadowGap || // || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x00000000, 0x1fffffff]` || LowMem || // // Default Linux/SystemZ mapping: // || `[0x14000000000000, 0x1fffffffffffff]` || HighMem || // || `[0x12800000000000, 0x13ffffffffffff]` || HighShadow || // || `[0x12000000000000, 0x127fffffffffff]` || ShadowGap || // || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow || // || `[0x00000000000000, 0x0fffffffffffff]` || LowMem || // // Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: // || `[0x500000000000, 0x7fffffffffff]` || HighMem || // || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow || // || `[0x480000000000, 0x49ffffffffff]` || ShadowGap || // || `[0x400000000000, 0x47ffffffffff]` || LowShadow || // || `[0x000000000000, 0x3fffffffffff]` || LowMem || // // Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000: // || `[0x60000000, 0xffffffff]` || HighMem || // || `[0x4c000000, 0x5fffffff]` || HighShadow || // || `[0x48000000, 0x4bffffff]` || ShadowGap || // || `[0x40000000, 0x47ffffff]` || LowShadow || // || `[0x00000000, 0x3fffffff]` || LowMem || // // Shadow mapping on NetBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: // || `[0x4feffffffe01, 0x7f7ffffff000]` || HighMem || // || `[0x49fdffffffc0, 0x4feffffffe00]` || HighShadow || // || `[0x480000000000, 0x49fdffffffbf]` || ShadowGap || // || `[0x400000000000, 0x47ffffffffff]` || LowShadow || // || `[0x000000000000, 0x3fffffffffff]` || LowMem || // -// Shadow mapping on NerBSD/i386 with SHADOW_OFFSET == 0x40000000: +// Shadow mapping on NetBSD/i386 with SHADOW_OFFSET == 0x40000000: // || `[0x60000000, 0xfffff000]` || HighMem || // || `[0x4c000000, 0x5fffffff]` || HighShadow || // || `[0x48000000, 0x4bffffff]` || ShadowGap || // || `[0x40000000, 0x47ffffff]` || LowShadow || // || `[0x00000000, 0x3fffffff]` || LowMem || // // Default Windows/i386 mapping: // (the exact location of HighShadow/HighMem may vary depending // on WoW64, /LARGEADDRESSAWARE, etc). // || `[0x50000000, 0xffffffff]` || HighMem || // || `[0x3a000000, 0x4fffffff]` || HighShadow || // || `[0x36000000, 0x39ffffff]` || ShadowGap || // || `[0x30000000, 0x35ffffff]` || LowShadow || // || `[0x00000000, 0x2fffffff]` || LowMem || // // Shadow mapping on Myriad2 (for shadow scale 5): // || `[0x9ff80000, 0x9fffffff]` || ShadowGap || // || `[0x9f000000, 0x9ff7ffff]` || LowShadow || // || `[0x80000000, 0x9effffff]` || LowMem || // || `[0x00000000, 0x7fffffff]` || Ignored || #if defined(ASAN_SHADOW_SCALE) static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE; #else static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 
5 : 3; #endif static const u64 kDefaultShadowSentinel = ~(uptr)0; static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 static const u64 kDefaultShadowOffset64 = 1ULL << 44; static const u64 kDefaultShort64bitShadowOffset = 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G. static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kIosShadowOffset64 = 0x120200000; static const u64 kIosSimShadowOffset32 = 1ULL << 30; static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64; static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; static const u64 kPPC64_ShadowOffset64 = 1ULL << 44; static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 static const u64 kMyriadMemoryOffset32 = 0x80000000ULL; static const u64 kMyriadMemorySize32 = 0x20000000ULL; static const u64 kMyriadMemoryEnd32 = kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1; static const u64 kMyriadShadowOffset32 = (kMyriadMemoryOffset32 + kMyriadMemorySize32 - (kMyriadMemorySize32 >> kDefaultShadowScale)); static const u64 kMyriadCacheBitMask32 = 0x40000000ULL; #define SHADOW_SCALE kDefaultShadowScale #if SANITIZER_FUCHSIA # define SHADOW_OFFSET (0) #elif SANITIZER_WORDSIZE == 32 # if SANITIZER_ANDROID # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address # elif defined(__mips__) # define SHADOW_OFFSET kMIPS32_ShadowOffset32 # elif SANITIZER_FREEBSD # define SHADOW_OFFSET kFreeBSD_ShadowOffset32 # elif SANITIZER_NETBSD # define SHADOW_OFFSET kNetBSD_ShadowOffset32 # elif SANITIZER_WINDOWS # define SHADOW_OFFSET kWindowsShadowOffset32 # elif SANITIZER_IOS # if SANITIZER_IOSSIM # define SHADOW_OFFSET kIosSimShadowOffset32 # else # define SHADOW_OFFSET kIosShadowOffset32 # endif # elif SANITIZER_MYRIAD2 # define SHADOW_OFFSET kMyriadShadowOffset32 # else # define SHADOW_OFFSET kDefaultShadowOffset32 # endif #else # if SANITIZER_IOS # if SANITIZER_IOSSIM # define SHADOW_OFFSET kIosSimShadowOffset64 # else # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address # endif # elif defined(__aarch64__) # define SHADOW_OFFSET kAArch64_ShadowOffset64 # elif defined(__powerpc64__) # define SHADOW_OFFSET kPPC64_ShadowOffset64 # elif defined(__s390x__) # define SHADOW_OFFSET kSystemZ_ShadowOffset64 # elif SANITIZER_FREEBSD # define SHADOW_OFFSET kFreeBSD_ShadowOffset64 # elif SANITIZER_NETBSD # define SHADOW_OFFSET kNetBSD_ShadowOffset64 # elif SANITIZER_MAC # define SHADOW_OFFSET kDefaultShadowOffset64 # elif defined(__mips64) # define SHADOW_OFFSET kMIPS64_ShadowOffset64 # elif SANITIZER_WINDOWS64 # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address # else # define SHADOW_OFFSET kDefaultShort64bitShadowOffset # endif #endif #if SANITIZER_ANDROID && defined(__arm__) # define ASAN_PREMAP_SHADOW 1 #else # define ASAN_PREMAP_SHADOW 0 #endif #define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) #define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. 
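Aside: the SHADOW_SCALE / SHADOW_OFFSET selection above fully determines every mapping table in the header comment; each row is just Shadow = (Mem >> SHADOW_SCALE) + SHADOW_OFFSET. A minimal standalone sketch (a demo program, not part of the runtime; constants mirror the Linux/x86_64 default) that reproduces two boundaries of the first table:

#include <stdint.h>
#include <stdio.h>

/* Linux/x86_64 defaults: 8 app bytes per shadow byte, offset 0x7fff8000. */
#define SCALE 3
#define OFFSET 0x00007fff8000ULL

static uint64_t mem_to_shadow(uint64_t mem) { return (mem >> SCALE) + OFFSET; }

int main(void) {
  /* End of LowMem (0x00007fff7fff) maps to the end of LowShadow. */
  printf("0x%llx\n", (unsigned long long)mem_to_shadow(0x00007fff7fffULL)); /* 0x8fff6fff */
  /* Start of HighMem (0x10007fff8000) maps to the start of HighShadow. */
  printf("0x%llx\n", (unsigned long long)mem_to_shadow(0x10007fff8000ULL)); /* 0x2008fff7000 */
  return 0;
}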
#if DO_ASAN_MAPPING_PROFILE # define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; #else # define PROFILE_ASAN_MAPPING() #endif // If 1, all shadow boundaries are constants. // Don't set to 1 other than for testing. #define ASAN_FIXED_MAPPING 0 namespace __asan { extern uptr AsanMappingProfile[]; #if ASAN_FIXED_MAPPING // Fixed mapping for 64-bit Linux. Mostly used for performance comparison // with non-fixed mapping. As of r175253 (Feb 2013) the performance // difference between fixed and non-fixed mapping is below the noise level. static uptr kHighMemEnd = 0x7fffffffffffULL; static uptr kMidMemBeg = 0x3000000000ULL; static uptr kMidMemEnd = 0x4fffffffffULL; #else extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init. #endif } // namespace __asan #if SANITIZER_MYRIAD2 #include "asan_mapping_myriad.h" #else #define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET)) #define kLowMemBeg 0 #define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0) #define kLowShadowBeg SHADOW_OFFSET #define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) #define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1) #define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg) #define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd) # define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg) # define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd) // With the zero shadow base we can not actually map pages starting from 0. // This constant is somewhat arbitrary. #define kZeroBaseShadowStart 0 #define kZeroBaseMaxShadowStart (1 << 18) #define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \ : kZeroBaseShadowStart) #define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1) #define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0) #define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0) #define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) #define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) namespace __asan { static inline bool AddrIsInLowMem(uptr a) { PROFILE_ASAN_MAPPING(); return a <= kLowMemEnd; } static inline bool AddrIsInLowShadow(uptr a) { PROFILE_ASAN_MAPPING(); return a >= kLowShadowBeg && a <= kLowShadowEnd; } static inline bool AddrIsInMidMem(uptr a) { PROFILE_ASAN_MAPPING(); return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd; } static inline bool AddrIsInMidShadow(uptr a) { PROFILE_ASAN_MAPPING(); return kMidMemBeg && a >= kMidShadowBeg && a <= kMidShadowEnd; } static inline bool AddrIsInHighMem(uptr a) { PROFILE_ASAN_MAPPING(); return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd; } static inline bool AddrIsInHighShadow(uptr a) { PROFILE_ASAN_MAPPING(); return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd; } static inline bool AddrIsInShadowGap(uptr a) { PROFILE_ASAN_MAPPING(); if (kMidMemBeg) { if (a <= kShadowGapEnd) return SHADOW_OFFSET == 0 || a >= kShadowGapBeg; return (a >= kShadowGap2Beg && a <= kShadowGap2End) || (a >= kShadowGap3Beg && a <= kShadowGap3End); } // In zero-based shadow mode we treat addresses near zero as addresses // in shadow gap as well. 
  if (SHADOW_OFFSET == 0)
    return a <= kShadowGapEnd;
  return a >= kShadowGapBeg && a <= kShadowGapEnd;
}

}  // namespace __asan

#endif  // SANITIZER_MYRIAD2

namespace __asan {

static inline bool AddrIsInMem(uptr a) {
  PROFILE_ASAN_MAPPING();
  return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
         (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}

static inline uptr MemToShadow(uptr p) {
  PROFILE_ASAN_MAPPING();
  CHECK(AddrIsInMem(p));
  return MEM_TO_SHADOW(p);
}

static inline bool AddrIsInShadow(uptr a) {
  PROFILE_ASAN_MAPPING();
  return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}

static inline bool AddrIsAlignedByGranularity(uptr a) {
  PROFILE_ASAN_MAPPING();
  return (a & (SHADOW_GRANULARITY - 1)) == 0;
}

static inline bool AddressIsPoisoned(uptr a) {
  PROFILE_ASAN_MAPPING();
  if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a))
    return false;
  const uptr kAccessSize = 1;
  u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
  s8 shadow_value = *shadow_address;
  if (shadow_value) {
    u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1))
                            + kAccessSize - 1;
    return (last_accessed_byte >= shadow_value);
  }
  return false;
}

// Must be after all calls to PROFILE_ASAN_MAPPING().
static const uptr kAsanMappingProfileSize = __LINE__;

}  // namespace __asan

#endif  // ASAN_MAPPING_H
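Aside: AddressIsPoisoned above decodes ASan's partial-granule shadow encoding: a shadow byte of 0 marks the whole 8-byte granule addressable, a value k in 1..7 marks only the first k bytes addressable, and a negative value marks it fully poisoned. A small standalone sketch with hypothetical values, mirroring the check above:

#include <stdio.h>

int main(void) {
  signed char shadow_value = 5;     /* first 5 bytes of this granule valid */
  unsigned long long a = 0x1004;    /* byte at offset 4 within its granule */
  const unsigned kAccessSize = 1;
  unsigned last_accessed_byte = (a & 7) + kAccessSize - 1; /* == 4 */
  /* 4 >= 5 is false: a 1-byte access at offset 4 is fine; offset 5 would
   * be reported, and any nonzero negative shadow value always would be. */
  printf("poisoned: %d\n", shadow_value != 0 && last_accessed_byte >= shadow_value);
  return 0;
}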
Index: projects/clang700-import/contrib/compiler-rt/lib/profile/InstrProfilingUtil.c
===================================================================
--- projects/clang700-import/contrib/compiler-rt/lib/profile/InstrProfilingUtil.c	(revision 337153)
+++ projects/clang700-import/contrib/compiler-rt/lib/profile/InstrProfilingUtil.c	(revision 337154)
@@ -1,281 +1,290 @@
/*===- InstrProfilingUtil.c - Support library for PGO instrumentation -----===*\
|*
|* The LLVM Compiler Infrastructure
|*
|* This file is distributed under the University of Illinois Open Source
|* License. See LICENSE.TXT for details.
|*
\*===----------------------------------------------------------------------===*/

#ifdef _WIN32
#include <direct.h>
#include <process.h>
#include <sys/stat.h>
#include "WindowsMMap.h"
#else
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#endif

#ifdef COMPILER_RT_HAS_UNAME
#include <sys/utsname.h>
#endif

#include <string.h>
#include <stdlib.h>

#if defined(__linux__)
#include <signal.h>
#include <sys/prctl.h>
#endif

#include "InstrProfiling.h"
#include "InstrProfilingUtil.h"

+COMPILER_RT_WEAK unsigned lprofDirMode = 0755;
+
COMPILER_RT_VISIBILITY
void __llvm_profile_recursive_mkdir(char *path) {
  int i;

  for (i = 1; path[i] != '\0'; ++i) {
    char save = path[i];
    if (!IS_DIR_SEPARATOR(path[i]))
      continue;
    path[i] = '\0';
#ifdef _WIN32
    _mkdir(path);
#else
-    mkdir(path, 0755); /* Some of these will fail, ignore it. */
+    /* Some of these will fail, ignore it. */
+    mkdir(path, __llvm_profile_get_dir_mode());
#endif
    path[i] = save;
  }
}
+
+COMPILER_RT_VISIBILITY
+void __llvm_profile_set_dir_mode(unsigned Mode) { lprofDirMode = Mode; }
+
+COMPILER_RT_VISIBILITY
+unsigned __llvm_profile_get_dir_mode(void) { return lprofDirMode; }
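Aside: the weak lprofDirMode default plus the setter/getter pair added above let an instrumented program choose the permissions used for directories created along the profile path. A hedged usage sketch (the hook names are the ones declared in InstrProfilingUtil.h below; the 0700 policy and the call site are only an example):

extern void __llvm_profile_set_dir_mode(unsigned Mode);

int main(void) {
  /* Keep raw-profile directories private to the owning user; this must run
   * before the runtime writes (and therefore mkdir()s) the profile path. */
  __llvm_profile_set_dir_mode(0700);
  /* ... run the instrumented workload; the profile runtime picks it up ... */
  return 0;
}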
#if COMPILER_RT_HAS_ATOMICS != 1
COMPILER_RT_VISIBILITY
uint32_t lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV) {
  void *R = *Ptr;
  if (R == OldV) {
    *Ptr = NewV;
    return 1;
  }
  return 0;
}
COMPILER_RT_VISIBILITY
void *lprofPtrFetchAdd(void **Mem, long ByteIncr) {
  void *Old = *Mem;
  *((char **)Mem) += ByteIncr;
  return Old;
}
#endif

#ifdef _MSC_VER
COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
  WCHAR Buffer[COMPILER_RT_MAX_HOSTLEN];
  DWORD BufferSize = sizeof(Buffer);
  BOOL Result =
      GetComputerNameExW(ComputerNameDnsFullyQualified, Buffer, &BufferSize);
  if (!Result)
    return -1;
  if (WideCharToMultiByte(CP_UTF8, 0, Buffer, -1, Name, Len, NULL, NULL) == 0)
    return -1;
  return 0;
}
#elif defined(COMPILER_RT_HAS_UNAME)
COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
  struct utsname N;
  int R = uname(&N);
  if (R >= 0) {
    strncpy(Name, N.nodename, Len);
    return 0;
  }
  return R;
}
#endif

COMPILER_RT_VISIBILITY int lprofLockFd(int fd) {
#ifdef COMPILER_RT_HAS_FCNTL_LCK
  struct flock s_flock;

  s_flock.l_whence = SEEK_SET;
  s_flock.l_start = 0;
  s_flock.l_len = 0; /* Until EOF. */
  s_flock.l_pid = getpid();
  s_flock.l_type = F_WRLCK;

  while (fcntl(fd, F_SETLKW, &s_flock) == -1) {
    if (errno != EINTR) {
      if (errno == ENOLCK) {
        return -1;
      }
      break;
    }
  }
  return 0;
#else
  flock(fd, LOCK_EX);
  return 0;
#endif
}

COMPILER_RT_VISIBILITY int lprofUnlockFd(int fd) {
#ifdef COMPILER_RT_HAS_FCNTL_LCK
  struct flock s_flock;

  s_flock.l_whence = SEEK_SET;
  s_flock.l_start = 0;
  s_flock.l_len = 0; /* Until EOF. */
  s_flock.l_pid = getpid();
  s_flock.l_type = F_UNLCK;

  while (fcntl(fd, F_SETLKW, &s_flock) == -1) {
    if (errno != EINTR) {
      if (errno == ENOLCK) {
        return -1;
      }
      break;
    }
  }
  return 0;
#else
  flock(fd, LOCK_UN);
  return 0;
#endif
}

COMPILER_RT_VISIBILITY FILE *lprofOpenFileEx(const char *ProfileName) {
  FILE *f;
  int fd;
#ifdef COMPILER_RT_HAS_FCNTL_LCK
  fd = open(ProfileName, O_RDWR | O_CREAT, 0666);
  if (fd < 0)
    return NULL;

  if (lprofLockFd(fd) != 0)
    PROF_WARN("Data may be corrupted during profile merging : %s\n",
              "Fail to obtain file lock due to system limit.");

  f = fdopen(fd, "r+b");
#elif defined(_WIN32)
  // FIXME: Use the wide variants to handle Unicode filenames.
  HANDLE h = CreateFileA(ProfileName, GENERIC_READ | GENERIC_WRITE, 0, 0,
                         OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0);
  if (h == INVALID_HANDLE_VALUE)
    return NULL;

  fd = _open_osfhandle((intptr_t)h, 0);
  if (fd == -1) {
    CloseHandle(h);
    return NULL;
  }

  f = _fdopen(fd, "r+b");
  if (f == 0) {
    CloseHandle(h);
    return NULL;
  }
#else
  /* Worst case no locking applied. */
  PROF_WARN("Concurrent file access is not supported : %s\n",
            "lack file locking");
  fd = open(ProfileName, O_RDWR | O_CREAT, 0666);
  if (fd < 0)
    return NULL;
  f = fdopen(fd, "r+b");
#endif

  return f;
}

COMPILER_RT_VISIBILITY const char *lprofGetPathPrefix(int *PrefixStrip,
                                                      size_t *PrefixLen) {
  const char *Prefix = getenv("GCOV_PREFIX");
  const char *PrefixStripStr = getenv("GCOV_PREFIX_STRIP");

  *PrefixLen = 0;
  *PrefixStrip = 0;
  if (Prefix == NULL || Prefix[0] == '\0')
    return NULL;

  if (PrefixStripStr) {
    *PrefixStrip = atoi(PrefixStripStr);

    /* Negative GCOV_PREFIX_STRIP values are ignored */
    if (*PrefixStrip < 0)
      *PrefixStrip = 0;
  } else {
    *PrefixStrip = 0;
  }
  *PrefixLen = strlen(Prefix);

  return Prefix;
}

COMPILER_RT_VISIBILITY void
lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
                     size_t PrefixLen, int PrefixStrip) {

  const char *Ptr;
  int Level;
  const char *StrippedPathStr = PathStr;

  for (Level = 0, Ptr = PathStr + 1; Level < PrefixStrip; ++Ptr) {
    if (*Ptr == '\0')
      break;

    if (!IS_DIR_SEPARATOR(*Ptr))
      continue;

    StrippedPathStr = Ptr;
    ++Level;
  }

  memcpy(Dest, Prefix, PrefixLen);

  if (!IS_DIR_SEPARATOR(Prefix[PrefixLen - 1]))
    Dest[PrefixLen++] = DIR_SEPARATOR;

  memcpy(Dest + PrefixLen, StrippedPathStr, strlen(StrippedPathStr) + 1);
}

COMPILER_RT_VISIBILITY const char *
lprofFindFirstDirSeparator(const char *Path) {
  const char *Sep = strchr(Path, DIR_SEPARATOR);
#if defined(DIR_SEPARATOR_2)
  const char *Sep2 = strchr(Path, DIR_SEPARATOR_2);
  if (Sep2 && (!Sep || Sep2 < Sep))
    Sep = Sep2;
#endif
  return Sep;
}

COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
  const char *Sep = strrchr(Path, DIR_SEPARATOR);
#if defined(DIR_SEPARATOR_2)
  const char *Sep2 = strrchr(Path, DIR_SEPARATOR_2);
  if (Sep2 && (!Sep || Sep2 > Sep))
    Sep = Sep2;
#endif
  return Sep;
}

COMPILER_RT_VISIBILITY int lprofSuspendSigKill() {
#if defined(__linux__)
  int PDeachSig = 0;
  /* Temporarily suspend getting SIGKILL upon exit of the parent process. */
  if (prctl(PR_GET_PDEATHSIG, &PDeachSig) == 0 && PDeachSig == SIGKILL)
    prctl(PR_SET_PDEATHSIG, 0);
  return (PDeachSig == SIGKILL);
#else
  return 0;
#endif
}

COMPILER_RT_VISIBILITY void lprofRestoreSigKill() {
#if defined(__linux__)
  prctl(PR_SET_PDEATHSIG, SIGKILL);
#endif
}

Index: projects/clang700-import/contrib/compiler-rt/lib/profile/InstrProfilingUtil.h
===================================================================
--- projects/clang700-import/contrib/compiler-rt/lib/profile/InstrProfilingUtil.h	(revision 337153)
+++ projects/clang700-import/contrib/compiler-rt/lib/profile/InstrProfilingUtil.h	(revision 337154)
@@ -1,65 +1,71 @@
/*===- InstrProfilingUtil.h - Support library for PGO instrumentation -----===*\
|*
|* The LLVM Compiler Infrastructure
|*
|* This file is distributed under the University of Illinois Open Source
|* License. See LICENSE.TXT for details.
|*
\*===----------------------------------------------------------------------===*/

#ifndef PROFILE_INSTRPROFILINGUTIL_H
#define PROFILE_INSTRPROFILINGUTIL_H

#include <stddef.h>
#include <stdio.h>

/*! \brief Create a directory tree. */
void __llvm_profile_recursive_mkdir(char *Pathname);

+/*! Set the mode used when creating profile directories. */
+void __llvm_profile_set_dir_mode(unsigned Mode);
+
+/*! Return the directory creation mode. */
+unsigned __llvm_profile_get_dir_mode(void);
+
int lprofLockFd(int fd);
int lprofUnlockFd(int fd);

/*! Open file \c Filename for read+write with write
 * lock for exclusive access. The caller will block
 * if the lock is already held by another process.
*/ FILE *lprofOpenFileEx(const char *Filename); /* PS4 doesn't have getenv. Define a shim. */ #if __ORBIS__ static inline char *getenv(const char *name) { return NULL; } #endif /* #if __ORBIS__ */ /* GCOV_PREFIX and GCOV_PREFIX_STRIP support */ /* Return the path prefix specified by GCOV_PREFIX environment variable. * If GCOV_PREFIX_STRIP is also specified, the strip level (integer value) * is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen. */ const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen); /* Apply the path prefix specified in \c Prefix to path string in \c PathStr, * and store the result to buffer pointed to by \c Buffer. If \c PrefixStrip * is not zero, path prefixes are stripped from \c PathStr (the level of * stripping is specified by \c PrefixStrip) before \c Prefix is added. */ void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix, size_t PrefixLen, int PrefixStrip); /* Returns a pointer to the first occurrence of \c DIR_SEPARATOR char in * the string \c Path, or NULL if the char is not found. */ const char *lprofFindFirstDirSeparator(const char *Path); /* Returns a pointer to the last occurrence of \c DIR_SEPARATOR char in * the string \c Path, or NULL if the char is not found. */ const char *lprofFindLastDirSeparator(const char *Path); int lprofGetHostName(char *Name, int Len); unsigned lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV); void *lprofPtrFetchAdd(void **Mem, long ByteIncr); /* Temporarily suspend SIGKILL. Return value of 1 means a restore is needed. * Other return values mean no restore is needed. */ int lprofSuspendSigKill(); /* Restore previously suspended SIGKILL. */ void lprofRestoreSigKill(); #endif /* PROFILE_INSTRPROFILINGUTIL_H */ Index: projects/clang700-import/contrib/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h (revision 337154) @@ -1,229 +1,224 @@ //===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer/AddressSanitizer runtime. 
// //===----------------------------------------------------------------------===// #ifndef SANITIZER_MUTEX_H #define SANITIZER_MUTEX_H #include "sanitizer_atomic.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" namespace __sanitizer { class StaticSpinMutex { public: void Init() { atomic_store(&state_, 0, memory_order_relaxed); } void Lock() { if (TryLock()) return; LockSlow(); } bool TryLock() { return atomic_exchange(&state_, 1, memory_order_acquire) == 0; } void Unlock() { atomic_store(&state_, 0, memory_order_release); } void CheckLocked() { CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1); } private: atomic_uint8_t state_; void NOINLINE LockSlow() { for (int i = 0;; i++) { if (i < 10) proc_yield(10); else internal_sched_yield(); if (atomic_load(&state_, memory_order_relaxed) == 0 && atomic_exchange(&state_, 1, memory_order_acquire) == 0) return; } } }; class SpinMutex : public StaticSpinMutex { public: SpinMutex() { Init(); } private: SpinMutex(const SpinMutex&); void operator=(const SpinMutex&); }; class BlockingMutex { public: -#if SANITIZER_WINDOWS - // Windows does not currently support LinkerInitialized - explicit BlockingMutex(LinkerInitialized); -#else explicit constexpr BlockingMutex(LinkerInitialized) - : opaque_storage_ {0, }, owner_(0) {} -#endif + : opaque_storage_ {0, }, owner_ {0} {} BlockingMutex(); void Lock(); void Unlock(); // This function does not guarantee an explicit check that the calling thread // is the thread which owns the mutex. This behavior, while more strictly // correct, causes problems in cases like StopTheWorld, where a parent thread // owns the mutex but a child checks that it is locked. Rather than // maintaining complex state to work around those situations, the check only // checks that the mutex is owned, and assumes callers to be generally // well-behaved. void CheckLocked(); private: // Solaris mutex_t has a member that requires 64-bit alignment. ALIGNED(8) uptr opaque_storage_[10]; uptr owner_; // for debugging }; // Reader-writer spin mutex. 
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator = (const RWMutex&);
};

template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H
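Aside: the BlockingMutex changes above work together with the sanitizer_win.cc hunk below, which swaps the Windows CRITICAL_SECTION for an SRWLOCK. A CRITICAL_SECTION is only usable after InitializeCriticalSection(), which is why Windows previously needed a non-constexpr LinkerInitialized constructor; an SRWLOCK's initial state is all-zero bytes (SRWLOCK_INIT is a zero pattern), so constexpr zero-initialization of opaque_storage_ is enough. A standalone sketch of the property being relied on (demo only, not runtime code):

#include <windows.h>
#include <string.h>

int main(void) {
  SRWLOCK lock;
  memset(&lock, 0, sizeof(lock));  /* same bytes as SRWLOCK_INIT */
  AcquireSRWLockExclusive(&lock);  /* valid with no explicit init call */
  ReleaseSRWLockExclusive(&lock);
  return 0;
}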
Index: projects/clang700-import/contrib/compiler-rt/lib/sanitizer_common/sanitizer_win.cc
===================================================================
--- projects/clang700-import/contrib/compiler-rt/lib/sanitizer_common/sanitizer_win.cc	(revision 337153)
+++ projects/clang700-import/contrib/compiler-rt/lib/sanitizer_common/sanitizer_win.cc	(revision 337154)
@@ -1,1077 +1,1056 @@
//===-- sanitizer_win.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_win_defs.h"

// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
// macro should not terminate the process to avoid infinite recursion.
#if defined(__clang__)
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
# define BUILTIN_UNREACHABLE() __assume(0)
#else
# define BUILTIN_UNREACHABLE()
#endif

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h

uptr GetPageSize() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}

uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxUserVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

uptr GetMaxVirtualAddress() {
  return GetMaxUserVirtualAddress();
}

bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
tid_t GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
if (VirtualFree(addr, 0, MEM_RELEASE) == 0) { if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) { Report("ERROR: %s failed to " "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n", SanitizerToolName, size, size, addr, GetLastError()); CHECK("unable to unmap" && 0); } } } static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type, const char *mmap_type) { error_t last_error = GetLastError(); if (last_error == ERROR_NOT_ENOUGH_MEMORY) return nullptr; ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error); } void *MmapOrDieOnFatalError(uptr size, const char *mem_type) { void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (rv == 0) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate"); return rv; } // We want to map a chunk of address space aligned to 'alignment'. void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, const char *mem_type) { CHECK(IsPowerOfTwo(size)); CHECK(IsPowerOfTwo(alignment)); // Windows will align our allocations to at least 64K. alignment = Max(alignment, GetMmapGranularity()); uptr mapped_addr = (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (!mapped_addr) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); // If we got it right on the first try, return. Otherwise, unmap it and go to // the slow path. if (IsAligned(mapped_addr, alignment)) return (void*)mapped_addr; if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0) ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError()); // If we didn't get an aligned address, overallocate, find an aligned address, // unmap, and try to allocate at that aligned address. int retries = 0; const int kMaxRetries = 10; for (; retries < kMaxRetries && (mapped_addr == 0 || !IsAligned(mapped_addr, alignment)); retries++) { // Overallocate size + alignment bytes. mapped_addr = (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS); if (!mapped_addr) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); // Find the aligned address. uptr aligned_addr = RoundUpTo(mapped_addr, alignment); // Free the overallocation. if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0) ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError()); // Attempt to allocate exactly the number of bytes we need at the aligned // address. This may fail for a number of reasons, in which case we continue // the loop. mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); } // Fail if we can't make this work quickly. if (retries == kMaxRetries && mapped_addr == 0) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); return (void *)mapped_addr; } bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { // FIXME: is this really "NoReserve"? On Win32 this does not matter much, // but on Win64 it does. (void)name; // unsupported #if !SANITIZER_GO && SANITIZER_WINDOWS64 // On asan/Windows64, use MEM_COMMIT would result in error // 1455:ERROR_COMMITMENT_LIMIT. // Asan uses exception handler to commit page on demand. 
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
      MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
      MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
#endif
  if (p == 0) {
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
    return false;
  }
  return true;
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
      MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size) {
  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  // Only unmap if it covers the entire range.
  CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
  // We unmap the whole range, just null out the base.
  base_ = nullptr;
  size_ = 0;
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
      MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
  size_ = size;
  name_ = name;
  (void)os_handle_;  // unsupported
  return reinterpret_cast<uptr>(base_);
}

void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name; // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes (error code: %d)\n",
           SanitizerToolName, size, size, GetLastError());
  return res;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
}

bool NoHugePagesInRegion(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
  return true;
}

bool DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
  return true;
}
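Aside: MmapFixedNoReserve above only reserves on Win64 because committing the whole shadow up front would hit the commitment limit (error 1455); ASan instead commits shadow pages lazily from its exception handler. A hedged standalone sketch of that reserve-now/commit-on-fault idiom (a toy program, not ASan's actual handler):

#include <windows.h>

static LONG CALLBACK CommitOnDemand(EXCEPTION_POINTERS *info) {
  if (info->ExceptionRecord->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
    return EXCEPTION_CONTINUE_SEARCH;
  /* ExceptionInformation[1] is the faulting address for access violations. */
  void *addr = (void *)info->ExceptionRecord->ExceptionInformation[1];
  /* Commit the faulting page; if that worked, retry the instruction. */
  if (VirtualAlloc(addr, 1, MEM_COMMIT, PAGE_READWRITE))
    return EXCEPTION_CONTINUE_EXECUTION;
  return EXCEPTION_CONTINUE_SEARCH;
}

int main(void) {
  AddVectoredExceptionHandler(/*First=*/1, CommitOnDemand);
  /* Reserve 1 GiB of address space without charging it against the commit
   * limit -- the same trick MmapFixedNoReserve plays on asan/Win64. */
  char *p = (char *)VirtualAlloc(NULL, 1ULL << 30, MEM_RESERVE, PAGE_READWRITE);
  if (!p) return 1;
  p[12345] = 42;  /* faults once; the handler commits the page */
  return p[12345] == 42 ? 0 : 1;
}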
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}

bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}

static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}

const char *GetPwd() {
  UNIMPLEMENTED();
}

u32 GetUid() {
  UNIMPLEMENTED();
}

namespace {
struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}
#endif
}  // namespace

#if !SANITIZER_GO
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalMmapVector<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}
#endif

void PrintModuleMap() { }

void DisableCoreDumperIfNecessary() {
  // Do nothing.
} void ReExec() { UNIMPLEMENTED(); } void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {} bool StackSizeIsUnlimited() { UNIMPLEMENTED(); } void SetStackSizeLimitInBytes(uptr limit) { UNIMPLEMENTED(); } bool AddressSpaceIsUnlimited() { UNIMPLEMENTED(); } void SetAddressSpaceUnlimited() { UNIMPLEMENTED(); } bool IsPathSeparator(const char c) { return c == '\\' || c == '/'; } bool IsAbsolutePath(const char *path) { UNIMPLEMENTED(); } void SleepForSeconds(int seconds) { Sleep(seconds * 1000); } void SleepForMillis(int millis) { Sleep(millis); } u64 NanoTime() { static LARGE_INTEGER frequency = {}; LARGE_INTEGER counter; if (UNLIKELY(frequency.QuadPart == 0)) { QueryPerformanceFrequency(&frequency); CHECK_NE(frequency.QuadPart, 0); } QueryPerformanceCounter(&counter); counter.QuadPart *= 1000ULL * 1000000ULL; counter.QuadPart /= frequency.QuadPart; return counter.QuadPart; } u64 MonotonicNanoTime() { return NanoTime(); } void Abort() { internal__exit(3); } #if !SANITIZER_GO // Read the file to extract the ImageBase field from the PE header. If ASLR is // disabled and this virtual address is available, the loader will typically // load the image at this address. Therefore, we call it the preferred base. Any // addresses in the DWARF typically assume that the object has been loaded at // this address. static uptr GetPreferredBase(const char *modname) { fd_t fd = OpenFile(modname, RdOnly, nullptr); if (fd == kInvalidFd) return 0; FileCloser closer(fd); // Read just the DOS header. IMAGE_DOS_HEADER dos_header; uptr bytes_read; if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) || bytes_read != sizeof(dos_header)) return 0; // The file should start with the right signature. if (dos_header.e_magic != IMAGE_DOS_SIGNATURE) return 0; // The layout at e_lfanew is: // "PE\0\0" // IMAGE_FILE_HEADER // IMAGE_OPTIONAL_HEADER // Seek to e_lfanew and read all that data. char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)]; if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) == INVALID_SET_FILE_POINTER) return 0; if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) || bytes_read != sizeof(buf)) return 0; // Check for "PE\0\0" before the PE header. char *pe_sig = &buf[0]; if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0) return 0; // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted. IMAGE_OPTIONAL_HEADER *pe_header = (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER)); // Check for more magic in the PE header. if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC) return 0; // Finally, return the ImageBase. return (uptr)pe_header->ImageBase; } void ListOfModules::init() { clearOrInit(); HANDLE cur_process = GetCurrentProcess(); // Query the list of modules. Start by assuming there are no more than 256 // modules and retry if that's not sufficient. HMODULE *hmodules = 0; uptr modules_buffer_size = sizeof(HMODULE) * 256; DWORD bytes_required; while (!hmodules) { hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__); CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size, &bytes_required)); if (bytes_required > modules_buffer_size) { // Either there turned out to be more than 256 hmodules, or new hmodules // could have loaded since the last try. Retry. 
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  // |num_modules| is the number of modules actually present,
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    wchar_t modname_utf16[kMaxPathLength];
    int modname_utf16_len =
        GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    char module_name[kMaxPathLength];
    int module_name_len =
        ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
                              &module_name[0], kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find
    // the right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base = GetPreferredBase(&module_name[0]);
    uptr adjusted_base = base_address - preferred_base;

    LoadedModule cur_module;
    cur_module.set(module_name, adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
    modules_.push_back(cur_module);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}

void ListOfModules::fallbackInit() { clear(); }

// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

int Atexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

static int RunAtexit() {
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

#pragma section(".CRT$XID", long, read)  // NOLINT
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
#endif

// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}

void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
return false; } bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written, error_t *error_p) { CHECK(fd != kInvalidFd); // Handle null optional parameters. error_t dummy_error; error_p = error_p ? error_p : &dummy_error; uptr dummy_bytes_written; bytes_written = bytes_written ? bytes_written : &dummy_bytes_written; // Initialize output parameters in case we fail. *error_p = 0; *bytes_written = 0; // Map the conventional Unix fds 1 and 2 to Windows handles. They might be // closed, in which case this will fail. if (fd == kStdoutFd || fd == kStderrFd) { fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE); if (fd == 0) { *error_p = ERROR_INVALID_HANDLE; return false; } } DWORD bytes_written_32; if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) { *error_p = GetLastError(); return false; } else { *bytes_written = bytes_written_32; return true; } } bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) { UNIMPLEMENTED(); } uptr internal_sched_yield() { Sleep(0); return 0; } void internal__exit(int exitcode) { // ExitProcess runs some finalizers, so use TerminateProcess to avoid that. // The debugger doesn't stop on TerminateProcess like it does on ExitProcess, // so add our own breakpoint here. if (::IsDebuggerPresent()) __debugbreak(); TerminateProcess(GetCurrentProcess(), exitcode); BUILTIN_UNREACHABLE(); } uptr internal_ftruncate(fd_t fd, uptr size) { UNIMPLEMENTED(); } uptr GetRSS() { PROCESS_MEMORY_COUNTERS counters; if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters))) return 0; return counters.WorkingSetSize; } void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; } void internal_join_thread(void *th) { } // ---------------------- BlockingMutex ---------------- {{{1 -const uptr LOCK_UNINITIALIZED = 0; -const uptr LOCK_READY = (uptr)-1; -BlockingMutex::BlockingMutex(LinkerInitialized li) { - // FIXME: see comments in BlockingMutex::Lock() for the details. - CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED); - - CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_)); - InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_); - owner_ = LOCK_READY; -} - BlockingMutex::BlockingMutex() { - CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_)); - InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_); - owner_ = LOCK_READY; + CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_)); + internal_memset(this, 0, sizeof(*this)); } void BlockingMutex::Lock() { - if (owner_ == LOCK_UNINITIALIZED) { - // FIXME: hm, global BlockingMutex objects are not initialized?!? - // This might be a side effect of the clang+cl+link Frankenbuild... - new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1)); - - // FIXME: If it turns out the linker doesn't invoke our - // constructors, we should probably manually Lock/Unlock all the global - // locks while we're starting in one thread to avoid double-init races. 
- } - EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_); - CHECK_EQ(owner_, LOCK_READY); + AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_); + CHECK_EQ(owner_, 0); owner_ = GetThreadSelf(); } void BlockingMutex::Unlock() { - CHECK_EQ(owner_, GetThreadSelf()); - owner_ = LOCK_READY; - LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_); + CheckLocked(); + owner_ = 0; + ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_); } void BlockingMutex::CheckLocked() { CHECK_EQ(owner_, GetThreadSelf()); } uptr GetTlsSize() { return 0; } void InitTlsSize() { } void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size) { #if SANITIZER_GO *stk_addr = 0; *stk_size = 0; *tls_addr = 0; *tls_size = 0; #else uptr stack_top, stack_bottom; GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); *stk_addr = stack_bottom; *stk_size = stack_top - stack_bottom; *tls_addr = 0; *tls_size = 0; #endif } void ReportFile::Write(const char *buffer, uptr length) { SpinMutexLock l(mu); ReopenIfNecessary(); if (!WriteToFile(fd, buffer, length)) { // stderr may be closed, but we may be able to print to the debugger // instead. This is the case when launching a program from Visual Studio, // and the following routine should write to its console. OutputDebugStringA(buffer); } } void SetAlternateSignalStack() { // FIXME: Decide what to do on Windows. } void UnsetAlternateSignalStack() { // FIXME: Decide what to do on Windows. } void InstallDeadlySignalHandlers(SignalHandlerType handler) { (void)handler; // FIXME: Decide what to do on Windows. } HandleSignalMode GetHandleSignalMode(int signum) { // FIXME: Decide what to do on Windows. return kHandleSignalNo; } // Check based on flags if we should handle this exception. bool IsHandledDeadlyException(DWORD exceptionCode) { switch (exceptionCode) { case EXCEPTION_ACCESS_VIOLATION: case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: case EXCEPTION_STACK_OVERFLOW: case EXCEPTION_DATATYPE_MISALIGNMENT: case EXCEPTION_IN_PAGE_ERROR: return common_flags()->handle_segv; case EXCEPTION_ILLEGAL_INSTRUCTION: case EXCEPTION_PRIV_INSTRUCTION: case EXCEPTION_BREAKPOINT: return common_flags()->handle_sigill; case EXCEPTION_FLT_DENORMAL_OPERAND: case EXCEPTION_FLT_DIVIDE_BY_ZERO: case EXCEPTION_FLT_INEXACT_RESULT: case EXCEPTION_FLT_INVALID_OPERATION: case EXCEPTION_FLT_OVERFLOW: case EXCEPTION_FLT_STACK_CHECK: case EXCEPTION_FLT_UNDERFLOW: case EXCEPTION_INT_DIVIDE_BY_ZERO: case EXCEPTION_INT_OVERFLOW: return common_flags()->handle_sigfpe; } return false; } bool IsAccessibleMemoryRange(uptr beg, uptr size) { SYSTEM_INFO si; GetNativeSystemInfo(&si); uptr page_size = si.dwPageSize; uptr page_mask = ~(page_size - 1); for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask; page <= end;) { MEMORY_BASIC_INFORMATION info; if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info)) return false; if (info.Protect == 0 || info.Protect == PAGE_NOACCESS || info.Protect == PAGE_EXECUTE) return false; if (info.RegionSize == 0) return false; page += info.RegionSize; } return true; } bool SignalContext::IsStackOverflow() const { return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW; } void SignalContext::InitPcSpBp() { EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; CONTEXT *context_record = (CONTEXT *)context; pc = (uptr)exception_record->ExceptionAddress; #ifdef _WIN64 bp = (uptr)context_record->Rbp; sp = (uptr)context_record->Rsp; #else bp = (uptr)context_record->Ebp; sp = (uptr)context_record->Esp; #endif } uptr 
SignalContext::GetAddress() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  return exception_record->ExceptionInformation[1];
}

bool SignalContext::IsMemoryAccess() const {
  return GetWriteFlag() != SignalContext::UNKNOWN;
}

SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  // The contents of this array are documented at
  // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  switch (exception_record->ExceptionInformation[0]) {
    case 0:
      return SignalContext::READ;
    case 1:
      return SignalContext::WRITE;
    case 8:
      return SignalContext::UNKNOWN;
  }
  return SignalContext::UNKNOWN;
}

void SignalContext::DumpAllRegisters(void *context) {
  // FIXME: Implement this.
}

int SignalContext::GetType() const {
  return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
}

const char *SignalContext::Describe() const {
  unsigned code = GetType();
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow";
  }
  return "unknown exception";
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  // FIXME: Actually implement this function.
  CHECK_GT(buf_len, 0);
  buf[0] = 0;
  return 0;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void CheckVMASize() {
  // Do nothing.
}

void MaybeReexec() {
  // No need to re-exec on Windows.
}

void CheckASLR() {
  // Do nothing
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
  // FIXME: implement on this platform
  // Should be implemented based on
  // SymbolizerProcess::StartAtSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cc.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length, bool blocking) { UNIMPLEMENTED(); } u32 GetNumberOfCPUs() { SYSTEM_INFO sysinfo = {}; GetNativeSystemInfo(&sysinfo); return sysinfo.dwNumberOfProcessors; } } // namespace __sanitizer #endif // _WIN32 Index: projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_checks.inc =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_checks.inc (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_checks.inc (revision 337154) @@ -1,47 +1,49 @@ //===-- ubsan_checks.inc ----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // List of checks handled by UBSan runtime. // //===----------------------------------------------------------------------===// #ifndef UBSAN_CHECK # error "Define UBSAN_CHECK prior to including this file!" #endif // UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) // SummaryKind and FSanitizeFlagName should be string literals. UBSAN_CHECK(GenericUB, "undefined-behavior", "undefined") UBSAN_CHECK(NullPointerUse, "null-pointer-use", "null") UBSAN_CHECK(PointerOverflow, "pointer-overflow", "pointer-overflow") UBSAN_CHECK(MisalignedPointerUse, "misaligned-pointer-use", "alignment") UBSAN_CHECK(InsufficientObjectSize, "insufficient-object-size", "object-size") UBSAN_CHECK(SignedIntegerOverflow, "signed-integer-overflow", "signed-integer-overflow") UBSAN_CHECK(UnsignedIntegerOverflow, "unsigned-integer-overflow", "unsigned-integer-overflow") UBSAN_CHECK(IntegerDivideByZero, "integer-divide-by-zero", "integer-divide-by-zero") UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero", "float-divide-by-zero") UBSAN_CHECK(InvalidBuiltin, "invalid-builtin-use", "invalid-builtin-use") +UBSAN_CHECK(ImplicitIntegerTruncation, "implicit-integer-truncation", + "implicit-integer-truncation") UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "shift-base") UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent", "shift-exponent") UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "bounds") UBSAN_CHECK(UnreachableCall, "unreachable-call", "unreachable") UBSAN_CHECK(MissingReturn, "missing-return", "return") UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index", "vla-bound") UBSAN_CHECK(FloatCastOverflow, "float-cast-overflow", "float-cast-overflow") UBSAN_CHECK(InvalidBoolLoad, "invalid-bool-load", "bool") UBSAN_CHECK(InvalidEnumLoad, "invalid-enum-load", "enum") UBSAN_CHECK(FunctionTypeMismatch, "function-type-mismatch", "function") UBSAN_CHECK(InvalidNullReturn, "invalid-null-return", "returns-nonnull-attribute") UBSAN_CHECK(InvalidNullArgument, "invalid-null-argument", "nonnull-attribute") UBSAN_CHECK(DynamicTypeMismatch, "dynamic-type-mismatch", "vptr") UBSAN_CHECK(CFIBadType, "cfi-bad-type", "cfi") Index: projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_diag.h =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_diag.h (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_diag.h (revision 337154) @@ -1,270 +1,270 @@ //===-- ubsan_diag.h --------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the 
University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Diagnostics emission for Clang's undefined behavior sanitizer. // //===----------------------------------------------------------------------===// #ifndef UBSAN_DIAG_H #define UBSAN_DIAG_H #include "ubsan_value.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_symbolizer.h" namespace __ubsan { class SymbolizedStackHolder { SymbolizedStack *Stack; void clear() { if (Stack) Stack->ClearAll(); } public: explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr) : Stack(Stack) {} ~SymbolizedStackHolder() { clear(); } void reset(SymbolizedStack *S) { if (Stack != S) clear(); Stack = S; } const SymbolizedStack *get() const { return Stack; } }; SymbolizedStack *getSymbolizedLocation(uptr PC); inline SymbolizedStack *getCallerLocation(uptr CallerPC) { CHECK(CallerPC); uptr PC = StackTrace::GetPreviousInstructionPc(CallerPC); return getSymbolizedLocation(PC); } /// A location of some data within the program's address space. typedef uptr MemoryLocation; /// \brief Location at which a diagnostic can be emitted. Either a /// SourceLocation, a MemoryLocation, or a SymbolizedStack. class Location { public: enum LocationKind { LK_Null, LK_Source, LK_Memory, LK_Symbolized }; private: LocationKind Kind; // FIXME: In C++11, wrap these in an anonymous union. SourceLocation SourceLoc; MemoryLocation MemoryLoc; const SymbolizedStack *SymbolizedLoc; // Not owned. public: Location() : Kind(LK_Null) {} Location(SourceLocation Loc) : Kind(LK_Source), SourceLoc(Loc) {} Location(MemoryLocation Loc) : Kind(LK_Memory), MemoryLoc(Loc) {} // SymbolizedStackHolder must outlive Location object. Location(const SymbolizedStackHolder &Stack) : Kind(LK_Symbolized), SymbolizedLoc(Stack.get()) {} LocationKind getKind() const { return Kind; } bool isSourceLocation() const { return Kind == LK_Source; } bool isMemoryLocation() const { return Kind == LK_Memory; } bool isSymbolizedStack() const { return Kind == LK_Symbolized; } SourceLocation getSourceLocation() const { CHECK(isSourceLocation()); return SourceLoc; } MemoryLocation getMemoryLocation() const { CHECK(isMemoryLocation()); return MemoryLoc; } const SymbolizedStack *getSymbolizedStack() const { CHECK(isSymbolizedStack()); return SymbolizedLoc; } }; /// A diagnostic severity level. enum DiagLevel { DL_Error, ///< An error. DL_Note ///< A note, attached to a prior diagnostic. }; /// \brief Annotation for a range of locations in a diagnostic. class Range { Location Start, End; const char *Text; public: Range() : Start(), End(), Text() {} Range(MemoryLocation Start, MemoryLocation End, const char *Text) : Start(Start), End(End), Text(Text) {} Location getStart() const { return Start; } Location getEnd() const { return End; } const char *getText() const { return Text; } }; /// \brief A C++ type name. Really just a strong typedef for 'const char*'. class TypeName { const char *Name; public: TypeName(const char *Name) : Name(Name) {} const char *getName() const { return Name; } }; enum class ErrorType { #define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) Name, #include "ubsan_checks.inc" #undef UBSAN_CHECK }; /// \brief Representation of an in-flight diagnostic. /// /// Temporary \c Diag instances are created by the handler routines to /// accumulate arguments for a diagnostic. The destructor emits the diagnostic /// message. 
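/// A typical (illustrative) use, mirroring the handlers in ubsan_handlers.cc:
/// the handler opens a ScopedReport, streams arguments into a temporary Diag,
/// and the destructor substitutes them for %0, %1, ... when it prints:
/// \code
///   ScopedReport R(Opts, Loc, ET);
///   Diag(Loc, DL_Error, ET, "%0 is outside the range of values of type %1")
///       << Value(Ty, Val) << Ty;  // emitted when the temporary is destroyed
/// \endcode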
class Diag { /// The location at which the problem occurred. Location Loc; /// The diagnostic level. DiagLevel Level; /// The error type. ErrorType ET; /// The message which will be emitted, with %0, %1, ... placeholders for /// arguments. const char *Message; public: /// Kinds of arguments, corresponding to members of \c Arg's union. enum ArgKind { AK_String, ///< A string argument, displayed as-is. AK_TypeName,///< A C++ type name, possibly demangled before display. AK_UInt, ///< An unsigned integer argument. AK_SInt, ///< A signed integer argument. AK_Float, ///< A floating-point argument. AK_Pointer ///< A pointer argument, displayed in hexadecimal. }; /// An individual diagnostic message argument. struct Arg { Arg() {} Arg(const char *String) : Kind(AK_String), String(String) {} Arg(TypeName TN) : Kind(AK_TypeName), String(TN.getName()) {} Arg(UIntMax UInt) : Kind(AK_UInt), UInt(UInt) {} Arg(SIntMax SInt) : Kind(AK_SInt), SInt(SInt) {} Arg(FloatMax Float) : Kind(AK_Float), Float(Float) {} Arg(const void *Pointer) : Kind(AK_Pointer), Pointer(Pointer) {} ArgKind Kind; union { const char *String; UIntMax UInt; SIntMax SInt; FloatMax Float; const void *Pointer; }; }; private: - static const unsigned MaxArgs = 5; + static const unsigned MaxArgs = 8; static const unsigned MaxRanges = 1; /// The arguments which have been added to this diagnostic so far. Arg Args[MaxArgs]; unsigned NumArgs; /// The ranges which have been added to this diagnostic so far. Range Ranges[MaxRanges]; unsigned NumRanges; Diag &AddArg(Arg A) { CHECK(NumArgs != MaxArgs); Args[NumArgs++] = A; return *this; } Diag &AddRange(Range A) { CHECK(NumRanges != MaxRanges); Ranges[NumRanges++] = A; return *this; } /// \c Diag objects are not copyable. Diag(const Diag &); // NOT IMPLEMENTED Diag &operator=(const Diag &); public: Diag(Location Loc, DiagLevel Level, ErrorType ET, const char *Message) : Loc(Loc), Level(Level), ET(ET), Message(Message), NumArgs(0), NumRanges(0) {} ~Diag(); Diag &operator<<(const char *Str) { return AddArg(Str); } Diag &operator<<(TypeName TN) { return AddArg(TN); } Diag &operator<<(unsigned long long V) { return AddArg(UIntMax(V)); } Diag &operator<<(const void *V) { return AddArg(V); } Diag &operator<<(const TypeDescriptor &V); Diag &operator<<(const Value &V); Diag &operator<<(const Range &R) { return AddRange(R); } }; struct ReportOptions { // If FromUnrecoverableHandler is specified, UBSan runtime handler is not // expected to return. bool FromUnrecoverableHandler; /// pc/bp are used to unwind the stack trace. uptr pc; uptr bp; }; bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET); #define GET_REPORT_OPTIONS(unrecoverable_handler) \ GET_CALLER_PC_BP; \ ReportOptions Opts = {unrecoverable_handler, pc, bp} void GetStackTrace(BufferedStackTrace *stack, uptr max_depth, uptr pc, uptr bp, void *context, bool fast); /// \brief Instantiate this class before printing diagnostics in the error /// report. This class ensures that reports from different threads and from /// different sanitizers won't be mixed. 
class ScopedReport { struct Initializer { Initializer(); }; Initializer initializer_; ScopedErrorReportLock report_lock_; ReportOptions Opts; Location SummaryLoc; ErrorType Type; public: ScopedReport(ReportOptions Opts, Location SummaryLoc, ErrorType Type); ~ScopedReport(); static void CheckLocked() { ScopedErrorReportLock::CheckLocked(); } }; void InitializeSuppressions(); bool IsVptrCheckSuppressed(const char *TypeName); // Sometimes UBSan runtime can know filename from handlers arguments, even if // debug info is missing. bool IsPCSuppressed(ErrorType ET, uptr PC, const char *Filename); } // namespace __ubsan #endif // UBSAN_DIAG_H Index: projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_handlers.cc =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_handlers.cc (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_handlers.cc (revision 337154) @@ -1,737 +1,780 @@ //===-- ubsan_handlers.cc -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Error logging entry points for the UBSan runtime. // //===----------------------------------------------------------------------===// #include "ubsan_platform.h" #if CAN_SANITIZE_UB #include "ubsan_handlers.h" #include "ubsan_diag.h" #include "ubsan_flags.h" #include "ubsan_monitor.h" #include "sanitizer_common/sanitizer_common.h" using namespace __sanitizer; using namespace __ubsan; namespace __ubsan { bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET) { // We are not allowed to skip error report: if we are in unrecoverable // handler, we have to terminate the program right now, and therefore // have to print some diagnostic. // // Even if source location is disabled, it doesn't mean that we have // already report an error to the user: some concurrently running // thread could have acquired it, but not yet printed the report. if (Opts.FromUnrecoverableHandler) return false; return SLoc.isDisabled() || IsPCSuppressed(ET, Opts.pc, SLoc.getFilename()); } const char *TypeCheckKinds[] = { "load of", "store to", "reference binding to", "member access within", "member call on", "constructor call on", "downcast of", "downcast of", "upcast of", "cast to virtual base of", "_Nonnull binding to", "dynamic operation on"}; } static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, ReportOptions Opts) { Location Loc = Data->Loc.acquire(); uptr Alignment = (uptr)1 << Data->LogAlignment; ErrorType ET; if (!Pointer) ET = ErrorType::NullPointerUse; else if (Pointer & (Alignment - 1)) ET = ErrorType::MisalignedPointerUse; else ET = ErrorType::InsufficientObjectSize; // Use the SourceLocation from Data to track deduplication, even if it's // invalid. 
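// (Informally: acquire() atomically marks the static SourceLocation as
// disabled and hands the previous value to exactly one caller, so threads
// racing on the same check site produce a single report; ignoreReport() then
// filters the duplicates, except for unrecoverable handlers, which must
// always print before dying.)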
if (ignoreReport(Loc.getSourceLocation(), Opts, ET)) return; SymbolizedStackHolder FallbackLoc; if (Data->Loc.isInvalid()) { FallbackLoc.reset(getCallerLocation(Opts.pc)); Loc = FallbackLoc; } ScopedReport R(Opts, Loc, ET); switch (ET) { case ErrorType::NullPointerUse: Diag(Loc, DL_Error, ET, "%0 null pointer of type %1") << TypeCheckKinds[Data->TypeCheckKind] << Data->Type; break; case ErrorType::MisalignedPointerUse: Diag(Loc, DL_Error, ET, "%0 misaligned address %1 for type %3, " "which requires %2 byte alignment") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Alignment << Data->Type; break; case ErrorType::InsufficientObjectSize: Diag(Loc, DL_Error, ET, "%0 address %1 with insufficient space " "for an object of type %2") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Data->Type; break; default: UNREACHABLE("unexpected error type!"); } if (Pointer) Diag(Pointer, DL_Note, ET, "pointer points here"); } void __ubsan::__ubsan_handle_type_mismatch_v1(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(false); handleTypeMismatchImpl(Data, Pointer, Opts); } void __ubsan::__ubsan_handle_type_mismatch_v1_abort(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(true); handleTypeMismatchImpl(Data, Pointer, Opts); Die(); } /// \brief Common diagnostic emission for various forms of integer overflow. template <typename T> static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS, const char *Operator, T RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); bool IsSigned = Data->Type.isSignedIntegerTy(); ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow : ErrorType::UnsignedIntegerOverflow; if (ignoreReport(Loc, Opts, ET)) return; if (!IsSigned && flags()->silence_unsigned_overflow) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "%0 integer overflow: " "%1 %2 %3 cannot be represented in type %4") << (IsSigned ? "signed" : "unsigned") << Value(Data->Type, LHS) << Operator << RHS << Data->Type; } #define UBSAN_OVERFLOW_HANDLER(handler_name, op, unrecoverable) \ void __ubsan::handler_name(OverflowData *Data, ValueHandle LHS, \ ValueHandle RHS) { \ GET_REPORT_OPTIONS(unrecoverable); \ handleIntegerOverflowImpl(Data, LHS, op, Value(Data->Type, RHS), Opts); \ if (unrecoverable) \ Die(); \ } UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow_abort, "+", true) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow, "-", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow_abort, "-", true) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow, "*", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow_abort, "*", true) static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); bool IsSigned = Data->Type.isSignedIntegerTy(); ErrorType ET = IsSigned ?
ErrorType::SignedIntegerOverflow : ErrorType::UnsignedIntegerOverflow; if (ignoreReport(Loc, Opts, ET)) return; if (!IsSigned && flags()->silence_unsigned_overflow) return; ScopedReport R(Opts, Loc, ET); if (IsSigned) Diag(Loc, DL_Error, ET, "negation of %0 cannot be represented in type %1; " "cast to an unsigned type to negate this value to itself") << Value(Data->Type, OldVal) << Data->Type; else Diag(Loc, DL_Error, ET, "negation of %0 cannot be represented in type %1") << Value(Data->Type, OldVal) << Data->Type; } void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data, ValueHandle OldVal) { GET_REPORT_OPTIONS(false); handleNegateOverflowImpl(Data, OldVal, Opts); } void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data, ValueHandle OldVal) { GET_REPORT_OPTIONS(true); handleNegateOverflowImpl(Data, OldVal, Opts); Die(); } static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS, ValueHandle RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); Value LHSVal(Data->Type, LHS); Value RHSVal(Data->Type, RHS); ErrorType ET; if (RHSVal.isMinusOne()) ET = ErrorType::SignedIntegerOverflow; else if (Data->Type.isIntegerTy()) ET = ErrorType::IntegerDivideByZero; else ET = ErrorType::FloatDivideByZero; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); switch (ET) { case ErrorType::SignedIntegerOverflow: Diag(Loc, DL_Error, ET, "division of %0 by -1 cannot be represented in type %1") << LHSVal << Data->Type; break; default: Diag(Loc, DL_Error, ET, "division by zero"); break; } } void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(false); handleDivremOverflowImpl(Data, LHS, RHS, Opts); } void __ubsan::__ubsan_handle_divrem_overflow_abort(OverflowData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(true); handleDivremOverflowImpl(Data, LHS, RHS, Opts); Die(); } static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); Value LHSVal(Data->LHSType, LHS); Value RHSVal(Data->RHSType, RHS); ErrorType ET; if (RHSVal.isNegative() || RHSVal.getPositiveIntValue() >= Data->LHSType.getIntegerBitWidth()) ET = ErrorType::InvalidShiftExponent; else ET = ErrorType::InvalidShiftBase; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); if (ET == ErrorType::InvalidShiftExponent) { if (RHSVal.isNegative()) Diag(Loc, DL_Error, ET, "shift exponent %0 is negative") << RHSVal; else Diag(Loc, DL_Error, ET, "shift exponent %0 is too large for %1-bit type %2") << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType; } else { if (LHSVal.isNegative()) Diag(Loc, DL_Error, ET, "left shift of negative value %0") << LHSVal; else Diag(Loc, DL_Error, ET, "left shift of %0 by %1 places cannot be represented in type %2") << LHSVal << RHSVal << Data->LHSType; } } void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(false); handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts); } void __ubsan::__ubsan_handle_shift_out_of_bounds_abort( ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(true); handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts); Die(); } static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::OutOfBoundsIndex; if 
(ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Value IndexVal(Data->IndexType, Index); Diag(Loc, DL_Error, ET, "index %0 out of bounds for type %1") << IndexVal << Data->ArrayType; } void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data, ValueHandle Index) { GET_REPORT_OPTIONS(false); handleOutOfBoundsImpl(Data, Index, Opts); } void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data, ValueHandle Index) { GET_REPORT_OPTIONS(true); handleOutOfBoundsImpl(Data, Index, Opts); Die(); } static void handleBuiltinUnreachableImpl(UnreachableData *Data, ReportOptions Opts) { ErrorType ET = ErrorType::UnreachableCall; ScopedReport R(Opts, Data->Loc, ET); Diag(Data->Loc, DL_Error, ET, "execution reached an unreachable program point"); } void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) { GET_REPORT_OPTIONS(true); handleBuiltinUnreachableImpl(Data, Opts); Die(); } static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) { ErrorType ET = ErrorType::MissingReturn; ScopedReport R(Opts, Data->Loc, ET); Diag(Data->Loc, DL_Error, ET, "execution reached the end of a value-returning function " "without returning a value"); } void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) { GET_REPORT_OPTIONS(true); handleMissingReturnImpl(Data, Opts); Die(); } static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::NonPositiveVLAIndex; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "variable length array bound evaluates to " "non-positive value %0") << Value(Data->Type, Bound); } void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data, ValueHandle Bound) { GET_REPORT_OPTIONS(false); handleVLABoundNotPositive(Data, Bound, Opts); } void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data, ValueHandle Bound) { GET_REPORT_OPTIONS(true); handleVLABoundNotPositive(Data, Bound, Opts); Die(); } static bool looksLikeFloatCastOverflowDataV1(void *Data) { // First field is either a pointer to filename or a pointer to a // TypeDescriptor. u8 *FilenameOrTypeDescriptor; internal_memcpy(&FilenameOrTypeDescriptor, Data, sizeof(FilenameOrTypeDescriptor)); // Heuristic: For float_cast_overflow, the TypeKind will be either TK_Integer // (0x0), TK_Float (0x1) or TK_Unknown (0xff). If both types are known, // adding both bytes will be 0 or 1 (for BE or LE). If it were a filename, // adding two printable characters will not yield such a value. Otherwise, // if one of them is 0xff, this is most likely TK_Unknown type descriptor. 
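// For instance (illustrative byte values): a TypeDescriptor starts with a u16
// TypeKind of 0x0000 (TK_Integer) or 0x0001 (TK_Float), so its first two
// bytes sum to 0 or 1 on either endianness, while a filename such as "a.c"
// starts with 'a' + '.' = 0x61 + 0x2e = 0x8f, which is >= 2 and contains no
// 0xff byte, so it is classified as the V2 layout.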
u16 MaybeFromTypeKind = FilenameOrTypeDescriptor[0] + FilenameOrTypeDescriptor[1]; return MaybeFromTypeKind < 2 || FilenameOrTypeDescriptor[0] == 0xff || FilenameOrTypeDescriptor[1] == 0xff; } static void handleFloatCastOverflow(void *DataPtr, ValueHandle From, ReportOptions Opts) { SymbolizedStackHolder CallerLoc; Location Loc; const TypeDescriptor *FromType, *ToType; ErrorType ET = ErrorType::FloatCastOverflow; if (looksLikeFloatCastOverflowDataV1(DataPtr)) { auto Data = reinterpret_cast<FloatCastOverflowData *>(DataPtr); CallerLoc.reset(getCallerLocation(Opts.pc)); Loc = CallerLoc; FromType = &Data->FromType; ToType = &Data->ToType; } else { auto Data = reinterpret_cast<FloatCastOverflowDataV2 *>(DataPtr); SourceLocation SLoc = Data->Loc.acquire(); if (ignoreReport(SLoc, Opts, ET)) return; Loc = SLoc; FromType = &Data->FromType; ToType = &Data->ToType; } ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "%0 is outside the range of representable values of type %2") << Value(*FromType, From) << *FromType << *ToType; } void __ubsan::__ubsan_handle_float_cast_overflow(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(false); handleFloatCastOverflow(Data, From, Opts); } void __ubsan::__ubsan_handle_float_cast_overflow_abort(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(true); handleFloatCastOverflow(Data, From, Opts); Die(); } static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); // This check could be more precise if we used different handlers for // -fsanitize=bool and -fsanitize=enum. bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")) || (0 == internal_strncmp(Data->Type.getTypeName(), "'BOOL'", 6)); ErrorType ET = IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "load of value %0, which is not a valid value for type %1") << Value(Data->Type, Val) << Data->Type; } void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data, ValueHandle Val) { GET_REPORT_OPTIONS(false); handleLoadInvalidValue(Data, Val, Opts); } void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data, ValueHandle Val) { GET_REPORT_OPTIONS(true); handleLoadInvalidValue(Data, Val, Opts); Die(); } +static void handleImplicitConversion(ImplicitConversionData *Data, + ReportOptions Opts, ValueHandle Src, + ValueHandle Dst) { + SourceLocation Loc = Data->Loc.acquire(); + ErrorType ET = ErrorType::GenericUB; + + switch (Data->Kind) { + case ICCK_IntegerTruncation: + ET = ErrorType::ImplicitIntegerTruncation; + break; + } + + if (ignoreReport(Loc, Opts, ET)) + return; + + const TypeDescriptor &SrcTy = Data->FromType; + const TypeDescriptor &DstTy = Data->ToType; + + ScopedReport R(Opts, Loc, ET); + + // FIXME: is it possible to dump the values as hex with fixed width? + + Diag(Loc, DL_Error, ET, + "implicit conversion from type %0 of value %1 (%2-bit, %3signed) to " + "type %4 changed the value to %5 (%6-bit, %7signed)") + << SrcTy << Value(SrcTy, Src) << SrcTy.getIntegerBitWidth() + << (SrcTy.isSignedIntegerTy() ? "" : "un") << DstTy << Value(DstTy, Dst) + << DstTy.getIntegerBitWidth() << (DstTy.isSignedIntegerTy() ?
"" : "un"); +} + +void __ubsan::__ubsan_handle_implicit_conversion(ImplicitConversionData *Data, + ValueHandle Src, + ValueHandle Dst) { + GET_REPORT_OPTIONS(false); + handleImplicitConversion(Data, Opts, Src, Dst); +} +void __ubsan::__ubsan_handle_implicit_conversion_abort( + ImplicitConversionData *Data, ValueHandle Src, ValueHandle Dst) { + GET_REPORT_OPTIONS(true); + handleImplicitConversion(Data, Opts, Src, Dst); + Die(); +} + static void handleInvalidBuiltin(InvalidBuiltinData *Data, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::InvalidBuiltin; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "passing zero to %0, which is not a valid argument") << ((Data->Kind == BCK_CTZPassedZero) ? "ctz()" : "clz()"); } void __ubsan::__ubsan_handle_invalid_builtin(InvalidBuiltinData *Data) { GET_REPORT_OPTIONS(true); handleInvalidBuiltin(Data, Opts); } void __ubsan::__ubsan_handle_invalid_builtin_abort(InvalidBuiltinData *Data) { GET_REPORT_OPTIONS(true); handleInvalidBuiltin(Data, Opts); Die(); } static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data, ValueHandle Function, ReportOptions Opts) { SourceLocation CallLoc = Data->Loc.acquire(); ErrorType ET = ErrorType::FunctionTypeMismatch; if (ignoreReport(CallLoc, Opts, ET)) return; ScopedReport R(Opts, CallLoc, ET); SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); const char *FName = FLoc.get()->info.function; if (!FName) FName = "(unknown)"; Diag(CallLoc, DL_Error, ET, "call to function %0 through pointer to incorrect function type %1") << FName << Data->Type; Diag(FLoc, DL_Note, ET, "%0 defined here") << FName; } void __ubsan::__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data, ValueHandle Function) { GET_REPORT_OPTIONS(false); handleFunctionTypeMismatch(Data, Function, Opts); } void __ubsan::__ubsan_handle_function_type_mismatch_abort( FunctionTypeMismatchData *Data, ValueHandle Function) { GET_REPORT_OPTIONS(true); handleFunctionTypeMismatch(Data, Function, Opts); Die(); } static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr, ReportOptions Opts, bool IsAttr) { if (!LocPtr) UNREACHABLE("source location pointer is null!"); SourceLocation Loc = LocPtr->acquire(); ErrorType ET = ErrorType::InvalidNullReturn; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "null pointer returned from function declared to never return null"); if (!Data->AttrLoc.isInvalid()) Diag(Data->AttrLoc, DL_Note, ET, "%0 specified here") << (IsAttr ? 
"returns_nonnull attribute" : "_Nonnull return type annotation"); } void __ubsan::__ubsan_handle_nonnull_return_v1(NonNullReturnData *Data, SourceLocation *LocPtr) { GET_REPORT_OPTIONS(false); handleNonNullReturn(Data, LocPtr, Opts, true); } void __ubsan::__ubsan_handle_nonnull_return_v1_abort(NonNullReturnData *Data, SourceLocation *LocPtr) { GET_REPORT_OPTIONS(true); handleNonNullReturn(Data, LocPtr, Opts, true); Die(); } void __ubsan::__ubsan_handle_nullability_return_v1(NonNullReturnData *Data, SourceLocation *LocPtr) { GET_REPORT_OPTIONS(false); handleNonNullReturn(Data, LocPtr, Opts, false); } void __ubsan::__ubsan_handle_nullability_return_v1_abort( NonNullReturnData *Data, SourceLocation *LocPtr) { GET_REPORT_OPTIONS(true); handleNonNullReturn(Data, LocPtr, Opts, false); Die(); } static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts, bool IsAttr) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::InvalidNullArgument; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, ET, "null pointer passed as argument %0, which is declared to " "never be null") << Data->ArgIndex; if (!Data->AttrLoc.isInvalid()) Diag(Data->AttrLoc, DL_Note, ET, "%0 specified here") << (IsAttr ? "nonnull attribute" : "_Nonnull type annotation"); } void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) { GET_REPORT_OPTIONS(false); handleNonNullArg(Data, Opts, true); } void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) { GET_REPORT_OPTIONS(true); handleNonNullArg(Data, Opts, true); Die(); } void __ubsan::__ubsan_handle_nullability_arg(NonNullArgData *Data) { GET_REPORT_OPTIONS(false); handleNonNullArg(Data, Opts, false); } void __ubsan::__ubsan_handle_nullability_arg_abort(NonNullArgData *Data) { GET_REPORT_OPTIONS(true); handleNonNullArg(Data, Opts, false); Die(); } static void handlePointerOverflowImpl(PointerOverflowData *Data, ValueHandle Base, ValueHandle Result, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::PointerOverflow; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); if ((sptr(Base) >= 0) == (sptr(Result) >= 0)) { if (Base > Result) Diag(Loc, DL_Error, ET, "addition of unsigned offset to %0 overflowed to %1") << (void *)Base << (void *)Result; else Diag(Loc, DL_Error, ET, "subtraction of unsigned offset from %0 overflowed to %1") << (void *)Base << (void *)Result; } else { Diag(Loc, DL_Error, ET, "pointer index expression with base %0 overflowed to %1") << (void *)Base << (void *)Result; } } void __ubsan::__ubsan_handle_pointer_overflow(PointerOverflowData *Data, ValueHandle Base, ValueHandle Result) { GET_REPORT_OPTIONS(false); handlePointerOverflowImpl(Data, Base, Result, Opts); } void __ubsan::__ubsan_handle_pointer_overflow_abort(PointerOverflowData *Data, ValueHandle Base, ValueHandle Result) { GET_REPORT_OPTIONS(true); handlePointerOverflowImpl(Data, Base, Result, Opts); Die(); } static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function, ReportOptions Opts) { if (Data->CheckKind != CFITCK_ICall && Data->CheckKind != CFITCK_NVMFCall) Die(); SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::CFIBadType; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); const char *CheckKindStr = Data->CheckKind == CFITCK_NVMFCall ? 
"non-virtual pointer to member function call" : "indirect function call"; Diag(Loc, DL_Error, ET, "control flow integrity check for type %0 failed during %1") << Data->Type << CheckKindStr; SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); const char *FName = FLoc.get()->info.function; if (!FName) FName = "(unknown)"; Diag(FLoc, DL_Note, ET, "%0 defined here") << FName; // If the failure involved different DSOs for the check location and icall // target, report the DSO names. const char *DstModule = FLoc.get()->info.module; if (!DstModule) DstModule = "(unknown)"; const char *SrcModule = Symbolizer::GetOrInit()->GetModuleNameForPc(Opts.pc); if (!SrcModule) SrcModule = "(unknown)"; if (internal_strcmp(SrcModule, DstModule)) Diag(Loc, DL_Note, ET, "check failed in %0, destination function located in %1") << SrcModule << DstModule; } namespace __ubsan { #ifdef UBSAN_CAN_USE_CXXABI #ifdef _WIN32 extern "C" void __ubsan_handle_cfi_bad_type_default(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts) { Die(); } WIN_WEAK_ALIAS(__ubsan_handle_cfi_bad_type, __ubsan_handle_cfi_bad_type_default) #else SANITIZER_WEAK_ATTRIBUTE #endif void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts); #else void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts) { Die(); } #endif } // namespace __ubsan void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data, ValueHandle Value, uptr ValidVtable) { GET_REPORT_OPTIONS(false); if (Data->CheckKind == CFITCK_ICall || Data->CheckKind == CFITCK_NVMFCall) handleCFIBadIcall(Data, Value, Opts); else __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts); } void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data, ValueHandle Value, uptr ValidVtable) { GET_REPORT_OPTIONS(true); if (Data->CheckKind == CFITCK_ICall || Data->CheckKind == CFITCK_NVMFCall) handleCFIBadIcall(Data, Value, Opts); else __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts); Die(); } #endif // CAN_SANITIZE_UB Index: projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_handlers.h =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_handlers.h (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_handlers.h (revision 337154) @@ -1,206 +1,223 @@ //===-- ubsan_handlers.h ----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Entry points to the runtime library for Clang's undefined behavior sanitizer. // //===----------------------------------------------------------------------===// #ifndef UBSAN_HANDLERS_H #define UBSAN_HANDLERS_H #include "ubsan_value.h" namespace __ubsan { struct TypeMismatchData { SourceLocation Loc; const TypeDescriptor &Type; unsigned char LogAlignment; unsigned char TypeCheckKind; }; #define UNRECOVERABLE(checkname, ...) \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \ void __ubsan_handle_ ## checkname( __VA_ARGS__ ); #define RECOVERABLE(checkname, ...) 
\ extern "C" SANITIZER_INTERFACE_ATTRIBUTE \ void __ubsan_handle_ ## checkname( __VA_ARGS__ ); \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \ void __ubsan_handle_ ## checkname ## _abort( __VA_ARGS__ ); /// \brief Handle a runtime type check failure, caused by either a misaligned /// pointer, a null pointer, or a pointer to insufficient storage for the /// type. RECOVERABLE(type_mismatch_v1, TypeMismatchData *Data, ValueHandle Pointer) struct OverflowData { SourceLocation Loc; const TypeDescriptor &Type; }; /// \brief Handle an integer addition overflow. RECOVERABLE(add_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS) /// \brief Handle an integer subtraction overflow. RECOVERABLE(sub_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS) /// \brief Handle an integer multiplication overflow. RECOVERABLE(mul_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS) /// \brief Handle a signed integer overflow for a unary negate operator. RECOVERABLE(negate_overflow, OverflowData *Data, ValueHandle OldVal) /// \brief Handle an INT_MIN/-1 overflow or division by zero. RECOVERABLE(divrem_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS) struct ShiftOutOfBoundsData { SourceLocation Loc; const TypeDescriptor &LHSType; const TypeDescriptor &RHSType; }; /// \brief Handle a shift where the RHS is out of bounds or a left shift where /// the LHS is negative or overflows. RECOVERABLE(shift_out_of_bounds, ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) struct OutOfBoundsData { SourceLocation Loc; const TypeDescriptor &ArrayType; const TypeDescriptor &IndexType; }; /// \brief Handle an array index out of bounds error. RECOVERABLE(out_of_bounds, OutOfBoundsData *Data, ValueHandle Index) struct UnreachableData { SourceLocation Loc; }; /// \brief Handle a __builtin_unreachable which is reached. UNRECOVERABLE(builtin_unreachable, UnreachableData *Data) /// \brief Handle reaching the end of a value-returning function. UNRECOVERABLE(missing_return, UnreachableData *Data) struct VLABoundData { SourceLocation Loc; const TypeDescriptor &Type; }; /// \brief Handle a VLA with a non-positive bound. RECOVERABLE(vla_bound_not_positive, VLABoundData *Data, ValueHandle Bound) // Keeping this around for binary compatibility with (sanitized) programs // compiled with older compilers. struct FloatCastOverflowData { const TypeDescriptor &FromType; const TypeDescriptor &ToType; }; struct FloatCastOverflowDataV2 { SourceLocation Loc; const TypeDescriptor &FromType; const TypeDescriptor &ToType; }; /// Handle overflow in a conversion to or from a floating-point type. /// void *Data is one of FloatCastOverflowData* or FloatCastOverflowDataV2* RECOVERABLE(float_cast_overflow, void *Data, ValueHandle From) struct InvalidValueData { SourceLocation Loc; const TypeDescriptor &Type; }; /// \brief Handle a load of an invalid value for the type. RECOVERABLE(load_invalid_value, InvalidValueData *Data, ValueHandle Val) +/// Known implicit conversion check kinds. +/// Keep in sync with the enum of the same name in CGExprScalar.cpp +enum ImplicitConversionCheckKind : unsigned char { + ICCK_IntegerTruncation = 0, +}; + +struct ImplicitConversionData { + SourceLocation Loc; + const TypeDescriptor &FromType; + const TypeDescriptor &ToType; + /* ImplicitConversionCheckKind */ unsigned char Kind; +}; + +/// \brief Implict conversion that changed the value. 
+RECOVERABLE(implicit_conversion, ImplicitConversionData *Data, ValueHandle Src, + ValueHandle Dst) + /// Known builtin check kinds. /// Keep in sync with the enum of the same name in CodeGenFunction.h enum BuiltinCheckKind : unsigned char { BCK_CTZPassedZero, BCK_CLZPassedZero, }; struct InvalidBuiltinData { SourceLocation Loc; unsigned char Kind; }; /// Handle a builtin called in an invalid way. RECOVERABLE(invalid_builtin, InvalidBuiltinData *Data) struct FunctionTypeMismatchData { SourceLocation Loc; const TypeDescriptor &Type; }; RECOVERABLE(function_type_mismatch, FunctionTypeMismatchData *Data, ValueHandle Val) struct NonNullReturnData { SourceLocation AttrLoc; }; /// \brief Handle returning null from function with the returns_nonnull /// attribute, or a return type annotated with _Nonnull. RECOVERABLE(nonnull_return_v1, NonNullReturnData *Data, SourceLocation *Loc) RECOVERABLE(nullability_return_v1, NonNullReturnData *Data, SourceLocation *Loc) struct NonNullArgData { SourceLocation Loc; SourceLocation AttrLoc; int ArgIndex; }; /// \brief Handle passing null pointer to a function parameter with the nonnull /// attribute, or a _Nonnull type annotation. RECOVERABLE(nonnull_arg, NonNullArgData *Data) RECOVERABLE(nullability_arg, NonNullArgData *Data) struct PointerOverflowData { SourceLocation Loc; }; RECOVERABLE(pointer_overflow, PointerOverflowData *Data, ValueHandle Base, ValueHandle Result) /// \brief Known CFI check kinds. /// Keep in sync with the enum of the same name in CodeGenFunction.h enum CFITypeCheckKind : unsigned char { CFITCK_VCall, CFITCK_NVCall, CFITCK_DerivedCast, CFITCK_UnrelatedCast, CFITCK_ICall, CFITCK_NVMFCall, CFITCK_VMFCall, }; struct CFICheckFailData { CFITypeCheckKind CheckKind; SourceLocation Loc; const TypeDescriptor &Type; }; /// \brief Handle control flow integrity failures. RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function, uptr VtableIsValid) struct ReportOptions; extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_handle_cfi_bad_type( CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts); } #endif // UBSAN_HANDLERS_H Index: projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_interface.inc =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_interface.inc (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/ubsan/ubsan_interface.inc (revision 337154) @@ -1,56 +1,58 @@ //===-- ubsan_interface.inc -----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Ubsan interface list. 
//===----------------------------------------------------------------------===// INTERFACE_FUNCTION(__ubsan_handle_add_overflow) INTERFACE_FUNCTION(__ubsan_handle_add_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_builtin_unreachable) INTERFACE_FUNCTION(__ubsan_handle_cfi_bad_type) INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail) INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail_abort) INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow) INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss) INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss_abort) INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow) INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch) INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_abort) +INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion) +INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion_abort) INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin) INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin_abort) INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value) INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value_abort) INTERFACE_FUNCTION(__ubsan_handle_missing_return) INTERFACE_FUNCTION(__ubsan_handle_mul_overflow) INTERFACE_FUNCTION(__ubsan_handle_mul_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_negate_overflow) INTERFACE_FUNCTION(__ubsan_handle_negate_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg) INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg_abort) INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1) INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_nullability_arg) INTERFACE_FUNCTION(__ubsan_handle_nullability_arg_abort) INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1) INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds) INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort) INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow) INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds) INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds_abort) INTERFACE_FUNCTION(__ubsan_handle_sub_overflow) INTERFACE_FUNCTION(__ubsan_handle_sub_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1) INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive) INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive_abort) INTERFACE_WEAK_FUNCTION(__ubsan_default_options) INTERFACE_FUNCTION(__ubsan_on_report) INTERFACE_FUNCTION(__ubsan_get_current_report_data) Index: projects/clang700-import/contrib/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cc =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cc (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cc (revision 337154) @@ -1,117 +1,118 @@ #include "sanitizer_common/sanitizer_atomic.h" #include #include #include #include #ifdef KERNEL_USE extern "C" void ubsan_message(const char *msg); static void message(const char *msg) { ubsan_message(msg); } #else static void message(const char *msg) { write(2, msg, strlen(msg)); } #endif static const int kMaxCallerPcs = 20; static __sanitizer::atomic_uintptr_t caller_pcs[kMaxCallerPcs]; // Number of elements in caller_pcs. 
A special value of kMaxCallerPcs + 1 means // that "too many errors" has already been reported. static __sanitizer::atomic_uint32_t caller_pcs_sz; __attribute__((noinline)) static bool report_this_error(void *caller_p) { uintptr_t caller = reinterpret_cast<uintptr_t>(caller_p); if (caller == 0) return false; while (true) { unsigned sz = __sanitizer::atomic_load_relaxed(&caller_pcs_sz); if (sz > kMaxCallerPcs) return false; // early exit // when sz==kMaxCallerPcs print "too many errors", but only when cmpxchg // succeeds in order to not print it multiple times. if (sz > 0 && sz < kMaxCallerPcs) { uintptr_t p; for (unsigned i = 0; i < sz; ++i) { p = __sanitizer::atomic_load_relaxed(&caller_pcs[i]); if (p == 0) break; // Concurrent update. if (p == caller) return false; } if (p == 0) continue; // FIXME: yield? } if (!__sanitizer::atomic_compare_exchange_strong( &caller_pcs_sz, &sz, sz + 1, __sanitizer::memory_order_seq_cst)) continue; // Concurrent update! Try again from the start. if (sz == kMaxCallerPcs) { message("ubsan: too many errors\n"); return false; } __sanitizer::atomic_store_relaxed(&caller_pcs[sz], caller); return true; } } #if defined(__ANDROID__) extern "C" __attribute__((weak)) void android_set_abort_message(const char *); static void abort_with_message(const char *msg) { if (&android_set_abort_message) android_set_abort_message(msg); abort(); } #else static void abort_with_message(const char *) { abort(); } #endif #if SANITIZER_DEBUG namespace __sanitizer { // The DCHECK macro needs this symbol to be defined. void NORETURN CheckFailed(const char *file, int, const char *cond, u64, u64) { message("Sanitizer CHECK failed: "); message(file); message(":?? : "); // FIXME: Show line number. message(cond); abort(); } } // namespace __sanitizer #endif #define INTERFACE extern "C" __attribute__((visibility("default"))) // FIXME: add caller pc to the error message (possibly as "ubsan: error-type // @1234ABCD").
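// The lock-free deduplication in report_this_error above works roughly as
// follows: (1) load sz = caller_pcs_sz; sz > kMaxCallerPcs means "too many
// errors" was already printed, so drop the report. (2) For 0 < sz <
// kMaxCallerPcs, scan caller_pcs[0..sz): a matching PC means this call site
// already reported (drop); a zero slot means another thread reserved an index
// but has not stored its PC yet (retry). (3) CAS sz -> sz + 1 to reserve slot
// sz, restarting on contention; the thread whose CAS moves the count to
// kMaxCallerPcs + 1 prints "too many errors" exactly once. (4) Store the
// caller PC into the reserved slot and report.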
#define HANDLER_RECOVER(name, msg) \ INTERFACE void __ubsan_handle_##name##_minimal() { \ if (!report_this_error(__builtin_return_address(0))) return; \ message("ubsan: " msg "\n"); \ } #define HANDLER_NORECOVER(name, msg) \ INTERFACE void __ubsan_handle_##name##_minimal_abort() { \ message("ubsan: " msg "\n"); \ abort_with_message("ubsan: " msg); \ } #define HANDLER(name, msg) \ HANDLER_RECOVER(name, msg) \ HANDLER_NORECOVER(name, msg) HANDLER(type_mismatch, "type-mismatch") HANDLER(add_overflow, "add-overflow") HANDLER(sub_overflow, "sub-overflow") HANDLER(mul_overflow, "mul-overflow") HANDLER(negate_overflow, "negate-overflow") HANDLER(divrem_overflow, "divrem-overflow") HANDLER(shift_out_of_bounds, "shift-out-of-bounds") HANDLER(out_of_bounds, "out-of-bounds") HANDLER_RECOVER(builtin_unreachable, "builtin-unreachable") HANDLER_RECOVER(missing_return, "missing-return") HANDLER(vla_bound_not_positive, "vla-bound-not-positive") HANDLER(float_cast_overflow, "float-cast-overflow") HANDLER(load_invalid_value, "load-invalid-value") HANDLER(invalid_builtin, "invalid-builtin") HANDLER(function_type_mismatch, "function-type-mismatch") +HANDLER(implicit_conversion, "implicit-conversion") HANDLER(nonnull_arg, "nonnull-arg") HANDLER(nonnull_return, "nonnull-return") HANDLER(nullability_arg, "nullability-arg") HANDLER(nullability_return, "nullability-return") HANDLER(pointer_overflow, "pointer-overflow") HANDLER(cfi_check_fail, "cfi-check-fail") Index: projects/clang700-import/contrib/compiler-rt/lib/xray/xray_buffer_queue.cc =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/xray/xray_buffer_queue.cc (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/xray/xray_buffer_queue.cc (revision 337154) @@ -1,138 +1,171 @@ //===-- xray_buffer_queue.cc -----------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of XRay, a dynamic runtime instrumentation system. // // Defines the interface for a buffer queue implementation. // //===----------------------------------------------------------------------===// #include "xray_buffer_queue.h" -#include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_posix.h" #include <memory> +#include <sys/mman.h> +#ifndef MAP_NORESERVE +// no-op on NetBSD (at least), unsupported flag on FreeBSD +#define MAP_NORESERVE 0 +#endif + using namespace __xray; using namespace __sanitizer; +template <class T> static T *allocRaw(size_t N) { + // TODO: Report errors? + // We use MAP_NORESERVE on platforms where it's supported to ensure that the + // pages we're allocating for XRay never end up in pages that can be swapped + // in/out. We're doing this because for FDR mode, we want to ensure that + // writes to the buffers stay resident in memory to prevent XRay itself from + // causing swapping/thrashing. + // + // In the case when XRay pages cannot be swapped in/out or there's not enough + // RAM to back these pages, we're willing to cause a segmentation fault + // instead of introducing latency in the measurement.
We assume here that + there are enough pages that are swappable in/out outside of the buffers + being used by FDR mode (which are bounded and configurable anyway) to allow + us to keep using always-resident memory. + // + // TODO: Make this configurable? + void *A = reinterpret_cast<void *>( + internal_mmap(NULL, N * sizeof(T), PROT_WRITE | PROT_READ, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0)); + return (A == MAP_FAILED) ? nullptr : reinterpret_cast<T *>(A); +} + +template <class T> static void deallocRaw(T *ptr, size_t N) { + // TODO: Report errors? + if (ptr != nullptr) + internal_munmap(ptr, N); +} + template <class T> static T *initArray(size_t N) { - auto A = reinterpret_cast<T *>( - InternalAlloc(N * sizeof(T), nullptr, kCacheLineSize)); + auto A = allocRaw<T>(N); if (A != nullptr) while (N > 0) new (A + (--N)) T(); return A; } BufferQueue::BufferQueue(size_t B, size_t N, bool &Success) : BufferSize(B), Buffers(initArray<BufferQueue::BufferRep>(N)), BufferCount(N), Finalizing{0}, OwnedBuffers(initArray<void *>(N)), Next(Buffers), First(Buffers), LiveBuffers(0) { if (Buffers == nullptr) { Success = false; return; } if (OwnedBuffers == nullptr) { // Clean up the buffers we've already allocated. for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B) B->~BufferRep(); - InternalFree(Buffers); + deallocRaw(Buffers, N); Success = false; return; }; for (size_t i = 0; i < N; ++i) { auto &T = Buffers[i]; - void *Tmp = InternalAlloc(BufferSize, nullptr, 64); + void *Tmp = allocRaw<char>(BufferSize); if (Tmp == nullptr) { Success = false; return; } - void *Extents = InternalAlloc(sizeof(BufferExtents), nullptr, 64); + auto *Extents = allocRaw<BufferExtents>(1); if (Extents == nullptr) { Success = false; return; } auto &Buf = T.Buff; Buf.Data = Tmp; Buf.Size = B; - Buf.Extents = reinterpret_cast<BufferExtents *>(Extents); + Buf.Extents = Extents; OwnedBuffers[i] = Tmp; } Success = true; } BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) { if (atomic_load(&Finalizing, memory_order_acquire)) return ErrorCode::QueueFinalizing; SpinMutexLock Guard(&Mutex); if (LiveBuffers == BufferCount) return ErrorCode::NotEnoughMemory; auto &T = *Next; auto &B = T.Buff; Buf = B; T.Used = true; ++LiveBuffers; if (++Next == (Buffers + BufferCount)) Next = Buffers; return ErrorCode::Ok; } BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) { // Blitz through the buffers array to find the buffer. bool Found = false; for (auto I = OwnedBuffers, E = OwnedBuffers + BufferCount; I != E; ++I) { if (*I == Buf.Data) { Found = true; break; } } if (!Found) return ErrorCode::UnrecognizedBuffer; SpinMutexLock Guard(&Mutex); // This points to a semantic bug, we really ought to not be releasing more // buffers than we actually get. if (LiveBuffers == 0) return ErrorCode::NotEnoughMemory; // Now that the buffer has been released, we mark it as "used".
First->Buff = Buf; First->Used = true; Buf.Data = nullptr; Buf.Size = 0; --LiveBuffers; if (++First == (Buffers + BufferCount)) First = Buffers; return ErrorCode::Ok; } BufferQueue::ErrorCode BufferQueue::finalize() { if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel)) return ErrorCode::QueueFinalizing; return ErrorCode::Ok; } BufferQueue::~BufferQueue() { for (auto I = Buffers, E = Buffers + BufferCount; I != E; ++I) { auto &T = *I; auto &Buf = T.Buff; - InternalFree(Buf.Data); - InternalFree(Buf.Extents); + deallocRaw(Buf.Data, Buf.Size); + deallocRaw(Buf.Extents, 1); } for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B) B->~BufferRep(); - InternalFree(Buffers); - InternalFree(OwnedBuffers); + deallocRaw(Buffers, BufferCount); + deallocRaw(OwnedBuffers, BufferCount); } Index: projects/clang700-import/contrib/compiler-rt/lib/xray/xray_profile_collector.cc =================================================================== --- projects/clang700-import/contrib/compiler-rt/lib/xray/xray_profile_collector.cc (revision 337153) +++ projects/clang700-import/contrib/compiler-rt/lib/xray/xray_profile_collector.cc (revision 337154) @@ -1,318 +1,346 @@ //===-- xray_profile_collector.cc ------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of XRay, a dynamic runtime instrumentation system. // // This implements the interface for the profileCollectorService. // //===----------------------------------------------------------------------===// #include "xray_profile_collector.h" #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_vector.h" #include "xray_profiling_flags.h" #include #include #include namespace __xray { namespace profileCollectorService { namespace { SpinMutex GlobalMutex; struct ThreadTrie { tid_t TId; FunctionCallTrie *Trie; }; struct ProfileBuffer { void *Data; size_t Size; }; +// Current version of the profile format. +constexpr u64 XRayProfilingVersion = 0x20180424; + +// Identifier for XRay profiling files 'xrayprof' in hex. +constexpr u64 XRayMagicBytes = 0x7872617970726f66; + +struct XRayProfilingFileHeader { + const u64 MagicBytes = XRayMagicBytes; + const u64 Version = XRayProfilingVersion; + u64 Timestamp = 0; // System time in nanoseconds. + u64 PID = 0; // Process ID. +}; + struct BlockHeader { u32 BlockSize; u32 BlockNum; u64 ThreadId; }; // These need to be pointers that point to heap/internal-allocator-allocated // objects because these are accessed even at program exit. 
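// (Presumably raw pointers to InternalAlloc'd storage are used instead of
// ordinary globals so that no non-trivial constructors or destructors run at
// static (de)initialization time, and the objects stay valid if the collector
// is still consulted while the program is exiting.)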
Vector *ThreadTries = nullptr; Vector *ProfileBuffers = nullptr; FunctionCallTrie::Allocators *GlobalAllocators = nullptr; } // namespace void post(const FunctionCallTrie &T, tid_t TId) { static pthread_once_t Once = PTHREAD_ONCE_INIT; pthread_once(&Once, +[] { SpinMutexLock Lock(&GlobalMutex); GlobalAllocators = reinterpret_cast( InternalAlloc(sizeof(FunctionCallTrie::Allocators))); new (GlobalAllocators) FunctionCallTrie::Allocators(); *GlobalAllocators = FunctionCallTrie::InitAllocatorsCustom( profilingFlags()->global_allocator_max); ThreadTries = reinterpret_cast *>( InternalAlloc(sizeof(Vector))); new (ThreadTries) Vector(); ProfileBuffers = reinterpret_cast *>( InternalAlloc(sizeof(Vector))); new (ProfileBuffers) Vector(); }); DCHECK_NE(GlobalAllocators, nullptr); DCHECK_NE(ThreadTries, nullptr); DCHECK_NE(ProfileBuffers, nullptr); ThreadTrie *Item = nullptr; { SpinMutexLock Lock(&GlobalMutex); if (GlobalAllocators == nullptr) return; Item = ThreadTries->PushBack(); Item->TId = TId; // Here we're using the internal allocator instead of the managed allocator // because: // // 1) We're not using the segmented array data structure to host // FunctionCallTrie objects. We're using a Vector (from sanitizer_common) // which works like a std::vector<...> keeping elements contiguous in // memory. The segmented array data structure assumes that elements are // trivially destructible, where FunctionCallTrie isn't. // // 2) Using a managed allocator means we need to manage that separately, // which complicates the nature of this code. To get around that, we're // using the internal allocator instead, which has its own global state // and is decoupled from the lifetime management required by the managed // allocator we have in XRay. // Item->Trie = reinterpret_cast(InternalAlloc( sizeof(FunctionCallTrie), nullptr, alignof(FunctionCallTrie))); DCHECK_NE(Item->Trie, nullptr); new (Item->Trie) FunctionCallTrie(*GlobalAllocators); } T.deepCopyInto(*Item->Trie); } // A PathArray represents the function id's representing a stack trace. In this // context a path is almost always represented from the leaf function in a call // stack to a root of the call trie. using PathArray = Array; struct ProfileRecord { using PathAllocator = typename PathArray::AllocatorType; // The Path in this record is the function id's from the leaf to the root of // the function call stack as represented from a FunctionCallTrie. PathArray *Path = nullptr; const FunctionCallTrie::Node *Node = nullptr; // Constructor for in-place construction. ProfileRecord(PathAllocator &A, const FunctionCallTrie::Node *N) : Path([&] { auto P = reinterpret_cast(InternalAlloc(sizeof(PathArray))); new (P) PathArray(A); return P; }()), Node(N) {} }; namespace { using ProfileRecordArray = Array; // Walk a depth-first traversal of each root of the FunctionCallTrie to generate // the path(s) and the data associated with the path. static void populateRecords(ProfileRecordArray &PRs, ProfileRecord::PathAllocator &PA, const FunctionCallTrie &Trie) { using StackArray = Array; using StackAllocator = typename StackArray::AllocatorType; StackAllocator StackAlloc(profilingFlags()->stack_allocator_max); StackArray DFSStack(StackAlloc); for (const auto R : Trie.getRoots()) { DFSStack.Append(R); while (!DFSStack.empty()) { auto Node = DFSStack.back(); DFSStack.trim(1); auto Record = PRs.AppendEmplace(PA, Node); if (Record == nullptr) return; DCHECK_NE(Record, nullptr); // Traverse the Node's parents and as we're doing so, get the FIds in // the order they appear. 
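// Worked example (hypothetical): for a call stack main() -> f() -> g(), the
// node for g reaches f and then main via its Parent links, so the loop below
// appends the FIds in leaf-to-root order, Path = [g, f, main], matching the
// PathArray convention documented above.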
// A PathArray represents the function ids that make up a stack trace. In
// this context a path is almost always represented from the leaf function in
// a call stack to a root of the call trie.
using PathArray = Array<int32_t>;

struct ProfileRecord {
  using PathAllocator = typename PathArray::AllocatorType;

  // The Path in this record is the function ids from the leaf to the root of
  // the function call stack as represented from a FunctionCallTrie.
  PathArray *Path = nullptr;
  const FunctionCallTrie::Node *Node = nullptr;

  // Constructor for in-place construction.
  ProfileRecord(PathAllocator &A, const FunctionCallTrie::Node *N)
      : Path([&] {
          auto P =
              reinterpret_cast<PathArray *>(InternalAlloc(sizeof(PathArray)));
          new (P) PathArray(A);
          return P;
        }()),
        Node(N) {}
};

namespace {

using ProfileRecordArray = Array<ProfileRecord>;

// Do a depth-first traversal from each root of the FunctionCallTrie to
// generate the path(s) and the data associated with each path.
static void populateRecords(ProfileRecordArray &PRs,
                            ProfileRecord::PathAllocator &PA,
                            const FunctionCallTrie &Trie) {
  using StackArray = Array<const FunctionCallTrie::Node *>;
  using StackAllocator = typename StackArray::AllocatorType;
  StackAllocator StackAlloc(profilingFlags()->stack_allocator_max);
  StackArray DFSStack(StackAlloc);
  for (const auto R : Trie.getRoots()) {
    DFSStack.Append(R);
    while (!DFSStack.empty()) {
      auto Node = DFSStack.back();
      DFSStack.trim(1);
      auto Record = PRs.AppendEmplace(PA, Node);
      if (Record == nullptr)
        return;
      DCHECK_NE(Record, nullptr);

      // Traverse the Node's parents and as we're doing so, get the FIds in
      // the order they appear.
      for (auto N = Node; N != nullptr; N = N->Parent)
        Record->Path->Append(N->FId);
      DCHECK(!Record->Path->empty());

      for (const auto C : Node->Callees)
        DFSStack.Append(C.NodePtr);
    }
  }
}
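Because each record's path is gathered by chasing Parent pointers upward from the node being visited, paths come out leaf-first. A worked illustration, assuming a hypothetical trie where main (FId 1) calls f (FId 2) which calls g (FId 3):

// Hypothetical trie:  main(1) -> f(2) -> g(3)
// Records produced by populateRecords, one per visited node:
//   node main: Path == {1}        plus main's CallCount/CumulativeLocalTime
//   node f:    Path == {2, 1}     leaf-first: f, then its parent main
//   node g:    Path == {3, 2, 1}  g, then f, then main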
static void serializeRecords(ProfileBuffer *Buffer, const BlockHeader &Header,
                             const ProfileRecordArray &ProfileRecords) {
  auto NextPtr = static_cast<char *>(
                     internal_memcpy(Buffer->Data, &Header, sizeof(Header))) +
                 sizeof(Header);
  for (const auto &Record : ProfileRecords) {
    // List of IDs follow:
    for (const auto FId : *Record.Path)
      NextPtr =
          static_cast<char *>(internal_memcpy(NextPtr, &FId, sizeof(FId))) +
          sizeof(FId);

    // Add the sentinel here.
    constexpr int32_t SentinelFId = 0;
    NextPtr = static_cast<char *>(
                  internal_memset(NextPtr, SentinelFId, sizeof(SentinelFId))) +
              sizeof(SentinelFId);

    // Add the node data here.
    NextPtr = static_cast<char *>(
                  internal_memcpy(NextPtr, &Record.Node->CallCount,
                                  sizeof(Record.Node->CallCount))) +
              sizeof(Record.Node->CallCount);
    NextPtr = static_cast<char *>(
                  internal_memcpy(NextPtr, &Record.Node->CumulativeLocalTime,
                                  sizeof(Record.Node->CumulativeLocalTime))) +
              sizeof(Record.Node->CumulativeLocalTime);
  }

  DCHECK_EQ(NextPtr - static_cast<char *>(Buffer->Data), Buffer->Size);
}

} // namespace

void serialize() {
  SpinMutexLock Lock(&GlobalMutex);

  // Clear out the global ProfileBuffers.
  for (uptr I = 0; I < ProfileBuffers->Size(); ++I)
    InternalFree((*ProfileBuffers)[I].Data);
  ProfileBuffers->Reset();

  if (ThreadTries->Size() == 0)
    return;

  // Then repopulate the global ProfileBuffers.
  for (u32 I = 0; I < ThreadTries->Size(); ++I) {
    using ProfileRecordAllocator = typename ProfileRecordArray::AllocatorType;
    ProfileRecordAllocator PRAlloc(profilingFlags()->global_allocator_max);
    ProfileRecord::PathAllocator PathAlloc(
        profilingFlags()->global_allocator_max);
    ProfileRecordArray ProfileRecords(PRAlloc);

    // First, we want to compute the amount of space we're going to need.
    // We'll use a local allocator and an __xray::Array<...> to store the
    // intermediary data, then compute the size as we're going along. Then
    // we'll allocate the contiguous space to contain the thread buffer data.
    const auto &Trie = *(*ThreadTries)[I].Trie;
    if (Trie.getRoots().empty())
      continue;
    populateRecords(ProfileRecords, PathAlloc, Trie);
    DCHECK(!Trie.getRoots().empty());
    DCHECK(!ProfileRecords.empty());

    // Go through each record, to compute the sizes.
    //
    // header size = block size (4 bytes)
    //             + block number (4 bytes)
    //             + thread id (8 bytes)
    // record size = path ids (4 bytes * number of ids + sentinel 4 bytes)
    //             + call count (8 bytes)
    //             + local time (8 bytes)
    u32 CumulativeSizes = 0;
    for (const auto &Record : ProfileRecords)
      CumulativeSizes += 20 + (4 * Record.Path->size());

    BlockHeader Header{16 + CumulativeSizes, I, (*ThreadTries)[I].TId};
    auto Buffer = ProfileBuffers->PushBack();
    Buffer->Size = sizeof(Header) + CumulativeSizes;
    Buffer->Data = InternalAlloc(Buffer->Size, nullptr, 64);
    DCHECK_NE(Buffer->Data, nullptr);
    serializeRecords(Buffer, Header, ProfileRecords);

    // Now clean up the ProfileRecords array, one at a time.
    for (auto &Record : ProfileRecords) {
      Record.Path->~PathArray();
      InternalFree(Record.Path);
    }
  }
}
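To make the size arithmetic concrete, consider a block holding the three records from the earlier main/f/g illustration (a worked example, not output from the source):

// header:                       16 bytes (block size 4 + block num 4 + tid 8)
// record for main (1 path id):  20 + 4*1 = 24 bytes
// record for f    (2 path ids): 20 + 4*2 = 28 bytes
// record for g    (3 path ids): 20 + 4*3 = 32 bytes
// Buffer->Size == 16 + 24 + 28 + 32 == 100 bytes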
void reset() {
  SpinMutexLock Lock(&GlobalMutex);

  if (ProfileBuffers != nullptr) {
    // Clear out the profile buffers that have been serialized.
    for (uptr I = 0; I < ProfileBuffers->Size(); ++I)
      InternalFree((*ProfileBuffers)[I].Data);
    ProfileBuffers->Reset();
    InternalFree(ProfileBuffers);
    ProfileBuffers = nullptr;
  }

  if (ThreadTries != nullptr) {
    // Clear out the function call tries per thread.
    for (uptr I = 0; I < ThreadTries->Size(); ++I) {
      auto &T = (*ThreadTries)[I];
      T.Trie->~FunctionCallTrie();
      InternalFree(T.Trie);
    }
    ThreadTries->Reset();
    InternalFree(ThreadTries);
    ThreadTries = nullptr;
  }

  // Reset the global allocators.
  if (GlobalAllocators != nullptr) {
    GlobalAllocators->~Allocators();
    InternalFree(GlobalAllocators);
    GlobalAllocators = nullptr;
  }

  GlobalAllocators = reinterpret_cast<FunctionCallTrie::Allocators *>(
      InternalAlloc(sizeof(FunctionCallTrie::Allocators)));
  new (GlobalAllocators) FunctionCallTrie::Allocators();
  *GlobalAllocators = FunctionCallTrie::InitAllocators();
  ThreadTries = reinterpret_cast<Vector<ThreadTrie> *>(
      InternalAlloc(sizeof(Vector<ThreadTrie>)));
  new (ThreadTries) Vector<ThreadTrie>();
  ProfileBuffers = reinterpret_cast<Vector<ProfileBuffer> *>(
      InternalAlloc(sizeof(Vector<ProfileBuffer>)));
  new (ProfileBuffers) Vector<ProfileBuffer>();
}

XRayBuffer nextBuffer(XRayBuffer B) {
  SpinMutexLock Lock(&GlobalMutex);

  if (ProfileBuffers == nullptr || ProfileBuffers->Size() == 0)
    return {nullptr, 0};

-  if (B.Data == nullptr)
+  static pthread_once_t Once = PTHREAD_ONCE_INIT;
+  static typename std::aligned_storage<sizeof(XRayProfilingFileHeader)>::type
+      FileHeaderStorage;
+  pthread_once(&Once,
+               +[] { new (&FileHeaderStorage) XRayProfilingFileHeader{}; });
+
+  if (UNLIKELY(B.Data == nullptr)) {
+    // The first buffer should always contain the file header information.
+    auto &FileHeader =
+        *reinterpret_cast<XRayProfilingFileHeader *>(&FileHeaderStorage);
+    FileHeader.Timestamp = NanoTime();
+    FileHeader.PID = internal_getpid();
+    return {&FileHeaderStorage, sizeof(XRayProfilingFileHeader)};
+  }
+
+  if (UNLIKELY(B.Data == &FileHeaderStorage))
    return {(*ProfileBuffers)[0].Data, (*ProfileBuffers)[0].Size};

  BlockHeader Header;
  internal_memcpy(&Header, B.Data, sizeof(BlockHeader));
  auto NextBlock = Header.BlockNum + 1;
  if (NextBlock < ProfileBuffers->Size())
    return {(*ProfileBuffers)[NextBlock].Data,
            (*ProfileBuffers)[NextBlock].Size};
  return {nullptr, 0};
}

} // namespace profileCollectorService
} // namespace __xray
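With this change the buffer iterator yields the file header first, then one block per thread trie. A consumer drives the iterator by passing {nullptr, 0} to start and feeding each returned buffer back in (a sketch of the protocol; consume() is a hypothetical sink):

XRayBuffer B = profileCollectorService::nextBuffer({nullptr, 0});
// The first call now returns the XRayProfilingFileHeader buffer.
while (B.Data != nullptr && B.Size != 0) {
  consume(B.Data, B.Size); // hypothetical: write, parse, etc.
  B = profileCollectorService::nextBuffer(B);
}
// nextBuffer returns {nullptr, 0} once every block has been handed out.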
Index: projects/clang700-import/contrib/compiler-rt/lib/xray/xray_profiling.cc
===================================================================
--- projects/clang700-import/contrib/compiler-rt/lib/xray/xray_profiling.cc	(revision 337153)
+++ projects/clang700-import/contrib/compiler-rt/lib/xray/xray_profiling.cc	(revision 337154)
@@ -1,372 +1,355 @@
//===-- xray_profiling.cc ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This is the implementation of a profiling handler.
//
//===----------------------------------------------------------------------===//
#include <memory>
#include <pthread.h>

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "xray/xray_interface.h"
#include "xray/xray_log_interface.h"

#include "xray_flags.h"
#include "xray_profile_collector.h"
#include "xray_profiling_flags.h"
#include "xray_recursion_guard.h"
#include "xray_tsc.h"
#include "xray_utils.h"
#include <pthread.h>

namespace __xray {

namespace {

-constexpr uptr XRayProfilingVersion = 0x20180424;
-
-struct XRayProfilingFileHeader {
-  const u64 MagicBytes = 0x7872617970726f66; // Identifier for XRay profiling
-                                             // files 'xrayprof' in hex.
-  const uptr Version = XRayProfilingVersion;
-  uptr Timestamp = 0; // System time in nanoseconds.
-  uptr PID = 0;       // Process ID.
-};
-
atomic_sint32_t ProfilerLogFlushStatus = {
    XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};

atomic_sint32_t ProfilerLogStatus = {XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};

SpinMutex ProfilerOptionsMutex;

struct alignas(64) ProfilingData {
  FunctionCallTrie::Allocators *Allocators = nullptr;
  FunctionCallTrie *FCT = nullptr;
};

static pthread_key_t ProfilingKey;

thread_local std::aligned_storage<sizeof(ProfilingData),
                                  alignof(ProfilingData)>::type
    ThreadStorage{};

static ProfilingData &getThreadLocalData() XRAY_NEVER_INSTRUMENT {
  thread_local auto ThreadOnce = [] {
    new (&ThreadStorage) ProfilingData{};
    pthread_setspecific(ProfilingKey, &ThreadStorage);
    return false;
  }();
  (void)ThreadOnce;

  auto &TLD = *reinterpret_cast<ProfilingData *>(&ThreadStorage);

  // We need to check whether the global status has been switched to
  // finalizing/finalized. If it has, then we ought to not actually
  // initialize the data.
  auto Status = atomic_load(&ProfilerLogStatus, memory_order_acquire);
  if (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING ||
      Status == XRayLogInitStatus::XRAY_LOG_FINALIZED)
    return TLD;

  // If we're live, then we re-initialize TLD if the pointers are null.
  if (UNLIKELY(TLD.Allocators == nullptr && TLD.FCT == nullptr)) {
    TLD.Allocators = reinterpret_cast<FunctionCallTrie::Allocators *>(
        InternalAlloc(sizeof(FunctionCallTrie::Allocators)));
    new (TLD.Allocators) FunctionCallTrie::Allocators();
    *TLD.Allocators = FunctionCallTrie::InitAllocators();
    TLD.FCT = reinterpret_cast<FunctionCallTrie *>(
        InternalAlloc(sizeof(FunctionCallTrie)));
    new (TLD.FCT) FunctionCallTrie(*TLD.Allocators);
  }

  return TLD;
}

static void cleanupTLD() XRAY_NEVER_INSTRUMENT {
  auto &TLD = *reinterpret_cast<ProfilingData *>(&ThreadStorage);
  if (TLD.Allocators != nullptr && TLD.FCT != nullptr) {
    TLD.FCT->~FunctionCallTrie();
    TLD.Allocators->~Allocators();
    InternalFree(TLD.FCT);
    InternalFree(TLD.Allocators);
    TLD.FCT = nullptr;
    TLD.Allocators = nullptr;
  }
}

} // namespace

const char *profilingCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT {
#ifdef XRAY_PROFILER_DEFAULT_OPTIONS
  return SANITIZER_STRINGIFY(XRAY_PROFILER_DEFAULT_OPTIONS);
#else
  return "";
#endif
}

atomic_sint32_t ProfileFlushStatus = {
    XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};

XRayLogFlushStatus profilingFlush() XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&ProfilerLogStatus, memory_order_acquire) !=
      XRayLogInitStatus::XRAY_LOG_FINALIZED) {
    if (Verbosity())
      Report("Not flushing profiles, profiling has not been finalized.\n");
    return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
  }

  s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
  if (!atomic_compare_exchange_strong(&ProfilerLogFlushStatus, &Result,
                                      XRayLogFlushStatus::XRAY_LOG_FLUSHING,
                                      memory_order_acq_rel)) {
    if (Verbosity())
      Report("Not flushing profiles, implementation still finalizing.\n");
  }

  // At this point, we'll create the file that will contain the profile, but
  // only if the options say so.
  if (!profilingFlags()->no_flush) {
    // First check whether we have data in the profile collector service
    // before we try and write anything down.
    XRayBuffer B = profileCollectorService::nextBuffer({nullptr, 0});
    if (B.Data == nullptr) {
      if (Verbosity())
        Report("profiling: No data to flush.\n");
    } else {
      int Fd = getLogFD();
      if (Fd == -1) {
        if (Verbosity())
          Report("profiling: Failed to flush to file, dropping data.\n");
      } else {
-        XRayProfilingFileHeader Header;
-        Header.Timestamp = NanoTime();
-        Header.PID = internal_getpid();
-        retryingWriteAll(Fd, reinterpret_cast<char *>(&Header),
-                         reinterpret_cast<char *>(&Header) + sizeof(Header));
-
-        // Now for each of the threads, write out the profile data as we would
+        // Now for each of the buffers, write out the profile data as we would
        // see it in memory, verbatim.
        while (B.Data != nullptr && B.Size != 0) {
          retryingWriteAll(Fd, reinterpret_cast<const char *>(B.Data),
                           reinterpret_cast<const char *>(B.Data) + B.Size);
          B = profileCollectorService::nextBuffer(B);
        }
        // Then we close out the file.
        internal_close(Fd);
      }
    }
  }

  profileCollectorService::reset();

  // Flush the current thread's local data structures as well.
  cleanupTLD();

  atomic_store(&ProfilerLogStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED,
               memory_order_release);

  return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
}
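Since the file header now travels through the same buffer iterator as the blocks, the flushed profile ends up laid out as follows (an illustration of the resulting layout under the sizes given earlier, not text from the source):

// On-disk layout after this change:
//   [XRayProfilingFileHeader]             <- first buffer from nextBuffer()
//   [BlockHeader][records for thread 0]   <- one block per thread trie
//   [BlockHeader][records for thread 1]
//   ...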
namespace {

thread_local atomic_uint8_t ReentranceGuard{0};

static void postCurrentThreadFCT(ProfilingData &TLD) {
  if (TLD.Allocators == nullptr || TLD.FCT == nullptr)
    return;

  profileCollectorService::post(*TLD.FCT, GetTid());
  cleanupTLD();
}

} // namespace

void profilingHandleArg0(int32_t FuncId,
                         XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
  unsigned char CPU;
  auto TSC = readTSC(CPU);
  RecursionGuard G(ReentranceGuard);
  if (!G)
    return;

  auto Status = atomic_load(&ProfilerLogStatus, memory_order_acquire);
  auto &TLD = getThreadLocalData();
  if (UNLIKELY(Status == XRayLogInitStatus::XRAY_LOG_FINALIZED ||
               Status == XRayLogInitStatus::XRAY_LOG_FINALIZING)) {
    postCurrentThreadFCT(TLD);
    return;
  }

  switch (Entry) {
  case XRayEntryType::ENTRY:
  case XRayEntryType::LOG_ARGS_ENTRY:
    TLD.FCT->enterFunction(FuncId, TSC);
    break;
  case XRayEntryType::EXIT:
  case XRayEntryType::TAIL:
    TLD.FCT->exitFunction(FuncId, TSC);
    break;
  default:
    // FIXME: Handle bugs.
    break;
  }
}

void profilingHandleArg1(int32_t FuncId, XRayEntryType Entry,
                         uint64_t) XRAY_NEVER_INSTRUMENT {
  return profilingHandleArg0(FuncId, Entry);
}

XRayLogInitStatus profilingFinalize() XRAY_NEVER_INSTRUMENT {
  s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
  if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus,
                                      XRayLogInitStatus::XRAY_LOG_FINALIZING,
                                      memory_order_release)) {
    if (Verbosity())
      Report("Cannot finalize profile, the profiling is not initialized.\n");
    return static_cast<XRayLogInitStatus>(CurrentStatus);
  }

  // Wait a grace period to allow threads to see that we're finalizing.
  SleepForMillis(profilingFlags()->grace_period_ms);

  // We also want to make sure that the current thread's data is cleaned up,
  // if we have any.
  auto &TLD = getThreadLocalData();
  postCurrentThreadFCT(TLD);

  // Then we force serialize the log data.
  profileCollectorService::serialize();

  atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_FINALIZED,
               memory_order_release);
  return XRayLogInitStatus::XRAY_LOG_FINALIZED;
}

XRayLogInitStatus
profilingLoggingInit(size_t BufferSize, size_t BufferMax, void *Options,
                     size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
  if (BufferSize != 0 || BufferMax != 0) {
    if (Verbosity())
      Report("__xray_log_init() being used, and is unsupported. Use "
             "__xray_log_init_mode(...) instead. Bailing out.\n");
    return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
  }

  s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
  if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus,
                                      XRayLogInitStatus::XRAY_LOG_INITIALIZING,
                                      memory_order_release)) {
    if (Verbosity())
      Report("Cannot initialize an already initialized profiling "
             "implementation.\n");
    return static_cast<XRayLogInitStatus>(CurrentStatus);
  }

  {
    SpinMutexLock Lock(&ProfilerOptionsMutex);
    FlagParser ConfigParser;
    ProfilerFlags Flags;
    Flags.setDefaults();
    registerProfilerFlags(&ConfigParser, &Flags);
    ConfigParser.ParseString(profilingCompilerDefinedFlags());
    const char *Env = GetEnv("XRAY_PROFILING_OPTIONS");
    if (Env == nullptr)
      Env = "";
    ConfigParser.ParseString(Env);

    // Then parse the configuration string provided.
    ConfigParser.ParseString(static_cast<const char *>(Options));
    if (Verbosity())
      ReportUnrecognizedFlags();
    *profilingFlags() = Flags;
  }

  // We need to reset the profile data collection implementation now.
  profileCollectorService::reset();

  // We need to set up the exit handlers.
  static pthread_once_t Once = PTHREAD_ONCE_INIT;
  pthread_once(&Once, +[] {
    pthread_key_create(&ProfilingKey, +[](void *P) {
      // This is the thread-exit handler.
      auto &TLD = *reinterpret_cast<ProfilingData *>(P);
      if (TLD.Allocators == nullptr && TLD.FCT == nullptr)
        return;
      postCurrentThreadFCT(TLD);
    });

    // We also need to set up an exit handler, so that we can get the profile
    // information at exit time. We use the C API to do this, to not rely on
    // C++ ABI functions for registering exit handlers.
    Atexit(+[] {
      // Finalize and flush.
      if (profilingFinalize() != XRAY_LOG_FINALIZED) {
        cleanupTLD();
        return;
      }
      if (profilingFlush() != XRAY_LOG_FLUSHED) {
        cleanupTLD();
        return;
      }
      if (Verbosity())
        Report("XRay Profile flushed at exit.\n");
    });
  });

  __xray_log_set_buffer_iterator(profileCollectorService::nextBuffer);
  __xray_set_handler(profilingHandleArg0);
  __xray_set_handler_arg1(profilingHandleArg1);

  atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZED,
               memory_order_release);
  if (Verbosity())
    Report("XRay Profiling init successful.\n");

  return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
}
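The flag pipeline above layers the compiler-defined defaults, the XRAY_PROFILING_OPTIONS environment variable, and the init-time options string, in that order, so later sources override earlier ones. An illustrative invocation (the flag names are those referenced in this file, e.g. global_allocator_max and no_flush; the exact command is hypothetical):

// Illustrative only: raise the allocator cap and keep the on-exit flush.
//   XRAY_PROFILING_OPTIONS="global_allocator_max=16777216 no_flush=false" \
//   XRAY_OPTIONS="xray_mode=xray-profiling patch_premain=true" ./a.out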
bool profilingDynamicInitializer() XRAY_NEVER_INSTRUMENT {
  // Set up the flag defaults from the static defaults and the
  // compiler-provided defaults.
  {
    SpinMutexLock Lock(&ProfilerOptionsMutex);
    auto *F = profilingFlags();
    F->setDefaults();
    FlagParser ProfilingParser;
    registerProfilerFlags(&ProfilingParser, F);
    ProfilingParser.ParseString(profilingCompilerDefinedFlags());
  }

  XRayLogImpl Impl{
      profilingLoggingInit,
      profilingFinalize,
      profilingHandleArg0,
      profilingFlush,
  };
  auto RegistrationResult = __xray_log_register_mode("xray-profiling", Impl);
  if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) {
    if (Verbosity())
      Report("Cannot register XRay Profiling mode to 'xray-profiling'; "
             "error = %d\n",
             RegistrationResult);
    return false;
  }

  if (!internal_strcmp(flags()->xray_mode, "xray-profiling"))
    __xray_log_select_mode("xray-profiling");
  return true;
}

} // namespace __xray

static auto UNUSED Unused = __xray::profilingDynamicInitializer();
Index: projects/clang700-import/contrib/compiler-rt
===================================================================
--- projects/clang700-import/contrib/compiler-rt	(revision 337153)
+++ projects/clang700-import/contrib/compiler-rt	(revision 337154)

Property changes on: projects/clang700-import/contrib/compiler-rt
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /vendor/compiler-rt/dist:r337132-337153