diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3195de1e5d1e..f997c53410c1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,271 +1,277 @@
# CMake build for CompilerRT.
#
# This build assumes that CompilerRT is checked out into the
# 'projects/compiler-rt' or 'runtimes/compiler-rt' inside of an LLVM tree.
# Standalone build system for CompilerRT is not yet ready.
#
# An important constraint of the build is that it only produces libraries
# based on the ability of the host toolchain to target various platforms.

cmake_minimum_required(VERSION 3.4.3)

# Check if compiler-rt is built as a standalone project.
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR OR COMPILER_RT_STANDALONE_BUILD)
  project(CompilerRT C CXX ASM)
  set(COMPILER_RT_STANDALONE_BUILD TRUE)
endif()

# Add path for custom compiler-rt modules.
list(INSERT CMAKE_MODULE_PATH 0
  "${CMAKE_CURRENT_SOURCE_DIR}/cmake"
  "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules"
  )

include(base-config-ix)
include(CompilerRTUtils)

option(COMPILER_RT_BUILD_BUILTINS "Build builtins" ON)
mark_as_advanced(COMPILER_RT_BUILD_BUILTINS)
option(COMPILER_RT_BUILD_SANITIZERS "Build sanitizers" ON)
mark_as_advanced(COMPILER_RT_BUILD_SANITIZERS)
option(COMPILER_RT_BUILD_XRAY "Build xray" ON)
mark_as_advanced(COMPILER_RT_BUILD_XRAY)

set(COMPILER_RT_BAREMETAL_BUILD OFF CACHE BOOL
  "Build for a bare-metal target.")

if (COMPILER_RT_STANDALONE_BUILD)
  load_llvm_config()

  # Find Python interpreter.
  set(Python_ADDITIONAL_VERSIONS 2.7 2.6 2.5)
  include(FindPythonInterp)
  if(NOT PYTHONINTERP_FOUND)
    message(FATAL_ERROR "
      Unable to find Python interpreter required for testing.
      Please install Python or specify the PYTHON_EXECUTABLE CMake variable.")
  endif()

  # Define default arguments to lit.
  set(LIT_ARGS_DEFAULT "-sv")
  if (MSVC OR XCODE)
    set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
  endif()
  set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
endif()

construct_compiler_rt_default_triple()
if ("${COMPILER_RT_DEFAULT_TARGET_ABI}" STREQUAL "androideabi")
  set(ANDROID 1)
endif()

set(COMPILER_RT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(COMPILER_RT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})

# We support running instrumented tests when we're not cross-compiling
# and target a UNIX-like system or Windows.
# We can run tests on Android even when we are cross-compiling.
if(("${CMAKE_HOST_SYSTEM}" STREQUAL "${CMAKE_SYSTEM}" AND (UNIX OR WIN32)) OR ANDROID
   OR COMPILER_RT_EMULATOR)
  option(COMPILER_RT_CAN_EXECUTE_TESTS "Can we execute instrumented tests" ON)
else()
  option(COMPILER_RT_CAN_EXECUTE_TESTS "Can we execute instrumented tests" OFF)
endif()

option(COMPILER_RT_DEBUG "Build runtimes with full debug info" OFF)
option(COMPILER_RT_EXTERNALIZE_DEBUGINFO
  "Generate dSYM files and strip executables and libraries (Darwin Only)" OFF)
# COMPILER_RT_DEBUG_PYBOOL is used by lit.common.configured.in.
pythonize_bool(COMPILER_RT_DEBUG)

include(config-ix)

if(APPLE AND SANITIZER_MIN_OSX_VERSION AND SANITIZER_MIN_OSX_VERSION VERSION_LESS "10.9")
  # Mac OS X prior to 10.9 had problems with exporting symbols from
  # libc++/libc++abi.
  set(use_cxxabi_default OFF)
elseif(MSVC)
  set(use_cxxabi_default OFF)
else()
  set(use_cxxabi_default ON)
endif()

option(SANITIZER_CAN_USE_CXXABI "Sanitizers can use cxxabi" ${use_cxxabi_default})
pythonize_bool(SANITIZER_CAN_USE_CXXABI)

#================================
# Setup Compiler Flags
#================================

if(MSVC)
  # Override any existing /W flags with /W4. This is what LLVM does.
  # Failing to remove other /W[0-4] flags will result in a warning about
  # overriding a previous flag.
  if (COMPILER_RT_HAS_W4_FLAG)
    string(REGEX REPLACE " /W[0-4]" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
    string(REGEX REPLACE " /W[0-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
    append_string_if(COMPILER_RT_HAS_W4_FLAG /W4 CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
  endif()
else()
  append_string_if(COMPILER_RT_HAS_WALL_FLAG -Wall CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
if(COMPILER_RT_ENABLE_WERROR)
  append_string_if(COMPILER_RT_HAS_WERROR_FLAG -Werror CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
  append_string_if(COMPILER_RT_HAS_WX_FLAG /WX CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()

append_string_if(COMPILER_RT_HAS_STD_CXX11_FLAG -std=c++11 CMAKE_CXX_FLAGS)

# Emulate C99 and C++11's __func__ for MSVC prior to 2013 CTP.
if(NOT COMPILER_RT_HAS_FUNC_SYMBOL)
  add_definitions(-D__func__=__FUNCTION__)
endif()

# Provide some common command-line flags for Sanitizer runtimes.
if(NOT WIN32)
  append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC SANITIZER_COMMON_CFLAGS)
endif()
append_list_if(COMPILER_RT_HAS_FNO_BUILTIN_FLAG -fno-builtin SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG -fno-exceptions SANITIZER_COMMON_CFLAGS)
if(NOT COMPILER_RT_DEBUG AND NOT APPLE)
  append_list_if(COMPILER_RT_HAS_FOMIT_FRAME_POINTER_FLAG -fomit-frame-pointer SANITIZER_COMMON_CFLAGS)
endif()
append_list_if(COMPILER_RT_HAS_FUNWIND_TABLES_FLAG -funwind-tables SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FNO_STACK_PROTECTOR_FLAG -fno-stack-protector SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FNO_SANITIZE_SAFE_STACK_FLAG -fno-sanitize=safe-stack SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG -fvisibility=hidden SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FVISIBILITY_INLINES_HIDDEN_FLAG -fvisibility-inlines-hidden SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto SANITIZER_COMMON_CFLAGS)

# The following is a workaround for powerpc64le. This is the only architecture
# that requires -fno-function-sections to work properly. If lacking, the ASan
# Linux test function-sections-are-bad.cc fails with the following error:
# 'undefined symbol: __sanitizer_unaligned_load32'.
if(DEFINED TARGET_powerpc64le_CFLAGS)
  append_list_if(COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG -fno-function-sections TARGET_powerpc64le_CFLAGS)
endif()

if(MSVC)
  # Replace the /M[DT][d] flags with /MT, and strip any definitions of _DEBUG,
  # which cause definition mismatches at link time.
  # FIXME: In fact, sanitizers should support both /MT and /MD, see PR20214.
  if(COMPILER_RT_HAS_MT_FLAG)
    foreach(flag_var
      CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
      CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO
      CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
      CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
      string(REGEX REPLACE "/M[DT]d" "/MT" ${flag_var} "${${flag_var}}")
      string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
      string(REGEX REPLACE "/D_DEBUG" "" ${flag_var} "${${flag_var}}")
    endforeach()
  endif()
  append_list_if(COMPILER_RT_HAS_Oy_FLAG /Oy- SANITIZER_COMMON_CFLAGS)
  append_list_if(COMPILER_RT_HAS_GS_FLAG /GS- SANITIZER_COMMON_CFLAGS)
  # VS 2015 (version 1900) added support for thread-safe static initialization.
  # However, ASan interceptors run before CRT initialization, which causes the
  # new thread-safe code to crash. Disable this feature for now.
  if (MSVC_VERSION GREATER 1899 OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
    list(APPEND SANITIZER_COMMON_CFLAGS /Zc:threadSafeInit-)
  endif()
endif()

append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 SANITIZER_COMMON_CFLAGS)

-# Build with optimization, unless we're in debug mode. If we're using MSVC,
+# If we're using MSVC,
# always respect the optimization flags set by CMAKE_BUILD_TYPE instead.
-if(NOT COMPILER_RT_DEBUG AND NOT MSVC)
-  list(APPEND SANITIZER_COMMON_CFLAGS -O3)
+if (NOT MSVC)
+
+  # Build with optimization, unless we're in debug mode.
+  if(COMPILER_RT_DEBUG)
+    list(APPEND SANITIZER_COMMON_CFLAGS -O0)
+  else()
+    list(APPEND SANITIZER_COMMON_CFLAGS -O3)
+  endif()
endif()

# Determine if we should restrict stack frame sizes.
# Stack frames on PowerPC and MIPS, and in debug builds, can be much larger
# than anticipated.
# FIXME: Fix all sanitizers and add -Wframe-larger-than to
# SANITIZER_COMMON_FLAGS
if(COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG AND NOT COMPILER_RT_DEBUG
   AND NOT ${COMPILER_RT_DEFAULT_TARGET_ARCH} MATCHES "powerpc|mips")
  set(SANITIZER_LIMIT_FRAME_SIZE TRUE)
else()
  set(SANITIZER_LIMIT_FRAME_SIZE FALSE)
endif()

# Build sanitizer runtimes with debug info.
if(COMPILER_RT_HAS_GLINE_TABLES_ONLY_FLAG AND NOT COMPILER_RT_DEBUG)
  list(APPEND SANITIZER_COMMON_CFLAGS -gline-tables-only)
elseif(COMPILER_RT_HAS_G_FLAG)
  list(APPEND SANITIZER_COMMON_CFLAGS -g)
elseif(MSVC)
  # Use /Z7 instead of /Zi for the asan runtime. This avoids the LNK4099
  # warning from the MS linker complaining that it can't find the 'vc140.pdb'
  # file used by our object library compilations.
  list(APPEND SANITIZER_COMMON_CFLAGS /Z7)
  llvm_replace_compiler_option(CMAKE_CXX_FLAGS "/Z[i7I]" "/Z7")
  llvm_replace_compiler_option(CMAKE_CXX_FLAGS_DEBUG "/Z[i7I]" "/Z7")
  llvm_replace_compiler_option(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/Z[i7I]" "/Z7")
endif()

if(LLVM_ENABLE_MODULES)
  # Sanitizers cannot be built with -fmodules. The interceptors intentionally
  # don't include system headers, which is incompatible with modules.
  list(APPEND SANITIZER_COMMON_CFLAGS -fno-modules)
endif()

# Turn off several warnings.
append_list_if(COMPILER_RT_HAS_WGNU_FLAG -Wno-gnu SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG -Wno-variadic-macros SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WC99_EXTENSIONS_FLAG -Wno-c99-extensions SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WNON_VIRTUAL_DTOR_FLAG -Wno-non-virtual-dtor SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4146_FLAG /wd4146 SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4291_FLAG /wd4291 SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4391_FLAG /wd4391 SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4722_FLAG /wd4722 SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4800_FLAG /wd4800 SANITIZER_COMMON_CFLAGS)

# Warnings to turn off for all libraries, not just sanitizers.
append_string_if(COMPILER_RT_HAS_WUNUSED_PARAMETER_FLAG -Wno-unused-parameter CMAKE_C_FLAGS CMAKE_CXX_FLAGS)

if (CMAKE_LINKER MATCHES "link.exe$")
  # Silence MSVC linker warnings caused by empty object files. The
  # sanitizer libraries intentionally use ifdefs that result in empty
  # files, rather than skipping these files in the build system.
  # Ideally, we would pass this flag only for the libraries that need
  # it, but CMake doesn't seem to have a way to set linker flags for
  # individual static libraries, so we enable the suppression flag for
  # the whole compiler-rt project.
append("/IGNORE:4221" CMAKE_STATIC_LINKER_FLAGS) endif() add_subdirectory(include) set(COMPILER_RT_LIBCXX_PATH ${LLVM_MAIN_SRC_DIR}/projects/libcxx) if(EXISTS ${COMPILER_RT_LIBCXX_PATH}/) set(COMPILER_RT_HAS_LIBCXX_SOURCES TRUE) else() set(COMPILER_RT_LIBCXX_PATH ${LLVM_MAIN_SRC_DIR}/../libcxx) if(EXISTS ${COMPILER_RT_LIBCXX_PATH}/) set(COMPILER_RT_HAS_LIBCXX_SOURCES TRUE) else() set(COMPILER_RT_HAS_LIBCXX_SOURCES FALSE) endif() endif() set(COMPILER_RT_LLD_PATH ${LLVM_MAIN_SRC_DIR}/tools/lld) if(EXISTS ${COMPILER_RT_LLD_PATH}/ AND LLVM_TOOL_LLD_BUILD) set(COMPILER_RT_HAS_LLD TRUE) else() set(COMPILER_RT_LLD_PATH ${LLVM_MAIN_SRC_DIR}/../lld) if(EXISTS ${COMPILER_RT_LLD_PATH}/ AND LLVM_TOOL_LLD_BUILD) set(COMPILER_RT_HAS_LLD TRUE) else() set(COMPILER_RT_HAS_LLD FALSE) endif() endif() pythonize_bool(COMPILER_RT_HAS_LLD) add_subdirectory(lib) if(COMPILER_RT_INCLUDE_TESTS) add_subdirectory(unittests) add_subdirectory(test) endif() diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc index de6613f56727..92963ddfc4da 100644 --- a/lib/asan/asan_allocator.cc +++ b/lib/asan/asan_allocator.cc @@ -1,1009 +1,1021 @@ //===-- asan_allocator.cc -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Implementation of ASan's memory allocator, 2-nd version. // This variant uses the allocator from sanitizer_common, i.e. the one shared // with ThreadSanitizer and MemorySanitizer. // //===----------------------------------------------------------------------===// #include "asan_allocator.h" #include "asan_mapping.h" #include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_list.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_quarantine.h" #include "lsan/lsan_common.h" namespace __asan { // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. // We use adaptive redzones: for larger allocation larger redzones are used. static u32 RZLog2Size(u32 rz_log) { CHECK_LT(rz_log, 8); return 16 << rz_log; } static u32 RZSize2Log(u32 rz_size) { CHECK_GE(rz_size, 16); CHECK_LE(rz_size, 2048); CHECK(IsPowerOfTwo(rz_size)); u32 res = Log2(rz_size) - 4; CHECK_EQ(rz_size, RZLog2Size(res)); return res; } static AsanAllocator &get_allocator(); // The memory chunk allocated from the underlying allocator looks like this: // L L L L L L H H U U U U U U R R // L -- left redzone words (0 or more bytes) // H -- ChunkHeader (16 bytes), which is also a part of the left redzone. // U -- user memory. // R -- right redzone (0 or more bytes) // ChunkBase consists of ChunkHeader and other bytes that overlap with user // memory. // If the left redzone is greater than the ChunkHeader size we store a magic // value in the first uptr word of the memory block and store the address of // ChunkBase in the next uptr. 
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;
  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void *>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      return DieOnFailure::OnOOM();
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
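// ---------------------------------------------------------------------------
// Aside (illustration, not part of the patch): the Quarantine typedef'd
// above is what delays reuse of freed chunks, so use-after-free bugs have a
// window in which the memory is still poisoned. Roughly, freed blocks enter
// a FIFO with a byte budget and are only recycled once the budget is
// exceeded. A toy model of that idea (the real Quarantine<> is cached and
// lock-free; this sketch is not the sanitizer API):

#include <cstddef>
#include <queue>

namespace quarantine_sketch {

struct FakeChunk { size_t size; };

class ToyQuarantine {
 public:
  explicit ToyQuarantine(size_t limit) : limit_(limit) {}
  ~ToyQuarantine() {
    while (!q_.empty()) { delete q_.front(); q_.pop(); }
  }
  // Corresponds to quarantine.Put(...) in QuarantineChunk() below.
  void Put(FakeChunk *m) {
    q_.push(m);
    bytes_ += m->size;
    while (bytes_ > limit_) {  // over budget: recycle oldest chunks
      FakeChunk *oldest = q_.front();
      q_.pop();
      bytes_ -= oldest->size;
      delete oldest;           // real code: get_allocator().Deallocate()
    }
  }
 private:
  std::queue<FakeChunk *> q_;
  size_t bytes_ = 0;
  size_t limit_;
};

}  // namespace quarantine_sketch
// ---------------------------------------------------------------------------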
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void Initialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.Init(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
    uptr beg = ac->Beg();
    uptr end = ac->Beg() + ac->UsedSize(true);
    uptr chunk_end = chunk + allocated_size;
    if (chunk < beg && beg < end && end <= chunk_end &&
        ac->chunk_state == CHUNK_ALLOCATED) {
      // Looks like a valid AsanChunk in use, poison redzones only.
      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
      FastPoisonShadowPartialRightRedzone(
          end_aligned_down, end - end_aligned_down,
          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
    } else {
      // This is either not an AsanChunk or freed or quarantined AsanChunk.
      // In either case, poison everything.
      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocation's redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
        user_requested_size <= 64        - 16   ? 0 :
        user_requested_size <= 128       - 32   ? 1 :
        user_requested_size <= 512       - 64   ? 2 :
        user_requested_size <= 4096      - 128  ? 3 :
        user_requested_size <= (1 << 14) - 256  ? 4 :
        user_requested_size <= (1 << 15) - 512  ? 5 :
        user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded())
      return AsanAllocator::FailureHandler::OnOOM();
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
             (void*)size);
      return AsanAllocator::FailureHandler::OnBadRequest();
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (!allocated)
      return nullptr;

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }

  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    return true;
  }
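// ---------------------------------------------------------------------------
// Aside (illustration, not part of the patch): the CAS above is the entire
// double-free defense. Only one thread can win the ALLOCATED -> QUARANTINE
// transition; the loser observes the stale state and reports. The same
// pattern with std::atomic instead of the sanitizer atomics:

#include <atomic>
#include <cassert>
#include <cstdint>

namespace cas_sketch {

enum : uint8_t { kAllocated = 2, kQuarantine = 3 };

// Returns true for the first free; a second call fails and would trigger
// the equivalent of ReportInvalidFree().
inline bool SetQuarantineFlagIfAllocated(std::atomic<uint8_t> &chunk_state) {
  uint8_t old_state = kAllocated;
  return chunk_state.compare_exchange_strong(old_state, kQuarantine,
                                             std::memory_order_acquire);
}

inline void Demo() {
  std::atomic<uint8_t> state{kAllocated};
  assert(SetQuarantineFlagIfAllocated(state));   // first free wins
  assert(!SetQuarantineFlagIfAllocated(state));  // double-free detected
}

}  // namespace cas_sketch
// ---------------------------------------------------------------------------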
  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
                     m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                  AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS && !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    }

    if (delete_size && flags()->new_delete_type_mismatch &&
        delete_size != m->UsedSize()) {
      ReportNewDeleteSizeMismatch(p, delete_size, stack);
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (CheckForCallocOverflow(size, nmemb))
      return AsanAllocator::FailureHandler::OnBadRequest();
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
    allocator.SwallowCache(ac);
  }
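// ---------------------------------------------------------------------------
// Aside (illustration, not part of the patch): Calloc() above rejects
// nmemb * size overflow via sanitizer_common's CheckForCallocOverflow()
// before doing the multiplication. The check reduces to a single division;
// a local re-implementation for illustration (not the library function
// itself):

#include <cstdint>

namespace calloc_sketch {

using uptr = uintptr_t;

inline bool WouldCallocOverflow(uptr size, uptr n) {
  if (size == 0) return false;   // 0 * n never overflows
  uptr max = static_cast<uptr>(-1);
  return n > max / size;         // true iff n * size > UINTPTR_MAX
}

inline void Demo() {
  // (UINTPTR_MAX / 2 + 1) * 2 wraps around, so this must be rejected.
  uptr big = static_cast<uptr>(-1) / 2 + 1;
  bool rejected = WouldCallocOverflow(big, 2);
  (void)rejected;  // true: calloc(big, 2) must fail (and now set errno)
}

}  // namespace calloc_sketch
// ---------------------------------------------------------------------------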
  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() const {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() const {
  return GetStackTraceFromId(GetFreeStackId());
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.Initialize(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void *>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  instance.CommitBack(this);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

-void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
-                    AllocType alloc_type) {
-  return instance.Allocate(size, alignment, stack, alloc_type, true);
-}
-
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, stack, alloc_type);
}

void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  instance.Deallocate(ptr, size, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
-  return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
-  return instance.Calloc(nmemb, size, stack);
+  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
-    return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
-  return instance.Reallocate(p, size, stack);
+  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
-  return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
+  return SetErrnoOnNull(
+      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
-  size = RoundUpTo(size, PageSize);
-  if (size == 0) {
-    // pvalloc(0) should allocate one page.
-    size = PageSize;
-  }
-  return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
+  // pvalloc(0) should allocate one page.
+  size = size ? RoundUpTo(size, PageSize) : PageSize;
+  return SetErrnoOnNull(
+      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
+}
+
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+                    AllocType alloc_type) {
+  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+    errno = errno_EINVAL;
+    return AsanAllocator::FailureHandler::OnBadRequest();
+  }
+  return SetErrnoOnNull(
+      instance.Allocate(size, alignment, stack, alloc_type, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    AsanAllocator::FailureHandler::OnBadRequest();
+    return errno_EINVAL;
+  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
+  if (UNLIKELY(!ptr))
+    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
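// ---------------------------------------------------------------------------
// Aside (illustration, not part of the patch): the hunks above are the core
// of the change. Every user-facing allocation entry point now reports
// failure the way C and POSIX require: malloc/realloc/valloc/pvalloc/
// memalign return null and set errno, while posix_memalign reports through
// its return value and leaves errno alone. A standalone sketch of that
// contract, with a local SetErrnoOnNull stand-in (not the sanitizer_common
// helper):

#include <cerrno>
#include <cstdlib>

namespace errno_sketch {

// Failure convention for malloc-style functions: null + errno = ENOMEM.
inline void *SetErrnoOnNull(void *ptr) {
  if (!ptr) errno = ENOMEM;
  return ptr;
}

inline void *my_malloc(size_t size) {
  return SetErrnoOnNull(malloc(size));  // stand-in for instance.Allocate()
}

// Failure convention for posix_memalign: the error is the return value and
// errno is not modified, which is why asan_posix_memalign() above returns
// errno_EINVAL / errno_ENOMEM instead of assigning to errno.
inline int my_posix_memalign(void **memptr, size_t alignment, size_t size) {
  if (alignment < sizeof(void *) || (alignment & (alignment - 1)) != 0)
    return EINVAL;           // invalid alignment: report, don't touch errno
  void *p = malloc(size);    // toy: the real code honors 'alignment'
  if (!p) return ENOMEM;
  *memptr = p;
  return 0;
}

inline void Demo() {
  void *p = my_malloc(16);   // on failure: p == nullptr, errno == ENOMEM
  free(p);
  void *q = nullptr;
  int rc = my_posix_memalign(&q, 3, 64);  // 3 is not a power of two
  if (rc == 0) free(q);                   // here rc == EINVAL, q untouched
}

}  // namespace errno_sketch
// ---------------------------------------------------------------------------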
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif

diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index ed12a9ac9015..34ca22b8616e 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -1,783 +1,787 @@
//===-- asan_interceptors.cc ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Intercept various libc functions.
//===----------------------------------------------------------------------===//

#include "asan_interceptors.h"
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_libc.h"

#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_posix.h"
#endif

#if defined(__i386) && SANITIZER_LINUX
#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
#elif defined(__mips__) && SANITIZER_LINUX
#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2"
#endif

namespace __asan {

// Return true if we can quickly decide that the region is unpoisoned.
// We assume that a redzone is at least 16 bytes.
static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
  if (size == 0) return true;
  if (size <= 32)
    return !AddressIsPoisoned(beg) &&
           !AddressIsPoisoned(beg + size - 1) &&
           !AddressIsPoisoned(beg + size / 2);
  if (size <= 64)
    return !AddressIsPoisoned(beg) &&
           !AddressIsPoisoned(beg + size / 4) &&
           !AddressIsPoisoned(beg + size - 1) &&
           !AddressIsPoisoned(beg + 3 * size / 4) &&
           !AddressIsPoisoned(beg + size / 2);
  return false;
}

struct AsanInterceptorContext {
  const char *interceptor_name;
};

// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macros instead of functions so
// that no extra frames are created, and stack traces contain
// relevant information only.
// We check all shadow bytes.
#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do {            \
    uptr __offset = (uptr)(offset);                                     \
    uptr __size = (uptr)(size);                                         \
    uptr __bad = 0;                                                     \
    if (__offset > __offset + __size) {                                 \
      GET_STACK_TRACE_FATAL_HERE;                                       \
      ReportStringFunctionSizeOverflow(__offset, __size, &stack);       \
    }                                                                   \
    if (!QuickCheckForUnpoisonedRegion(__offset, __size) &&             \
        (__bad = __asan_region_is_poisoned(__offset, __size))) {        \
      AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx;     \
      bool suppressed = false;                                          \
      if (_ctx) {                                                       \
        suppressed = IsInterceptorSuppressed(_ctx->interceptor_name);   \
        if (!suppressed && HaveStackTraceBasedSuppressions()) {         \
          GET_STACK_TRACE_FATAL_HERE;                                   \
          suppressed = IsStackTraceSuppressed(&stack);                  \
        }                                                               \
      }                                                                 \
      if (!suppressed) {                                                \
        GET_CURRENT_PC_BP_SP;                                           \
        ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
      }                                                                 \
    }                                                                   \
  } while (0)

// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size)                           \
  do {                                                                  \
    if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
    if (asan_init_is_running) {                                         \
      return REAL(memcpy)(to, from, size);                              \
    }                                                                   \
    ENSURE_ASAN_INITED();                                               \
    if (flags()->replace_intrin) {                                      \
      if (to != from) {                                                 \
        CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);           \
      }                                                                 \
      ASAN_READ_RANGE(ctx, from, size);                                 \
      ASAN_WRITE_RANGE(ctx, to, size);                                  \
    }                                                                   \
    return REAL(memcpy)(to, from, size);                                \
  } while (0)

// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size)                           \
  do {                                                                  \
    if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
    if (asan_init_is_running) {                                         \
      return REAL(memset)(block, c, size);                              \
    }                                                                   \
    ENSURE_ASAN_INITED();                                               \
    if (flags()->replace_intrin) {                                      \
      ASAN_WRITE_RANGE(ctx, block, size);                               \
    }                                                                   \
    return REAL(memset)(block, c, size);                                \
  } while (0)

#define ASAN_MEMMOVE_IMPL(ctx, to, from, size)                           \
  do {                                                                   \
    if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
    ENSURE_ASAN_INITED();                                                \
    if (flags()->replace_intrin) {                                       \
      ASAN_READ_RANGE(ctx, from, size);                                  \
      ASAN_WRITE_RANGE(ctx, to, size);                                   \
    }                                                                    \
    return internal_memmove(to, from, size);                             \
  } while (0)

#define ASAN_READ_RANGE(ctx, offset, size) \
  ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
  ACCESS_MEMORY_RANGE(ctx, offset, size, true)

#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n)             \
  ASAN_READ_RANGE((ctx), (s),                               \
    common_flags()->strict_string_checks ? (len) + 1 : (n))

#define ASAN_READ_STRING(ctx, s, n) \
  ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))

// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report an error in this case.
// A macro is used to avoid creating new frames.
static inline bool RangesOverlap(const char *offset1, uptr length1,
                                 const char *offset2, uptr length2) {
  return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
}
#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2)   \
  do {                                                                     \
    const char *offset1 = (const char*)_offset1;                           \
    const char *offset2 = (const char*)_offset2;                           \
    if (RangesOverlap(offset1, length1, offset2, length2)) {               \
      GET_STACK_TRACE_FATAL_HERE;                                          \
      ReportStringFunctionMemoryRangesOverlap(name, offset1, length1,      \
                                              offset2, length2, &stack);   \
    }                                                                      \
  } while (0)

static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if SANITIZER_INTERCEPT_STRNLEN
  if (REAL(strnlen)) {
    return REAL(strnlen)(s, maxlen);
  }
#endif
  return internal_strnlen(s, maxlen);
}

void SetThreadName(const char *name) {
  AsanThread *t = GetCurrentThread();
  if (t)
    asanThreadRegistry().SetThreadName(t->tid(), name);
}

int OnExit() {
+  if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
+      __lsan::HasReportedLeaks()) {
+    return common_flags()->exitcode;
+  }
  // FIXME: ask frontend whether we need to return failure.
  return 0;
}

}  // namespace __asan

// ---------------------- Wrappers ---------------- {{{1
using namespace __asan;  // NOLINT

DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)

#define ASAN_INTERCEPTOR_ENTER(ctx, func)                                      \
  AsanInterceptorContext _ctx = {#func};                                       \
  ctx = (void *)&_ctx;                                                         \
  (void) ctx;                                                                  \

#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
  ASAN_INTERCEPT_FUNC_VER(name, ver)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
  ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
  ASAN_READ_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...)                               \
  ASAN_INTERCEPTOR_ENTER(ctx, func);                                           \
  do {                                                                         \
    if (asan_init_is_running)                                                  \
      return REAL(func)(__VA_ARGS__);                                          \
    if (SANITIZER_MAC && UNLIKELY(!asan_inited))                               \
      return REAL(func)(__VA_ARGS__);                                          \
    ENSURE_ASAN_INITED();                                                      \
  } while (false)
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
  do {                                            \
  } while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
  do {                                         \
  } while (false)
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
  do {                                         \
  } while (false)
#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
  do {                                                      \
  } while (false)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
// Should be asanThreadRegistry().SetThreadNameByUserId(thread, name)
// But asan does not remember UserId's for threads (pthread_t);
// and remembers all ever existed threads, so the linear search by UserId
// can be slow.
#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
  do {                                                         \
  } while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
// Strict init-order checking is dlopen-hostile:
// https://github.com/google/sanitizers/issues/178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
  do {                                               \
    if (flags()->strict_init_order)                  \
      StopInitOrderChecking();                       \
    CheckNoDeepBind(filename, flag);                 \
  } while (false)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
  if (AsanThread *t = GetCurrentThread()) {          \
    *begin = t->tls_begin();                         \
    *end = t->tls_end();                             \
  } else {                                           \
    *begin = *end = 0;                               \
  }

#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
  do {                                                       \
    ASAN_INTERCEPTOR_ENTER(ctx, memmove);                    \
    ASAN_MEMMOVE_IMPL(ctx, to, from, size);                  \
  } while (false)

#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
  do {                                                      \
    ASAN_INTERCEPTOR_ENTER(ctx, memcpy);                    \
    ASAN_MEMCPY_IMPL(ctx, to, from, size);                  \
  } while (false)

#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
  do {                                                      \
    ASAN_INTERCEPTOR_ENTER(ctx, memset);                    \
    ASAN_MEMSET_IMPL(ctx, block, c, size);                  \
  } while (false)

#include "sanitizer_common/sanitizer_common_interceptors.inc"

// Syscall interceptors don't have contexts, we don't support suppressions
// for them.
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
  do {                                       \
    (void)(p);                               \
    (void)(s);                               \
  } while (false)
#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
  do {                                        \
    (void)(p);                                \
    (void)(s);                                \
  } while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc"

struct ThreadStartParam {
  atomic_uintptr_t t;
  atomic_uintptr_t is_registered;
};

#if ASAN_INTERCEPT_PTHREAD_CREATE
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
  ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
  AsanThread *t = nullptr;
  while ((t = reinterpret_cast<AsanThread *>(
              atomic_load(&param->t, memory_order_acquire))) == nullptr)
    internal_sched_yield();
  SetCurrentThread(t);
  return t->ThreadStart(GetTid(), &param->is_registered);
}

INTERCEPTOR(int, pthread_create, void *thread, void *attr,
            void *(*start_routine)(void*), void *arg) {
  EnsureMainThreadIDIsCorrect();
  // Strict init-order checking is thread-hostile.
  if (flags()->strict_init_order)
    StopInitOrderChecking();
  GET_STACK_TRACE_THREAD;
  int detached = 0;
  if (attr)
    REAL(pthread_attr_getdetachstate)(attr, &detached);
  ThreadStartParam param;
  atomic_store(&param.t, 0, memory_order_relaxed);
  atomic_store(&param.is_registered, 0, memory_order_relaxed);
  int result;
  {
    // Ignore all allocations made by pthread_create: thread stack/TLS may be
    // stored by pthread for future reuse even after thread destruction, and
    // the linked list it's stored in doesn't even hold valid pointers to the
    // objects, the latter are calculated by obscure pointer arithmetic.
#if CAN_SANITIZE_LEAKS
    __lsan::ScopedInterceptorDisabler disabler;
#endif
    result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
  }
  if (result == 0) {
    u32 current_tid = GetCurrentTidOrInvalid();
    AsanThread *t =
        AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
    atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
    // Wait until the AsanThread object is initialized and the ThreadRegistry
    // entry is in "started" state. One reason for this is that after this
    // interceptor exits, the child thread's stack may be the only thing
    // holding the |arg| pointer. This may cause LSan to report a leak if leak
    // checking happens at a point when the interceptor has already exited,
    // but the stack range for the child thread is not yet known.
    while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
      internal_sched_yield();
  }
  return result;
}

INTERCEPTOR(int, pthread_join, void *t, void **arg) {
  return real_pthread_join(t, arg);
}

DEFINE_REAL_PTHREAD_FUNCTIONS
#endif  // ASAN_INTERCEPT_PTHREAD_CREATE

#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION

#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
  if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
    return REAL(bsd_signal)(signum, handler);
  return 0;
}
#endif

INTERCEPTOR(void*, signal, int signum, void *handler) {
  if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
    return REAL(signal)(signum, handler);
  return nullptr;
}

INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
            struct sigaction *oldact) {
  if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
    return REAL(sigaction)(signum, act, oldact);
  return 0;
}

namespace __sanitizer {
int real_sigaction(int signum, const void *act, void *oldact) {
  return REAL(sigaction)(signum, (const struct sigaction *)act,
                         (struct sigaction *)oldact);
}
}  // namespace __sanitizer

#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems.
DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
            struct sigaction *oldact)
#endif  // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION

#if ASAN_INTERCEPT_SWAPCONTEXT
static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
  // Align to page size.
  uptr PageSize = GetPageSizeCached();
  uptr bottom = stack & ~(PageSize - 1);
  ssize += stack - bottom;
  ssize = RoundUpTo(ssize, PageSize);
  static const uptr kMaxSaneContextStackSize = 1 << 22;  // 4 Mb
  if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) {
    PoisonShadow(bottom, ssize, 0);
  }
}

INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
            struct ucontext_t *ucp) {
  static bool reported_warning = false;
  if (!reported_warning) {
    Report("WARNING: ASan doesn't fully support makecontext/swapcontext "
           "functions and may produce false positives in some cases!\n");
    reported_warning = true;
  }
  // Clear shadow memory for new context (it may share stack
  // with current context).
  uptr stack, ssize;
  ReadContextStack(ucp, &stack, &ssize);
  ClearShadowMemoryForContextStack(stack, ssize);
  int res = REAL(swapcontext)(oucp, ucp);
  // swapcontext technically does not return, but program may swap context to
  // "oucp" later, that would look as if swapcontext() returned 0.
  // We need to clear shadow for ucp once again, as it may be in arbitrary
  // state.
ClearShadowMemoryForContextStack(stack, ssize); return res; } #endif // ASAN_INTERCEPT_SWAPCONTEXT INTERCEPTOR(void, longjmp, void *env, int val) { __asan_handle_no_return(); REAL(longjmp)(env, val); } #if ASAN_INTERCEPT__LONGJMP INTERCEPTOR(void, _longjmp, void *env, int val) { __asan_handle_no_return(); REAL(_longjmp)(env, val); } #endif #if ASAN_INTERCEPT___LONGJMP_CHK INTERCEPTOR(void, __longjmp_chk, void *env, int val) { __asan_handle_no_return(); REAL(__longjmp_chk)(env, val); } #endif #if ASAN_INTERCEPT_SIGLONGJMP INTERCEPTOR(void, siglongjmp, void *env, int val) { __asan_handle_no_return(); REAL(siglongjmp)(env, val); } #endif #if ASAN_INTERCEPT___CXA_THROW INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) { CHECK(REAL(__cxa_throw)); __asan_handle_no_return(); REAL(__cxa_throw)(a, b, c); } #endif void *__asan_memcpy(void *to, const void *from, uptr size) { ASAN_MEMCPY_IMPL(nullptr, to, from, size); } void *__asan_memset(void *block, int c, uptr size) { ASAN_MEMSET_IMPL(nullptr, block, c, size); } void *__asan_memmove(void *to, const void *from, uptr size) { ASAN_MEMMOVE_IMPL(nullptr, to, from, size); } #if ASAN_INTERCEPT_INDEX # if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX INTERCEPTOR(char*, index, const char *string, int c) ALIAS(WRAPPER_NAME(strchr)); # else # if SANITIZER_MAC DECLARE_REAL(char*, index, const char *string, int c) OVERRIDE_FUNCTION(index, strchr); # else DEFINE_REAL(char*, index, const char *string, int c) # endif # endif #endif // ASAN_INTERCEPT_INDEX // For both strcat() and strncat() we need to check the validity of |to| // argument irrespective of the |from| length. INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_length = REAL(strlen)(from); ASAN_READ_RANGE(ctx, from, from_length + 1); uptr to_length = REAL(strlen)(to); ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); // If the copying actually happens, the |from| string should not overlap // with the resulting string starting at |to|, which has a length of // to_length + from_length + 1. if (from_length > 0) { CHECK_RANGES_OVERLAP("strcat", to, from_length + to_length + 1, from, from_length + 1); } } return REAL(strcat)(to, from); // NOLINT } INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strncat); ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_length = MaybeRealStrnlen(from, size); uptr copy_length = Min(size, from_length + 1); ASAN_READ_RANGE(ctx, from, copy_length); uptr to_length = REAL(strlen)(to); ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); if (from_length > 0) { CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1, from, copy_length); } } return REAL(strncat)(to, from, size); } INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT #endif // strcpy is called from malloc_default_purgeable_zone() // in __asan::ReplaceSystemAlloc() on Mac. 
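// A sketch of the assumed bootstrap order behind the guard below:
//
//   __asan_init()
//     -> __asan::ReplaceSystemAlloc()
//       -> malloc_default_purgeable_zone()   // inside libmalloc
//         -> strcpy()                        // arrives here mid-init
//
// At that point the shadow is not fully set up, so the interceptor must
// forward straight to the real libc routine without any checking.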
if (asan_init_is_running) { return REAL(strcpy)(to, from); // NOLINT } ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_size = REAL(strlen)(from) + 1; CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size); ASAN_READ_RANGE(ctx, from, from_size); ASAN_WRITE_RANGE(ctx, to, from_size); } return REAL(strcpy)(to, from); // NOLINT } INTERCEPTOR(char*, strdup, const char *s) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strdup); if (UNLIKELY(!asan_inited)) return internal_strdup(s); ENSURE_ASAN_INITED(); uptr length = REAL(strlen)(s); if (flags()->replace_str) { ASAN_READ_RANGE(ctx, s, length + 1); } GET_STACK_TRACE_MALLOC; void *new_mem = asan_malloc(length + 1, &stack); REAL(memcpy)(new_mem, s, length + 1); return reinterpret_cast<char*>(new_mem); } #if ASAN_INTERCEPT___STRDUP INTERCEPTOR(char*, __strdup, const char *s) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strdup); if (UNLIKELY(!asan_inited)) return internal_strdup(s); ENSURE_ASAN_INITED(); uptr length = REAL(strlen)(s); if (flags()->replace_str) { ASAN_READ_RANGE(ctx, s, length + 1); } GET_STACK_TRACE_MALLOC; void *new_mem = asan_malloc(length + 1, &stack); REAL(memcpy)(new_mem, s, length + 1); return reinterpret_cast<char*>(new_mem); } #endif // ASAN_INTERCEPT___STRDUP INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strncpy); ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1); CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size); ASAN_READ_RANGE(ctx, from, from_size); ASAN_WRITE_RANGE(ctx, to, size); } return REAL(strncpy)(to, from, size); } INTERCEPTOR(long, strtol, const char *nptr, // NOLINT char **endptr, int base) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strtol); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(strtol)(nptr, endptr, base); } char *real_endptr; long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return result; } INTERCEPTOR(int, atoi, const char *nptr) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, atoi); #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr); #endif ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(atoi)(nptr); } char *real_endptr; // "man atoi" tells that behavior of atoi(nptr) is the same as // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the // parsed integer can't be stored in *long* type (even if it's // different from int). So, we just imitate this behavior.
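// A worked example of that imitation (standard C semantics):
//
//   char *end;
//   long v = strtol("123junk", &end, 10); // v == 123, end points at "junk"
//   // atoi("123junk") likewise yields 123 and ignores the trailing bytes
//
// real_endptr below serves the same role: the string is validated only up
// to the last byte strtol actually consumed, plus the byte after it.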
int result = REAL(strtol)(nptr, &real_endptr, 10); FixRealStrtolEndptr(nptr, &real_endptr); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } INTERCEPTOR(long, atol, const char *nptr) { // NOLINT void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, atol); #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr); #endif ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(atol)(nptr); } char *real_endptr; long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT FixRealStrtolEndptr(nptr, &real_endptr); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT char **endptr, int base) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strtoll); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(strtoll)(nptr, endptr, base); } char *real_endptr; long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return result; } INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, atoll); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(atoll)(nptr); } char *real_endptr; long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT FixRealStrtolEndptr(nptr, &real_endptr); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } #endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL #if ASAN_INTERCEPT___CXA_ATEXIT static void AtCxaAtexit(void *unused) { (void)unused; StopInitOrderChecking(); } INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, void *dso_handle) { #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle); #endif ENSURE_ASAN_INITED(); int res = REAL(__cxa_atexit)(func, arg, dso_handle); REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr); return res; } #endif // ASAN_INTERCEPT___CXA_ATEXIT #if ASAN_INTERCEPT_FORK INTERCEPTOR(int, fork, void) { ENSURE_ASAN_INITED(); int pid = REAL(fork)(); return pid; } #endif // ASAN_INTERCEPT_FORK // ---------------------- InitializeAsanInterceptors ---------------- {{{1 namespace __asan { void InitializeAsanInterceptors() { static bool was_called_once; CHECK(!was_called_once); was_called_once = true; InitializeCommonInterceptors(); // Intercept str* functions. ASAN_INTERCEPT_FUNC(strcat); // NOLINT ASAN_INTERCEPT_FUNC(strcpy); // NOLINT ASAN_INTERCEPT_FUNC(strncat); ASAN_INTERCEPT_FUNC(strncpy); ASAN_INTERCEPT_FUNC(strdup); #if ASAN_INTERCEPT___STRDUP ASAN_INTERCEPT_FUNC(__strdup); #endif #if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX ASAN_INTERCEPT_FUNC(index); #endif ASAN_INTERCEPT_FUNC(atoi); ASAN_INTERCEPT_FUNC(atol); ASAN_INTERCEPT_FUNC(strtol); #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL ASAN_INTERCEPT_FUNC(atoll); ASAN_INTERCEPT_FUNC(strtoll); #endif // Intercept signal- and jump-related functions. ASAN_INTERCEPT_FUNC(longjmp); #if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION ASAN_INTERCEPT_FUNC(sigaction); #if SANITIZER_ANDROID ASAN_INTERCEPT_FUNC(bsd_signal); #endif ASAN_INTERCEPT_FUNC(signal); #endif #if ASAN_INTERCEPT_SWAPCONTEXT ASAN_INTERCEPT_FUNC(swapcontext); #endif #if ASAN_INTERCEPT__LONGJMP ASAN_INTERCEPT_FUNC(_longjmp); #endif #if ASAN_INTERCEPT___LONGJMP_CHK ASAN_INTERCEPT_FUNC(__longjmp_chk); #endif #if ASAN_INTERCEPT_SIGLONGJMP ASAN_INTERCEPT_FUNC(siglongjmp); #endif // Intercept exception handling functions.
#if ASAN_INTERCEPT___CXA_THROW ASAN_INTERCEPT_FUNC(__cxa_throw); #endif // Intercept threading-related functions #if ASAN_INTERCEPT_PTHREAD_CREATE #if defined(ASAN_PTHREAD_CREATE_VERSION) ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION); #else ASAN_INTERCEPT_FUNC(pthread_create); #endif ASAN_INTERCEPT_FUNC(pthread_join); #endif // Intercept atexit function. #if ASAN_INTERCEPT___CXA_ATEXIT ASAN_INTERCEPT_FUNC(__cxa_atexit); #endif #if ASAN_INTERCEPT_FORK ASAN_INTERCEPT_FUNC(fork); #endif InitializePlatformInterceptors(); VReport(1, "AddressSanitizer: libc interceptors initialized\n"); } } // namespace __asan diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc index d0128e34de8d..7e9cf3babc67 100644 --- a/lib/asan/tests/asan_test.cc +++ b/lib/asan/tests/asan_test.cc @@ -1,1342 +1,1346 @@ //===-- asan_test.cc ------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // //===----------------------------------------------------------------------===// #include "asan_test_utils.h" +#include + NOINLINE void *malloc_fff(size_t size) { void *res = malloc/**/(size); break_optimization(0); return res;} NOINLINE void *malloc_eee(size_t size) { void *res = malloc_fff(size); break_optimization(0); return res;} NOINLINE void *malloc_ddd(size_t size) { void *res = malloc_eee(size); break_optimization(0); return res;} NOINLINE void *malloc_ccc(size_t size) { void *res = malloc_ddd(size); break_optimization(0); return res;} NOINLINE void *malloc_bbb(size_t size) { void *res = malloc_ccc(size); break_optimization(0); return res;} NOINLINE void *malloc_aaa(size_t size) { void *res = malloc_bbb(size); break_optimization(0); return res;} NOINLINE void free_ccc(void *p) { free(p); break_optimization(0);} NOINLINE void free_bbb(void *p) { free_ccc(p); break_optimization(0);} NOINLINE void free_aaa(void *p) { free_bbb(p); break_optimization(0);} template<typename T> NOINLINE void uaf_test(int size, int off) { void *p = malloc_aaa(size); free_aaa(p); for (int i = 1; i < 100; i++) free_aaa(malloc_aaa(i)); fprintf(stderr, "writing %ld byte(s) at %p with offset %d\n", (long)sizeof(T), p, off); asan_write((T *)((char *)p + off)); } TEST(AddressSanitizer, HasFeatureAddressSanitizerTest) { #if defined(__has_feature) && __has_feature(address_sanitizer) bool asan = 1; #elif defined(__SANITIZE_ADDRESS__) bool asan = 1; #else bool asan = 0; #endif EXPECT_EQ(true, asan); } TEST(AddressSanitizer, SimpleDeathTest) { EXPECT_DEATH(exit(1), ""); } TEST(AddressSanitizer, VariousMallocsTest) { int *a = (int*)malloc(100 * sizeof(int)); a[50] = 0; free(a); int *r = (int*)malloc(10); r = (int*)realloc(r, 2000 * sizeof(int)); r[1000] = 0; free(r); int *b = new int[100]; b[50] = 0; delete [] b; int *c = new int; *c = 0; delete c; #if SANITIZER_TEST_HAS_POSIX_MEMALIGN - int *pm; - int pm_res = posix_memalign((void**)&pm, kPageSize, kPageSize); + void *pm = 0; + // Valid allocation.
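// (For reference, the contract being exercised here:
//    int posix_memalign(void **memptr, size_t alignment, size_t size);
//  it returns 0 on success and requires alignment to be a power of two
//  that is a multiple of sizeof(void *). Declaring pm as void * above
//  avoids the old (void**)&pm cast through an unrelated pointer type.)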
+ int pm_res = posix_memalign(&pm, kPageSize, kPageSize); EXPECT_EQ(0, pm_res); + EXPECT_NE(nullptr, pm); free(pm); #endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN #if SANITIZER_TEST_HAS_MEMALIGN int *ma = (int*)memalign(kPageSize, kPageSize); EXPECT_EQ(0U, (uintptr_t)ma % kPageSize); ma[123] = 0; free(ma); #endif // SANITIZER_TEST_HAS_MEMALIGN } TEST(AddressSanitizer, CallocTest) { int *a = (int*)calloc(100, sizeof(int)); EXPECT_EQ(0, a[10]); free(a); } TEST(AddressSanitizer, CallocReturnsZeroMem) { size_t sizes[] = {16, 1000, 10000, 100000, 2100000}; for (size_t s = 0; s < sizeof(sizes)/sizeof(sizes[0]); s++) { size_t size = sizes[s]; for (size_t iter = 0; iter < 5; iter++) { char *x = Ident((char*)calloc(1, size)); EXPECT_EQ(x[0], 0); EXPECT_EQ(x[size - 1], 0); EXPECT_EQ(x[size / 2], 0); EXPECT_EQ(x[size / 3], 0); EXPECT_EQ(x[size / 4], 0); memset(x, 0x42, size); free(Ident(x)); #if !defined(_WIN32) // FIXME: OOM on Windows. We should just make this a lit test // with quarantine size set to 1. free(Ident(malloc(Ident(1 << 27)))); // Try to drain the quarantine. #endif } } } // No valloc on Windows or Android. #if !defined(_WIN32) && !defined(__ANDROID__) TEST(AddressSanitizer, VallocTest) { void *a = valloc(100); EXPECT_EQ(0U, (uintptr_t)a % kPageSize); free(a); } #endif #if SANITIZER_TEST_HAS_PVALLOC TEST(AddressSanitizer, PvallocTest) { char *a = (char*)pvalloc(kPageSize + 100); EXPECT_EQ(0U, (uintptr_t)a % kPageSize); a[kPageSize + 101] = 1; // we should not report an error here. free(a); a = (char*)pvalloc(0); // pvalloc(0) should allocate at least one page. EXPECT_EQ(0U, (uintptr_t)a % kPageSize); a[101] = 1; // we should not report an error here. free(a); } #endif // SANITIZER_TEST_HAS_PVALLOC #if !defined(_WIN32) // FIXME: Use an equivalent of pthread_setspecific on Windows. void *TSDWorker(void *test_key) { if (test_key) { pthread_setspecific(*(pthread_key_t*)test_key, (void*)0xfeedface); } return NULL; } void TSDDestructor(void *tsd) { // Spawning a thread will check that the current thread id is not -1. pthread_t th; PTHREAD_CREATE(&th, NULL, TSDWorker, NULL); PTHREAD_JOIN(th, NULL); } // This test triggers the thread-specific data destruction fiasco which occurs // if we don't manage the TSD destructors ourselves. We create a new pthread // key with a non-NULL destructor which is likely to be put after the destructor // of AsanThread in the list of destructors. // In this case the TSD for AsanThread will be destroyed before TSDDestructor // is called for the child thread, and a CHECK will fail when we call // pthread_create() to spawn the grandchild.
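// The hazard in miniature (hypothetical sketch; AsanTSDDtor stands in for
// the runtime's real destructor):
//
//   pthread_key_t asan_key, user_key;
//   pthread_key_create(&asan_key, AsanTSDDtor);
//   pthread_key_create(&user_key, TSDDestructor);
//
// POSIX runs key destructors at thread exit in unspecified order (possibly
// over several iterations), so TSDDestructor may well run after AsanTSDDtor
// has already torn down the per-thread state it depends on.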
TEST(AddressSanitizer, DISABLED_TSDTest) { pthread_t th; pthread_key_t test_key; pthread_key_create(&test_key, TSDDestructor); PTHREAD_CREATE(&th, NULL, TSDWorker, &test_key); PTHREAD_JOIN(th, NULL); pthread_key_delete(test_key); } #endif TEST(AddressSanitizer, UAF_char) { const char *uaf_string = "AddressSanitizer:.*heap-use-after-free"; EXPECT_DEATH(uaf_test<U1>(1, 0), uaf_string); EXPECT_DEATH(uaf_test<U1>(10, 0), uaf_string); EXPECT_DEATH(uaf_test<U1>(10, 10), uaf_string); EXPECT_DEATH(uaf_test<U1>(kLargeMalloc, 0), uaf_string); EXPECT_DEATH(uaf_test<U1>(kLargeMalloc, kLargeMalloc / 2), uaf_string); } TEST(AddressSanitizer, UAF_long_double) { if (sizeof(long double) == sizeof(double)) return; long double *p = Ident(new long double[10]); EXPECT_DEATH(Ident(p)[12] = 0, "WRITE of size 1[026]"); EXPECT_DEATH(Ident(p)[0] = Ident(p)[12], "READ of size 1[026]"); delete [] Ident(p); } #if !defined(_WIN32) struct Packed5 { int x; char c; } __attribute__((packed)); #else # pragma pack(push, 1) struct Packed5 { int x; char c; }; # pragma pack(pop) #endif TEST(AddressSanitizer, UAF_Packed5) { static_assert(sizeof(Packed5) == 5, "Please check the keywords used"); Packed5 *p = Ident(new Packed5[2]); EXPECT_DEATH(p[0] = p[3], "READ of size 5"); EXPECT_DEATH(p[3] = p[0], "WRITE of size 5"); delete [] Ident(p); } #if ASAN_HAS_BLACKLIST TEST(AddressSanitizer, IgnoreTest) { int *x = Ident(new int); delete Ident(x); *x = 0; } #endif // ASAN_HAS_BLACKLIST struct StructWithBitField { int bf1:1; int bf2:1; int bf3:1; int bf4:29; }; TEST(AddressSanitizer, BitFieldPositiveTest) { StructWithBitField *x = new StructWithBitField; delete Ident(x); EXPECT_DEATH(x->bf1 = 0, "use-after-free"); EXPECT_DEATH(x->bf2 = 0, "use-after-free"); EXPECT_DEATH(x->bf3 = 0, "use-after-free"); EXPECT_DEATH(x->bf4 = 0, "use-after-free"); } struct StructWithBitFields_8_24 { int a:8; int b:24; }; TEST(AddressSanitizer, BitFieldNegativeTest) { StructWithBitFields_8_24 *x = Ident(new StructWithBitFields_8_24); x->a = 0; x->b = 0; delete Ident(x); } #if ASAN_NEEDS_SEGV namespace { const char kSEGVCrash[] = "AddressSanitizer: SEGV on unknown address"; const char kOverriddenSigactionHandler[] = "Test sigaction handler\n"; const char kOverriddenSignalHandler[] = "Test signal handler\n"; TEST(AddressSanitizer, WildAddressTest) { char *c = (char*)0x123; EXPECT_DEATH(*c = 0, kSEGVCrash); } void my_sigaction_sighandler(int, siginfo_t*, void*) { fprintf(stderr, kOverriddenSigactionHandler); exit(1); } void my_signal_sighandler(int signum) { fprintf(stderr, kOverriddenSignalHandler); exit(1); } TEST(AddressSanitizer, SignalTest) { struct sigaction sigact; memset(&sigact, 0, sizeof(sigact)); sigact.sa_sigaction = my_sigaction_sighandler; sigact.sa_flags = SA_SIGINFO; char *c = (char *)0x123; EXPECT_DEATH(*c = 0, kSEGVCrash); // ASan should allow to set sigaction()... EXPECT_EQ(0, sigaction(SIGSEGV, &sigact, 0)); #ifdef __APPLE__ EXPECT_EQ(0, sigaction(SIGBUS, &sigact, 0)); #endif EXPECT_DEATH(*c = 0, kOverriddenSigactionHandler); // ... and signal(). EXPECT_NE(SIG_ERR, signal(SIGSEGV, my_signal_sighandler)); EXPECT_DEATH(*c = 0, kOverriddenSignalHandler); } } // namespace #endif static void TestLargeMalloc(size_t size) { char buff[1024]; sprintf(buff, "is located 1 bytes to the left of %lu-byte", (long)size); EXPECT_DEATH(Ident((char*)malloc(size))[-1] = 0, buff); } TEST(AddressSanitizer, LargeMallocTest) { const int max_size = (SANITIZER_WORDSIZE == 32) ?
1 << 26 : 1 << 28; for (int i = 113; i < max_size; i = i * 2 + 13) { TestLargeMalloc(i); } } #if !GTEST_USES_SIMPLE_RE TEST(AddressSanitizer, HugeMallocTest) { if (SANITIZER_WORDSIZE != 64 || ASAN_AVOID_EXPENSIVE_TESTS) return; size_t n_megs = 4100; EXPECT_DEATH(Ident((char*)malloc(n_megs << 20))[-1] = 0, "is located 1 bytes to the left|" "AddressSanitizer failed to allocate"); } #endif #if SANITIZER_TEST_HAS_MEMALIGN void MemalignRun(size_t align, size_t size, int idx) { char *p = (char *)memalign(align, size); Ident(p)[idx] = 0; free(p); } TEST(AddressSanitizer, memalign) { for (int align = 16; align <= (1 << 23); align *= 2) { size_t size = align * 5; EXPECT_DEATH(MemalignRun(align, size, -1), "is located 1 bytes to the left"); EXPECT_DEATH(MemalignRun(align, size, size + 1), "is located 1 bytes to the right"); } } #endif // SANITIZER_TEST_HAS_MEMALIGN void *ManyThreadsWorker(void *a) { for (int iter = 0; iter < 100; iter++) { for (size_t size = 100; size < 2000; size *= 2) { free(Ident(malloc(size))); } } return 0; } #if !defined(__aarch64__) && !defined(__powerpc64__) // FIXME: Infinite loop in AArch64 (PR24389). // FIXME: Also occasional hang on powerpc. Maybe same problem as on AArch64? TEST(AddressSanitizer, ManyThreadsTest) { const size_t kNumThreads = (SANITIZER_WORDSIZE == 32 || ASAN_AVOID_EXPENSIVE_TESTS) ? 30 : 1000; pthread_t t[kNumThreads]; for (size_t i = 0; i < kNumThreads; i++) { PTHREAD_CREATE(&t[i], 0, ManyThreadsWorker, (void*)i); } for (size_t i = 0; i < kNumThreads; i++) { PTHREAD_JOIN(t[i], 0); } } #endif TEST(AddressSanitizer, ReallocTest) { const int kMinElem = 5; int *ptr = (int*)malloc(sizeof(int) * kMinElem); ptr[3] = 3; for (int i = 0; i < 10000; i++) { ptr = (int*)realloc(ptr, (my_rand() % 1000 + kMinElem) * sizeof(int)); EXPECT_EQ(3, ptr[3]); } free(ptr); // Realloc pointer returned by malloc(0). int *ptr2 = Ident((int*)malloc(0)); ptr2 = Ident((int*)realloc(ptr2, sizeof(*ptr2))); *ptr2 = 42; EXPECT_EQ(42, *ptr2); free(ptr2); } TEST(AddressSanitizer, ReallocFreedPointerTest) { void *ptr = Ident(malloc(42)); ASSERT_TRUE(NULL != ptr); free(ptr); EXPECT_DEATH(ptr = realloc(ptr, 77), "attempting double-free"); } TEST(AddressSanitizer, ReallocInvalidPointerTest) { void *ptr = Ident(malloc(42)); EXPECT_DEATH(ptr = realloc((int*)ptr + 1, 77), "attempting free.*not malloc"); free(ptr); } TEST(AddressSanitizer, ZeroSizeMallocTest) { // Test that malloc(0) and similar functions don't return NULL. 
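// (The C standard allows malloc(0) to return either NULL or a unique
// pointer that can be passed to free(); the sanitizer allocator is
// expected to take the second option, e.g.:
//   void *a = malloc(0), *b = malloc(0); // both non-NULL under ASan
//   free(a); free(b);                    // and both must be freeable
// which is what the checks below assert.)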
void *ptr = Ident(malloc(0)); EXPECT_TRUE(NULL != ptr); free(ptr); #if SANITIZER_TEST_HAS_POSIX_MEMALIGN int pm_res = posix_memalign(&ptr, 1<<20, 0); EXPECT_EQ(0, pm_res); EXPECT_TRUE(NULL != ptr); free(ptr); #endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN int *int_ptr = new int[0]; int *int_ptr2 = new int[0]; EXPECT_TRUE(NULL != int_ptr); EXPECT_TRUE(NULL != int_ptr2); EXPECT_NE(int_ptr, int_ptr2); delete[] int_ptr; delete[] int_ptr2; } #if SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE static const char *kMallocUsableSizeErrorMsg = "AddressSanitizer: attempting to call malloc_usable_size()"; TEST(AddressSanitizer, MallocUsableSizeTest) { const size_t kArraySize = 100; char *array = Ident((char*)malloc(kArraySize)); int *int_ptr = Ident(new int); EXPECT_EQ(0U, malloc_usable_size(NULL)); EXPECT_EQ(kArraySize, malloc_usable_size(array)); EXPECT_EQ(sizeof(int), malloc_usable_size(int_ptr)); EXPECT_DEATH(malloc_usable_size((void*)0x123), kMallocUsableSizeErrorMsg); EXPECT_DEATH(malloc_usable_size(array + kArraySize / 2), kMallocUsableSizeErrorMsg); free(array); EXPECT_DEATH(malloc_usable_size(array), kMallocUsableSizeErrorMsg); delete int_ptr; } #endif // SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE void WrongFree() { int *x = (int*)malloc(100 * sizeof(int)); // Use the allocated memory, otherwise Clang will optimize it out. Ident(x); free(x + 1); } #if !defined(_WIN32) // FIXME: This should be a lit test. TEST(AddressSanitizer, WrongFreeTest) { EXPECT_DEATH(WrongFree(), ASAN_PCRE_DOTALL "ERROR: AddressSanitizer: attempting free.*not malloc" ".*is located 4 bytes inside of 400-byte region" ".*allocated by thread"); } #endif void DoubleFree() { int *x = (int*)malloc(100 * sizeof(int)); fprintf(stderr, "DoubleFree: x=%p\n", (void *)x); free(x); free(x); fprintf(stderr, "should have failed in the second free(%p)\n", (void *)x); abort(); } #if !defined(_WIN32) // FIXME: This should be a lit test. TEST(AddressSanitizer, DoubleFreeTest) { EXPECT_DEATH(DoubleFree(), ASAN_PCRE_DOTALL "ERROR: AddressSanitizer: attempting double-free" ".*is located 0 bytes inside of 400-byte region" ".*freed by thread T0 here" ".*previously allocated by thread T0 here"); } #endif template<size_t kSize> NOINLINE void SizedStackTest() { char a[kSize]; char *A = Ident((char*)&a); const char *expected_death = "AddressSanitizer: stack-buffer-"; for (size_t i = 0; i < kSize; i++) A[i] = i; EXPECT_DEATH(A[-1] = 0, expected_death); EXPECT_DEATH(A[-5] = 0, expected_death); EXPECT_DEATH(A[kSize] = 0, expected_death); EXPECT_DEATH(A[kSize + 1] = 0, expected_death); EXPECT_DEATH(A[kSize + 5] = 0, expected_death); if (kSize > 16) EXPECT_DEATH(A[kSize + 31] = 0, expected_death); } TEST(AddressSanitizer, SimpleStackTest) { SizedStackTest<1>(); SizedStackTest<2>(); SizedStackTest<3>(); SizedStackTest<4>(); SizedStackTest<5>(); SizedStackTest<6>(); SizedStackTest<7>(); SizedStackTest<16>(); SizedStackTest<25>(); SizedStackTest<34>(); SizedStackTest<43>(); SizedStackTest<51>(); SizedStackTest<62>(); SizedStackTest<64>(); SizedStackTest<128>(); } #if !defined(_WIN32) // FIXME: It's a bit hard to write multi-line death test expectations // in a portable way. Anyways, this should just be turned into a lit test. TEST(AddressSanitizer, ManyStackObjectsTest) { char XXX[10]; char YYY[20]; char ZZZ[30]; Ident(XXX); Ident(YYY); EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ"); } #endif #if 0 // This test requires online symbolizer. // Moved to lit_tests/stack-oob-frames.cc. // Reenable here once we have online symbolizer by default.
NOINLINE static void Frame0(int frame, char *a, char *b, char *c) { char d[4] = {0}; char *D = Ident(d); switch (frame) { case 3: a[5]++; break; case 2: b[5]++; break; case 1: c[5]++; break; case 0: D[5]++; break; } } NOINLINE static void Frame1(int frame, char *a, char *b) { char c[4] = {0}; Frame0(frame, a, b, c); break_optimization(0); } NOINLINE static void Frame2(int frame, char *a) { char b[4] = {0}; Frame1(frame, a, b); break_optimization(0); } NOINLINE static void Frame3(int frame) { char a[4] = {0}; Frame2(frame, a); break_optimization(0); } TEST(AddressSanitizer, GuiltyStackFrame0Test) { EXPECT_DEATH(Frame3(0), "located .*in frame <.*Frame0"); } TEST(AddressSanitizer, GuiltyStackFrame1Test) { EXPECT_DEATH(Frame3(1), "located .*in frame <.*Frame1"); } TEST(AddressSanitizer, GuiltyStackFrame2Test) { EXPECT_DEATH(Frame3(2), "located .*in frame <.*Frame2"); } TEST(AddressSanitizer, GuiltyStackFrame3Test) { EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3"); } #endif NOINLINE void LongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; longjmp(buf, 1); } NOINLINE void TouchStackFunc() { int a[100]; // long array will intersect with redzones from LongJmpFunc1. int *A = Ident(a); for (int i = 0; i < 100; i++) A[i] = i*i; } // Test that we handle longjmp and do not report false positives on stack. TEST(AddressSanitizer, LongJmpTest) { static jmp_buf buf; if (!setjmp(buf)) { LongJmpFunc1(buf); } else { TouchStackFunc(); } } #if !defined(_WIN32) // Only basic longjmp is available on Windows. NOINLINE void UnderscopeLongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; _longjmp(buf, 1); } NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; siglongjmp(buf, 1); } #if !defined(__ANDROID__) && !defined(__arm__) && \ !defined(__aarch64__) && !defined(__mips__) && \ !defined(__mips64) && !defined(__s390__) NOINLINE void BuiltinLongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; __builtin_longjmp((void**)buf, 1); } // Does not work on ARM: // https://github.com/google/sanitizers/issues/185 TEST(AddressSanitizer, BuiltinLongJmpTest) { static jmp_buf buf; if (!__builtin_setjmp((void**)buf)) { BuiltinLongJmpFunc1(buf); } else { TouchStackFunc(); } } #endif // !defined(__ANDROID__) && !defined(__arm__) && // !defined(__aarch64__) && !defined(__mips__) // !defined(__mips64) && !defined(__s390__) TEST(AddressSanitizer, UnderscopeLongJmpTest) { static jmp_buf buf; if (!_setjmp(buf)) { UnderscopeLongJmpFunc1(buf); } else { TouchStackFunc(); } } TEST(AddressSanitizer, SigLongJmpTest) { static sigjmp_buf buf; if (!sigsetjmp(buf, 1)) { SigLongJmpFunc1(buf); } else { TouchStackFunc(); } } #endif // FIXME: Why does clang-cl define __EXCEPTIONS? #if defined(__EXCEPTIONS) && !defined(_WIN32) NOINLINE void ThrowFunc() { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; ASAN_THROW(1); } TEST(AddressSanitizer, CxxExceptionTest) { if (ASAN_UAR) return; // TODO(kcc): this test crashes on 32-bit for some reason... if (SANITIZER_WORDSIZE == 32) return; try { ThrowFunc(); } catch(...) 
{} TouchStackFunc(); } #endif void *ThreadStackReuseFunc1(void *unused) { // create three red zones for these two stack objects. int a; int b; int *A = Ident(&a); int *B = Ident(&b); *A = *B; pthread_exit(0); return 0; } void *ThreadStackReuseFunc2(void *unused) { TouchStackFunc(); return 0; } #if !defined(__thumb__) TEST(AddressSanitizer, ThreadStackReuseTest) { pthread_t t; PTHREAD_CREATE(&t, 0, ThreadStackReuseFunc1, 0); PTHREAD_JOIN(t, 0); PTHREAD_CREATE(&t, 0, ThreadStackReuseFunc2, 0); PTHREAD_JOIN(t, 0); } #endif #if defined(__SSE2__) #include <emmintrin.h> TEST(AddressSanitizer, Store128Test) { char *a = Ident((char*)malloc(Ident(12))); char *p = a; if (((uintptr_t)a % 16) != 0) p = a + 8; assert(((uintptr_t)p % 16) == 0); __m128i value_wide = _mm_set1_epi16(0x1234); EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), "AddressSanitizer: heap-buffer-overflow"); EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), "WRITE of size 16"); EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), "located 0 bytes to the right of 12-byte"); free(a); } #endif // FIXME: All tests that use this function should be turned into lit tests. string RightOOBErrorMessage(int oob_distance, bool is_write) { assert(oob_distance >= 0); char expected_str[100]; sprintf(expected_str, ASAN_PCRE_DOTALL #if !GTEST_USES_SIMPLE_RE "buffer-overflow.*%s.*" #endif "located %d bytes to the right", #if !GTEST_USES_SIMPLE_RE is_write ? "WRITE" : "READ", #endif oob_distance); return string(expected_str); } string RightOOBWriteMessage(int oob_distance) { return RightOOBErrorMessage(oob_distance, /*is_write*/true); } string RightOOBReadMessage(int oob_distance) { return RightOOBErrorMessage(oob_distance, /*is_write*/false); } // FIXME: All tests that use this function should be turned into lit tests. string LeftOOBErrorMessage(int oob_distance, bool is_write) { assert(oob_distance > 0); char expected_str[100]; sprintf(expected_str, #if !GTEST_USES_SIMPLE_RE ASAN_PCRE_DOTALL "%s.*" #endif "located %d bytes to the left", #if !GTEST_USES_SIMPLE_RE is_write ?
"WRITE" : "READ", #endif oob_distance); return string(expected_str); } string LeftOOBWriteMessage(int oob_distance) { return LeftOOBErrorMessage(oob_distance, /*is_write*/true); } string LeftOOBReadMessage(int oob_distance) { return LeftOOBErrorMessage(oob_distance, /*is_write*/false); } string LeftOOBAccessMessage(int oob_distance) { assert(oob_distance > 0); char expected_str[100]; sprintf(expected_str, "located %d bytes to the left", oob_distance); return string(expected_str); } char* MallocAndMemsetString(size_t size, char ch) { char *s = Ident((char*)malloc(size)); memset(s, ch, size); return s; } char* MallocAndMemsetString(size_t size) { return MallocAndMemsetString(size, 'z'); } #if defined(__linux__) && !defined(__ANDROID__) #define READ_TEST(READ_N_BYTES) \ char *x = new char[10]; \ int fd = open("/proc/self/stat", O_RDONLY); \ ASSERT_GT(fd, 0); \ EXPECT_DEATH(READ_N_BYTES, \ ASAN_PCRE_DOTALL \ "AddressSanitizer: heap-buffer-overflow" \ ".* is located 0 bytes to the right of 10-byte region"); \ close(fd); \ delete [] x; \ TEST(AddressSanitizer, pread) { READ_TEST(pread(fd, x, 15, 0)); } TEST(AddressSanitizer, pread64) { READ_TEST(pread64(fd, x, 15, 0)); } TEST(AddressSanitizer, read) { READ_TEST(read(fd, x, 15)); } #endif // defined(__linux__) && !defined(__ANDROID__) // This test case fails // Clang optimizes memcpy/memset calls which lead to unaligned access TEST(AddressSanitizer, DISABLED_MemIntrinsicUnalignedAccessTest) { int size = Ident(4096); char *s = Ident((char*)malloc(size)); EXPECT_DEATH(memset(s + size - 1, 0, 2), RightOOBWriteMessage(0)); free(s); } NOINLINE static int LargeFunction(bool do_bad_access) { int *x = new int[100]; x[0]++; x[1]++; x[2]++; x[3]++; x[4]++; x[5]++; x[6]++; x[7]++; x[8]++; x[9]++; x[do_bad_access ? 100 : 0]++; int res = __LINE__; x[10]++; x[11]++; x[12]++; x[13]++; x[14]++; x[15]++; x[16]++; x[17]++; x[18]++; x[19]++; delete[] x; return res; } // Test the we have correct debug info for the failing instruction. // This test requires the in-process symbolizer to be enabled by default. TEST(AddressSanitizer, DISABLED_LargeFunctionSymbolizeTest) { int failing_line = LargeFunction(false); char expected_warning[128]; sprintf(expected_warning, "LargeFunction.*asan_test.*:%d", failing_line); EXPECT_DEATH(LargeFunction(true), expected_warning); } // Check that we unwind and symbolize correctly. TEST(AddressSanitizer, DISABLED_MallocFreeUnwindAndSymbolizeTest) { int *a = (int*)malloc_aaa(sizeof(int)); *a = 1; free_aaa(a); EXPECT_DEATH(*a = 1, "free_ccc.*free_bbb.*free_aaa.*" "malloc_fff.*malloc_eee.*malloc_ddd"); } static bool TryToSetThreadName(const char *name) { #if defined(__linux__) && defined(PR_SET_NAME) return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); #else return false; #endif } void *ThreadedTestAlloc(void *a) { EXPECT_EQ(true, TryToSetThreadName("AllocThr")); int **p = (int**)a; *p = new int; return 0; } void *ThreadedTestFree(void *a) { EXPECT_EQ(true, TryToSetThreadName("FreeThr")); int **p = (int**)a; delete *p; return 0; } void *ThreadedTestUse(void *a) { EXPECT_EQ(true, TryToSetThreadName("UseThr")); int **p = (int**)a; **p = 1; return 0; } void ThreadedTestSpawn() { pthread_t t; int *x; PTHREAD_CREATE(&t, 0, ThreadedTestAlloc, &x); PTHREAD_JOIN(t, 0); PTHREAD_CREATE(&t, 0, ThreadedTestFree, &x); PTHREAD_JOIN(t, 0); PTHREAD_CREATE(&t, 0, ThreadedTestUse, &x); PTHREAD_JOIN(t, 0); } #if !defined(_WIN32) // FIXME: This should be a lit test. 
TEST(AddressSanitizer, ThreadedTest) { EXPECT_DEATH(ThreadedTestSpawn(), ASAN_PCRE_DOTALL "Thread T.*created" ".*Thread T.*created" ".*Thread T.*created"); } #endif void *ThreadedTestFunc(void *unused) { // Check if prctl(PR_SET_NAME) is supported. Return if not. if (!TryToSetThreadName("TestFunc")) return 0; EXPECT_DEATH(ThreadedTestSpawn(), ASAN_PCRE_DOTALL "WRITE .*thread T. .UseThr." ".*freed by thread T. .FreeThr. here:" ".*previously allocated by thread T. .AllocThr. here:" ".*Thread T. .UseThr. created by T.*TestFunc" ".*Thread T. .FreeThr. created by T" ".*Thread T. .AllocThr. created by T" ""); return 0; } TEST(AddressSanitizer, ThreadNamesTest) { // Run ThreadedTestFunc in a separate thread because it tries to set a // thread name and we don't want to change the main thread's name. pthread_t t; PTHREAD_CREATE(&t, 0, ThreadedTestFunc, 0); PTHREAD_JOIN(t, 0); } #if ASAN_NEEDS_SEGV TEST(AddressSanitizer, ShadowGapTest) { #if SANITIZER_WORDSIZE == 32 char *addr = (char*)0x22000000; #else # if defined(__powerpc64__) char *addr = (char*)0x024000800000; # elif defined(__s390x__) char *addr = (char*)0x11000000000000; # else char *addr = (char*)0x0000100000080000; # endif #endif EXPECT_DEATH(*addr = 1, "AddressSanitizer: (SEGV|BUS) on unknown"); } #endif // ASAN_NEEDS_SEGV extern "C" { NOINLINE static void UseThenFreeThenUse() { char *x = Ident((char*)malloc(8)); *x = 1; free_aaa(x); *x = 2; } } TEST(AddressSanitizer, UseThenFreeThenUseTest) { EXPECT_DEATH(UseThenFreeThenUse(), "freed by thread"); } TEST(AddressSanitizer, StrDupTest) { free(strdup(Ident("123"))); } // Currently we create and poison redzone at right of global variables. static char static110[110]; const char ConstGlob[7] = {1, 2, 3, 4, 5, 6, 7}; static const char StaticConstGlob[3] = {9, 8, 7}; TEST(AddressSanitizer, GlobalTest) { static char func_static15[15]; static char fs1[10]; static char fs2[10]; static char fs3[10]; glob5[Ident(0)] = 0; glob5[Ident(1)] = 0; glob5[Ident(2)] = 0; glob5[Ident(3)] = 0; glob5[Ident(4)] = 0; EXPECT_DEATH(glob5[Ident(5)] = 0, "0 bytes to the right of global variable.*glob5.* size 5"); EXPECT_DEATH(glob5[Ident(5+6)] = 0, "6 bytes to the right of global variable.*glob5.* size 5"); Ident(static110); // avoid optimizations static110[Ident(0)] = 0; static110[Ident(109)] = 0; EXPECT_DEATH(static110[Ident(110)] = 0, "0 bytes to the right of global variable"); EXPECT_DEATH(static110[Ident(110+7)] = 0, "7 bytes to the right of global variable"); Ident(func_static15); // avoid optimizations func_static15[Ident(0)] = 0; EXPECT_DEATH(func_static15[Ident(15)] = 0, "0 bytes to the right of global variable"); EXPECT_DEATH(func_static15[Ident(15 + 9)] = 0, "9 bytes to the right of global variable"); Ident(fs1); Ident(fs2); Ident(fs3); // We don't create left redzones, so this is not 100% guaranteed to fail. // But most likely will. EXPECT_DEATH(fs2[Ident(-1)] = 0, "is located.*of global variable"); EXPECT_DEATH(Ident(Ident(ConstGlob)[8]), "is located 1 bytes to the right of .*ConstGlob"); EXPECT_DEATH(Ident(Ident(StaticConstGlob)[5]), "is located 2 bytes to the right of .*StaticConstGlob"); // call stuff from another file. GlobalsTest(0); } TEST(AddressSanitizer, GlobalStringConstTest) { static const char *zoo = "FOOBAR123"; const char *p = Ident(zoo); EXPECT_DEATH(Ident(p[15]), "is ascii string 'FOOBAR123'"); } TEST(AddressSanitizer, FileNameInGlobalReportTest) { static char zoo[10]; const char *p = Ident(zoo); // The file name should be present in the report. 
EXPECT_DEATH(Ident(p[15]), "zoo.*asan_test."); } int *ReturnsPointerToALocalObject() { int a = 0; return Ident(&a); } #if ASAN_UAR == 1 TEST(AddressSanitizer, LocalReferenceReturnTest) { int *(*f)() = Ident(ReturnsPointerToALocalObject); int *p = f(); // Call 'f' a few more times, 'p' should still be poisoned. for (int i = 0; i < 32; i++) f(); EXPECT_DEATH(*p = 1, "AddressSanitizer: stack-use-after-return"); EXPECT_DEATH(*p = 1, "is located.*in frame .*ReturnsPointerToALocal"); } #endif template<size_t kSize> NOINLINE static void FuncWithStack() { char x[kSize]; Ident(x)[0] = 0; Ident(x)[kSize-1] = 0; } static void LotsOfStackReuse() { int LargeStack[10000]; Ident(LargeStack)[0] = 0; for (int i = 0; i < 10000; i++) { FuncWithStack<128 * 1>(); FuncWithStack<128 * 2>(); FuncWithStack<128 * 4>(); FuncWithStack<128 * 8>(); FuncWithStack<128 * 16>(); FuncWithStack<128 * 32>(); FuncWithStack<128 * 64>(); FuncWithStack<128 * 128>(); FuncWithStack<128 * 256>(); FuncWithStack<128 * 512>(); Ident(LargeStack)[0] = 0; } } TEST(AddressSanitizer, StressStackReuseTest) { LotsOfStackReuse(); } TEST(AddressSanitizer, ThreadedStressStackReuseTest) { const int kNumThreads = 20; pthread_t t[kNumThreads]; for (int i = 0; i < kNumThreads; i++) { PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))LotsOfStackReuse, 0); } for (int i = 0; i < kNumThreads; i++) { PTHREAD_JOIN(t[i], 0); } } // pthread_exit tries to perform unwinding stuff that leads to dlopen'ing // libgcc_s.so. dlopen in its turn calls malloc to store "libgcc_s.so" string // that confuses LSan on Thumb because it fails to understand that this // allocation happens in dynamic linker and should be ignored. #if !defined(__thumb__) static void *PthreadExit(void *a) { pthread_exit(0); return 0; } TEST(AddressSanitizer, PthreadExitTest) { pthread_t t; for (int i = 0; i < 1000; i++) { PTHREAD_CREATE(&t, 0, PthreadExit, 0); PTHREAD_JOIN(t, 0); } } #endif // FIXME: Why does clang-cl define __EXCEPTIONS? #if defined(__EXCEPTIONS) && !defined(_WIN32) NOINLINE static void StackReuseAndException() { int large_stack[1000]; Ident(large_stack); ASAN_THROW(1); } // TODO(kcc): support exceptions with use-after-return. TEST(AddressSanitizer, DISABLED_StressStackReuseAndExceptionsTest) { for (int i = 0; i < 10000; i++) { try { StackReuseAndException(); } catch(...) { } } } #endif #if !defined(_WIN32) TEST(AddressSanitizer, MlockTest) { EXPECT_EQ(0, mlockall(MCL_CURRENT)); EXPECT_EQ(0, mlock((void*)0x12345, 0x5678)); EXPECT_EQ(0, munlockall()); EXPECT_EQ(0, munlock((void*)0x987, 0x654)); } #endif struct LargeStruct { int foo[100]; }; // Test for bug http://llvm.org/bugs/show_bug.cgi?id=11763. // Struct copy should not cause asan warning even if lhs == rhs. TEST(AddressSanitizer, LargeStructCopyTest) { LargeStruct a; *Ident(&a) = *Ident(&a); } ATTRIBUTE_NO_SANITIZE_ADDRESS static void NoSanitizeAddress() { char *foo = new char[10]; Ident(foo)[10] = 0; delete [] foo; } TEST(AddressSanitizer, AttributeNoSanitizeAddressTest) { Ident(NoSanitizeAddress)(); } // The new/delete/etc mismatch checks don't work on Android, // as calls to new/delete go through malloc/free.
// OS X support is tracked here: // https://github.com/google/sanitizers/issues/131 // Windows support is tracked here: // https://github.com/google/sanitizers/issues/309 #if !defined(__ANDROID__) && \ !defined(__APPLE__) && \ !defined(_WIN32) static string MismatchStr(const string &str) { return string("AddressSanitizer: alloc-dealloc-mismatch \\(") + str; } static string MismatchOrNewDeleteTypeStr(const string &mismatch_str) { return "(" + MismatchStr(mismatch_str) + ")|(AddressSanitizer: new-delete-type-mismatch)"; } TEST(AddressSanitizer, AllocDeallocMismatch) { EXPECT_DEATH(free(Ident(new int)), MismatchStr("operator new vs free")); EXPECT_DEATH(free(Ident(new int[2])), MismatchStr("operator new \\[\\] vs free")); EXPECT_DEATH( delete (Ident(new int[2])), MismatchOrNewDeleteTypeStr("operator new \\[\\] vs operator delete")); EXPECT_DEATH(delete (Ident((int *)malloc(2 * sizeof(int)))), MismatchOrNewDeleteTypeStr("malloc vs operator delete")); EXPECT_DEATH(delete [] (Ident(new int)), MismatchStr("operator new vs operator delete \\[\\]")); EXPECT_DEATH(delete [] (Ident((int*)malloc(2 * sizeof(int)))), MismatchStr("malloc vs operator delete \\[\\]")); } #endif // ------------------ demo tests; run each one-by-one ------------- // e.g. --gtest_filter=*DemoOOBLeftHigh --gtest_also_run_disabled_tests TEST(AddressSanitizer, DISABLED_DemoThreadedTest) { ThreadedTestSpawn(); } void *SimpleBugOnSTack(void *x = 0) { char a[20]; Ident(a)[20] = 0; return 0; } TEST(AddressSanitizer, DISABLED_DemoStackTest) { SimpleBugOnSTack(); } TEST(AddressSanitizer, DISABLED_DemoThreadStackTest) { pthread_t t; PTHREAD_CREATE(&t, 0, SimpleBugOnSTack, 0); PTHREAD_JOIN(t, 0); } TEST(AddressSanitizer, DISABLED_DemoUAFLowIn) { uaf_test<U1>(10, 0); } TEST(AddressSanitizer, DISABLED_DemoUAFLowLeft) { uaf_test<U1>(10, -2); } TEST(AddressSanitizer, DISABLED_DemoUAFLowRight) { uaf_test<U1>(10, 10); } TEST(AddressSanitizer, DISABLED_DemoUAFHigh) { uaf_test<U1>(kLargeMalloc, 0); } TEST(AddressSanitizer, DISABLED_DemoOOM) { size_t size = SANITIZER_WORDSIZE == 64 ? (size_t)(1ULL << 40) : (0xf0000000); printf("%p\n", malloc(size)); } TEST(AddressSanitizer, DISABLED_DemoDoubleFreeTest) { DoubleFree(); } TEST(AddressSanitizer, DISABLED_DemoNullDerefTest) { int *a = 0; Ident(a)[10] = 0; } TEST(AddressSanitizer, DISABLED_DemoFunctionStaticTest) { static char a[100]; static char b[100]; static char c[100]; Ident(a); Ident(b); Ident(c); Ident(a)[5] = 0; Ident(b)[105] = 0; Ident(a)[5] = 0; } TEST(AddressSanitizer, DISABLED_DemoTooMuchMemoryTest) { const size_t kAllocSize = (1 << 28) - 1024; size_t total_size = 0; while (true) { void *x = malloc(kAllocSize); memset(x, 0, kAllocSize); total_size += kAllocSize; fprintf(stderr, "total: %ldM %p\n", (long)total_size >> 20, x); } } // https://github.com/google/sanitizers/issues/66 TEST(AddressSanitizer, BufferOverflowAfterManyFrees) { for (int i = 0; i < 1000000; i++) { delete [] (Ident(new char [8644])); } char *x = new char[8192]; EXPECT_DEATH(x[Ident(8192)] = 0, "AddressSanitizer: heap-buffer-overflow"); delete [] Ident(x); } // Test that instrumentation of stack allocations takes into account // AllocSize of a type, and not its StoreSize (16 vs 10 bytes for long double). // See http://llvm.org/bugs/show_bug.cgi?id=12047 for more details.
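// Concretely (assuming x86-64, where long double is the 80-bit x87 type):
// sizeof(long double) == 16, but storing the value itself writes only 10
// bytes, so the AllocSize is 16 while the StoreSize is 10. If the stack
// slot were sized by StoreSize, the full-width copy below,
//
//   memcpy(Ident(&a), Ident(&b), sizeof(long double)); // 16 bytes
//
// would touch 6 bytes past the poisoned boundary and raise a false report.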
TEST(AddressSanitizer, LongDoubleNegativeTest) { long double a, b; static long double c; memcpy(Ident(&a), Ident(&b), sizeof(long double)); memcpy(Ident(&c), Ident(&b), sizeof(long double)); } #if !defined(_WIN32) TEST(AddressSanitizer, pthread_getschedparam) { int policy; struct sched_param param; EXPECT_DEATH( pthread_getschedparam(pthread_self(), &policy, Ident(&param) + 2), "AddressSanitizer: stack-buffer-.*flow"); EXPECT_DEATH( pthread_getschedparam(pthread_self(), Ident(&policy) - 1, &param), "AddressSanitizer: stack-buffer-.*flow"); int res = pthread_getschedparam(pthread_self(), &policy, &param); ASSERT_EQ(0, res); } #endif #if SANITIZER_TEST_HAS_PRINTF_L static int vsnprintf_l_wrapper(char *s, size_t n, locale_t l, const char *format, ...) { va_list va; va_start(va, format); int res = vsnprintf_l(s, n , l, format, va); va_end(va); return res; } TEST(AddressSanitizer, snprintf_l) { char buff[5]; // Check that snprintf_l() works fine with Asan. int res = snprintf_l(buff, 5, _LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()"); EXPECT_EQ(12, res); // Check that vsnprintf_l() works fine with Asan. res = vsnprintf_l_wrapper(buff, 5, _LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()"); EXPECT_EQ(13, res); EXPECT_DEATH(snprintf_l(buff, 10, _LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()"), "AddressSanitizer: stack-buffer-overflow"); EXPECT_DEATH(vsnprintf_l_wrapper(buff, 10, _LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()"), "AddressSanitizer: stack-buffer-overflow"); } #endif diff --git a/lib/builtins/cpu_model.c b/lib/builtins/cpu_model.c index c6b30eda0a77..83ea7a49faf7 100644 --- a/lib/builtins/cpu_model.c +++ b/lib/builtins/cpu_model.c @@ -1,615 +1,596 @@ //===-- cpu_model.c - Support for __cpu_model builtin ------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is based on LLVM's lib/Support/Host.cpp. // It implements the operating system Host concept and builtin // __cpu_model for the compiler_rt library, for x86 only.
// //===----------------------------------------------------------------------===// #if (defined(__i386__) || defined(_M_IX86) || \ defined(__x86_64__) || defined(_M_X64)) && \ (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER)) #include <assert.h> #define bool int #define true 1 #define false 0 #ifdef _MSC_VER #include <intrin.h> #endif #ifndef __has_attribute #define __has_attribute(attr) 0 #endif enum VendorSignatures { SIG_INTEL = 0x756e6547 /* Genu */, SIG_AMD = 0x68747541 /* Auth */ }; enum ProcessorVendors { VENDOR_INTEL = 1, VENDOR_AMD, VENDOR_OTHER, VENDOR_MAX }; enum ProcessorTypes { INTEL_BONNELL = 1, INTEL_CORE2, INTEL_COREI7, AMDFAM10H, AMDFAM15H, INTEL_SILVERMONT, INTEL_KNL, AMD_BTVER1, AMD_BTVER2, AMDFAM17H, CPU_TYPE_MAX }; enum ProcessorSubtypes { INTEL_COREI7_NEHALEM = 1, INTEL_COREI7_WESTMERE, INTEL_COREI7_SANDYBRIDGE, AMDFAM10H_BARCELONA, AMDFAM10H_SHANGHAI, AMDFAM10H_ISTANBUL, AMDFAM15H_BDVER1, AMDFAM15H_BDVER2, AMDFAM15H_BDVER3, AMDFAM15H_BDVER4, AMDFAM17H_ZNVER1, INTEL_COREI7_IVYBRIDGE, INTEL_COREI7_HASWELL, INTEL_COREI7_BROADWELL, INTEL_COREI7_SKYLAKE, INTEL_COREI7_SKYLAKE_AVX512, CPU_SUBTYPE_MAX }; enum ProcessorFeatures { FEATURE_CMOV = 0, FEATURE_MMX, FEATURE_POPCNT, FEATURE_SSE, FEATURE_SSE2, FEATURE_SSE3, FEATURE_SSSE3, FEATURE_SSE4_1, FEATURE_SSE4_2, FEATURE_AVX, FEATURE_AVX2, FEATURE_SSE4_A, FEATURE_FMA4, FEATURE_XOP, FEATURE_FMA, FEATURE_AVX512F, FEATURE_BMI, FEATURE_BMI2, FEATURE_AES, FEATURE_PCLMUL, FEATURE_AVX512VL, FEATURE_AVX512BW, FEATURE_AVX512DQ, FEATURE_AVX512CD, FEATURE_AVX512ER, FEATURE_AVX512PF, FEATURE_AVX512VBMI, FEATURE_AVX512IFMA, FEATURE_AVX5124VNNIW, FEATURE_AVX5124FMAPS, FEATURE_AVX512VPOPCNTDQ }; // The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max). // Check motivated by bug reports for OpenSSL crashing on CPUs without CPUID // support. Consequently, for i386, the presence of CPUID is checked first // via the corresponding eflags bit. static bool isCpuIdSupported() { #if defined(__GNUC__) || defined(__clang__) #if defined(__i386__) int __cpuid_supported; __asm__(" pushfl\n" " popl %%eax\n" " movl %%eax,%%ecx\n" " xorl $0x00200000,%%eax\n" " pushl %%eax\n" " popfl\n" " pushfl\n" " popl %%eax\n" " movl $0,%0\n" " cmpl %%eax,%%ecx\n" " je 1f\n" " movl $1,%0\n" "1:" : "=r"(__cpuid_supported) : : "eax", "ecx"); if (!__cpuid_supported) return false; #endif return true; #endif return true; } // This code is copied from lib/Support/Host.cpp. // Changes to either file should be mirrored in the other. /// getX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in /// the specified arguments. If we can't run cpuid on the host, return true. static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX, unsigned *rECX, unsigned *rEDX) { #if defined(__GNUC__) || defined(__clang__) #if defined(__x86_64__) // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually. // FIXME: should we save this for Clang? __asm__("movq\t%%rbx, %%rsi\n\t" "cpuid\n\t" "xchgq\t%%rbx, %%rsi\n\t" : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX) : "a"(value)); return false; #elif defined(__i386__) __asm__("movl\t%%ebx, %%esi\n\t" "cpuid\n\t" "xchgl\t%%ebx, %%esi\n\t" : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX) : "a"(value)); return false; #else return true; #endif #elif defined(_MSC_VER) // The MSVC intrinsic is portable across x86 and x64.
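// For reference, the intrinsic fills the array with EAX, EBX, ECX, EDX in
// that order; e.g. __cpuid(registers, 0) returns the maximum supported
// leaf in registers[0] and the vendor string split across registers[1],
// registers[3], registers[2] ("Genu", "ineI", "ntel" on Intel parts).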
int registers[4]; __cpuid(registers, value); *rEAX = registers[0]; *rEBX = registers[1]; *rECX = registers[2]; *rEDX = registers[3]; return false; #else return true; #endif } /// getX86CpuIDAndInfoEx - Execute the specified cpuid with subleaf and return /// the 4 values in the specified arguments. If we can't run cpuid on the host, /// return true. static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf, unsigned *rEAX, unsigned *rEBX, unsigned *rECX, unsigned *rEDX) { -#if defined(__x86_64__) || defined(_M_X64) #if defined(__GNUC__) || defined(__clang__) +#if defined(__x86_64__) // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually. // FIXME: should we save this for Clang? __asm__("movq\t%%rbx, %%rsi\n\t" "cpuid\n\t" "xchgq\t%%rbx, %%rsi\n\t" : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX) : "a"(value), "c"(subleaf)); return false; -#elif defined(_MSC_VER) - int registers[4]; - __cpuidex(registers, value, subleaf); - *rEAX = registers[0]; - *rEBX = registers[1]; - *rECX = registers[2]; - *rEDX = registers[3]; - return false; -#else - return true; -#endif -#elif defined(__i386__) || defined(_M_IX86) -#if defined(__GNUC__) || defined(__clang__) +#elif defined(__i386__) __asm__("movl\t%%ebx, %%esi\n\t" "cpuid\n\t" "xchgl\t%%ebx, %%esi\n\t" : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX) : "a"(value), "c"(subleaf)); return false; -#elif defined(_MSC_VER) - __asm { - mov eax,value - mov ecx,subleaf - cpuid - mov esi,rEAX - mov dword ptr [esi],eax - mov esi,rEBX - mov dword ptr [esi],ebx - mov esi,rECX - mov dword ptr [esi],ecx - mov esi,rEDX - mov dword ptr [esi],edx - } - return false; #else return true; #endif +#elif defined(_MSC_VER) + int registers[4]; + __cpuidex(registers, value, subleaf); + *rEAX = registers[0]; + *rEBX = registers[1]; + *rECX = registers[2]; + *rEDX = registers[3]; + return false; #else return true; #endif } // Read control register 0 (XCR0). Used to detect features such as AVX. static bool getX86XCR0(unsigned *rEAX, unsigned *rEDX) { #if defined(__GNUC__) || defined(__clang__) // Check xgetbv; this uses a .byte sequence instead of the instruction // directly because older assemblers do not include support for xgetbv and // there is no easy way to conditionally compile based on the assembler used. __asm__(".byte 0x0f, 0x01, 0xd0" : "=a"(*rEAX), "=d"(*rEDX) : "c"(0)); return false; #elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK) unsigned long long Result = _xgetbv(_XCR_XFEATURE_ENABLED_MASK); *rEAX = Result; *rEDX = Result >> 32; return false; #else return true; #endif } static void detectX86FamilyModel(unsigned EAX, unsigned *Family, unsigned *Model) { *Family = (EAX >> 8) & 0xf; // Bits 8 - 11 *Model = (EAX >> 4) & 0xf; // Bits 4 - 7 if (*Family == 6 || *Family == 0xf) { if (*Family == 0xf) // Examine extended family ID if family ID is F. *Family += (EAX >> 20) & 0xff; // Bits 20 - 27 // Examine extended model ID if family ID is 6 or F. *Model += ((EAX >> 16) & 0xf) << 4; // Bits 16 - 19 } } static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, unsigned Brand_id, unsigned Features, unsigned *Type, unsigned *Subtype) { if (Brand_id != 0) return; switch (Family) { case 6: switch (Model) { case 0x0f: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile // processor, Intel Core 2 Quad processor, Intel Core 2 Quad // mobile processor, Intel Core 2 Extreme processor, Intel // Pentium Dual-Core processor, Intel Xeon processor, model // 0Fh. 
All processors are manufactured using the 65 nm process. case 0x16: // Intel Celeron processor model 16h. All processors are // manufactured using the 65 nm process case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model // 17h. All processors are manufactured using the 45 nm process. // // 45nm: Penryn , Wolfdale, Yorkfield (XE) case 0x1d: // Intel Xeon processor MP. All processors are manufactured using // the 45 nm process. *Type = INTEL_CORE2; // "penryn" break; case 0x1a: // Intel Core i7 processor and Intel Xeon processor. All // processors are manufactured using the 45 nm process. case 0x1e: // Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz. // As found in a Summer 2010 model iMac. case 0x1f: case 0x2e: // Nehalem EX *Type = INTEL_COREI7; // "nehalem" *Subtype = INTEL_COREI7_NEHALEM; break; case 0x25: // Intel Core i7, laptop version. case 0x2c: // Intel Core i7 processor and Intel Xeon processor. All // processors are manufactured using the 32 nm process. case 0x2f: // Westmere EX *Type = INTEL_COREI7; // "westmere" *Subtype = INTEL_COREI7_WESTMERE; break; case 0x2a: // Intel Core i7 processor. All processors are manufactured // using the 32 nm process. case 0x2d: *Type = INTEL_COREI7; //"sandybridge" *Subtype = INTEL_COREI7_SANDYBRIDGE; break; case 0x3a: case 0x3e: // Ivy Bridge EP *Type = INTEL_COREI7; // "ivybridge" *Subtype = INTEL_COREI7_IVYBRIDGE; break; // Haswell: case 0x3c: case 0x3f: case 0x45: case 0x46: *Type = INTEL_COREI7; // "haswell" *Subtype = INTEL_COREI7_HASWELL; break; // Broadwell: case 0x3d: case 0x47: case 0x4f: case 0x56: *Type = INTEL_COREI7; // "broadwell" *Subtype = INTEL_COREI7_BROADWELL; break; // Skylake: case 0x4e: // Skylake mobile case 0x5e: // Skylake desktop case 0x8e: // Kaby Lake mobile case 0x9e: // Kaby Lake desktop *Type = INTEL_COREI7; // "skylake" *Subtype = INTEL_COREI7_SKYLAKE; break; // Skylake Xeon: case 0x55: *Type = INTEL_COREI7; *Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512" break; case 0x1c: // Most 45 nm Intel Atom processors case 0x26: // 45 nm Atom Lincroft case 0x27: // 32 nm Atom Medfield case 0x35: // 32 nm Atom Midview case 0x36: // 32 nm Atom Midview *Type = INTEL_BONNELL; break; // "bonnell" // Atom Silvermont codes from the Intel software optimization guide. case 0x37: case 0x4a: case 0x4d: case 0x5a: case 0x5d: case 0x4c: // really airmont *Type = INTEL_SILVERMONT; break; // "silvermont" case 0x57: *Type = INTEL_KNL; // knl break; default: // Unknown family 6 CPU. break; break; } default: break; // Unknown. } } static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model, unsigned Features, unsigned *Type, unsigned *Subtype) { // FIXME: this poorly matches the generated SubtargetFeatureKV table. There // appears to be no way to generate the wide variety of AMD-specific targets // from the information returned from CPUID. 
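// A worked example of the decoding below (leaf-1 EAX value taken from
// public AMD documentation for first-generation Zen): EAX = 0x00800F11
// has base family 0xF, so detectX86FamilyModel adds the extended family
// (0x8) to yield Family 23 (0x17) and Model 0x01, which selects
// AMDFAM17H with subtype AMDFAM17H_ZNVER1 ("znver1") in the switch below.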
switch (Family) { case 16: *Type = AMDFAM10H; // "amdfam10" switch (Model) { case 2: *Subtype = AMDFAM10H_BARCELONA; break; case 4: *Subtype = AMDFAM10H_SHANGHAI; break; case 8: *Subtype = AMDFAM10H_ISTANBUL; break; } break; case 20: *Type = AMD_BTVER1; break; // "btver1"; case 21: *Type = AMDFAM15H; if (Model >= 0x60 && Model <= 0x7f) { *Subtype = AMDFAM15H_BDVER4; break; // "bdver4"; 60h-7Fh: Excavator } if (Model >= 0x30 && Model <= 0x3f) { *Subtype = AMDFAM15H_BDVER3; break; // "bdver3"; 30h-3Fh: Steamroller } if (Model >= 0x10 && Model <= 0x1f) { *Subtype = AMDFAM15H_BDVER2; break; // "bdver2"; 10h-1Fh: Piledriver } if (Model <= 0x0f) { *Subtype = AMDFAM15H_BDVER1; break; // "bdver1"; 00h-0Fh: Bulldozer } break; case 22: *Type = AMD_BTVER2; break; // "btver2" case 23: *Type = AMDFAM17H; *Subtype = AMDFAM17H_ZNVER1; break; default: break; // "generic" } } static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, unsigned *FeaturesOut) { unsigned Features = 0; unsigned EAX, EBX; if ((EDX >> 15) & 1) Features |= 1 << FEATURE_CMOV; if ((EDX >> 23) & 1) Features |= 1 << FEATURE_MMX; if ((EDX >> 25) & 1) Features |= 1 << FEATURE_SSE; if ((EDX >> 26) & 1) Features |= 1 << FEATURE_SSE2; if ((ECX >> 0) & 1) Features |= 1 << FEATURE_SSE3; if ((ECX >> 1) & 1) Features |= 1 << FEATURE_PCLMUL; if ((ECX >> 9) & 1) Features |= 1 << FEATURE_SSSE3; if ((ECX >> 12) & 1) Features |= 1 << FEATURE_FMA; if ((ECX >> 19) & 1) Features |= 1 << FEATURE_SSE4_1; if ((ECX >> 20) & 1) Features |= 1 << FEATURE_SSE4_2; if ((ECX >> 23) & 1) Features |= 1 << FEATURE_POPCNT; if ((ECX >> 25) & 1) Features |= 1 << FEATURE_AES; // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV // indicates that the AVX registers will be saved and restored on context // switch, then we have full AVX support. 
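// Bit by bit: CPUID leaf 1 ECX bit 27 is OSXSAVE (XGETBV is usable) and
// bit 28 is AVX; together they form AVXBits below. In XCR0, bit 1 covers
// the SSE (XMM) state and bit 2 the AVX (upper YMM) state, so
// (EAX & 0x6) == 0x6 means the OS context-switches the full YMM registers.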
const unsigned AVXBits = (1 << 27) | (1 << 28); bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) && ((EAX & 0x6) == 0x6); bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0); if (HasAVX) Features |= 1 << FEATURE_AVX; bool HasLeaf7 = MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX); if (HasLeaf7 && ((EBX >> 3) & 1)) Features |= 1 << FEATURE_BMI; if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX) Features |= 1 << FEATURE_AVX2; if (HasLeaf7 && ((EBX >> 9) & 1)) Features |= 1 << FEATURE_BMI2; if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512F; if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512DQ; if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512IFMA; if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512PF; if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512ER; if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512CD; if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512BW; if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512VL; if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512VBMI; if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX512VPOPCNTDQ; if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX5124VNNIW; if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save) Features |= 1 << FEATURE_AVX5124FMAPS; unsigned MaxExtLevel; getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX); bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 && !getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX); if (HasExtLeaf1 && ((ECX >> 6) & 1)) Features |= 1 << FEATURE_SSE4_A; if (HasExtLeaf1 && ((ECX >> 11) & 1)) Features |= 1 << FEATURE_XOP; if (HasExtLeaf1 && ((ECX >> 16) & 1)) Features |= 1 << FEATURE_FMA4; *FeaturesOut = Features; } #if defined(HAVE_INIT_PRIORITY) #define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__(101))) #elif __has_attribute(__constructor__) #define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__)) #else // FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that // this runs during initialization. #define CONSTRUCTOR_ATTRIBUTE #endif int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE; struct __processor_model { unsigned int __cpu_vendor; unsigned int __cpu_type; unsigned int __cpu_subtype; unsigned int __cpu_features[1]; } __cpu_model = {0, 0, 0, {0}}; /* A constructor function that sets __cpu_model and __cpu_features with the right values. This needs to run only once. This constructor is given the highest priority and it should run before constructors without the priority set. However, it still runs after ifunc initializers and needs to be called explicitly there. */ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) { unsigned EAX, EBX, ECX, EDX; unsigned MaxLeaf = 5; unsigned Vendor; unsigned Model, Family, Brand_id; unsigned Features = 0; /* This function needs to run just once. */ if (__cpu_model.__cpu_vendor) return 0; if (!isCpuIdSupported()) return -1; /* Assume cpuid insn present. Run in level 0 to get vendor id.
*/ if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) { __cpu_model.__cpu_vendor = VENDOR_OTHER; return -1; } getX86CpuIDAndInfo(1, &EAX, &EBX, &ECX, &EDX); detectX86FamilyModel(EAX, &Family, &Model); Brand_id = EBX & 0xff; /* Find available features. */ getAvailableFeatures(ECX, EDX, MaxLeaf, &Features); __cpu_model.__cpu_features[0] = Features; if (Vendor == SIG_INTEL) { /* Get CPU type. */ getIntelProcessorTypeAndSubtype(Family, Model, Brand_id, Features, &(__cpu_model.__cpu_type), &(__cpu_model.__cpu_subtype)); __cpu_model.__cpu_vendor = VENDOR_INTEL; } else if (Vendor == SIG_AMD) { /* Get CPU type. */ getAMDProcessorTypeAndSubtype(Family, Model, Features, &(__cpu_model.__cpu_type), &(__cpu_model.__cpu_subtype)); __cpu_model.__cpu_vendor = VENDOR_AMD; } else __cpu_model.__cpu_vendor = VENDOR_OTHER; assert(__cpu_model.__cpu_vendor < VENDOR_MAX); assert(__cpu_model.__cpu_type < CPU_TYPE_MAX); assert(__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX); return 0; } #endif diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc index 6514aea6f609..2df58b44f6b8 100644 --- a/lib/lsan/lsan_allocator.cc +++ b/lib/lsan/lsan_allocator.cc @@ -1,281 +1,290 @@ //=-- lsan_allocator.cc ---------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // See lsan_allocator.h for details. // //===----------------------------------------------------------------------===// #include "lsan_allocator.h" #include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "lsan_common.h" extern "C" void *memset(void *ptr, int value, uptr num); namespace __lsan { #if defined(__i386__) || defined(__arm__) static const uptr kMaxAllowedMallocSize = 1UL << 30; #elif defined(__mips64) || defined(__aarch64__) static const uptr kMaxAllowedMallocSize = 4UL << 30; #else static const uptr kMaxAllowedMallocSize = 8UL << 30; #endif typedef LargeMmapAllocator<> SecondaryAllocator; typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator> Allocator; static Allocator allocator; void InitializeAllocator() { SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); allocator.InitLinkerInitialized( common_flags()->allocator_release_to_os_interval_ms); } void AllocatorThreadFinish() { allocator.SwallowCache(GetAllocatorCache()); } static ChunkMetadata *Metadata(const void *p) { return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p)); } static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) { if (!p) return; ChunkMetadata *m = Metadata(p); CHECK(m); m->tag = DisabledInThisThread() ?
kIgnored : kDirectlyLeaked; m->stack_trace_id = StackDepotPut(stack); m->requested_size = size; atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed); } static void RegisterDeallocation(void *p) { if (!p) return; ChunkMetadata *m = Metadata(p); CHECK(m); atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed); } void *Allocate(const StackTrace &stack, uptr size, uptr alignment, bool cleared) { if (size == 0) size = 1; if (size > kMaxAllowedMallocSize) { Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size); return Allocator::FailureHandler::OnBadRequest(); } void *p = allocator.Allocate(GetAllocatorCache(), size, alignment); // Do not rely on the allocator to clear the memory (it's slow). if (cleared && allocator.FromPrimary(p)) memset(p, 0, size); RegisterAllocation(stack, p, size); if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size); RunMallocHooks(p, size); return p; } +static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) + return Allocator::FailureHandler::OnBadRequest(); + size *= nmemb; + return Allocate(stack, size, 1, true); +} + void Deallocate(void *p) { if (&__sanitizer_free_hook) __sanitizer_free_hook(p); RunFreeHooks(p); RegisterDeallocation(p); allocator.Deallocate(GetAllocatorCache(), p); } void *Reallocate(const StackTrace &stack, void *p, uptr new_size, uptr alignment) { RegisterDeallocation(p); if (new_size > kMaxAllowedMallocSize) { Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size); allocator.Deallocate(GetAllocatorCache(), p); return Allocator::FailureHandler::OnBadRequest(); } p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment); RegisterAllocation(stack, p, new_size); return p; } void GetAllocatorCacheRange(uptr *begin, uptr *end) { *begin = (uptr)GetAllocatorCache(); *end = *begin + sizeof(AllocatorCache); } uptr GetMallocUsableSize(const void *p) { ChunkMetadata *m = Metadata(p); if (!m) return 0; return m->requested_size; } void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) { - return Allocate(stack, size, alignment, kAlwaysClearMemory); + if (UNLIKELY(!IsPowerOfTwo(alignment))) { + errno = errno_EINVAL; + return Allocator::FailureHandler::OnBadRequest(); + } + return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory)); } void *lsan_malloc(uptr size, const StackTrace &stack) { - return Allocate(stack, size, 1, kAlwaysClearMemory); + return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory)); } void lsan_free(void *p) { Deallocate(p); } void *lsan_realloc(void *p, uptr size, const StackTrace &stack) { - return Reallocate(stack, p, size, 1); + return SetErrnoOnNull(Reallocate(stack, p, size, 1)); } void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) { - if (CheckForCallocOverflow(size, nmemb)) - return Allocator::FailureHandler::OnBadRequest(); - size *= nmemb; - return Allocate(stack, size, 1, true); + return SetErrnoOnNull(Calloc(nmemb, size, stack)); } void *lsan_valloc(uptr size, const StackTrace &stack) { - if (size == 0) - size = GetPageSizeCached(); - return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory); + return SetErrnoOnNull( + Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory)); } uptr lsan_mz_size(const void *p) { return GetMallocUsableSize(p); } ///// Interface to the common LSan module.
///// void LockAllocator() { allocator.ForceLock(); } void UnlockAllocator() { allocator.ForceUnlock(); } void GetAllocatorGlobalRange(uptr *begin, uptr *end) { *begin = (uptr)&allocator; *end = *begin + sizeof(allocator); } uptr PointsIntoChunk(void* p) { uptr addr = reinterpret_cast<uptr>(p); uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p)); if (!chunk) return 0; // LargeMmapAllocator considers pointers to the meta-region of a chunk to be // valid, but we don't want that. if (addr < chunk) return 0; ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk)); CHECK(m); if (!m->allocated) return 0; if (addr < chunk + m->requested_size) return chunk; if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr)) return chunk; return 0; } uptr GetUserBegin(uptr chunk) { return chunk; } LsanMetadata::LsanMetadata(uptr chunk) { metadata_ = Metadata(reinterpret_cast<void *>(chunk)); CHECK(metadata_); } bool LsanMetadata::allocated() const { return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated; } ChunkTag LsanMetadata::tag() const { return reinterpret_cast<ChunkMetadata *>(metadata_)->tag; } void LsanMetadata::set_tag(ChunkTag value) { reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value; } uptr LsanMetadata::requested_size() const { return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size; } u32 LsanMetadata::stack_trace_id() const { return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id; } void ForEachChunk(ForEachChunkCallback callback, void *arg) { allocator.ForEachChunk(callback, arg); } IgnoreObjectResult IgnoreObjectLocked(const void *p) { void *chunk = allocator.GetBlockBegin(p); if (!chunk || p < chunk) return kIgnoreObjectInvalid; ChunkMetadata *m = Metadata(chunk); CHECK(m); if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) { if (m->tag == kIgnored) return kIgnoreObjectAlreadyIgnored; m->tag = kIgnored; return kIgnoreObjectSuccess; } else { return kIgnoreObjectInvalid; } } } // namespace __lsan using namespace __lsan; extern "C" { SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes() { uptr stats[AllocatorStatCount]; allocator.GetStats(stats); return stats[AllocatorStatAllocated]; } SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size() { uptr stats[AllocatorStatCount]; allocator.GetStats(stats); return stats[AllocatorStatMapped]; } SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes() { return 0; } SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes() { return 0; } SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; } SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_allocated_size(const void *p) { return GetMallocUsableSize(p); } #if !SANITIZER_SUPPORTS_WEAK_HOOKS // Provide default (no-op) implementation of malloc hooks. SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void __sanitizer_malloc_hook(void *ptr, uptr size) { (void)ptr; (void)size; } SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void __sanitizer_free_hook(void *ptr) { (void)ptr; } #endif } // extern "C" diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc index 4ffa91568cc8..c121e6a8fb24 100644 --- a/lib/lsan/lsan_common.cc +++ b/lib/lsan/lsan_common.cc @@ -1,868 +1,866 @@ //=-- lsan_common.cc ------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details.
// //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // Implementation of common leak checking functionality. // //===----------------------------------------------------------------------===// #include "lsan_common.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flag_parser.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" #if CAN_SANITIZE_LEAKS namespace __lsan { // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and // also to protect the global list of root regions. BlockingMutex global_mutex(LINKER_INITIALIZED); Flags lsan_flags; void DisableCounterUnderflow() { if (common_flags()->detect_leaks) { Report("Unmatched call to __lsan_enable().\n"); Die(); } } void Flags::SetDefaults() { #define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; #include "lsan_flags.inc" #undef LSAN_FLAG } void RegisterLsanFlags(FlagParser *parser, Flags *f) { #define LSAN_FLAG(Type, Name, DefaultValue, Description) \ RegisterFlag(parser, #Name, Description, &f->Name); #include "lsan_flags.inc" #undef LSAN_FLAG } #define LOG_POINTERS(...) \ do { \ if (flags()->log_pointers) Report(__VA_ARGS__); \ } while (0); #define LOG_THREADS(...) \ do { \ if (flags()->log_threads) Report(__VA_ARGS__); \ } while (0); ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; static SuppressionContext *suppression_ctx = nullptr; static const char kSuppressionLeak[] = "leak"; static const char *kSuppressionTypes[] = { kSuppressionLeak }; static const char kStdSuppressions[] = #if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT // definition. "leak:*pthread_exit*\n" #endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT #if SANITIZER_MAC // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173 "leak:*_os_trace*\n" #endif // TLS leak in some glibc versions, described in // https://sourceware.org/bugzilla/show_bug.cgi?id=12650. "leak:*tls_get_addr*\n"; void InitializeSuppressions() { CHECK_EQ(nullptr, suppression_ctx); suppression_ctx = new (suppression_placeholder) // NOLINT SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); suppression_ctx->ParseFromFile(flags()->suppressions); if (&__lsan_default_suppressions) suppression_ctx->Parse(__lsan_default_suppressions()); suppression_ctx->Parse(kStdSuppressions); } static SuppressionContext *GetSuppressionContext() { CHECK(suppression_ctx); return suppression_ctx; } static InternalMmapVector<RootRegion> *root_regions; InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; } void InitializeRootRegions() { CHECK(!root_regions); ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)]; root_regions = new(placeholder) InternalMmapVector<RootRegion>(1); } void InitCommonLsan() { InitializeRootRegions(); if (common_flags()->detect_leaks) { // Initialization which can fail or print warnings should only be done if // LSan is actually enabled.
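// [Editorial sketch, not part of the patch] The suppression machinery
// initialized here consumes the same one-pattern-per-line format as the
// built-in kStdSuppressions list above. A hypothetical user-side setup:
//
//   # my_suppressions.txt -- suppress leaks whose stack mentions these names
//   leak:libcrypto
//   leak:MyLegacyInitFunction
//
//   $ LSAN_OPTIONS=suppressions=my_suppressions.txt ./a.out
//
// Matched suppressions are counted and, with print_suppressions=1, shown in
// the "Suppressions used:" table that PrintMatchedSuppressions() emits after
// the leak report.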
InitializeSuppressions(); InitializePlatformSpecificModules(); } } class Decorator: public __sanitizer::SanitizerCommonDecorator { public: Decorator() : SanitizerCommonDecorator() { } const char *Error() { return Red(); } const char *Leak() { return Blue(); } const char *End() { return Default(); } }; static inline bool CanBeAHeapPointer(uptr p) { // Since our heap is located in mmap-ed memory, we can assume a sensible lower // bound on heap addresses. const uptr kMinAddress = 4 * 4096; if (p < kMinAddress) return false; #if defined(__x86_64__) // Accept only canonical form user-space addresses. return ((p >> 47) == 0); #elif defined(__mips64) return ((p >> 40) == 0); #elif defined(__aarch64__) unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); return ((p >> runtimeVMA) == 0); #else return true; #endif } // Scans the memory range, looking for byte patterns that point into allocator // chunks. Marks those chunks with |tag| and adds them to |frontier|. // There are two usage modes for this function: finding reachable chunks // (|tag| = kReachable) and finding indirectly leaked chunks // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, // so |frontier| = 0. void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, const char *region_type, ChunkTag tag) { CHECK(tag == kReachable || tag == kIndirectlyLeaked); const uptr alignment = flags()->pointer_alignment(); LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end); uptr pp = begin; if (pp % alignment) pp = pp + alignment - pp % alignment; for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT void *p = *reinterpret_cast<void **>(pp); if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue; uptr chunk = PointsIntoChunk(p); if (!chunk) continue; // Pointers to self don't count. This matters when tag == kIndirectlyLeaked. if (chunk == begin) continue; LsanMetadata m(chunk); if (m.tag() == kReachable || m.tag() == kIgnored) continue; // Do this check relatively late so we can log only the interesting cases. if (!flags()->use_poisoned && WordIsPoisoned(pp)) { LOG_POINTERS( "%p is poisoned: ignoring %p pointing into chunk %p-%p of size " "%zu.\n", pp, p, chunk, chunk + m.requested_size(), m.requested_size()); continue; } m.set_tag(tag); LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p, chunk, chunk + m.requested_size(), m.requested_size()); if (frontier) frontier->push_back(chunk); } } // Scans a global range for pointers. void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) { uptr allocator_begin = 0, allocator_end = 0; GetAllocatorGlobalRange(&allocator_begin, &allocator_end); if (begin <= allocator_begin && allocator_begin < end) { CHECK_LE(allocator_begin, allocator_end); CHECK_LE(allocator_end, end); if (begin < allocator_begin) ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL", kReachable); if (allocator_end < end) ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable); } else { ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable); } } void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { Frontier *frontier = reinterpret_cast<Frontier *>(arg); ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable); } // Scans thread data (stacks and TLS) for heap pointers.
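// [Editorial sketch, not part of the patch] ScanRangeForPointers above is a
// conservative scan: every aligned word in [begin, end) is treated as a
// potential pointer and probed against the allocator. Stripped of the tagging
// and logging, the core loop looks like this (points_into_chunk is a
// hypothetical stand-in for PointsIntoChunk, uptr is this file's integer
// pointer type):
//
//   void scan(uptr begin, uptr end, uptr alignment,
//             void (*on_chunk)(uptr chunk)) {
//     uptr pp = begin;
//     if (pp % alignment) pp += alignment - pp % alignment;  // round up
//     for (; pp + sizeof(void *) <= end; pp += alignment) {
//       void *p = *reinterpret_cast<void **>(pp);  // load candidate word
//       uptr chunk = points_into_chunk(p);
//       if (chunk) on_chunk(chunk);
//     }
//   }
//
// With use_unaligned=1 the alignment drops to one byte, trading scan time for
// the ability to find pointers stored at odd offsets.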
static void ProcessThreads(SuspendedThreadsList const &suspended_threads, Frontier *frontier) { InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount()); uptr registers_begin = reinterpret_cast<uptr>(registers.data()); uptr registers_end = registers_begin + registers.size(); for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i)); LOG_THREADS("Processing thread %d.\n", os_id); uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; DTLS *dtls; bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin, &tls_end, &cache_begin, &cache_end, &dtls); if (!thread_found) { // If a thread can't be found in the thread registry, it's probably in the // process of destruction. Log this event and move on. LOG_THREADS("Thread %d not found in registry.\n", os_id); continue; } uptr sp; PtraceRegistersStatus have_registers = suspended_threads.GetRegistersAndSP(i, registers.data(), &sp); if (have_registers != REGISTERS_AVAILABLE) { Report("Unable to get registers from thread %d.\n", os_id); // If unable to get SP, consider the entire stack to be reachable unless // GetRegistersAndSP failed with ESRCH. if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue; sp = stack_begin; } if (flags()->use_registers && have_registers) ScanRangeForPointers(registers_begin, registers_end, frontier, "REGISTERS", kReachable); if (flags()->use_stacks) { LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp); if (sp < stack_begin || sp >= stack_end) { // SP is outside the recorded stack range (e.g. the thread is running a // signal handler on alternate stack, or swapcontext was used). // Again, consider the entire stack range to be reachable. LOG_THREADS("WARNING: stack pointer not in stack range.\n"); uptr page_size = GetPageSizeCached(); int skipped = 0; while (stack_begin < stack_end && !IsAccessibleMemoryRange(stack_begin, 1)) { skipped++; stack_begin += page_size; } LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n", skipped, stack_begin, stack_end); } else { // Shrink the stack range to ignore out-of-scope values. stack_begin = sp; } ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", kReachable); ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier); } if (flags()->use_tls) { if (tls_begin) { LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end); // If the tls and cache ranges don't overlap, scan the full tls range; // otherwise, only scan the non-overlapping portions. if (cache_begin == cache_end || tls_end < cache_begin || tls_begin > cache_end) { ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable); } else { if (tls_begin < cache_begin) ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", kReachable); if (tls_end > cache_end) ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable); } } if (dtls && !DTLSInDestruction(dtls)) { for (uptr j = 0; j < dtls->dtv_size; ++j) { uptr dtls_beg = dtls->dtv[j].beg; uptr dtls_end = dtls_beg + dtls->dtv[j].size; if (dtls_beg < dtls_end) { LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end); ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS", kReachable); } } } else { // We are handling a thread with DTLS under destruction. Log about // this and continue.
LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id); } } } } void ScanRootRegion(Frontier *frontier, const RootRegion &root_region, uptr region_begin, uptr region_end, bool is_readable) { uptr intersection_begin = Max(root_region.begin, region_begin); uptr intersection_end = Min(region_end, root_region.begin + root_region.size); if (intersection_begin >= intersection_end) return; LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n", root_region.begin, root_region.begin + root_region.size, region_begin, region_end, is_readable ? "readable" : "unreadable"); if (is_readable) ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT", kReachable); } static void ProcessRootRegion(Frontier *frontier, const RootRegion &root_region) { MemoryMappingLayout proc_maps(/*cache_enabled*/ true); MemoryMappedSegment segment; while (proc_maps.Next(&segment)) { ScanRootRegion(frontier, root_region, segment.start, segment.end, segment.IsReadable()); } } // Scans root regions for heap pointers. static void ProcessRootRegions(Frontier *frontier) { if (!flags()->use_root_regions) return; CHECK(root_regions); for (uptr i = 0; i < root_regions->size(); i++) { ProcessRootRegion(frontier, (*root_regions)[i]); } } static void FloodFillTag(Frontier *frontier, ChunkTag tag) { while (frontier->size()) { uptr next_chunk = frontier->back(); frontier->pop_back(); LsanMetadata m(next_chunk); ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, "HEAP", tag); } } // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks // which are reachable from it as indirectly leaked. static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() != kReachable) { ScanRangeForPointers(chunk, chunk + m.requested_size(), /* frontier */ nullptr, "HEAP", kIndirectlyLeaked); } } // ForEachChunk callback. If chunk is marked as ignored, adds its address to // frontier. static void CollectIgnoredCb(uptr chunk, void *arg) { CHECK(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() == kIgnored) { LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", chunk, chunk + m.requested_size(), m.requested_size()); reinterpret_cast<Frontier *>(arg)->push_back(chunk); } } static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) { CHECK(stack_id); StackTrace stack = map->Get(stack_id); // The top frame is our malloc/calloc/etc. The next frame is the caller. if (stack.size >= 2) return stack.trace[1]; return 0; } struct InvalidPCParam { Frontier *frontier; StackDepotReverseMap *stack_depot_reverse_map; bool skip_linker_allocations; }; // ForEachChunk callback. If the caller pc is invalid or is within the linker, // mark as reachable. Called by ProcessPlatformSpecificAllocations. static void MarkInvalidPCCb(uptr chunk, void *arg) { CHECK(arg); InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) { u32 stack_id = m.stack_trace_id(); uptr caller_pc = 0; if (stack_id > 0) caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map); // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark // it as reachable, as we can't properly report its allocation stack anyway.
if (caller_pc == 0 || (param->skip_linker_allocations && GetLinker()->containsAddress(caller_pc))) { m.set_tag(kReachable); param->frontier->push_back(chunk); } } } // On Linux, handles dynamically allocated TLS blocks by treating all chunks // allocated from ld-linux.so as reachable. // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules. // They are allocated with a __libc_memalign() call in allocate_and_init() // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those // blocks, but we can make sure they come from our own allocator by intercepting // __libc_memalign(). On top of that, there is no easy way to reach them. Their // addresses are stored in a dynamically allocated array (the DTV) which is // referenced from the static TLS. Unfortunately, we can't just rely on the DTV // being reachable from the static TLS, and the dynamic TLS being reachable from // the DTV. This is because the initial DTV is allocated before our interception // mechanism kicks in, and thus we don't recognize it as allocated memory. We // can't special-case it either, since we don't know its size. // Our solution is to include in the root set all allocations made from // ld-linux.so (which is where allocate_and_init() is implemented). This is // guaranteed to include all dynamic TLS blocks (and possibly other allocations // which we don't care about). // On all other platforms, this simply checks to ensure that the caller pc is // valid before reporting chunks as leaked. void ProcessPC(Frontier *frontier) { StackDepotReverseMap stack_depot_reverse_map; InvalidPCParam arg; arg.frontier = frontier; arg.stack_depot_reverse_map = &stack_depot_reverse_map; arg.skip_linker_allocations = flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr; ForEachChunk(MarkInvalidPCCb, &arg); } // Sets the appropriate tag on each chunk. static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { // Holds the flood fill frontier. Frontier frontier(1); ForEachChunk(CollectIgnoredCb, &frontier); ProcessGlobalRegions(&frontier); ProcessThreads(suspended_threads, &frontier); ProcessRootRegions(&frontier); FloodFillTag(&frontier, kReachable); CHECK_EQ(0, frontier.size()); ProcessPC(&frontier); // The check here is relatively expensive, so we do this in a separate flood // fill. That way we can skip the check for chunks that are reachable // otherwise. LOG_POINTERS("Processing platform-specific allocations.\n"); ProcessPlatformSpecificAllocations(&frontier); FloodFillTag(&frontier, kReachable); // Iterate over leaked chunks and mark those that are reachable from other // leaked chunks. LOG_POINTERS("Scanning leaked chunks.\n"); ForEachChunk(MarkIndirectlyLeakedCb, nullptr); } // ForEachChunk callback. Resets the tags to pre-leak-check state. static void ResetTagsCb(uptr chunk, void *arg) { (void)arg; chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() != kIgnored) m.set_tag(kDirectlyLeaked); } static void PrintStackTraceById(u32 stack_trace_id) { CHECK(stack_trace_id); StackDepotGet(stack_trace_id).Print(); } // ForEachChunk callback. Aggregates information about unreachable chunks into // a LeakReport. 
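// [Editorial sketch, not part of the patch] ClassifyAllChunks above is a
// textbook mark phase: seed the frontier with roots (ignored chunks, globals,
// thread stacks and registers, root regions), flood-fill kReachable, and
// whatever still carries a leaked tag afterwards is a leak. The same shape on
// a toy object graph (hypothetical types, not LSan's):
//
//   #include <vector>
//   enum Tag { kLeaked, kMarkedReachable };
//   struct Chunk { Tag tag; std::vector<int> points_to; };
//
//   void flood_fill(std::vector<Chunk> &heap, std::vector<int> frontier) {
//     while (!frontier.empty()) {
//       int next = frontier.back();      // like FloodFillTag's pop
//       frontier.pop_back();
//       for (int succ : heap[next].points_to) {  // like ScanRangeForPointers
//         if (heap[succ].tag == kMarkedReachable) continue;
//         heap[succ].tag = kMarkedReachable;
//         frontier.push_back(succ);
//       }
//     }
//   }
//
// The real code then runs MarkIndirectlyLeakedCb so that a leaked chunk
// referenced only by other leaked chunks is reported as indirectly leaked
// rather than as an independent root leak.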
static void CollectLeaksCb(uptr chunk, void *arg) { CHECK(arg); LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (!m.allocated()) return; if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { u32 resolution = flags()->resolution; u32 stack_trace_id = 0; if (resolution > 0) { StackTrace stack = StackDepotGet(m.stack_trace_id()); stack.size = Min(stack.size, resolution); stack_trace_id = StackDepotPut(stack); } else { stack_trace_id = m.stack_trace_id(); } leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(), m.tag()); } } static void PrintMatchedSuppressions() { InternalMmapVector<Suppression *> matched(1); GetSuppressionContext()->GetMatched(&matched); if (!matched.size()) return; const char *line = "-----------------------------------------------------"; Printf("%s\n", line); Printf("Suppressions used:\n"); Printf(" count bytes template\n"); for (uptr i = 0; i < matched.size(); i++) Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)), matched[i]->weight, matched[i]->templ); Printf("%s\n\n", line); } struct CheckForLeaksParam { bool success; LeakReport leak_report; }; static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, void *arg) { CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg); CHECK(param); CHECK(!param->success); ClassifyAllChunks(suspended_threads); ForEachChunk(CollectLeaksCb, &param->leak_report); // Clean up for subsequent leak checks. This assumes we did not overwrite any // kIgnored tags. ForEachChunk(ResetTagsCb, nullptr); param->success = true; } static bool CheckForLeaks() { if (&__lsan_is_turned_off && __lsan_is_turned_off()) return false; EnsureMainThreadIDIsCorrect(); CheckForLeaksParam param; param.success = false; LockThreadRegistry(); LockAllocator(); DoStopTheWorld(CheckForLeaksCallback, &param); UnlockAllocator(); UnlockThreadRegistry(); if (!param.success) { Report("LeakSanitizer has encountered a fatal error.\n"); Report( "HINT: For debugging, try setting environment variable " "LSAN_OPTIONS=verbosity=1:log_threads=1\n"); Report( "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n"); Die(); } param.leak_report.ApplySuppressions(); uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount(); if (unsuppressed_count > 0) { Decorator d; Printf("\n" "=================================================================" "\n"); Printf("%s", d.Error()); Report("ERROR: LeakSanitizer: detected memory leaks\n"); Printf("%s", d.End()); param.leak_report.ReportTopLeaks(flags()->max_leaks); } if (common_flags()->print_suppressions) PrintMatchedSuppressions(); if (unsuppressed_count > 0) { param.leak_report.PrintSummary(); return true; } return false; } +static bool has_reported_leaks = false; +bool HasReportedLeaks() { return has_reported_leaks; } + void DoLeakCheck() { BlockingMutexLock l(&global_mutex); static bool already_done; if (already_done) return; already_done = true; - bool have_leaks = CheckForLeaks(); - if (!have_leaks) { - return; - } - if (common_flags()->exitcode) { - Die(); - } + has_reported_leaks = CheckForLeaks(); + if (has_reported_leaks) HandleLeaks(); } static int DoRecoverableLeakCheck() { BlockingMutexLock l(&global_mutex); bool have_leaks = CheckForLeaks(); return have_leaks ? 1 : 0; } static Suppression *GetSuppressionForAddr(uptr addr) { Suppression *s = nullptr; // Suppress by module name.
SuppressionContext *suppressions = GetSuppressionContext(); if (const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr)) if (suppressions->Match(module_name, kSuppressionLeak, &s)) return s; // Suppress by file or function name. SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); for (SymbolizedStack *cur = frames; cur; cur = cur->next) { if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) || suppressions->Match(cur->info.file, kSuppressionLeak, &s)) { break; } } frames->ClearAll(); return s; } static Suppression *GetSuppressionForStack(u32 stack_trace_id) { StackTrace stack = StackDepotGet(stack_trace_id); for (uptr i = 0; i < stack.size; i++) { Suppression *s = GetSuppressionForAddr( StackTrace::GetPreviousInstructionPc(stack.trace[i])); if (s) return s; } return nullptr; } ///// LeakReport implementation. ///// // A hard limit on the number of distinct leaks, to avoid quadratic complexity // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks // in real-world applications. // FIXME: Get rid of this limit by changing the implementation of LeakReport to // use a hash table. const uptr kMaxLeaksConsidered = 5000; void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size, ChunkTag tag) { CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); bool is_directly_leaked = (tag == kDirectlyLeaked); uptr i; for (i = 0; i < leaks_.size(); i++) { if (leaks_[i].stack_trace_id == stack_trace_id && leaks_[i].is_directly_leaked == is_directly_leaked) { leaks_[i].hit_count++; leaks_[i].total_size += leaked_size; break; } } if (i == leaks_.size()) { if (leaks_.size() == kMaxLeaksConsidered) return; Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id, is_directly_leaked, /* is_suppressed */ false }; leaks_.push_back(leak); } if (flags()->report_objects) { LeakedObject obj = {leaks_[i].id, chunk, leaked_size}; leaked_objects_.push_back(obj); } } static bool LeakComparator(const Leak &leak1, const Leak &leak2) { if (leak1.is_directly_leaked == leak2.is_directly_leaked) return leak1.total_size > leak2.total_size; else return leak1.is_directly_leaked; } void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { CHECK(leaks_.size() <= kMaxLeaksConsidered); Printf("\n"); if (leaks_.size() == kMaxLeaksConsidered) Printf("Too many leaks! Only the first %zu leaks encountered will be " "reported.\n", kMaxLeaksConsidered); uptr unsuppressed_count = UnsuppressedLeakCount(); if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) Printf("The %zu top leak(s):\n", num_leaks_to_report); InternalSort(&leaks_, leaks_.size(), LeakComparator); uptr leaks_reported = 0; for (uptr i = 0; i < leaks_.size(); i++) { if (leaks_[i].is_suppressed) continue; PrintReportForLeak(i); leaks_reported++; if (leaks_reported == num_leaks_to_report) break; } if (leaks_reported < unsuppressed_count) { uptr remaining = unsuppressed_count - leaks_reported; Printf("Omitting %zu more leak(s).\n", remaining); } } void LeakReport::PrintReportForLeak(uptr index) { Decorator d; Printf("%s", d.Leak()); Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n", leaks_[index].is_directly_leaked ? 
"Direct" : "Indirect", leaks_[index].total_size, leaks_[index].hit_count); Printf("%s", d.End()); PrintStackTraceById(leaks_[index].stack_trace_id); if (flags()->report_objects) { Printf("Objects leaked above:\n"); PrintLeakedObjectsForLeak(index); Printf("\n"); } } void LeakReport::PrintLeakedObjectsForLeak(uptr index) { u32 leak_id = leaks_[index].id; for (uptr j = 0; j < leaked_objects_.size(); j++) { if (leaked_objects_[j].leak_id == leak_id) Printf("%p (%zu bytes)\n", leaked_objects_[j].addr, leaked_objects_[j].size); } } void LeakReport::PrintSummary() { CHECK(leaks_.size() <= kMaxLeaksConsidered); uptr bytes = 0, allocations = 0; for (uptr i = 0; i < leaks_.size(); i++) { if (leaks_[i].is_suppressed) continue; bytes += leaks_[i].total_size; allocations += leaks_[i].hit_count; } InternalScopedString summary(kMaxSummaryLength); summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, allocations); ReportErrorSummary(summary.data()); } void LeakReport::ApplySuppressions() { for (uptr i = 0; i < leaks_.size(); i++) { Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); if (s) { s->weight += leaks_[i].total_size; atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) + leaks_[i].hit_count); leaks_[i].is_suppressed = true; } } } uptr LeakReport::UnsuppressedLeakCount() { uptr result = 0; for (uptr i = 0; i < leaks_.size(); i++) if (!leaks_[i].is_suppressed) result++; return result; } } // namespace __lsan #else // CAN_SANITIZE_LEAKS namespace __lsan { void InitCommonLsan() { } void DoLeakCheck() { } void DisableInThisThread() { } void EnableInThisThread() { } } #endif // CAN_SANITIZE_LEAKS using namespace __lsan; // NOLINT extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __lsan_ignore_object(const void *p) { #if CAN_SANITIZE_LEAKS if (!common_flags()->detect_leaks) return; // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not // locked. 
BlockingMutexLock l(&global_mutex); IgnoreObjectResult res = IgnoreObjectLocked(p); if (res == kIgnoreObjectInvalid) VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p); if (res == kIgnoreObjectAlreadyIgnored) VReport(1, "__lsan_ignore_object(): " "heap object at %p is already being ignored\n", p); if (res == kIgnoreObjectSuccess) VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p); #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_register_root_region(const void *begin, uptr size) { #if CAN_SANITIZE_LEAKS BlockingMutexLock l(&global_mutex); CHECK(root_regions); RootRegion region = {reinterpret_cast<uptr>(begin), size}; root_regions->push_back(region); VReport(1, "Registered root region at %p of size %llu\n", begin, size); #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_unregister_root_region(const void *begin, uptr size) { #if CAN_SANITIZE_LEAKS BlockingMutexLock l(&global_mutex); CHECK(root_regions); bool removed = false; for (uptr i = 0; i < root_regions->size(); i++) { RootRegion region = (*root_regions)[i]; if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) { removed = true; uptr last_index = root_regions->size() - 1; (*root_regions)[i] = (*root_regions)[last_index]; root_regions->pop_back(); VReport(1, "Unregistered root region at %p of size %llu\n", begin, size); break; } } if (!removed) { Report( "__lsan_unregister_root_region(): region at %p of size %llu has not " "been registered.\n", begin, size); Die(); } #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_disable() { #if CAN_SANITIZE_LEAKS __lsan::DisableInThisThread(); #endif } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_enable() { #if CAN_SANITIZE_LEAKS __lsan::EnableInThisThread(); #endif } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_do_leak_check() { #if CAN_SANITIZE_LEAKS if (common_flags()->detect_leaks) __lsan::DoLeakCheck(); #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE int __lsan_do_recoverable_leak_check() { #if CAN_SANITIZE_LEAKS if (common_flags()->detect_leaks) return __lsan::DoRecoverableLeakCheck(); #endif // CAN_SANITIZE_LEAKS return 0; } #if !SANITIZER_SUPPORTS_WEAK_HOOKS SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int __lsan_is_turned_off() { return 0; } SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *__lsan_default_suppressions() { return ""; } #endif } // extern "C" diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h index d93ac1b10919..31bf3eb1df42 100644 --- a/lib/lsan/lsan_common.h +++ b/lib/lsan/lsan_common.h @@ -1,253 +1,259 @@ //=-- lsan_common.h -------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // Private LSan header. // //===----------------------------------------------------------------------===// #ifndef LSAN_COMMON_H #define LSAN_COMMON_H #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_platform.h" #include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_symbolizer.h" // LeakSanitizer relies on some of Glibc's internals (e.g. TLS machinery) and is // thus supported on Linux only.
Also, LSan doesn't like 32-bit architectures // because the "small" (4-byte) pointer size leads to a high false negative // ratio on large leaks. But we still want to have it for some 32-bit arches // (e.g. x86), see https://github.com/google/sanitizers/issues/403. // To enable LeakSanitizer on a new architecture, one needs to implement the // internal_clone function as well as (probably) adjust the TLS machinery for // the new architecture inside the sanitizer library. #if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \ (SANITIZER_WORDSIZE == 64) && \ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \ defined(__powerpc64__)) #define CAN_SANITIZE_LEAKS 1 #elif defined(__i386__) && \ (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) #define CAN_SANITIZE_LEAKS 1 #elif defined(__arm__) && \ SANITIZER_LINUX && !SANITIZER_ANDROID #define CAN_SANITIZE_LEAKS 1 #else #define CAN_SANITIZE_LEAKS 0 #endif namespace __sanitizer { class FlagParser; struct DTLS; } namespace __lsan { // Chunk tags. enum ChunkTag { kDirectlyLeaked = 0, // default kIndirectlyLeaked = 1, kReachable = 2, kIgnored = 3 }; const u32 kInvalidTid = (u32) -1; struct Flags { #define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name; #include "lsan_flags.inc" #undef LSAN_FLAG void SetDefaults(); uptr pointer_alignment() const { return use_unaligned ? 1 : sizeof(uptr); } }; extern Flags lsan_flags; inline Flags *flags() { return &lsan_flags; } void RegisterLsanFlags(FlagParser *parser, Flags *f); struct Leak { u32 id; uptr hit_count; uptr total_size; u32 stack_trace_id; bool is_directly_leaked; bool is_suppressed; }; struct LeakedObject { u32 leak_id; uptr addr; uptr size; }; // Aggregates leaks by stack trace prefix. class LeakReport { public: LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {} void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size, ChunkTag tag); void ReportTopLeaks(uptr max_leaks); void PrintSummary(); void ApplySuppressions(); uptr UnsuppressedLeakCount(); private: void PrintReportForLeak(uptr index); void PrintLeakedObjectsForLeak(uptr index); u32 next_id_; InternalMmapVector<Leak> leaks_; InternalMmapVector<LeakedObject> leaked_objects_; }; typedef InternalMmapVector<uptr> Frontier; // Platform-specific functions. void InitializePlatformSpecificModules(); void ProcessGlobalRegions(Frontier *frontier); void ProcessPlatformSpecificAllocations(Frontier *frontier); struct RootRegion { uptr begin; uptr size; }; InternalMmapVector<RootRegion> const *GetRootRegions(); void ScanRootRegion(Frontier *frontier, RootRegion const &region, uptr region_begin, uptr region_end, bool is_readable); // Run stoptheworld while holding any platform-specific locks. void DoStopTheWorld(StopTheWorldCallback callback, void* argument); void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, const char *region_type, ChunkTag tag); void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier); enum IgnoreObjectResult { kIgnoreObjectSuccess, kIgnoreObjectAlreadyIgnored, kIgnoreObjectInvalid }; // Functions called from the parent tool. void InitCommonLsan(); void DoLeakCheck(); void DisableCounterUnderflow(); bool DisabledInThisThread(); // Used to implement __lsan::ScopedDisabler. void DisableInThisThread(); void EnableInThisThread(); // Can be used to ignore memory allocated by an intercepted // function.
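// [Editorial sketch, not part of the patch] The thread-local disable counter
// declared above nests, which is what makes the RAII wrapper that follows
// safe to use recursively. The public mirror of the same mechanism is the
// __lsan_disable()/__lsan_enable() pair (or __lsan::ScopedDisabler from the
// public header); a hypothetical caller:
//
//   #include <sanitizer/lsan_interface.h>
//
//   void allocate_untracked() {
//     __lsan_disable();
//     void *p = malloc(64);   // tagged kIgnored at allocation time
//     __lsan_enable();
//     (void)p;                // never reported, even though it leaks
//   }
//
// An unmatched __lsan_enable() trips DisableCounterUnderflow() and aborts
// with "Unmatched call to __lsan_enable()."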
struct ScopedInterceptorDisabler { ScopedInterceptorDisabler() { DisableInThisThread(); } ~ScopedInterceptorDisabler() { EnableInThisThread(); } }; // According to the Itanium C++ ABI, the array cookie is one word containing // the size of the allocated array. static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size, uptr addr) { return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr && *reinterpret_cast<uptr *>(chunk_beg) == 0; } // According to the ARM C++ ABI, the array cookie consists of two words: // struct array_cookie { // std::size_t element_size; // element_size != 0 // std::size_t element_count; // }; static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size, uptr addr) { return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr && *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0; } // Special case for "new T[0]" where T is a type with a DTOR. // new T[0] will allocate a cookie (one or two words) for the array size (0) // and return a pointer to the end of the allocated chunk, so the user pointer // points one past the cookie. The actual cookie layout // varies between platforms according to their C++ ABI implementation. inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size, uptr addr) { #if defined(__arm__) return IsARMABIArrayCookie(chunk_beg, chunk_size, addr); #else return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr); #endif } // The following must be implemented in the parent tool. void ForEachChunk(ForEachChunkCallback callback, void *arg); // Returns the address range occupied by the global allocator object. void GetAllocatorGlobalRange(uptr *begin, uptr *end); // Wrappers for allocator's ForceLock()/ForceUnlock(). void LockAllocator(); void UnlockAllocator(); // Returns true if [addr, addr + sizeof(void *)) is poisoned. bool WordIsPoisoned(uptr addr); // Wrappers for ThreadRegistry access. void LockThreadRegistry(); void UnlockThreadRegistry(); bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls); void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, void *arg); // If called from the main thread, updates the main thread's TID in the thread // registry. We need this to handle processes that fork() without a subsequent // exec(), which invalidates the recorded TID. To update it, we must call // gettid() from the main thread. Our solution is to call this function before // leak checking and also before every call to pthread_create() (to handle cases // where leak checking is initiated from a non-main thread). void EnsureMainThreadIDIsCorrect(); // If p points into a chunk that has been allocated to the user, returns its // user-visible address. Otherwise, returns 0. uptr PointsIntoChunk(void *p); // Returns address of user-visible chunk contained in this allocator chunk. uptr GetUserBegin(uptr chunk); // Helper for __lsan_ignore_object(). IgnoreObjectResult IgnoreObjectLocked(const void *p); // Return the linker module, if valid for the platform. LoadedModule *GetLinker(); +// Return true if LSan has finished leak checking and reported leaks. +bool HasReportedLeaks(); + +// Run platform-specific leak handlers. +void HandleLeaks(); + // Wrapper for chunk metadata operations. class LsanMetadata { public: // Constructor accepts address of user-visible chunk.
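// [Editorial sketch, not part of the patch] IgnoreObjectLocked, declared
// above, backs the public one-shot alternative to the disable/enable pair:
// __lsan_ignore_object() excludes a single already-allocated object (and
// everything reachable from it, via CollectIgnoredCb) from the report. A
// hypothetical caller:
//
//   #include <sanitizer/lsan_interface.h>
//
//   void keep_forever() {
//     void *cache = malloc(4096);    // intentionally never freed
//     __lsan_ignore_object(cache);   // tag it kIgnored, not a leak
//   }
//
// Because ignored chunks also seed the flood-fill frontier, heap objects
// reachable only through an ignored object are not reported either.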
explicit LsanMetadata(uptr chunk); bool allocated() const; ChunkTag tag() const; void set_tag(ChunkTag value); uptr requested_size() const; u32 stack_trace_id() const; private: void *metadata_; }; } // namespace __lsan extern "C" { SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int __lsan_is_turned_off(); SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *__lsan_default_suppressions(); } // extern "C" #endif // LSAN_COMMON_H diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc index c903be42d1e7..5042c7b3ada5 100644 --- a/lib/lsan/lsan_common_linux.cc +++ b/lib/lsan/lsan_common_linux.cc @@ -1,125 +1,132 @@ //=-- lsan_common_linux.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // Implementation of common leak checking functionality. Linux-specific code. // //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #include "lsan_common.h" #if CAN_SANITIZE_LEAKS && SANITIZER_LINUX #include <link.h> #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_stackdepot.h" namespace __lsan { static const char kLinkerName[] = "ld"; static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64); static LoadedModule *linker = nullptr; static bool IsLinker(const char* full_name) { return LibraryNameIs(full_name, kLinkerName); } __attribute__((tls_model("initial-exec"))) THREADLOCAL int disable_counter; bool DisabledInThisThread() { return disable_counter > 0; } void DisableInThisThread() { disable_counter++; } void EnableInThisThread() { if (disable_counter == 0) { DisableCounterUnderflow(); } disable_counter--; } void InitializePlatformSpecificModules() { ListOfModules modules; modules.init(); for (LoadedModule &module : modules) { if (!IsLinker(module.full_name())) continue; if (linker == nullptr) { linker = reinterpret_cast<LoadedModule *>(linker_placeholder); *linker = module; module = LoadedModule(); } else { VReport(1, "LeakSanitizer: Multiple modules match \"%s\". " "TLS will not be handled correctly.\n", kLinkerName); linker->clear(); linker = nullptr; return; } } if (linker == nullptr) { VReport(1, "LeakSanitizer: Dynamic linker not found. " "TLS will not be handled correctly.\n"); } } static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size, void *data) { Frontier *frontier = reinterpret_cast<Frontier *>(data); for (uptr j = 0; j < info->dlpi_phnum; j++) { const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]); // We're looking for .data and .bss sections, which reside in writable, // loadable segments. if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) || (phdr->p_memsz == 0)) continue; uptr begin = info->dlpi_addr + phdr->p_vaddr; uptr end = begin + phdr->p_memsz; ScanGlobalRange(begin, end, frontier); } return 0; } // Scans global variables for heap pointers.
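// [Editorial sketch, not part of the patch] The callback above keeps only
// writable PT_LOAD segments, which is where .data and .bss live. The same
// walk can be reproduced with nothing but <link.h>:
//
//   #include <link.h>
//   #include <stdio.h>
//
//   static int dump_writable(struct dl_phdr_info *info, size_t size, void *d) {
//     for (unsigned j = 0; j < info->dlpi_phnum; j++) {
//       const ElfW(Phdr) *ph = &info->dlpi_phdr[j];
//       if (ph->p_type != PT_LOAD || !(ph->p_flags & PF_W) || !ph->p_memsz)
//         continue;
//       printf("%s: %p + %zu\n", info->dlpi_name,
//              (void *)(info->dlpi_addr + ph->p_vaddr), (size_t)ph->p_memsz);
//     }
//     return 0;  // zero means: keep iterating over loaded objects
//   }
//   // invoke with: dl_iterate_phdr(dump_writable, NULL);
//
// Returning non-zero from the callback stops the iteration, which is how
// DoStopTheWorldCallback below arranges to run StopTheWorld() exactly once.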
void ProcessGlobalRegions(Frontier *frontier) { if (!flags()->use_globals) return; dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier); } LoadedModule *GetLinker() { return linker; } void ProcessPlatformSpecificAllocations(Frontier *frontier) {} struct DoStopTheWorldParam { StopTheWorldCallback callback; void *argument; }; +// While calling Die() here is undefined behavior and can potentially +// cause race conditions, it isn't possible to intercept exit on Linux, +// so we have no choice but to call Die() from the atexit handler. +void HandleLeaks() { + if (common_flags()->exitcode) Die(); +} + static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size, void *data) { DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data); StopTheWorld(param->callback, param->argument); return 1; } // LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one // of the threads is frozen while holding the libdl lock, the tracer will hang // in dl_iterate_phdr() forever. // Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the // tracer task and the thread that spawned it. Thus, if we run the tracer task // while holding the libdl lock in the parent thread, we can safely reenter it // in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr() // callback in the parent thread. void DoStopTheWorld(StopTheWorldCallback callback, void *argument) { DoStopTheWorldParam param = {callback, argument}; dl_iterate_phdr(DoStopTheWorldCallback, &param); } } // namespace __lsan #endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX diff --git a/lib/lsan/lsan_common_mac.cc b/lib/lsan/lsan_common_mac.cc index f87c6b7e0425..ade94340ae81 100644 --- a/lib/lsan/lsan_common_mac.cc +++ b/lib/lsan/lsan_common_mac.cc @@ -1,173 +1,178 @@ //=-- lsan_common_mac.cc --------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // Implementation of common leak checking functionality. Darwin-specific code.
// //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #include "lsan_common.h" #if CAN_SANITIZE_LEAKS && SANITIZER_MAC #include "sanitizer_common/sanitizer_allocator_internal.h" #include "lsan_allocator.h" #include <pthread.h> #include <mach/mach.h> namespace __lsan { typedef struct { int disable_counter; u32 current_thread_id; AllocatorCache cache; } thread_local_data_t; static pthread_key_t key; static pthread_once_t key_once = PTHREAD_ONCE_INIT; // The main thread destructor requires the current thread id, // so we can't destroy it until it's been used and reset to the invalid tid. void restore_tid_data(void *ptr) { thread_local_data_t *data = (thread_local_data_t *)ptr; if (data->current_thread_id != kInvalidTid) pthread_setspecific(key, data); } static void make_tls_key() { CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0); } static thread_local_data_t *get_tls_val(bool alloc) { pthread_once(&key_once, make_tls_key); thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key); if (ptr == NULL && alloc) { ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr)); ptr->disable_counter = 0; ptr->current_thread_id = kInvalidTid; ptr->cache = AllocatorCache(); pthread_setspecific(key, ptr); } return ptr; } bool DisabledInThisThread() { thread_local_data_t *data = get_tls_val(false); return data ? data->disable_counter > 0 : false; } void DisableInThisThread() { ++get_tls_val(true)->disable_counter; } void EnableInThisThread() { int *disable_counter = &get_tls_val(true)->disable_counter; if (*disable_counter == 0) { DisableCounterUnderflow(); } --*disable_counter; } u32 GetCurrentThread() { thread_local_data_t *data = get_tls_val(false); return data ? data->current_thread_id : kInvalidTid; } void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; } AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; } LoadedModule *GetLinker() { return nullptr; } // Required on Linux for initialization of TLS behavior, but should not be // required on Darwin. void InitializePlatformSpecificModules() {} // Scans global variables for heap pointers. void ProcessGlobalRegions(Frontier *frontier) { MemoryMappingLayout memory_mapping(false); InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128); memory_mapping.DumpListOfModules(&modules); for (uptr i = 0; i < modules.size(); ++i) { // Even when global scanning is disabled, we still need to scan // system libraries for stashed pointers. if (!flags()->use_globals && modules[i].instrumented()) continue; for (const __sanitizer::LoadedModule::AddressRange &range : modules[i].ranges()) { // Sections storing global variables are writable and non-executable. if (range.executable || !range.writable) continue; ScanGlobalRange(range.beg, range.end, frontier); } } } void ProcessPlatformSpecificAllocations(Frontier *frontier) { mach_port_name_t port; if (task_for_pid(mach_task_self(), internal_getpid(), &port) != KERN_SUCCESS) { return; } unsigned depth = 1; vm_size_t size = 0; vm_address_t address = 0; kern_return_t err = KERN_SUCCESS; mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; InternalMmapVector<RootRegion> const *root_regions = GetRootRegions(); while (err == KERN_SUCCESS) { struct vm_region_submap_info_64 info; err = vm_region_recurse_64(port, &address, &size, &depth, (vm_region_info_t)&info, &count); uptr end_address = address + size; // libxpc stashes some pointers in the Kernel Alloc Once page, // make sure not to report those as leaks.
if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) { ScanRangeForPointers(address, end_address, frontier, "GLOBAL", kReachable); // Recursing over the full memory map is very slow, break out // early if we don't need the full iteration. if (!flags()->use_root_regions || !root_regions->size()) break; } // This additional root region scan is required on Darwin in order to // detect root regions contained within mmap'd memory regions, because // the Darwin implementation of sanitizer_procmaps traverses images // as loaded by dyld, and not the complete set of all memory regions. // // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same // behavior as sanitizer_procmaps_linux and traverses all memory regions if (flags()->use_root_regions) { for (uptr i = 0; i < root_regions->size(); i++) { ScanRootRegion(frontier, (*root_regions)[i], address, end_address, info.protection & kProtectionRead); } } address = end_address; } } +// On Darwin, we can intercept _exit gracefully, and return a failing exit code +// if required at that point. Calling Die() here is undefined behavior and +// causes rare race conditions. +void HandleLeaks() {} + void DoStopTheWorld(StopTheWorldCallback callback, void *argument) { StopTheWorld(callback, argument); } } // namespace __lsan #endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc index 7d514402ad4b..168868b012bc 100644 --- a/lib/lsan/lsan_interceptors.cc +++ b/lib/lsan/lsan_interceptors.cc @@ -1,381 +1,387 @@ //=-- lsan_interceptors.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // Interceptors for standalone LSan. // //===----------------------------------------------------------------------===// #include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" #include "lsan.h" #include "lsan_allocator.h" #include "lsan_common.h" #include "lsan_thread.h" #include <stddef.h> using namespace __lsan; extern "C" { int pthread_attr_init(void *attr); int pthread_attr_destroy(void *attr); int pthread_attr_getdetachstate(void *attr, int *v); int pthread_key_create(unsigned *key, void (*destructor)(void* v)); int pthread_setspecific(unsigned key, const void *v); } ///// Malloc/free interceptors. ///// namespace std { struct nothrow_t; } #if !SANITIZER_MAC INTERCEPTOR(void*, malloc, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; return lsan_malloc(size, stack); } INTERCEPTOR(void, free, void *p) { ENSURE_LSAN_INITED; lsan_free(p); } INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { if (lsan_init_is_running) { // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
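The branch continued just below implements the standard dlsym bootstrap trick: dlsym itself allocates, so the first few calloc() calls must be served before REAL(calloc) exists. Sketched in isolation (editor's example; names are illustrative, not the runtime's), the pattern is a static bump pool:

    #include <cstddef>

    // Hypothetical standalone version of the pool the interceptor uses below.
    static unsigned char early_pool[1024];
    static size_t early_used = 0;

    static void *early_calloc(size_t nmemb, size_t size) {
      size_t bytes = nmemb * size;  // illustrative only: real code must guard overflow
      if (bytes > sizeof(early_pool) - early_used) return nullptr;
      void *mem = &early_pool[early_used];
      early_used += bytes;
      return mem;  // static storage is zero-initialized, satisfying calloc's contract
    }

The matching free() must recognize and ignore pointers into the pool; that is exactly what IsInDlsymAllocPool() does in the MSan variant of this hack later in the patch.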
const uptr kCallocPoolSize = 1024; static uptr calloc_memory_for_dlsym[kCallocPoolSize]; static uptr allocated; uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize; void *mem = (void*)&calloc_memory_for_dlsym[allocated]; allocated += size_in_words; CHECK(allocated < kCallocPoolSize); return mem; } ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; return lsan_calloc(nmemb, size, stack); } INTERCEPTOR(void*, realloc, void *q, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; return lsan_realloc(q, size, stack); } INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; *memptr = lsan_memalign(alignment, size, stack); // FIXME: Return ENOMEM if user requested more than max alloc size. return 0; } INTERCEPTOR(void*, valloc, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; return lsan_valloc(size, stack); } #endif #if SANITIZER_INTERCEPT_MEMALIGN INTERCEPTOR(void*, memalign, uptr alignment, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; return lsan_memalign(alignment, size, stack); } #define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign) INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; void *res = lsan_memalign(alignment, size, stack); DTLS_on_libc_memalign(res, size); return res; } #define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign) #else #define LSAN_MAYBE_INTERCEPT_MEMALIGN #define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN #endif // SANITIZER_INTERCEPT_MEMALIGN #if SANITIZER_INTERCEPT_ALIGNED_ALLOC INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; return lsan_memalign(alignment, size, stack); } #define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc) #else #define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC #endif #if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { ENSURE_LSAN_INITED; return GetMallocUsableSize(ptr); } #define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \ INTERCEPT_FUNCTION(malloc_usable_size) #else #define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE #endif #if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO struct fake_mallinfo { int x[10]; }; INTERCEPTOR(struct fake_mallinfo, mallinfo, void) { struct fake_mallinfo res; internal_memset(&res, 0, sizeof(res)); return res; } #define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo) INTERCEPTOR(int, mallopt, int cmd, int value) { return -1; } #define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt) #else #define LSAN_MAYBE_INTERCEPT_MALLINFO #define LSAN_MAYBE_INTERCEPT_MALLOPT #endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO #if SANITIZER_INTERCEPT_PVALLOC INTERCEPTOR(void*, pvalloc, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; uptr PageSize = GetPageSizeCached(); size = RoundUpTo(size, PageSize); if (size == 0) { // pvalloc(0) should allocate one page. 
size = PageSize; } return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory); } #define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc) #else #define LSAN_MAYBE_INTERCEPT_PVALLOC #endif // SANITIZER_INTERCEPT_PVALLOC #if SANITIZER_INTERCEPT_CFREE INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free)); #define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree) #else #define LSAN_MAYBE_INTERCEPT_CFREE #endif // SANITIZER_INTERCEPT_CFREE #if SANITIZER_INTERCEPT_MCHECK_MPROBE INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) { return 0; } INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) { return 0; } INTERCEPTOR(int, mprobe, void *ptr) { return 0; } #endif // SANITIZER_INTERCEPT_MCHECK_MPROBE // TODO(alekseys): throw std::bad_alloc instead of dying on OOM. #define OPERATOR_NEW_BODY(nothrow) \ ENSURE_LSAN_INITED; \ GET_STACK_TRACE_MALLOC; \ void *res = Allocate(stack, size, 1, kAlwaysClearMemory);\ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ return res; #define OPERATOR_DELETE_BODY \ ENSURE_LSAN_INITED; \ Deallocate(ptr); // On OS X it's not enough to just provide our own 'operator new' and // 'operator delete' implementations, because they're going to be in the runtime // dylib, and the main executable will depend on both the runtime dylib and // libstdc++, each of which has its own implementation of new and delete. // To make sure that C++ allocation/deallocation operators are overridden on // OS X we need to intercept them using their mangled names. #if !SANITIZER_MAC INTERCEPTOR_ATTRIBUTE void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } INTERCEPTOR_ATTRIBUTE void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } INTERCEPTOR_ATTRIBUTE void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(true /*nothrow*/); } INTERCEPTOR_ATTRIBUTE void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(true /*nothrow*/); } INTERCEPTOR_ATTRIBUTE void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE void operator delete[](void *ptr, std::nothrow_t const &) { OPERATOR_DELETE_BODY; } #else // SANITIZER_MAC INTERCEPTOR(void *, _Znwm, size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } INTERCEPTOR(void *, _Znam, size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(true /*nothrow*/); } INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(true /*nothrow*/); } INTERCEPTOR(void, _ZdlPv, void *ptr) { OPERATOR_DELETE_BODY; } INTERCEPTOR(void, _ZdaPv, void *ptr) { OPERATOR_DELETE_BODY; } INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } #endif // !SANITIZER_MAC ///// Thread initialization and finalization.
///// static unsigned g_thread_finalize_key; static void thread_finalize(void *v) { uptr iter = (uptr)v; if (iter > 1) { if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) { Report("LeakSanitizer: failed to set thread key.\n"); Die(); } return; } ThreadFinish(); } struct ThreadParam { void *(*callback)(void *arg); void *param; atomic_uintptr_t tid; }; extern "C" void *__lsan_thread_start_func(void *arg) { ThreadParam *p = (ThreadParam*)arg; void* (*callback)(void *arg) = p->callback; void *param = p->param; // Wait until the last iteration to maximize the chance that we are the last // destructor to run. if (pthread_setspecific(g_thread_finalize_key, (void*)GetPthreadDestructorIterations())) { Report("LeakSanitizer: failed to set thread key.\n"); Die(); } int tid = 0; while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) internal_sched_yield(); SetCurrentThread(tid); ThreadStart(tid, GetTid()); atomic_store(&p->tid, 0, memory_order_release); return callback(param); } INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void *), void *param) { ENSURE_LSAN_INITED; EnsureMainThreadIDIsCorrect(); __sanitizer_pthread_attr_t myattr; if (!attr) { pthread_attr_init(&myattr); attr = &myattr; } AdjustStackSize(attr); int detached = 0; pthread_attr_getdetachstate(attr, &detached); ThreadParam p; p.callback = callback; p.param = param; atomic_store(&p.tid, 0, memory_order_relaxed); int res; { // Ignore all allocations made by pthread_create: thread stack/TLS may be // stored by pthread for future reuse even after thread destruction, and // the linked list it's stored in doesn't even hold valid pointers to the // objects, the latter are calculated by obscure pointer arithmetic. ScopedInterceptorDisabler disabler; res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p); } if (res == 0) { int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, IsStateDetached(detached)); CHECK_NE(tid, 0); atomic_store(&p.tid, tid, memory_order_release); while (atomic_load(&p.tid, memory_order_acquire) != 0) internal_sched_yield(); } if (attr == &myattr) pthread_attr_destroy(&myattr); return res; } INTERCEPTOR(int, pthread_join, void *th, void **ret) { ENSURE_LSAN_INITED; int tid = ThreadTid((uptr)th); int res = REAL(pthread_join)(th, ret); if (res == 0) ThreadJoin(tid); return res; } +INTERCEPTOR(void, _exit, int status) { + if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode; + REAL(_exit)(status); +} + namespace __lsan { void InitializeInterceptors() { INTERCEPT_FUNCTION(malloc); INTERCEPT_FUNCTION(free); LSAN_MAYBE_INTERCEPT_CFREE; INTERCEPT_FUNCTION(calloc); INTERCEPT_FUNCTION(realloc); LSAN_MAYBE_INTERCEPT_MEMALIGN; LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN; LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC; INTERCEPT_FUNCTION(posix_memalign); INTERCEPT_FUNCTION(valloc); LSAN_MAYBE_INTERCEPT_PVALLOC; LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE; LSAN_MAYBE_INTERCEPT_MALLINFO; LSAN_MAYBE_INTERCEPT_MALLOPT; INTERCEPT_FUNCTION(pthread_create); INTERCEPT_FUNCTION(pthread_join); + INTERCEPT_FUNCTION(_exit); if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { Report("LeakSanitizer: failed to create thread key.\n"); Die(); } } } // namespace __lsan diff --git a/lib/msan/msan.h b/lib/msan/msan.h index 0709260eebe2..fa9c15b88bef 100644 --- a/lib/msan/msan.h +++ b/lib/msan/msan.h @@ -1,390 +1,398 @@ //===-- msan.h --------------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the 
University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of MemorySanitizer. // // Private MSan header. //===----------------------------------------------------------------------===// #ifndef MSAN_H #define MSAN_H #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "msan_interface_internal.h" #include "msan_flags.h" #include "ubsan/ubsan_platform.h" #ifndef MSAN_REPLACE_OPERATORS_NEW_AND_DELETE # define MSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1 #endif #ifndef MSAN_CONTAINS_UBSAN # define MSAN_CONTAINS_UBSAN CAN_SANITIZE_UB #endif struct MappingDesc { uptr start; uptr end; enum Type { INVALID, APP, SHADOW, ORIGIN } type; const char *name; }; #if SANITIZER_LINUX && defined(__mips64) // MIPS64 maps: // - 0x0000000000-0x0200000000: Program own segments // - 0xa200000000-0xc000000000: PIE program segments // - 0xe200000000-0xffffffffff: libraries segments. const MappingDesc kMemoryLayout[] = { {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"}, {0x000200000000ULL, 0x002200000000ULL, MappingDesc::INVALID, "invalid"}, {0x002200000000ULL, 0x004000000000ULL, MappingDesc::SHADOW, "shadow-2"}, {0x004000000000ULL, 0x004200000000ULL, MappingDesc::INVALID, "invalid"}, {0x004200000000ULL, 0x006000000000ULL, MappingDesc::ORIGIN, "origin-2"}, {0x006000000000ULL, 0x006200000000ULL, MappingDesc::INVALID, "invalid"}, {0x006200000000ULL, 0x008000000000ULL, MappingDesc::SHADOW, "shadow-3"}, {0x008000000000ULL, 0x008200000000ULL, MappingDesc::SHADOW, "shadow-1"}, {0x008200000000ULL, 0x00a000000000ULL, MappingDesc::ORIGIN, "origin-3"}, {0x00a000000000ULL, 0x00a200000000ULL, MappingDesc::ORIGIN, "origin-1"}, {0x00a200000000ULL, 0x00c000000000ULL, MappingDesc::APP, "app-2"}, {0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"}, {0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}}; #define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL) #define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL) #elif SANITIZER_LINUX && defined(__aarch64__) // The mapping describes the 39-bit, 42-bit, and 48-bit VMAs. AArch64 // maps: // - 0x0000000000000-0x0000010000000: 39/42/48-bits program own segments // - 0x0005500000000-0x0005600000000: 39-bits PIE program segments // - 0x0007f80000000-0x0007fffffffff: 39-bits libraries segments // - 0x002aa00000000-0x002ab00000000: 42-bits PIE program segments // - 0x003ff00000000-0x003ffffffffff: 42-bits libraries segments // - 0x0aaaaa0000000-0x0aaab00000000: 48-bits PIE program segments // - 0xffff000000000-0x1000000000000: 48-bits libraries segments // It is fragmented into multiple segments to increase the memory available // on the 42-bit VMA (12.21% of the total VMA available for 42 bits, and // 13.28% for 39 bits). The 48-bit segments only cover the usual PIE/default // segments plus some more segments (262144GB total, 0.39% total VMA).
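As a worked example of the MIPS64 scheme above (editor's sketch, not part of the patch): an address in "app-2" XORs into "shadow-2", and the origin offset then lands in "origin-2". The aarch64 layout table follows below.

    typedef unsigned long long u64;
    constexpr u64 kApp    = 0x00a300000000ULL;          // inside app-2
    constexpr u64 kShadow = kApp ^ 0x8000000000ULL;     // 0x002300000000, inside shadow-2
    constexpr u64 kOrigin = kShadow + 0x2000000000ULL;  // 0x004300000000, inside origin-2
    static_assert(0x002200000000ULL <= kShadow && kShadow < 0x004000000000ULL, "in shadow-2");
    static_assert(0x004200000000ULL <= kOrigin && kOrigin < 0x006000000000ULL, "in origin-2");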
const MappingDesc kMemoryLayout[] = { {0x00000000000ULL, 0x01000000000ULL, MappingDesc::INVALID, "invalid"}, {0x01000000000ULL, 0x02000000000ULL, MappingDesc::SHADOW, "shadow-2"}, {0x02000000000ULL, 0x03000000000ULL, MappingDesc::ORIGIN, "origin-2"}, {0x03000000000ULL, 0x04000000000ULL, MappingDesc::SHADOW, "shadow-1"}, {0x04000000000ULL, 0x05000000000ULL, MappingDesc::ORIGIN, "origin-1"}, {0x05000000000ULL, 0x06000000000ULL, MappingDesc::APP, "app-1"}, {0x06000000000ULL, 0x07000000000ULL, MappingDesc::INVALID, "invalid"}, {0x07000000000ULL, 0x08000000000ULL, MappingDesc::APP, "app-2"}, {0x08000000000ULL, 0x09000000000ULL, MappingDesc::INVALID, "invalid"}, // The mappings below are used only for the 42-bit VMA. {0x09000000000ULL, 0x0A000000000ULL, MappingDesc::SHADOW, "shadow-3"}, {0x0A000000000ULL, 0x0B000000000ULL, MappingDesc::ORIGIN, "origin-3"}, {0x0B000000000ULL, 0x0F000000000ULL, MappingDesc::INVALID, "invalid"}, {0x0F000000000ULL, 0x10000000000ULL, MappingDesc::APP, "app-3"}, {0x10000000000ULL, 0x11000000000ULL, MappingDesc::INVALID, "invalid"}, {0x11000000000ULL, 0x12000000000ULL, MappingDesc::APP, "app-4"}, {0x12000000000ULL, 0x17000000000ULL, MappingDesc::INVALID, "invalid"}, {0x17000000000ULL, 0x18000000000ULL, MappingDesc::SHADOW, "shadow-4"}, {0x18000000000ULL, 0x19000000000ULL, MappingDesc::ORIGIN, "origin-4"}, {0x19000000000ULL, 0x20000000000ULL, MappingDesc::INVALID, "invalid"}, {0x20000000000ULL, 0x21000000000ULL, MappingDesc::APP, "app-5"}, {0x21000000000ULL, 0x26000000000ULL, MappingDesc::INVALID, "invalid"}, {0x26000000000ULL, 0x27000000000ULL, MappingDesc::SHADOW, "shadow-5"}, {0x27000000000ULL, 0x28000000000ULL, MappingDesc::ORIGIN, "origin-5"}, {0x28000000000ULL, 0x29000000000ULL, MappingDesc::SHADOW, "shadow-7"}, {0x29000000000ULL, 0x2A000000000ULL, MappingDesc::ORIGIN, "origin-7"}, {0x2A000000000ULL, 0x2B000000000ULL, MappingDesc::APP, "app-6"}, {0x2B000000000ULL, 0x2C000000000ULL, MappingDesc::INVALID, "invalid"}, {0x2C000000000ULL, 0x2D000000000ULL, MappingDesc::SHADOW, "shadow-6"}, {0x2D000000000ULL, 0x2E000000000ULL, MappingDesc::ORIGIN, "origin-6"}, {0x2E000000000ULL, 0x2F000000000ULL, MappingDesc::APP, "app-7"}, {0x2F000000000ULL, 0x39000000000ULL, MappingDesc::INVALID, "invalid"}, {0x39000000000ULL, 0x3A000000000ULL, MappingDesc::SHADOW, "shadow-9"}, {0x3A000000000ULL, 0x3B000000000ULL, MappingDesc::ORIGIN, "origin-9"}, {0x3B000000000ULL, 0x3C000000000ULL, MappingDesc::APP, "app-8"}, {0x3C000000000ULL, 0x3D000000000ULL, MappingDesc::INVALID, "invalid"}, {0x3D000000000ULL, 0x3E000000000ULL, MappingDesc::SHADOW, "shadow-8"}, {0x3E000000000ULL, 0x3F000000000ULL, MappingDesc::ORIGIN, "origin-8"}, {0x3F000000000ULL, 0x40000000000ULL, MappingDesc::APP, "app-9"}, // The mappings below are used only for the 48-bit VMA. // TODO(unknown): the 48-bit mapping only covers the usual PIE and non-PIE // segments plus some more segments totaling 262144GB of VMA (which covers // only 0.32% of all 48-bit VMA). Memory availability can be increased by // adding multiple application segments, as the 39- and 42-bit mappings do.
{0x0040000000000ULL, 0x0041000000000ULL, MappingDesc::INVALID, "invalid"}, {0x0041000000000ULL, 0x0042000000000ULL, MappingDesc::APP, "app-10"}, {0x0042000000000ULL, 0x0047000000000ULL, MappingDesc::INVALID, "invalid"}, {0x0047000000000ULL, 0x0048000000000ULL, MappingDesc::SHADOW, "shadow-10"}, {0x0048000000000ULL, 0x0049000000000ULL, MappingDesc::ORIGIN, "origin-10"}, {0x0049000000000ULL, 0x0050000000000ULL, MappingDesc::INVALID, "invalid"}, {0x0050000000000ULL, 0x0051000000000ULL, MappingDesc::APP, "app-11"}, {0x0051000000000ULL, 0x0056000000000ULL, MappingDesc::INVALID, "invalid"}, {0x0056000000000ULL, 0x0057000000000ULL, MappingDesc::SHADOW, "shadow-11"}, {0x0057000000000ULL, 0x0058000000000ULL, MappingDesc::ORIGIN, "origin-11"}, {0x0058000000000ULL, 0x0059000000000ULL, MappingDesc::APP, "app-12"}, {0x0059000000000ULL, 0x005E000000000ULL, MappingDesc::INVALID, "invalid"}, {0x005E000000000ULL, 0x005F000000000ULL, MappingDesc::SHADOW, "shadow-12"}, {0x005F000000000ULL, 0x0060000000000ULL, MappingDesc::ORIGIN, "origin-12"}, {0x0060000000000ULL, 0x0061000000000ULL, MappingDesc::INVALID, "invalid"}, {0x0061000000000ULL, 0x0062000000000ULL, MappingDesc::APP, "app-13"}, {0x0062000000000ULL, 0x0067000000000ULL, MappingDesc::INVALID, "invalid"}, {0x0067000000000ULL, 0x0068000000000ULL, MappingDesc::SHADOW, "shadow-13"}, {0x0068000000000ULL, 0x0069000000000ULL, MappingDesc::ORIGIN, "origin-13"}, {0x0069000000000ULL, 0x0AAAAA0000000ULL, MappingDesc::INVALID, "invalid"}, {0x0AAAAA0000000ULL, 0x0AAAB00000000ULL, MappingDesc::APP, "app-14"}, {0x0AAAB00000000ULL, 0x0AACAA0000000ULL, MappingDesc::INVALID, "invalid"}, {0x0AACAA0000000ULL, 0x0AACB00000000ULL, MappingDesc::SHADOW, "shadow-14"}, {0x0AACB00000000ULL, 0x0AADAA0000000ULL, MappingDesc::INVALID, "invalid"}, {0x0AADAA0000000ULL, 0x0AADB00000000ULL, MappingDesc::ORIGIN, "origin-14"}, {0x0AADB00000000ULL, 0x0FF9F00000000ULL, MappingDesc::INVALID, "invalid"}, {0x0FF9F00000000ULL, 0x0FFA000000000ULL, MappingDesc::SHADOW, "shadow-15"}, {0x0FFA000000000ULL, 0x0FFAF00000000ULL, MappingDesc::INVALID, "invalid"}, {0x0FFAF00000000ULL, 0x0FFB000000000ULL, MappingDesc::ORIGIN, "origin-15"}, {0x0FFB000000000ULL, 0x0FFFF00000000ULL, MappingDesc::INVALID, "invalid"}, {0x0FFFF00000000ULL, 0x1000000000000ULL, MappingDesc::APP, "app-15"}, }; # define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0x6000000000ULL) # define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL) #elif SANITIZER_LINUX && SANITIZER_PPC64 const MappingDesc kMemoryLayout[] = { {0x000000000000ULL, 0x000100000000ULL, MappingDesc::APP, "low memory"}, {0x000100000000ULL, 0x080000000000ULL, MappingDesc::INVALID, "invalid"}, {0x080000000000ULL, 0x180100000000ULL, MappingDesc::SHADOW, "shadow"}, {0x180100000000ULL, 0x1C0000000000ULL, MappingDesc::INVALID, "invalid"}, {0x1C0000000000ULL, 0x2C0100000000ULL, MappingDesc::ORIGIN, "origin"}, {0x2C0100000000ULL, 0x300000000000ULL, MappingDesc::INVALID, "invalid"}, {0x300000000000ULL, 0x400000000000ULL, MappingDesc::APP, "high memory"}}; // Maps low and high app ranges to contiguous space with zero base: // Low: 0000 0000 0000 - 0000 ffff ffff -> 1000 0000 0000 - 1000 ffff ffff // High: 3000 0000 0000 - 3fff ffff ffff -> 0000 0000 0000 - 0fff ffff ffff #define LINEARIZE_MEM(mem) \ (((uptr)(mem) & ~0x200000000000ULL) ^ 0x100000000000ULL) #define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x080000000000ULL) #define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL) #elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64 // Low memory: main binary, 
MAP_32BIT mappings and modules // High memory: heap, modules and main thread stack const MappingDesc kMemoryLayout[] = { {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "low memory"}, {0x010000000000ULL, 0x100000000000ULL, MappingDesc::INVALID, "invalid"}, {0x100000000000ULL, 0x310000000000ULL, MappingDesc::SHADOW, "shadow"}, {0x310000000000ULL, 0x380000000000ULL, MappingDesc::INVALID, "invalid"}, {0x380000000000ULL, 0x590000000000ULL, MappingDesc::ORIGIN, "origin"}, {0x590000000000ULL, 0x600000000000ULL, MappingDesc::INVALID, "invalid"}, {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}}; // Maps low and high app ranges to contiguous space with zero base: // Low: 0000 0000 0000 - 00ff ffff ffff -> 2000 0000 0000 - 20ff ffff ffff // High: 6000 0000 0000 - 7fff ffff ffff -> 0000 0000 0000 - 1fff ffff ffff #define LINEARIZE_MEM(mem) \ (((uptr)(mem) & ~0xc00000000000ULL) ^ 0x200000000000ULL) #define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x100000000000ULL) #define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x280000000000) #elif SANITIZER_LINUX && SANITIZER_WORDSIZE == 64 #ifdef MSAN_LINUX_X86_64_OLD_MAPPING // Requires PIE binary and ASLR enabled. // Main thread stack and DSOs at 0x7f0000000000 (sometimes 0x7e0000000000). // Heap at 0x600000000000. const MappingDesc kMemoryLayout[] = { {0x000000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"}, {0x200000000000ULL, 0x400000000000ULL, MappingDesc::SHADOW, "shadow"}, {0x400000000000ULL, 0x600000000000ULL, MappingDesc::ORIGIN, "origin"}, {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app"}}; #define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x400000000000ULL) #define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x200000000000ULL) #else // MSAN_LINUX_X86_64_OLD_MAPPING // All of the following configurations are supported. // ASLR disabled: main executable and DSOs at 0x555550000000 // PIE and ASLR: main executable and DSOs at 0x7f0000000000 // non-PIE: main executable below 0x100000000, DSOs at 0x7f0000000000 // Heap at 0x700000000000.
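For the new x86_64 layout spelled out just below, the same arithmetic can be checked by hand (editor's sketch): a heap pointer in "app-3" maps into "shadow-3" via the XOR and into "origin-3" via the offset.

    typedef unsigned long long u64;
    constexpr u64 kHeap       = 0x708000000000ULL;              // inside app-3 (the heap)
    constexpr u64 kHeapShadow = kHeap ^ 0x500000000000ULL;      // 0x208000000000, shadow-3
    constexpr u64 kHeapOrigin = kHeapShadow + 0x100000000000ULL;// 0x308000000000, origin-3
    static_assert(0x200000000000ULL <= kHeapShadow && kHeapShadow < 0x300000000000ULL, "shadow-3");
    static_assert(0x300000000000ULL <= kHeapOrigin && kHeapOrigin < 0x400000000000ULL, "origin-3");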
const MappingDesc kMemoryLayout[] = { {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app-1"}, {0x010000000000ULL, 0x100000000000ULL, MappingDesc::SHADOW, "shadow-2"}, {0x100000000000ULL, 0x110000000000ULL, MappingDesc::INVALID, "invalid"}, {0x110000000000ULL, 0x200000000000ULL, MappingDesc::ORIGIN, "origin-2"}, {0x200000000000ULL, 0x300000000000ULL, MappingDesc::SHADOW, "shadow-3"}, {0x300000000000ULL, 0x400000000000ULL, MappingDesc::ORIGIN, "origin-3"}, {0x400000000000ULL, 0x500000000000ULL, MappingDesc::INVALID, "invalid"}, {0x500000000000ULL, 0x510000000000ULL, MappingDesc::SHADOW, "shadow-1"}, {0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"}, {0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"}, {0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"}, {0x700000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}}; #define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL) #define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x100000000000ULL) #endif // MSAN_LINUX_X86_64_OLD_MAPPING #else #error "Unsupported platform" #endif const uptr kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]); #define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW((mem)))) #ifndef __clang__ __attribute__((optimize("unroll-loops"))) #endif inline bool addr_is_type(uptr addr, MappingDesc::Type mapping_type) { // It is critical for performance that this loop is unrolled (because then it is // simplified into just a few constant comparisons). #ifdef __clang__ #pragma unroll #endif for (unsigned i = 0; i < kMemoryLayoutSize; ++i) if (kMemoryLayout[i].type == mapping_type && addr >= kMemoryLayout[i].start && addr < kMemoryLayout[i].end) return true; return false; } #define MEM_IS_APP(mem) addr_is_type((uptr)(mem), MappingDesc::APP) #define MEM_IS_SHADOW(mem) addr_is_type((uptr)(mem), MappingDesc::SHADOW) #define MEM_IS_ORIGIN(mem) addr_is_type((uptr)(mem), MappingDesc::ORIGIN) // These constants must be kept in sync with the ones in MemorySanitizer.cc. 
const int kMsanParamTlsSize = 800; const int kMsanRetvalTlsSize = 800; namespace __msan { extern int msan_inited; extern bool msan_init_is_running; extern int msan_report_count; bool ProtectRange(uptr beg, uptr end); bool InitShadow(bool init_origins); char *GetProcSelfMaps(); void InitializeInterceptors(); void MsanAllocatorInit(); void MsanAllocatorThreadFinish(); -void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size); -void *MsanReallocate(StackTrace *stack, void *oldp, uptr size, - uptr alignment, bool zeroise); void MsanDeallocate(StackTrace *stack, void *ptr); + +void *msan_malloc(uptr size, StackTrace *stack); +void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack); +void *msan_realloc(void *ptr, uptr size, StackTrace *stack); +void *msan_valloc(uptr size, StackTrace *stack); +void *msan_pvalloc(uptr size, StackTrace *stack); +void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack); +void *msan_memalign(uptr alignment, uptr size, StackTrace *stack); +int msan_posix_memalign(void **memptr, uptr alignment, uptr size, + StackTrace *stack); + void InstallTrapHandler(); void InstallAtExitHandler(); const char *GetStackOriginDescr(u32 id, uptr *pc); void EnterSymbolizer(); void ExitSymbolizer(); bool IsInSymbolizer(); struct SymbolizerScope { SymbolizerScope() { EnterSymbolizer(); } ~SymbolizerScope() { ExitSymbolizer(); } }; void PrintWarning(uptr pc, uptr bp); void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin); void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp, bool request_fast_unwind); void ReportUMR(StackTrace *stack, u32 origin); void ReportExpectedUMRNotFound(StackTrace *stack); void ReportStats(); void ReportAtExitStatistics(); void DescribeMemoryRange(const void *x, uptr size); void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size, uptr offset); // Unpoison first n function arguments. void UnpoisonParam(uptr n); void UnpoisonThreadLocalState(); // Returns a "chained" origin id, pointing to the given stack trace followed by // the previous origin id. u32 ChainOrigin(u32 id, StackTrace *stack); const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1; #define GET_MALLOC_STACK_TRACE \ BufferedStackTrace stack; \ if (__msan_get_track_origins() && msan_inited) \ GetStackTrace(&stack, common_flags()->malloc_context_size, \ StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \ common_flags()->fast_unwind_on_malloc) // For platforms which support slow unwinder only, we restrict the store context // size to 1, basically only storing the current pc. We do this because the slow // unwinder which is based on libunwind is not async signal safe and causes // random freezes in forking applications as well as in signal handlers. 
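The GET_STORE_STACK_TRACE_PC_BP macro that follows encodes that restriction. Unfolded into a plain function for readability (an editor's sketch reusing the declarations above, not code from the patch; the real macro additionally clamps with Min() so a store_context_size of zero stays zero):

    // Capture an origin-tracking stack, but never ask the slow
    // (libunwind-based) unwinder for more than the current PC.
    static void CaptureStoreStack(BufferedStackTrace *stack, uptr pc, uptr bp) {
      if (!SANITIZER_CAN_FAST_UNWIND)
        GetStackTrace(stack, /*max_s=*/1, pc, bp, /*request_fast_unwind=*/false);
      else
        GetStackTrace(stack, flags()->store_context_size, pc, bp,
                      common_flags()->fast_unwind_on_malloc);
    }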
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \ BufferedStackTrace stack; \ if (__msan_get_track_origins() > 1 && msan_inited) { \ if (!SANITIZER_CAN_FAST_UNWIND) \ GetStackTrace(&stack, Min(1, flags()->store_context_size), pc, bp, \ false); \ else \ GetStackTrace(&stack, flags()->store_context_size, pc, bp, \ common_flags()->fast_unwind_on_malloc); \ } #define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \ BufferedStackTrace stack; \ if (msan_inited) \ GetStackTrace(&stack, kStackTraceMax, pc, bp, \ common_flags()->fast_unwind_on_fatal) #define GET_STORE_STACK_TRACE \ GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) class ScopedThreadLocalStateBackup { public: ScopedThreadLocalStateBackup() { Backup(); } ~ScopedThreadLocalStateBackup() { Restore(); } void Backup(); void Restore(); private: u64 va_arg_overflow_size_tls; }; void MsanTSDInit(void (*destructor)(void *tsd)); void *MsanTSDGet(); void MsanTSDSet(void *tsd); void MsanTSDDtor(void *tsd); } // namespace __msan #define MSAN_MALLOC_HOOK(ptr, size) \ do { \ if (&__sanitizer_malloc_hook) { \ UnpoisonParam(2); \ __sanitizer_malloc_hook(ptr, size); \ } \ RunMallocHooks(ptr, size); \ } while (false) #define MSAN_FREE_HOOK(ptr) \ do { \ if (&__sanitizer_free_hook) { \ UnpoisonParam(1); \ __sanitizer_free_hook(ptr); \ } \ RunFreeHooks(ptr); \ } while (false) #endif // MSAN_H diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc index a92b7fd12f92..1034dbdf9b55 100644 --- a/lib/msan/msan_allocator.cc +++ b/lib/msan/msan_allocator.cc @@ -1,270 +1,317 @@ //===-- msan_allocator.cc --------------------------- ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of MemorySanitizer. // // MemorySanitizer allocator. //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_errno.h" #include "msan.h" #include "msan_allocator.h" #include "msan_origin.h" #include "msan_thread.h" #include "msan_poisoning.h" namespace __msan { struct Metadata { uptr requested_size; }; struct MsanMapUnmapCallback { void OnMap(uptr p, uptr size) const {} void OnUnmap(uptr p, uptr size) const { __msan_unpoison((void *)p, size); // We are about to unmap a chunk of user memory. // Mark the corresponding shadow memory as not needed. 
uptr shadow_p = MEM_TO_SHADOW(p); ReleaseMemoryPagesToOS(shadow_p, shadow_p + size); if (__msan_get_track_origins()) { uptr origin_p = MEM_TO_ORIGIN(p); ReleaseMemoryPagesToOS(origin_p, origin_p + size); } } }; #if defined(__mips64) static const uptr kMaxAllowedMallocSize = 2UL << 30; static const uptr kRegionSizeLog = 20; static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog; typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap; struct AP32 { static const uptr kSpaceBeg = 0; static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE; static const uptr kMetadataSize = sizeof(Metadata); typedef __sanitizer::CompactSizeClassMap SizeClassMap; static const uptr kRegionSizeLog = __msan::kRegionSizeLog; typedef __msan::ByteMap ByteMap; typedef MsanMapUnmapCallback MapUnmapCallback; static const uptr kFlags = 0; }; typedef SizeClassAllocator32<AP32> PrimaryAllocator; #elif defined(__x86_64__) #if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING) static const uptr kAllocatorSpace = 0x700000000000ULL; #else static const uptr kAllocatorSpace = 0x600000000000ULL; #endif static const uptr kMaxAllowedMallocSize = 8UL << 30; struct AP64 { // Allocator64 parameters. Deliberately using a short name. static const uptr kSpaceBeg = kAllocatorSpace; static const uptr kSpaceSize = 0x40000000000; // 4T. static const uptr kMetadataSize = sizeof(Metadata); typedef DefaultSizeClassMap SizeClassMap; typedef MsanMapUnmapCallback MapUnmapCallback; static const uptr kFlags = 0; }; typedef SizeClassAllocator64<AP64> PrimaryAllocator; #elif defined(__powerpc64__) static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G struct AP64 { // Allocator64 parameters. Deliberately using a short name. static const uptr kSpaceBeg = 0x300000000000; static const uptr kSpaceSize = 0x020000000000; // 2T.
static const uptr kMetadataSize = sizeof(Metadata); typedef DefaultSizeClassMap SizeClassMap; typedef MsanMapUnmapCallback MapUnmapCallback; static const uptr kFlags = 0; }; typedef SizeClassAllocator64<AP64> PrimaryAllocator; #elif defined(__aarch64__) static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G static const uptr kRegionSizeLog = 20; static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog; typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap; struct AP32 { static const uptr kSpaceBeg = 0; static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE; static const uptr kMetadataSize = sizeof(Metadata); typedef __sanitizer::CompactSizeClassMap SizeClassMap; static const uptr kRegionSizeLog = __msan::kRegionSizeLog; typedef __msan::ByteMap ByteMap; typedef MsanMapUnmapCallback MapUnmapCallback; static const uptr kFlags = 0; }; typedef SizeClassAllocator32<AP32> PrimaryAllocator; #endif typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator; typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator> Allocator; static Allocator allocator; static AllocatorCache fallback_allocator_cache; static SpinMutex fallback_mutex; void MsanAllocatorInit() { SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); allocator.Init(common_flags()->allocator_release_to_os_interval_ms); } AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) { CHECK(ms); CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache)); return reinterpret_cast<AllocatorCache *>(ms->allocator_cache); } void MsanThreadLocalMallocStorage::CommitBack() { allocator.SwallowCache(GetAllocatorCache(this)); } static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment, bool zeroise) { if (size > kMaxAllowedMallocSize) { Report("WARNING: MemorySanitizer failed to allocate %p bytes\n", (void *)size); return Allocator::FailureHandler::OnBadRequest(); } MsanThread *t = GetCurrentThread(); void *allocated; if (t) { AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); allocated = allocator.Allocate(cache, size, alignment); } else { SpinMutexLock l(&fallback_mutex); AllocatorCache *cache = &fallback_allocator_cache; allocated = allocator.Allocate(cache, size, alignment); } Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated)); meta->requested_size = size; if (zeroise) { __msan_clear_and_unpoison(allocated, size); } else if (flags()->poison_in_malloc) { __msan_poison(allocated, size); if (__msan_get_track_origins()) { stack->tag = StackTrace::TAG_ALLOC; Origin o = Origin::CreateHeapOrigin(stack); __msan_set_origin(allocated, size, o.raw_id()); } } MSAN_MALLOC_HOOK(allocated, size); return allocated; } void MsanDeallocate(StackTrace *stack, void *p) { CHECK(p); MSAN_FREE_HOOK(p); Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p)); uptr size = meta->requested_size; meta->requested_size = 0; // This memory will not be reused by anyone else, so we are free to keep it // poisoned.
if (flags()->poison_in_free) { __msan_poison(p, size); if (__msan_get_track_origins()) { stack->tag = StackTrace::TAG_DEALLOC; Origin o = Origin::CreateHeapOrigin(stack); __msan_set_origin(p, size, o.raw_id()); } } MsanThread *t = GetCurrentThread(); if (t) { AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); allocator.Deallocate(cache, p); } else { SpinMutexLock l(&fallback_mutex); AllocatorCache *cache = &fallback_allocator_cache; allocator.Deallocate(cache, p); } } -void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) { - if (CheckForCallocOverflow(size, nmemb)) - return Allocator::FailureHandler::OnBadRequest(); - return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true); -} - void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size, - uptr alignment, bool zeroise) { - if (!old_p) - return MsanAllocate(stack, new_size, alignment, zeroise); - if (!new_size) { - MsanDeallocate(stack, old_p); - return nullptr; - } + uptr alignment) { Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p)); uptr old_size = meta->requested_size; uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p); if (new_size <= actually_allocated_size) { // We are not reallocating here. meta->requested_size = new_size; if (new_size > old_size) { - if (zeroise) { - __msan_clear_and_unpoison((char *)old_p + old_size, - new_size - old_size); - } else if (flags()->poison_in_malloc) { + if (flags()->poison_in_malloc) { stack->tag = StackTrace::TAG_ALLOC; PoisonMemory((char *)old_p + old_size, new_size - old_size, stack); } } return old_p; } uptr memcpy_size = Min(new_size, old_size); - void *new_p = MsanAllocate(stack, new_size, alignment, zeroise); - // Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size); + void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/); if (new_p) { CopyMemory(new_p, old_p, memcpy_size, stack); MsanDeallocate(stack, old_p); } return new_p; } static uptr AllocationSize(const void *p) { if (!p) return 0; const void *beg = allocator.GetBlockBegin(p); if (beg != p) return 0; Metadata *b = (Metadata *)allocator.GetMetaData(p); return b->requested_size; } +void *msan_malloc(uptr size, StackTrace *stack) { + return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false)); +} + +void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) + return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest()); + return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true)); +} + +void *msan_realloc(void *ptr, uptr size, StackTrace *stack) { + if (!ptr) + return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false)); + if (size == 0) { + MsanDeallocate(stack, ptr); + return nullptr; + } + return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64))); +} + +void *msan_valloc(uptr size, StackTrace *stack) { + return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false)); +} + +void *msan_pvalloc(uptr size, StackTrace *stack) { + uptr PageSize = GetPageSizeCached(); + // pvalloc(0) should allocate one page. + size = size == 0 ?
PageSize : RoundUpTo(size, PageSize); + return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false)); +} + +void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) { + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { + errno = errno_EINVAL; + return Allocator::FailureHandler::OnBadRequest(); + } + return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false)); +} + +void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) { + if (UNLIKELY(!IsPowerOfTwo(alignment))) { + errno = errno_EINVAL; + return Allocator::FailureHandler::OnBadRequest(); + } + return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false)); +} + +int msan_posix_memalign(void **memptr, uptr alignment, uptr size, + StackTrace *stack) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { + Allocator::FailureHandler::OnBadRequest(); + return errno_EINVAL; + } + void *ptr = MsanAllocate(stack, size, alignment, false); + if (UNLIKELY(!ptr)) + return errno_ENOMEM; + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + } // namespace __msan using namespace __msan; uptr __sanitizer_get_current_allocated_bytes() { uptr stats[AllocatorStatCount]; allocator.GetStats(stats); return stats[AllocatorStatAllocated]; } uptr __sanitizer_get_heap_size() { uptr stats[AllocatorStatCount]; allocator.GetStats(stats); return stats[AllocatorStatMapped]; } uptr __sanitizer_get_free_bytes() { return 1; } uptr __sanitizer_get_unmapped_bytes() { return 1; } uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; } uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); } diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc index 069777c7f5e7..b5d22baca08d 100644 --- a/lib/msan/msan_interceptors.cc +++ b/lib/msan/msan_interceptors.cc @@ -1,1587 +1,1574 @@ //===-- msan_interceptors.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of MemorySanitizer. // // Interceptors for standard library functions. // // FIXME: move as many interceptors as possible into // sanitizer_common/sanitizer_common_interceptors.h //===----------------------------------------------------------------------===// #include "interception/interception.h" #include "msan.h" #include "msan_chained_origin_depot.h" #include "msan_origin.h" #include "msan_thread.h" #include "msan_poisoning.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" #include <stdarg.h> // ACHTUNG! No other system header includes in this file. // Ideally, we should get rid of stdarg.h as well.
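The msan_* wrappers above pin down the POSIX error contract: posix_memalign reports failure through its return value and delivers the result via the out-parameter, while the other entry points return null and set errno. A small portable usage sketch of that contract (editor's example using plain libc, no sanitizer; assumes a C11/POSIX-compatible libc):

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      void *p = nullptr;
      // Failure comes back as the return value; errno is not used.
      int rc = posix_memalign(&p, 64, 1024);
      if (rc == 0) { std::printf("got %p\n", p); std::free(p); }

      errno = 0;
      // Failure comes back as null with errno set (EINVAL for a bad alignment).
      void *q = aligned_alloc(64, 1024);  // 1024 is a multiple of 64, as C11 requires
      if (!q) std::printf("aligned_alloc failed, errno=%d\n", errno);
      else std::free(q);
      return 0;
    }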
using namespace __msan; using __sanitizer::memory_order; using __sanitizer::atomic_load; using __sanitizer::atomic_store; using __sanitizer::atomic_uintptr_t; DECLARE_REAL(SIZE_T, strlen, const char *s) DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen) DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n) DECLARE_REAL(void *, memset, void *dest, int c, uptr n) // True if this is a nested interceptor. static THREADLOCAL int in_interceptor_scope; struct InterceptorScope { InterceptorScope() { ++in_interceptor_scope; } ~InterceptorScope() { --in_interceptor_scope; } }; bool IsInInterceptorScope() { return in_interceptor_scope; } static uptr allocated_for_dlsym; static const uptr kDlsymAllocPoolSize = 1024; static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; static bool IsInDlsymAllocPool(const void *ptr) { uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; return off < sizeof(alloc_memory_for_dlsym); } static void *AllocateFromLocalPool(uptr size_in_bytes) { uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym]; allocated_for_dlsym += size_in_words; CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); return mem; } #define ENSURE_MSAN_INITED() do { \ CHECK(!msan_init_is_running); \ if (!msan_inited) { \ __msan_init(); \ } \ } while (0) // Check that [x, x+n) range is unpoisoned. #define CHECK_UNPOISONED_0(x, n) \ do { \ sptr offset = __msan_test_shadow(x, n); \ if (__msan::IsInSymbolizer()) \ break; \ if (offset >= 0 && __msan::flags()->report_umrs) { \ GET_CALLER_PC_BP_SP; \ (void) sp; \ ReportUMRInsideAddressRange(__func__, x, n, offset); \ __msan::PrintWarningWithOrigin( \ pc, bp, __msan_get_origin((const char *)x + offset)); \ if (__msan::flags()->halt_on_error) { \ Printf("Exiting\n"); \ Die(); \ } \ } \ } while (0) // Check that [x, x+n) range is unpoisoned unless we are in a nested // interceptor. #define CHECK_UNPOISONED(x, n) \ do { \ if (!IsInInterceptorScope()) CHECK_UNPOISONED_0(x, n); \ } while (0); #define CHECK_UNPOISONED_STRING_OF_LEN(x, len, n) \ CHECK_UNPOISONED((x), \ common_flags()->strict_string_checks ? (len) + 1 : (n) ) #define CHECK_UNPOISONED_STRING(x, n) \ CHECK_UNPOISONED_STRING_OF_LEN((x), internal_strlen(x), (n)) #if !SANITIZER_FREEBSD INTERCEPTOR(SIZE_T, fread_unlocked, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) { ENSURE_MSAN_INITED(); SIZE_T res = REAL(fread_unlocked)(ptr, size, nmemb, file); if (res > 0) __msan_unpoison(ptr, res *size); return res; } #define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED INTERCEPT_FUNCTION(fread_unlocked) #else #define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED #endif INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) { ENSURE_MSAN_INITED(); CHECK_UNPOISONED_STRING(path, 0) SSIZE_T res = REAL(readlink)(path, buf, bufsiz); if (res > 0) __msan_unpoison(buf, res); return res; } INTERCEPTOR(void *, mempcpy, void *dest, const void *src, SIZE_T n) { return (char *)__msan_memcpy(dest, src, n) + n; } INTERCEPTOR(void *, memccpy, void *dest, const void *src, int c, SIZE_T n) { ENSURE_MSAN_INITED(); void *res = REAL(memccpy)(dest, src, c, n); CHECK(!res || (res >= dest && res <= (char *)dest + n)); SIZE_T sz = res ? 
(char *)res - (char *)dest : n; CHECK_UNPOISONED(src, sz); __msan_unpoison(dest, sz); return res; } INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) { return __msan_memmove(dest, src, n); } INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) { GET_MALLOC_STACK_TRACE; - CHECK_EQ(alignment & (alignment - 1), 0); CHECK_NE(memptr, 0); - *memptr = MsanReallocate(&stack, nullptr, size, alignment, false); - CHECK_NE(*memptr, 0); - __msan_unpoison(memptr, sizeof(*memptr)); - return 0; + int res = msan_posix_memalign(memptr, alignment, size, &stack); + if (!res) + __msan_unpoison(memptr, sizeof(*memptr)); + return res; } #if !SANITIZER_FREEBSD -INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) { +INTERCEPTOR(void *, memalign, SIZE_T alignment, SIZE_T size) { GET_MALLOC_STACK_TRACE; - CHECK_EQ(boundary & (boundary - 1), 0); - void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false); - return ptr; + return msan_memalign(alignment, size, &stack); } #define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign) #else #define MSAN_MAYBE_INTERCEPT_MEMALIGN #endif -INTERCEPTOR(void *, aligned_alloc, SIZE_T boundary, SIZE_T size) { +INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) { GET_MALLOC_STACK_TRACE; - CHECK_EQ(boundary & (boundary - 1), 0); - void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false); - return ptr; + return msan_aligned_alloc(alignment, size, &stack); } -INTERCEPTOR(void *, __libc_memalign, SIZE_T boundary, SIZE_T size) { +INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) { GET_MALLOC_STACK_TRACE; - CHECK_EQ(boundary & (boundary - 1), 0); - void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false); - DTLS_on_libc_memalign(ptr, size); + void *ptr = msan_memalign(alignment, size, &stack); + if (ptr) + DTLS_on_libc_memalign(ptr, size); return ptr; } INTERCEPTOR(void *, valloc, SIZE_T size) { GET_MALLOC_STACK_TRACE; - void *ptr = MsanReallocate(&stack, nullptr, size, GetPageSizeCached(), false); - return ptr; + return msan_valloc(size, &stack); } #if !SANITIZER_FREEBSD INTERCEPTOR(void *, pvalloc, SIZE_T size) { GET_MALLOC_STACK_TRACE; - uptr PageSize = GetPageSizeCached(); - size = RoundUpTo(size, PageSize); - if (size == 0) { - // pvalloc(0) should allocate one page. - size = PageSize; - } - void *ptr = MsanReallocate(&stack, nullptr, size, PageSize, false); - return ptr; + return msan_pvalloc(size, &stack); } #define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc) #else #define MSAN_MAYBE_INTERCEPT_PVALLOC #endif INTERCEPTOR(void, free, void *ptr) { GET_MALLOC_STACK_TRACE; if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; MsanDeallocate(&stack, ptr); } #if !SANITIZER_FREEBSD INTERCEPTOR(void, cfree, void *ptr) { GET_MALLOC_STACK_TRACE; if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; MsanDeallocate(&stack, ptr); } #define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree) #else #define MSAN_MAYBE_INTERCEPT_CFREE #endif INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { return __sanitizer_get_allocated_size(ptr); } #if !SANITIZER_FREEBSD // This function actually returns a struct by value, but we can't unpoison a // temporary! The following is equivalent on all supported platforms but // aarch64 (which uses a different register for sret value). We have a test // to confirm that. 
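In isolation, the hidden-sret convention the mallinfo interceptor below leans on looks like this (editor's sketch; plain C++, no sanitizer, and fake_mallinfo is an illustrative name): a function that nominally returns a large struct actually receives a pointer to the caller's result slot, so the interceptor can initialize (and unpoison) that slot in place.

    #include <cstdio>
    #include <cstring>

    struct mallinfo_like { int x[10]; };  // large enough to be returned via memory

    // What the interceptor does, minus the unpoisoning: zero the caller's
    // result slot through the pointer the ABI passes for struct returns.
    extern "C" void fake_mallinfo(mallinfo_like *sret) {
      std::memset(sret, 0, sizeof(*sret));
    }

    int main() {
      mallinfo_like m;
      fake_mallinfo(&m);
      std::printf("%d\n", m.x[0]);  // prints 0
      return 0;
    }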
INTERCEPTOR(void, mallinfo, __sanitizer_mallinfo *sret) { #ifdef __aarch64__ uptr r8; asm volatile("mov %0,x8" : "=r" (r8)); sret = reinterpret_cast<__sanitizer_mallinfo*>(r8); #endif REAL(memset)(sret, 0, sizeof(*sret)); __msan_unpoison(sret, sizeof(*sret)); } #define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo) #else #define MSAN_MAYBE_INTERCEPT_MALLINFO #endif #if !SANITIZER_FREEBSD INTERCEPTOR(int, mallopt, int cmd, int value) { return -1; } #define MSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt) #else #define MSAN_MAYBE_INTERCEPT_MALLOPT #endif #if !SANITIZER_FREEBSD INTERCEPTOR(void, malloc_stats, void) { // FIXME: implement, but don't call REAL(malloc_stats)! } #define MSAN_MAYBE_INTERCEPT_MALLOC_STATS INTERCEPT_FUNCTION(malloc_stats) #else #define MSAN_MAYBE_INTERCEPT_MALLOC_STATS #endif INTERCEPTOR(char *, strcpy, char *dest, const char *src) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T n = REAL(strlen)(src); CHECK_UNPOISONED_STRING(src + n, 0); char *res = REAL(strcpy)(dest, src); // NOLINT CopyShadowAndOrigin(dest, src, n + 1, &stack); return res; } INTERCEPTOR(char *, strncpy, char *dest, const char *src, SIZE_T n) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T copy_size = REAL(strnlen)(src, n); if (copy_size < n) copy_size++; // trailing \0 char *res = REAL(strncpy)(dest, src, n); // NOLINT CopyShadowAndOrigin(dest, src, copy_size, &stack); __msan_unpoison(dest + copy_size, n - copy_size); return res; } INTERCEPTOR(char *, stpcpy, char *dest, const char *src) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T n = REAL(strlen)(src); CHECK_UNPOISONED_STRING(src + n, 0); char *res = REAL(stpcpy)(dest, src); // NOLINT CopyShadowAndOrigin(dest, src, n + 1, &stack); return res; } INTERCEPTOR(char *, strdup, char *src) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; // On FreeBSD strdup() leverages strlen(). 
InterceptorScope interceptor_scope; SIZE_T n = REAL(strlen)(src); CHECK_UNPOISONED_STRING(src + n, 0); char *res = REAL(strdup)(src); CopyShadowAndOrigin(res, src, n + 1, &stack); return res; } #if !SANITIZER_FREEBSD INTERCEPTOR(char *, __strdup, char *src) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T n = REAL(strlen)(src); CHECK_UNPOISONED_STRING(src + n, 0); char *res = REAL(__strdup)(src); CopyShadowAndOrigin(res, src, n + 1, &stack); return res; } #define MSAN_MAYBE_INTERCEPT___STRDUP INTERCEPT_FUNCTION(__strdup) #else #define MSAN_MAYBE_INTERCEPT___STRDUP #endif INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) { ENSURE_MSAN_INITED(); char *res = REAL(gcvt)(number, ndigit, buf); SIZE_T n = REAL(strlen)(buf); __msan_unpoison(buf, n + 1); return res; } INTERCEPTOR(char *, strcat, char *dest, const char *src) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T src_size = REAL(strlen)(src); SIZE_T dest_size = REAL(strlen)(dest); CHECK_UNPOISONED_STRING(src + src_size, 0); CHECK_UNPOISONED_STRING(dest + dest_size, 0); char *res = REAL(strcat)(dest, src); // NOLINT CopyShadowAndOrigin(dest + dest_size, src, src_size + 1, &stack); return res; } INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T dest_size = REAL(strlen)(dest); SIZE_T copy_size = REAL(strnlen)(src, n); CHECK_UNPOISONED_STRING(dest + dest_size, 0); char *res = REAL(strncat)(dest, src, n); // NOLINT CopyShadowAndOrigin(dest + dest_size, src, copy_size, &stack); __msan_unpoison(dest + dest_size + copy_size, 1); // \0 return res; } // Hack: always pass nptr and endptr as part of __VA_ARGS_ to avoid having to // deal with empty __VA_ARGS__ in the case of INTERCEPTOR_STRTO. #define INTERCEPTOR_STRTO_BODY(ret_type, func, ...) 
\ ENSURE_MSAN_INITED(); \ ret_type res = REAL(func)(__VA_ARGS__); \ __msan_unpoison(endptr, sizeof(*endptr)); \ return res; #define INTERCEPTOR_STRTO(ret_type, func, char_type) \ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr) { \ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr); \ } #define INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \ int base) { \ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base); \ } #define INTERCEPTOR_STRTO_LOC(ret_type, func, char_type) \ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \ void *loc) { \ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc); \ } #define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func, char_type) \ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \ int base, void *loc) { \ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base, loc); \ } #define INTERCEPTORS_STRTO(ret_type, func, char_type) \ INTERCEPTOR_STRTO(ret_type, func, char_type) \ INTERCEPTOR_STRTO_LOC(ret_type, func##_l, char_type) \ INTERCEPTOR_STRTO_LOC(ret_type, __##func##_l, char_type) \ INTERCEPTOR_STRTO_LOC(ret_type, __##func##_internal, char_type) #define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \ INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \ INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type) \ INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_l, char_type) \ INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_internal, char_type) INTERCEPTORS_STRTO(double, strtod, char) // NOLINT INTERCEPTORS_STRTO(float, strtof, char) // NOLINT INTERCEPTORS_STRTO(long double, strtold, char) // NOLINT INTERCEPTORS_STRTO_BASE(long, strtol, char) // NOLINT INTERCEPTORS_STRTO_BASE(long long, strtoll, char) // NOLINT INTERCEPTORS_STRTO_BASE(unsigned long, strtoul, char) // NOLINT INTERCEPTORS_STRTO_BASE(unsigned long long, strtoull, char) // NOLINT INTERCEPTORS_STRTO(double, wcstod, wchar_t) // NOLINT INTERCEPTORS_STRTO(float, wcstof, wchar_t) // NOLINT INTERCEPTORS_STRTO(long double, wcstold, wchar_t) // NOLINT INTERCEPTORS_STRTO_BASE(long, wcstol, wchar_t) // NOLINT INTERCEPTORS_STRTO_BASE(long long, wcstoll, wchar_t) // NOLINT INTERCEPTORS_STRTO_BASE(unsigned long, wcstoul, wchar_t) // NOLINT INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t) // NOLINT #define INTERCEPT_STRTO(func) \ INTERCEPT_FUNCTION(func); \ INTERCEPT_FUNCTION(func##_l); \ INTERCEPT_FUNCTION(__##func##_l); \ INTERCEPT_FUNCTION(__##func##_internal); // FIXME: support *wprintf in common format interceptors. INTERCEPTOR(int, vswprintf, void *str, uptr size, void *format, va_list ap) { ENSURE_MSAN_INITED(); int res = REAL(vswprintf)(str, size, format, ap); if (res >= 0) { __msan_unpoison(str, 4 * (res + 1)); } return res; } INTERCEPTOR(int, swprintf, void *str, uptr size, void *format, ...) 
{ ENSURE_MSAN_INITED(); va_list ap; va_start(ap, format); int res = vswprintf(str, size, format, ap); va_end(ap); return res; } INTERCEPTOR(SIZE_T, strxfrm, char *dest, const char *src, SIZE_T n) { ENSURE_MSAN_INITED(); CHECK_UNPOISONED(src, REAL(strlen)(src) + 1); SIZE_T res = REAL(strxfrm)(dest, src, n); if (res < n) __msan_unpoison(dest, res + 1); return res; } INTERCEPTOR(SIZE_T, strxfrm_l, char *dest, const char *src, SIZE_T n, void *loc) { ENSURE_MSAN_INITED(); CHECK_UNPOISONED(src, REAL(strlen)(src) + 1); SIZE_T res = REAL(strxfrm_l)(dest, src, n, loc); if (res < n) __msan_unpoison(dest, res + 1); return res; } #define INTERCEPTOR_STRFTIME_BODY(char_type, ret_type, func, s, ...) \ ENSURE_MSAN_INITED(); \ ret_type res = REAL(func)(s, __VA_ARGS__); \ if (s) __msan_unpoison(s, sizeof(char_type) * (res + 1)); \ return res; INTERCEPTOR(SIZE_T, strftime, char *s, SIZE_T max, const char *format, __sanitizer_tm *tm) { INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, strftime, s, max, format, tm); } INTERCEPTOR(SIZE_T, strftime_l, char *s, SIZE_T max, const char *format, __sanitizer_tm *tm, void *loc) { INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, strftime_l, s, max, format, tm, loc); } #if !SANITIZER_FREEBSD INTERCEPTOR(SIZE_T, __strftime_l, char *s, SIZE_T max, const char *format, __sanitizer_tm *tm, void *loc) { INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, __strftime_l, s, max, format, tm, loc); } #define MSAN_MAYBE_INTERCEPT___STRFTIME_L INTERCEPT_FUNCTION(__strftime_l) #else #define MSAN_MAYBE_INTERCEPT___STRFTIME_L #endif INTERCEPTOR(SIZE_T, wcsftime, wchar_t *s, SIZE_T max, const wchar_t *format, __sanitizer_tm *tm) { INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, wcsftime, s, max, format, tm); } INTERCEPTOR(SIZE_T, wcsftime_l, wchar_t *s, SIZE_T max, const wchar_t *format, __sanitizer_tm *tm, void *loc) { INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, wcsftime_l, s, max, format, tm, loc); } #if !SANITIZER_FREEBSD INTERCEPTOR(SIZE_T, __wcsftime_l, wchar_t *s, SIZE_T max, const wchar_t *format, __sanitizer_tm *tm, void *loc) { INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, __wcsftime_l, s, max, format, tm, loc); } #define MSAN_MAYBE_INTERCEPT___WCSFTIME_L INTERCEPT_FUNCTION(__wcsftime_l) #else #define MSAN_MAYBE_INTERCEPT___WCSFTIME_L #endif INTERCEPTOR(int, mbtowc, wchar_t *dest, const char *src, SIZE_T n) { ENSURE_MSAN_INITED(); int res = REAL(mbtowc)(dest, src, n); if (res != -1 && dest) __msan_unpoison(dest, sizeof(wchar_t)); return res; } INTERCEPTOR(int, mbrtowc, wchar_t *dest, const char *src, SIZE_T n, void *ps) { ENSURE_MSAN_INITED(); SIZE_T res = REAL(mbrtowc)(dest, src, n, ps); if (res != (SIZE_T)-1 && dest) __msan_unpoison(dest, sizeof(wchar_t)); return res; } // wchar_t *wmemcpy(wchar_t *dest, const wchar_t *src, SIZE_T n); INTERCEPTOR(wchar_t *, wmemcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; wchar_t *res = REAL(wmemcpy)(dest, src, n); CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack); return res; } INTERCEPTOR(wchar_t *, wmempcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; wchar_t *res = REAL(wmempcpy)(dest, src, n); CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack); return res; } INTERCEPTOR(wchar_t *, wmemset, wchar_t *s, wchar_t c, SIZE_T n) { CHECK(MEM_IS_APP(s)); ENSURE_MSAN_INITED(); wchar_t *res = REAL(wmemset)(s, c, n); __msan_unpoison(s, n * sizeof(wchar_t)); return res; } INTERCEPTOR(wchar_t *, wmemmove, wchar_t *dest, const wchar_t *src, SIZE_T n) { ENSURE_MSAN_INITED(); 
GET_STORE_STACK_TRACE; wchar_t *res = REAL(wmemmove)(dest, src, n); MoveShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack); return res; } INTERCEPTOR(int, wcscmp, const wchar_t *s1, const wchar_t *s2) { ENSURE_MSAN_INITED(); int res = REAL(wcscmp)(s1, s2); return res; } INTERCEPTOR(int, gettimeofday, void *tv, void *tz) { ENSURE_MSAN_INITED(); int res = REAL(gettimeofday)(tv, tz); if (tv) __msan_unpoison(tv, 16); if (tz) __msan_unpoison(tz, 8); return res; } INTERCEPTOR(char *, fcvt, double x, int a, int *b, int *c) { ENSURE_MSAN_INITED(); char *res = REAL(fcvt)(x, a, b, c); __msan_unpoison(b, sizeof(*b)); __msan_unpoison(c, sizeof(*c)); if (res) __msan_unpoison(res, REAL(strlen)(res) + 1); return res; } INTERCEPTOR(char *, getenv, char *name) { if (msan_init_is_running) return REAL(getenv)(name); ENSURE_MSAN_INITED(); char *res = REAL(getenv)(name); if (res) __msan_unpoison(res, REAL(strlen)(res) + 1); return res; } extern char **environ; static void UnpoisonEnviron() { char **envp = environ; for (; *envp; ++envp) { __msan_unpoison(envp, sizeof(*envp)); __msan_unpoison(*envp, REAL(strlen)(*envp) + 1); } // Trailing NULL pointer. __msan_unpoison(envp, sizeof(*envp)); } INTERCEPTOR(int, setenv, const char *name, const char *value, int overwrite) { ENSURE_MSAN_INITED(); CHECK_UNPOISONED_STRING(name, 0) int res = REAL(setenv)(name, value, overwrite); if (!res) UnpoisonEnviron(); return res; } INTERCEPTOR(int, putenv, char *string) { ENSURE_MSAN_INITED(); int res = REAL(putenv)(string); if (!res) UnpoisonEnviron(); return res; } #if !SANITIZER_FREEBSD INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__fxstat)(magic, fd, buf); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } #define MSAN_MAYBE_INTERCEPT___FXSTAT INTERCEPT_FUNCTION(__fxstat) #else #define MSAN_MAYBE_INTERCEPT___FXSTAT #endif #if !SANITIZER_FREEBSD INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__fxstat64)(magic, fd, buf); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat64_sz); return res; } #define MSAN_MAYBE_INTERCEPT___FXSTAT64 INTERCEPT_FUNCTION(__fxstat64) #else #define MSAN_MAYBE_INTERCEPT___FXSTAT64 #endif #if SANITIZER_FREEBSD INTERCEPTOR(int, fstatat, int fd, char *pathname, void *buf, int flags) { ENSURE_MSAN_INITED(); int res = REAL(fstatat)(fd, pathname, buf, flags); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } # define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(fstatat) #else INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf, int flags) { ENSURE_MSAN_INITED(); int res = REAL(__fxstatat)(magic, fd, pathname, buf, flags); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } # define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(__fxstatat) #endif #if !SANITIZER_FREEBSD INTERCEPTOR(int, __fxstatat64, int magic, int fd, char *pathname, void *buf, int flags) { ENSURE_MSAN_INITED(); int res = REAL(__fxstatat64)(magic, fd, pathname, buf, flags); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat64_sz); return res; } #define MSAN_MAYBE_INTERCEPT___FXSTATAT64 INTERCEPT_FUNCTION(__fxstatat64) #else #define MSAN_MAYBE_INTERCEPT___FXSTATAT64 #endif INTERCEPTOR(int, pipe, int pipefd[2]) { if (msan_init_is_running) return REAL(pipe)(pipefd); ENSURE_MSAN_INITED(); int res = REAL(pipe)(pipefd); if (!res) __msan_unpoison(pipefd, sizeof(int[2])); return res; } INTERCEPTOR(int, pipe2, int pipefd[2], int flags) { 
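setenv() and putenv() may reallocate or extend the environment block, which is why the interceptors above call UnpoisonEnviron() on success rather than unpoisoning a single entry: the whole environ array, every "NAME=value" string, and the terminating null slot get fresh shadow. A standalone model of that traversal (unpoison call stubbed):

#include <cstring>

extern char **environ;

static void unpoison_stub(const void *p, size_t n) { (void)p; (void)n; }

static void unpoison_environ_model() {
  char **envp = environ;
  for (; *envp; ++envp) {
    unpoison_stub(envp, sizeof(*envp));       // the pointer slot itself
    unpoison_stub(*envp, strlen(*envp) + 1);  // "NAME=value" plus its NUL
  }
  unpoison_stub(envp, sizeof(*envp));         // the terminating null slot
}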
  ENSURE_MSAN_INITED();
  int res = REAL(pipe2)(pipefd, flags);
  if (!res) __msan_unpoison(pipefd, sizeof(int[2]));
  return res;
}

INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int sv[2]) {
  ENSURE_MSAN_INITED();
  int res = REAL(socketpair)(domain, type, protocol, sv);
  if (!res) __msan_unpoison(sv, sizeof(int[2]));
  return res;
}

INTERCEPTOR(char *, fgets, char *s, int size, void *stream) {
  ENSURE_MSAN_INITED();
  char *res = REAL(fgets)(s, size, stream);
  if (res) __msan_unpoison(s, REAL(strlen)(s) + 1);
  return res;
}

#if !SANITIZER_FREEBSD
INTERCEPTOR(char *, fgets_unlocked, char *s, int size, void *stream) {
  ENSURE_MSAN_INITED();
  char *res = REAL(fgets_unlocked)(s, size, stream);
  if (res) __msan_unpoison(s, REAL(strlen)(s) + 1);
  return res;
}
#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED INTERCEPT_FUNCTION(fgets_unlocked)
#else
#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED
#endif

INTERCEPTOR(int, getrlimit, int resource, void *rlim) {
  if (msan_init_is_running)
    return REAL(getrlimit)(resource, rlim);
  ENSURE_MSAN_INITED();
  int res = REAL(getrlimit)(resource, rlim);
  if (!res) __msan_unpoison(rlim, __sanitizer::struct_rlimit_sz);
  return res;
}

#if !SANITIZER_FREEBSD
INTERCEPTOR(int, getrlimit64, int resource, void *rlim) {
  if (msan_init_is_running)
    return REAL(getrlimit64)(resource, rlim);
  ENSURE_MSAN_INITED();
  int res = REAL(getrlimit64)(resource, rlim);
  if (!res) __msan_unpoison(rlim, __sanitizer::struct_rlimit64_sz);
  return res;
}

INTERCEPTOR(int, prlimit, int pid, int resource, void *new_rlimit,
            void *old_rlimit) {
  if (msan_init_is_running)
    return REAL(prlimit)(pid, resource, new_rlimit, old_rlimit);
  ENSURE_MSAN_INITED();
  CHECK_UNPOISONED(new_rlimit, __sanitizer::struct_rlimit_sz);
  int res = REAL(prlimit)(pid, resource, new_rlimit, old_rlimit);
  if (!res) __msan_unpoison(old_rlimit, __sanitizer::struct_rlimit_sz);
  return res;
}

INTERCEPTOR(int, prlimit64, int pid, int resource, void *new_rlimit,
            void *old_rlimit) {
  if (msan_init_is_running)
    return REAL(prlimit64)(pid, resource, new_rlimit, old_rlimit);
  ENSURE_MSAN_INITED();
  CHECK_UNPOISONED(new_rlimit, __sanitizer::struct_rlimit64_sz);
  int res = REAL(prlimit64)(pid, resource, new_rlimit, old_rlimit);
  if (!res) __msan_unpoison(old_rlimit, __sanitizer::struct_rlimit64_sz);
  return res;
}

#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64 INTERCEPT_FUNCTION(getrlimit64)
#define MSAN_MAYBE_INTERCEPT_PRLIMIT INTERCEPT_FUNCTION(prlimit)
#define MSAN_MAYBE_INTERCEPT_PRLIMIT64 INTERCEPT_FUNCTION(prlimit64)
#else
#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64
#define MSAN_MAYBE_INTERCEPT_PRLIMIT
#define MSAN_MAYBE_INTERCEPT_PRLIMIT64
#endif

#if SANITIZER_FREEBSD
// FreeBSD defines uname() as
// static __inline int uname(struct utsname *name) {
//   return __xuname(SYS_NMLN, (void*)name);
// }
INTERCEPTOR(int, __xuname, int size, void *utsname) {
  ENSURE_MSAN_INITED();
  int res = REAL(__xuname)(size, utsname);
  if (!res) __msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
  return res;
}
#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(__xuname)
#else
INTERCEPTOR(int, uname, struct utsname *utsname) {
  ENSURE_MSAN_INITED();
  int res = REAL(uname)(utsname);
  if (!res) __msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
  return res;
}
#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(uname)
#endif

INTERCEPTOR(int, gethostname, char *name, SIZE_T len) {
  ENSURE_MSAN_INITED();
  int res = REAL(gethostname)(name, len);
  if (!res) {
    SIZE_T real_len = REAL(strnlen)(name, len);
    if (real_len < len)
      ++real_len;
    __msan_unpoison(name, real_len);
  }
  return res;
}

#if !SANITIZER_FREEBSD
INTERCEPTOR(int, epoll_wait, int epfd, void *events, int maxevents,
            int timeout) {
  ENSURE_MSAN_INITED();
  int res = REAL(epoll_wait)(epfd, events, maxevents, timeout);
  if (res > 0) {
    __msan_unpoison(events, __sanitizer::struct_epoll_event_sz * res);
  }
  return res;
}
#define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT INTERCEPT_FUNCTION(epoll_wait)
#else
#define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT
#endif

#if !SANITIZER_FREEBSD
INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents,
            int timeout, void *sigmask) {
  ENSURE_MSAN_INITED();
  int res = REAL(epoll_pwait)(epfd, events, maxevents, timeout, sigmask);
  if (res > 0) {
    __msan_unpoison(events, __sanitizer::struct_epoll_event_sz * res);
  }
  return res;
}
#define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT INTERCEPT_FUNCTION(epoll_pwait)
#else
#define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT
#endif

INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
  GET_MALLOC_STACK_TRACE;
  if (UNLIKELY(!msan_inited))
    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
    return AllocateFromLocalPool(nmemb * size);
-  return MsanCalloc(&stack, nmemb, size);
+  return msan_calloc(nmemb, size, &stack);
}

INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
  GET_MALLOC_STACK_TRACE;
  if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
    uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
    uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
    void *new_ptr;
    if (UNLIKELY(!msan_inited)) {
      new_ptr = AllocateFromLocalPool(copy_size);
    } else {
      copy_size = size;
-      new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false);
+      new_ptr = msan_malloc(copy_size, &stack);
    }
    internal_memcpy(new_ptr, ptr, copy_size);
    return new_ptr;
  }
-  return MsanReallocate(&stack, ptr, size, sizeof(u64), false);
+  return msan_realloc(ptr, size, &stack);
}

INTERCEPTOR(void *, malloc, SIZE_T size) {
  GET_MALLOC_STACK_TRACE;
  if (UNLIKELY(!msan_inited))
    // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
    return AllocateFromLocalPool(size);
-  return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
+  return msan_malloc(size, &stack);
}

void __msan_allocated_memory(const void *data, uptr size) {
  GET_MALLOC_STACK_TRACE;
  if (flags()->poison_in_malloc) {
    stack.tag = STACK_TRACE_TAG_POISON;
    PoisonMemory(data, size, &stack);
  }
}

void __msan_copy_shadow(void *dest, const void *src, uptr n) {
  GET_STORE_STACK_TRACE;
  MoveShadowAndOrigin(dest, src, n, &stack);
}

void __sanitizer_dtor_callback(const void *data, uptr size) {
  GET_MALLOC_STACK_TRACE;
  if (flags()->poison_in_dtor) {
    stack.tag = STACK_TRACE_TAG_POISON;
    PoisonMemory(data, size, &stack);
  }
}

INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
            int fd, OFF_T offset) {
  if (msan_init_is_running)
    return REAL(mmap)(addr, length, prot, flags, fd, offset);
  ENSURE_MSAN_INITED();
  if (addr && !MEM_IS_APP(addr)) {
    if (flags & map_fixed) {
      errno = errno_EINVAL;
      return (void *)-1;
    } else {
      addr = nullptr;
    }
  }
  void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
  if (res != (void*)-1)
    __msan_unpoison(res, RoundUpTo(length, GetPageSize()));
  return res;
}

#if !SANITIZER_FREEBSD
INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
            int fd, OFF64_T offset) {
  ENSURE_MSAN_INITED();
  if (addr && !MEM_IS_APP(addr)) {
    if (flags & map_fixed) {
      errno = errno_EINVAL;
      return (void *)-1;
    } else {
      addr = nullptr;
    }
  }
  void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset);
  if (res != (void*)-1)
    __msan_unpoison(res, RoundUpTo(length, GetPageSize()));
  return res;
}
#define MSAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64)
#else
#define MSAN_MAYBE_INTERCEPT_MMAP64
#endif

INTERCEPTOR(int, getrusage, int who, void *usage) {
  ENSURE_MSAN_INITED();
  int res = REAL(getrusage)(who, usage);
  if (res == 0) {
    __msan_unpoison(usage, __sanitizer::struct_rusage_sz);
  }
  return res;
}

class SignalHandlerScope {
 public:
  SignalHandlerScope() {
    if (MsanThread *t = GetCurrentThread())
      t->EnterSignalHandler();
  }
  ~SignalHandlerScope() {
    if (MsanThread *t = GetCurrentThread())
      t->LeaveSignalHandler();
  }
};

// sigactions_mu guarantees atomicity of sigaction() and signal() calls.
// Access to sigactions[] is done with relaxed atomics to avoid data race with
// the signal handler.
const int kMaxSignals = 1024;
static atomic_uintptr_t sigactions[kMaxSignals];
static StaticSpinMutex sigactions_mu;

static void SignalHandler(int signo) {
  SignalHandlerScope signal_handler_scope;
  ScopedThreadLocalStateBackup stlsb;
  UnpoisonParam(1);

  typedef void (*signal_cb)(int x);
  signal_cb cb =
      (signal_cb)atomic_load(&sigactions[signo], memory_order_relaxed);
  cb(signo);
}

static void SignalAction(int signo, void *si, void *uc) {
  SignalHandlerScope signal_handler_scope;
  ScopedThreadLocalStateBackup stlsb;
  UnpoisonParam(3);
  __msan_unpoison(si, sizeof(__sanitizer_sigaction));
  __msan_unpoison(uc, __sanitizer::ucontext_t_sz);

  typedef void (*sigaction_cb)(int, void *, void *);
  sigaction_cb cb =
      (sigaction_cb)atomic_load(&sigactions[signo], memory_order_relaxed);
  cb(signo, si, uc);
}

INTERCEPTOR(int, sigaction, int signo, const __sanitizer_sigaction *act,
            __sanitizer_sigaction *oldact) {
  ENSURE_MSAN_INITED();
  // FIXME: check that *act is unpoisoned.
  // That requires intercepting all of sigemptyset, sigfillset, etc.
  int res;
  if (flags()->wrap_signals) {
    SpinMutexLock lock(&sigactions_mu);
    CHECK_LT(signo, kMaxSignals);
    uptr old_cb = atomic_load(&sigactions[signo], memory_order_relaxed);
    __sanitizer_sigaction new_act;
    __sanitizer_sigaction *pnew_act = act ?
&new_act : nullptr; if (act) { REAL(memcpy)(pnew_act, act, sizeof(__sanitizer_sigaction)); uptr cb = (uptr)pnew_act->sigaction; uptr new_cb = (pnew_act->sa_flags & __sanitizer::sa_siginfo) ? (uptr)SignalAction : (uptr)SignalHandler; if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { atomic_store(&sigactions[signo], cb, memory_order_relaxed); pnew_act->sigaction = (void (*)(int, void *, void *))new_cb; } } res = REAL(sigaction)(signo, pnew_act, oldact); if (res == 0 && oldact) { uptr cb = (uptr)oldact->sigaction; if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { oldact->sigaction = (void (*)(int, void *, void *))old_cb; } } } else { res = REAL(sigaction)(signo, act, oldact); } if (res == 0 && oldact) { __msan_unpoison(oldact, sizeof(__sanitizer_sigaction)); } return res; } INTERCEPTOR(int, signal, int signo, uptr cb) { ENSURE_MSAN_INITED(); if (flags()->wrap_signals) { CHECK_LT(signo, kMaxSignals); SpinMutexLock lock(&sigactions_mu); if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { atomic_store(&sigactions[signo], cb, memory_order_relaxed); cb = (uptr) SignalHandler; } return REAL(signal)(signo, cb); } else { return REAL(signal)(signo, cb); } } extern "C" int pthread_attr_init(void *attr); extern "C" int pthread_attr_destroy(void *attr); static void *MsanThreadStartFunc(void *arg) { MsanThread *t = (MsanThread *)arg; SetCurrentThread(t); return t->ThreadStart(); } INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*), void * param) { ENSURE_MSAN_INITED(); // for GetTlsSize() __sanitizer_pthread_attr_t myattr; if (!attr) { pthread_attr_init(&myattr); attr = &myattr; } AdjustStackSize(attr); MsanThread *t = MsanThread::Create(callback, param); int res = REAL(pthread_create)(th, attr, MsanThreadStartFunc, t); if (attr == &myattr) pthread_attr_destroy(&myattr); if (!res) { __msan_unpoison(th, __sanitizer::pthread_t_sz); } return res; } INTERCEPTOR(int, pthread_key_create, __sanitizer_pthread_key_t *key, void (*dtor)(void *value)) { if (msan_init_is_running) return REAL(pthread_key_create)(key, dtor); ENSURE_MSAN_INITED(); int res = REAL(pthread_key_create)(key, dtor); if (!res && key) __msan_unpoison(key, sizeof(*key)); return res; } INTERCEPTOR(int, pthread_join, void *th, void **retval) { ENSURE_MSAN_INITED(); int res = REAL(pthread_join)(th, retval); if (!res && retval) __msan_unpoison(retval, sizeof(*retval)); return res; } extern char *tzname[2]; INTERCEPTOR(void, tzset, int fake) { ENSURE_MSAN_INITED(); REAL(tzset)(fake); if (tzname[0]) __msan_unpoison(tzname[0], REAL(strlen)(tzname[0]) + 1); if (tzname[1]) __msan_unpoison(tzname[1], REAL(strlen)(tzname[1]) + 1); return; } struct MSanAtExitRecord { void (*func)(void *arg); void *arg; }; void MSanAtExitWrapper(void *arg) { UnpoisonParam(1); MSanAtExitRecord *r = (MSanAtExitRecord *)arg; r->func(r->arg); InternalFree(r); } // Unpoison argument shadow for C++ module destructors. 
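When wrap_signals is set, the interceptors above park the user callback in the atomic sigactions[] table and install SignalHandler/SignalAction instead, so argument shadow can be cleared before user code runs. Stripped of the MSan specifics, the dispatch pattern looks roughly like this (hypothetical stand-in using std::atomic; the runtime uses its own atomics and a spin mutex):

#include <atomic>
#include <csignal>

static std::atomic<void (*)(int)> wrapped_handlers[64];

// Installed in place of the user handler; in MSan, unpoisoning of the
// handler's arguments would happen here, before user code runs.
static void wrapper_handler(int signo) {
  auto cb = wrapped_handlers[signo].load(std::memory_order_relaxed);
  if (cb) cb(signo);
}

static void install_wrapped(int signo, void (*cb)(int)) {
  wrapped_handlers[signo].store(cb, std::memory_order_relaxed);
  signal(signo, wrapper_handler);  // the real table swap happens in REAL(...)
}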
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, void *dso_handle) { if (msan_init_is_running) return REAL(__cxa_atexit)(func, arg, dso_handle); ENSURE_MSAN_INITED(); MSanAtExitRecord *r = (MSanAtExitRecord *)InternalAlloc(sizeof(MSanAtExitRecord)); r->func = func; r->arg = arg; return REAL(__cxa_atexit)(MSanAtExitWrapper, r, dso_handle); } DECLARE_REAL(int, shmctl, int shmid, int cmd, void *buf) INTERCEPTOR(void *, shmat, int shmid, const void *shmaddr, int shmflg) { ENSURE_MSAN_INITED(); void *p = REAL(shmat)(shmid, shmaddr, shmflg); if (p != (void *)-1) { __sanitizer_shmid_ds ds; int res = REAL(shmctl)(shmid, shmctl_ipc_stat, &ds); if (!res) { __msan_unpoison(p, ds.shm_segsz); } } return p; } static void BeforeFork() { StackDepotLockAll(); ChainedOriginDepotLockAll(); } static void AfterFork() { ChainedOriginDepotUnlockAll(); StackDepotUnlockAll(); } INTERCEPTOR(int, fork, void) { ENSURE_MSAN_INITED(); BeforeFork(); int pid = REAL(fork)(); AfterFork(); return pid; } INTERCEPTOR(int, openpty, int *amaster, int *aslave, char *name, const void *termp, const void *winp) { ENSURE_MSAN_INITED(); InterceptorScope interceptor_scope; int res = REAL(openpty)(amaster, aslave, name, termp, winp); if (!res) { __msan_unpoison(amaster, sizeof(*amaster)); __msan_unpoison(aslave, sizeof(*aslave)); } return res; } INTERCEPTOR(int, forkpty, int *amaster, char *name, const void *termp, const void *winp) { ENSURE_MSAN_INITED(); InterceptorScope interceptor_scope; int res = REAL(forkpty)(amaster, name, termp, winp); if (res != -1) __msan_unpoison(amaster, sizeof(*amaster)); return res; } struct MSanInterceptorContext { bool in_interceptor_scope; }; namespace __msan { int OnExit() { // FIXME: ask frontend whether we need to return failure. return 0; } } // namespace __msan // A version of CHECK_UNPOISONED using a saved scope value. Used in common // interceptors. #define CHECK_UNPOISONED_CTX(ctx, x, n) \ do { \ if (!((MSanInterceptorContext *)ctx)->in_interceptor_scope) \ CHECK_UNPOISONED_0(x, n); \ } while (0) #define MSAN_INTERCEPT_FUNC(name) \ do { \ if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \ VReport(1, "MemorySanitizer: failed to intercept '" #name "'\n"); \ } while (0) #define MSAN_INTERCEPT_FUNC_VER(name, ver) \ do { \ if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \ VReport( \ 1, "MemorySanitizer: failed to intercept '" #name "@@" #ver "'\n"); \ } while (0) #define COMMON_INTERCEPT_FUNCTION(name) MSAN_INTERCEPT_FUNC(name) #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \ MSAN_INTERCEPT_FUNC_VER(name, ver) #define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) \ UnpoisonParam(count) #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ __msan_unpoison(ptr, size) #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ CHECK_UNPOISONED_CTX(ctx, ptr, size) #define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ptr, size) \ __msan_unpoison(ptr, size) #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) 
\ if (msan_init_is_running) return REAL(func)(__VA_ARGS__); \ ENSURE_MSAN_INITED(); \ MSanInterceptorContext msan_ctx = {IsInInterceptorScope()}; \ ctx = (void *)&msan_ctx; \ (void)ctx; \ InterceptorScope interceptor_scope; \ __msan_unpoison(__errno_location(), sizeof(int)); /* NOLINT */ #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ do { \ } while (false) // FIXME #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ do { \ } while (false) // FIXME #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ do { \ link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE((handle)); \ if (filename && map) \ ForEachMappedRegion(map, __msan_unpoison); \ } while (false) #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ if (MsanThread *t = GetCurrentThread()) { \ *begin = t->tls_begin(); \ *end = t->tls_end(); \ } else { \ *begin = *end = 0; \ } #define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \ { \ (void)ctx; \ return __msan_memset(block, c, size); \ } #define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \ { \ (void)ctx; \ return __msan_memmove(to, from, size); \ } #define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \ { \ (void)ctx; \ return __msan_memcpy(to, from, size); \ } #define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) \ do { \ GET_STORE_STACK_TRACE; \ CopyShadowAndOrigin(to, from, size, &stack); \ __msan_unpoison(to + size, 1); \ } while (false) #include "sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_common_interceptors.inc" #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s) #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \ do { \ } while (false) #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ do { \ } while (false) #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) __msan_unpoison(p, s) #include "sanitizer_common/sanitizer_common_syscalls.inc" struct dlinfo { char *dli_fname; void *dli_fbase; char *dli_sname; void *dli_saddr; }; INTERCEPTOR(int, dladdr, void *addr, dlinfo *info) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, dladdr, addr, info); int res = REAL(dladdr)(addr, info); if (res != 0) { __msan_unpoison(info, sizeof(*info)); if (info->dli_fname) __msan_unpoison(info->dli_fname, REAL(strlen)(info->dli_fname) + 1); if (info->dli_sname) __msan_unpoison(info->dli_sname, REAL(strlen)(info->dli_sname) + 1); } return res; } INTERCEPTOR(char *, dlerror, int fake) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, dlerror, fake); char *res = REAL(dlerror)(fake); if (res) __msan_unpoison(res, REAL(strlen)(res) + 1); return res; } typedef int (*dl_iterate_phdr_cb)(__sanitizer_dl_phdr_info *info, SIZE_T size, void *data); struct dl_iterate_phdr_data { dl_iterate_phdr_cb callback; void *data; }; static int msan_dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size, void *data) { if (info) { __msan_unpoison(info, size); if (info->dlpi_phdr && info->dlpi_phnum) __msan_unpoison(info->dlpi_phdr, struct_ElfW_Phdr_sz * info->dlpi_phnum); if (info->dlpi_name) __msan_unpoison(info->dlpi_name, REAL(strlen)(info->dlpi_name) + 1); } dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data; 
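For MSan, the common-interceptor hooks above collapse to two primitives: COMMON_INTERCEPTOR_READ_RANGE checks that application memory is initialized, and COMMON_INTERCEPTOR_WRITE_RANGE marks it initialized. A sketch of how generated wrappers for read(2) and write(2) would use them (illustrative only; the real generated code also does ENTER bookkeeping and fd hooks):

#include <unistd.h>

// Stand-ins for the MSan primitives behind READ_RANGE / WRITE_RANGE.
static void check_initialized(const void *p, size_t n) { (void)p; (void)n; }
static void mark_initialized(const void *p, size_t n) { (void)p; (void)n; }

// The kernel fills buf, so exactly the bytes read become initialized.
static ssize_t read_model(int fd, void *buf, size_t count) {
  ssize_t res = read(fd, buf, count);               // REAL(read)(...)
  if (res > 0) mark_initialized(buf, (size_t)res);  // WRITE_RANGE
  return res;
}

// The app hands data to the kernel, so it must already be initialized.
static ssize_t write_model(int fd, const void *buf, size_t count) {
  check_initialized(buf, count);                    // READ_RANGE
  return write(fd, buf, count);                     // REAL(write)(...)
}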
UnpoisonParam(3); return cbdata->callback(info, size, cbdata->data); } INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb callback, void *data) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, dl_iterate_phdr, callback, data); dl_iterate_phdr_data cbdata; cbdata.callback = callback; cbdata.data = data; int res = REAL(dl_iterate_phdr)(msan_dl_iterate_phdr_cb, (void *)&cbdata); return res; } // wchar_t *wcschr(const wchar_t *wcs, wchar_t wc); INTERCEPTOR(wchar_t *, wcschr, void *s, wchar_t wc, void *ps) { ENSURE_MSAN_INITED(); wchar_t *res = REAL(wcschr)(s, wc, ps); return res; } // wchar_t *wcscpy(wchar_t *dest, const wchar_t *src); INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; wchar_t *res = REAL(wcscpy)(dest, src); CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1), &stack); return res; } INTERCEPTOR(wchar_t *, wcsncpy, wchar_t *dest, const wchar_t *src, SIZE_T n) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T copy_size = REAL(wcsnlen)(src, n); if (copy_size < n) copy_size++; // trailing \0 wchar_t *res = REAL(wcsncpy)(dest, src, n); // NOLINT CopyShadowAndOrigin(dest, src, copy_size * sizeof(wchar_t), &stack); __msan_unpoison(dest + copy_size, (n - copy_size) * sizeof(wchar_t)); return res; } // These interface functions reside here so that they can use // REAL(memset), etc. void __msan_unpoison(const void *a, uptr size) { if (!MEM_IS_APP(a)) return; SetShadow(a, size, 0); } void __msan_poison(const void *a, uptr size) { if (!MEM_IS_APP(a)) return; SetShadow(a, size, __msan::flags()->poison_heap_with_zeroes ? 0 : -1); } void __msan_poison_stack(void *a, uptr size) { if (!MEM_IS_APP(a)) return; SetShadow(a, size, __msan::flags()->poison_stack_with_zeroes ? 
0 : -1); } void __msan_clear_and_unpoison(void *a, uptr size) { REAL(memset)(a, 0, size); SetShadow(a, size, 0); } void *__msan_memcpy(void *dest, const void *src, SIZE_T n) { if (!msan_inited) return internal_memcpy(dest, src, n); if (msan_init_is_running || __msan::IsInSymbolizer()) return REAL(memcpy)(dest, src, n); ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; void *res = REAL(memcpy)(dest, src, n); CopyShadowAndOrigin(dest, src, n, &stack); return res; } void *__msan_memset(void *s, int c, SIZE_T n) { if (!msan_inited) return internal_memset(s, c, n); if (msan_init_is_running) return REAL(memset)(s, c, n); ENSURE_MSAN_INITED(); void *res = REAL(memset)(s, c, n); __msan_unpoison(s, n); return res; } void *__msan_memmove(void *dest, const void *src, SIZE_T n) { if (!msan_inited) return internal_memmove(dest, src, n); if (msan_init_is_running) return REAL(memmove)(dest, src, n); ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; void *res = REAL(memmove)(dest, src, n); MoveShadowAndOrigin(dest, src, n, &stack); return res; } void __msan_unpoison_string(const char* s) { if (!MEM_IS_APP(s)) return; __msan_unpoison(s, REAL(strlen)(s) + 1); } namespace __msan { void InitializeInterceptors() { static int inited = 0; CHECK_EQ(inited, 0); InitializeCommonInterceptors(); INTERCEPT_FUNCTION(mmap); MSAN_MAYBE_INTERCEPT_MMAP64; INTERCEPT_FUNCTION(posix_memalign); MSAN_MAYBE_INTERCEPT_MEMALIGN; INTERCEPT_FUNCTION(__libc_memalign); INTERCEPT_FUNCTION(valloc); MSAN_MAYBE_INTERCEPT_PVALLOC; INTERCEPT_FUNCTION(malloc); INTERCEPT_FUNCTION(calloc); INTERCEPT_FUNCTION(realloc); INTERCEPT_FUNCTION(free); MSAN_MAYBE_INTERCEPT_CFREE; INTERCEPT_FUNCTION(malloc_usable_size); MSAN_MAYBE_INTERCEPT_MALLINFO; MSAN_MAYBE_INTERCEPT_MALLOPT; MSAN_MAYBE_INTERCEPT_MALLOC_STATS; INTERCEPT_FUNCTION(fread); MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED; INTERCEPT_FUNCTION(readlink); INTERCEPT_FUNCTION(memccpy); INTERCEPT_FUNCTION(mempcpy); INTERCEPT_FUNCTION(bcopy); INTERCEPT_FUNCTION(wmemset); INTERCEPT_FUNCTION(wmemcpy); INTERCEPT_FUNCTION(wmempcpy); INTERCEPT_FUNCTION(wmemmove); INTERCEPT_FUNCTION(strcpy); // NOLINT INTERCEPT_FUNCTION(stpcpy); // NOLINT INTERCEPT_FUNCTION(strdup); MSAN_MAYBE_INTERCEPT___STRDUP; INTERCEPT_FUNCTION(strncpy); // NOLINT INTERCEPT_FUNCTION(gcvt); INTERCEPT_FUNCTION(strcat); // NOLINT INTERCEPT_FUNCTION(strncat); // NOLINT INTERCEPT_STRTO(strtod); INTERCEPT_STRTO(strtof); INTERCEPT_STRTO(strtold); INTERCEPT_STRTO(strtol); INTERCEPT_STRTO(strtoul); INTERCEPT_STRTO(strtoll); INTERCEPT_STRTO(strtoull); INTERCEPT_STRTO(wcstod); INTERCEPT_STRTO(wcstof); INTERCEPT_STRTO(wcstold); INTERCEPT_STRTO(wcstol); INTERCEPT_STRTO(wcstoul); INTERCEPT_STRTO(wcstoll); INTERCEPT_STRTO(wcstoull); #ifdef SANITIZER_NLDBL_VERSION INTERCEPT_FUNCTION_VER(vswprintf, SANITIZER_NLDBL_VERSION); INTERCEPT_FUNCTION_VER(swprintf, SANITIZER_NLDBL_VERSION); #else INTERCEPT_FUNCTION(vswprintf); INTERCEPT_FUNCTION(swprintf); #endif INTERCEPT_FUNCTION(strxfrm); INTERCEPT_FUNCTION(strxfrm_l); INTERCEPT_FUNCTION(strftime); INTERCEPT_FUNCTION(strftime_l); MSAN_MAYBE_INTERCEPT___STRFTIME_L; INTERCEPT_FUNCTION(wcsftime); INTERCEPT_FUNCTION(wcsftime_l); MSAN_MAYBE_INTERCEPT___WCSFTIME_L; INTERCEPT_FUNCTION(mbtowc); INTERCEPT_FUNCTION(mbrtowc); INTERCEPT_FUNCTION(wcslen); INTERCEPT_FUNCTION(wcsnlen); INTERCEPT_FUNCTION(wcschr); INTERCEPT_FUNCTION(wcscpy); INTERCEPT_FUNCTION(wcsncpy); INTERCEPT_FUNCTION(wcscmp); INTERCEPT_FUNCTION(getenv); INTERCEPT_FUNCTION(setenv); INTERCEPT_FUNCTION(putenv); INTERCEPT_FUNCTION(gettimeofday); INTERCEPT_FUNCTION(fcvt); 
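__msan_memcpy and its siblings above use a three-stage guard: before initialization, fall back to the internal implementation; while initialization (or symbolization) is in flight, call the real libc function without touching shadow; only in steady state do full shadow/origin propagation. The control flow in isolation (stand-in state enum, stubbed shadow copy):

#include <cstring>

enum InitState { kNotInited, kInitRunning, kInited };
static InitState state = kNotInited;  // stand-in for msan_inited et al.

static void copy_shadow_stub(void *d, const void *s, size_t n) {
  (void)d; (void)s; (void)n;  // CopyShadowAndOrigin would run here
}

static void *guarded_memcpy(void *dest, const void *src, size_t n) {
  if (state == kNotInited)
    return memcpy(dest, src, n);     // internal_memcpy: libc may be unusable
  if (state == kInitRunning)
    return memcpy(dest, src, n);     // REAL(memcpy), but no shadow update yet
  void *res = memcpy(dest, src, n);  // REAL(memcpy)
  copy_shadow_stub(dest, src, n);    // full shadow/origin propagation
  return res;
}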
  MSAN_MAYBE_INTERCEPT___FXSTAT;
  MSAN_INTERCEPT_FSTATAT;
  MSAN_MAYBE_INTERCEPT___FXSTAT64;
  MSAN_MAYBE_INTERCEPT___FXSTATAT64;
  INTERCEPT_FUNCTION(pipe);
  INTERCEPT_FUNCTION(pipe2);
  INTERCEPT_FUNCTION(socketpair);
  INTERCEPT_FUNCTION(fgets);
  MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED;
  INTERCEPT_FUNCTION(getrlimit);
  MSAN_MAYBE_INTERCEPT_GETRLIMIT64;
  MSAN_MAYBE_INTERCEPT_PRLIMIT;
  MSAN_MAYBE_INTERCEPT_PRLIMIT64;
  MSAN_INTERCEPT_UNAME;
  INTERCEPT_FUNCTION(gethostname);
  MSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
  MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT;
  INTERCEPT_FUNCTION(dladdr);
  INTERCEPT_FUNCTION(dlerror);
  INTERCEPT_FUNCTION(dl_iterate_phdr);
  INTERCEPT_FUNCTION(getrusage);
  INTERCEPT_FUNCTION(sigaction);
  INTERCEPT_FUNCTION(signal);
#if defined(__mips__)
  INTERCEPT_FUNCTION_VER(pthread_create, "GLIBC_2.2");
#else
  INTERCEPT_FUNCTION(pthread_create);
#endif
  INTERCEPT_FUNCTION(pthread_key_create);
  INTERCEPT_FUNCTION(pthread_join);
  INTERCEPT_FUNCTION(tzset);
  INTERCEPT_FUNCTION(__cxa_atexit);
  INTERCEPT_FUNCTION(shmat);
  INTERCEPT_FUNCTION(fork);
  INTERCEPT_FUNCTION(openpty);
  INTERCEPT_FUNCTION(forkpty);

  inited = 1;
}
} // namespace __msan

diff --git a/lib/msan/msan_new_delete.cc b/lib/msan/msan_new_delete.cc
index c7295feebfe4..721926791029 100644
--- a/lib/msan/msan_new_delete.cc
+++ b/lib/msan/msan_new_delete.cc
@@ -1,66 +1,66 @@
//===-- msan_new_delete.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Interceptors for operators new and delete.
//===----------------------------------------------------------------------===//

#include "msan.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"

#if MSAN_REPLACE_OPERATORS_NEW_AND_DELETE
#include <stddef.h>

using namespace __msan;  // NOLINT

// Fake std::nothrow_t to avoid including <new>.
namespace std {
  struct nothrow_t {};
} // namespace std

// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
#define OPERATOR_NEW_BODY(nothrow) \
  GET_MALLOC_STACK_TRACE; \
-  void *res = MsanReallocate(&stack, 0, size, sizeof(u64), false);\
+  void *res = msan_malloc(size, &stack);\
  if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
  return res

INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&) {
  OPERATOR_NEW_BODY(true /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&) {
  OPERATOR_NEW_BODY(true /*nothrow*/);
}

#define OPERATOR_DELETE_BODY \
  GET_MALLOC_STACK_TRACE; \
  if (ptr) MsanDeallocate(&stack, ptr)

INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) {
  OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) {
  OPERATOR_DELETE_BODY;
}

#endif // MSAN_REPLACE_OPERATORS_NEW_AND_DELETE

diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index 2f8f6e3f9aa7..84f523c5e431 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -1,258 +1,253 @@
//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
+#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
# endif
# define __libc_realloc realloc
# define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but the allocated blocks can't be passed to free,
  // they need to be passed to _aligned_free. InternalAlloc interface does
  // not account for such requirement. Alignment does not seem to be used
  // anywhere in runtime, so just call __libc_malloc for now.
DCHECK_EQ(alignment, 0); return __libc_malloc(size); #endif } static void *RawInternalRealloc(void *ptr, uptr size, InternalAllocatorCache *cache) { (void)cache; return __libc_realloc(ptr, size); } static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) { (void)cache; __libc_free(ptr); } InternalAllocator *internal_allocator() { return 0; } #else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC) static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)]; static atomic_uint8_t internal_allocator_initialized; static StaticSpinMutex internal_alloc_init_mu; static InternalAllocatorCache internal_allocator_cache; static StaticSpinMutex internal_allocator_cache_mu; InternalAllocator *internal_allocator() { InternalAllocator *internal_allocator_instance = reinterpret_cast(&internal_alloc_placeholder); if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) { SpinMutexLock l(&internal_alloc_init_mu); if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) == 0) { internal_allocator_instance->Init(kReleaseToOSIntervalNever); atomic_store(&internal_allocator_initialized, 1, memory_order_release); } } return internal_allocator_instance; } static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) { if (alignment == 0) alignment = 8; if (cache == 0) { SpinMutexLock l(&internal_allocator_cache_mu); return internal_allocator()->Allocate(&internal_allocator_cache, size, alignment); } return internal_allocator()->Allocate(cache, size, alignment); } static void *RawInternalRealloc(void *ptr, uptr size, InternalAllocatorCache *cache) { uptr alignment = 8; if (cache == 0) { SpinMutexLock l(&internal_allocator_cache_mu); return internal_allocator()->Reallocate(&internal_allocator_cache, ptr, size, alignment); } return internal_allocator()->Reallocate(cache, ptr, size, alignment); } static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) { if (!cache) { SpinMutexLock l(&internal_allocator_cache_mu); return internal_allocator()->Deallocate(&internal_allocator_cache, ptr); } internal_allocator()->Deallocate(cache, ptr); } #endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC) const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull; void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) { if (size + sizeof(u64) < size) return nullptr; void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment); if (!p) return nullptr; ((u64*)p)[0] = kBlockMagic; return (char*)p + sizeof(u64); } void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) { if (!addr) return InternalAlloc(size, cache); if (size + sizeof(u64) < size) return nullptr; addr = (char*)addr - sizeof(u64); size = size + sizeof(u64); CHECK_EQ(kBlockMagic, ((u64*)addr)[0]); void *p = RawInternalRealloc(addr, size, cache); if (!p) return nullptr; return (char*)p + sizeof(u64); } void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) { - if (CheckForCallocOverflow(count, size)) + if (UNLIKELY(CheckForCallocOverflow(count, size))) return InternalAllocator::FailureHandler::OnBadRequest(); void *p = InternalAlloc(count * size, cache); if (p) internal_memset(p, 0, count * size); return p; } void InternalFree(void *addr, InternalAllocatorCache *cache) { if (!addr) return; addr = (char*)addr - sizeof(u64); CHECK_EQ(kBlockMagic, ((u64*)addr)[0]); ((u64*)addr)[0] = 0; RawInternalFree(addr, cache); } // LowLevelAllocator static LowLevelAllocateCallback low_level_alloc_callback; void 
*LowLevelAllocator::Allocate(uptr size) { // Align allocation size. size = RoundUpTo(size, 8); if (allocated_end_ - allocated_current_ < (sptr)size) { uptr size_to_allocate = Max(size, GetPageSizeCached()); allocated_current_ = (char*)MmapOrDie(size_to_allocate, __func__); allocated_end_ = allocated_current_ + size_to_allocate; if (low_level_alloc_callback) { low_level_alloc_callback((uptr)allocated_current_, size_to_allocate); } } CHECK(allocated_end_ - allocated_current_ >= (sptr)size); void *res = allocated_current_; allocated_current_ += size; return res; } void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) { low_level_alloc_callback = callback; } -bool CheckForCallocOverflow(uptr size, uptr n) { - if (!size) return false; - uptr max = (uptr)-1L; - return (max / size) < n; -} - static atomic_uint8_t allocator_out_of_memory = {0}; static atomic_uint8_t allocator_may_return_null = {0}; bool IsAllocatorOutOfMemory() { return atomic_load_relaxed(&allocator_out_of_memory); } // Prints error message and kills the program. void NORETURN ReportAllocatorCannotReturnNull() { Report("%s's allocator is terminating the process instead of returning 0\n", SanitizerToolName); Report("If you don't like this behavior set allocator_may_return_null=1\n"); CHECK(0); Die(); } bool AllocatorMayReturnNull() { return atomic_load(&allocator_may_return_null, memory_order_relaxed); } void SetAllocatorMayReturnNull(bool may_return_null) { atomic_store(&allocator_may_return_null, may_return_null, memory_order_relaxed); } void *ReturnNullOrDieOnFailure::OnBadRequest() { if (AllocatorMayReturnNull()) return nullptr; ReportAllocatorCannotReturnNull(); } void *ReturnNullOrDieOnFailure::OnOOM() { atomic_store_relaxed(&allocator_out_of_memory, 1); if (AllocatorMayReturnNull()) return nullptr; ReportAllocatorCannotReturnNull(); } void NORETURN *DieOnFailure::OnBadRequest() { ReportAllocatorCannotReturnNull(); } void NORETURN *DieOnFailure::OnOOM() { atomic_store_relaxed(&allocator_out_of_memory, 1); ReportAllocatorCannotReturnNull(); } } // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h index 0fb8a087ed6b..8c5696ea789c 100644 --- a/lib/sanitizer_common/sanitizer_allocator.h +++ b/lib/sanitizer_common/sanitizer_allocator.h @@ -1,75 +1,70 @@ //===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc. // //===----------------------------------------------------------------------===// #ifndef SANITIZER_ALLOCATOR_H #define SANITIZER_ALLOCATOR_H #include "sanitizer_internal_defs.h" #include "sanitizer_common.h" #include "sanitizer_libc.h" #include "sanitizer_list.h" #include "sanitizer_mutex.h" #include "sanitizer_lfstack.h" #include "sanitizer_procmaps.h" namespace __sanitizer { // Since flags are immutable and allocator behavior can be changed at runtime // (unit tests or ASan on Android are some examples), allocator_may_return_null // flag value is cached here and can be altered later. 
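AllocateFromLocalPool in the MSan hunk earlier and LowLevelAllocator::Allocate above are both bump allocators: a cursor marches through a region and individual frees never happen. The core, reduced to a fixed static buffer (hypothetical sketch; the real LowLevelAllocator mmaps fresh pages on demand and CHECKs instead of returning null):

#include <cstddef>

static char pool[4096];    // stand-in for the mmapped region
static size_t cursor = 0;

static void *bump_alloc(size_t size) {
  size = (size + 7) & ~(size_t)7;  // round up, like RoundUpTo(size, 8)
  if (cursor + size > sizeof(pool)) return nullptr;  // real code grows/CHECKs
  void *res = pool + cursor;
  cursor += size;
  return res;                      // never individually freed
}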
bool AllocatorMayReturnNull(); void SetAllocatorMayReturnNull(bool may_return_null); // Allocator failure handling policies: // Implements AllocatorMayReturnNull policy, returns null when the flag is set, // dies otherwise. struct ReturnNullOrDieOnFailure { static void *OnBadRequest(); static void *OnOOM(); }; // Always dies on the failure. struct DieOnFailure { static void NORETURN *OnBadRequest(); static void NORETURN *OnOOM(); }; // Returns true if allocator detected OOM condition. Can be used to avoid memory // hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called. bool IsAllocatorOutOfMemory(); // Allocators call these callbacks on mmap/munmap. struct NoOpMapUnmapCallback { void OnMap(uptr p, uptr size) const { } void OnUnmap(uptr p, uptr size) const { } }; // Callback type for iterating over chunks. typedef void (*ForEachChunkCallback)(uptr chunk, void *arg); -// Returns true if calloc(size, n) call overflows on size*n calculation. -// The caller should "return POLICY::OnBadRequest();" where POLICY is the -// current allocator failure handling policy. -bool CheckForCallocOverflow(uptr size, uptr n); - #include "sanitizer_allocator_size_class_map.h" #include "sanitizer_allocator_stats.h" #include "sanitizer_allocator_primary64.h" #include "sanitizer_allocator_bytemap.h" #include "sanitizer_allocator_primary32.h" #include "sanitizer_allocator_local_cache.h" #include "sanitizer_allocator_secondary.h" #include "sanitizer_allocator_combined.h" } // namespace __sanitizer #endif // SANITIZER_ALLOCATOR_H diff --git a/lib/sanitizer_common/sanitizer_allocator_checks.h b/lib/sanitizer_common/sanitizer_allocator_checks.h new file mode 100644 index 000000000000..202916eae348 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_allocator_checks.h @@ -0,0 +1,64 @@ +//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory +// allocators. +// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_ALLOCATOR_CHECKS_H +#define SANITIZER_ALLOCATOR_CHECKS_H + +#include "sanitizer_errno.h" +#include "sanitizer_internal_defs.h" +#include "sanitizer_common.h" +#include "sanitizer_platform.h" + +namespace __sanitizer { + +// A common errno setting logic shared by almost all sanitizer allocator APIs. +INLINE void *SetErrnoOnNull(void *ptr) { + if (UNLIKELY(!ptr)) + errno = errno_ENOMEM; + return ptr; +} + +// In case of the check failure, the caller of the following Check... functions +// should "return POLICY::OnBadRequest();" where POLICY is the current allocator +// failure handling policy. + +// Checks aligned_alloc() parameters, verifies that the alignment is a power of +// two and that the size is a multiple of alignment for POSIX implementation, +// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple +// of alignment. +INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) { +#if SANITIZER_POSIX + return IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0; +#else + return size % alignment == 0; +#endif +} + +// Checks posix_memalign() parameters, verifies that alignment is a power of two +// and a multiple of sizeof(void *). 
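CheckAlignedAllocAlignmentAndSize and the posix_memalign check below both lean on the usual power-of-two bit trick: for a power of two a, size & (a - 1) equals size % a. Restated on explicit types with a few worked values:

#include <cstdint>

// Same predicate as IsPowerOfTwo in sanitizer_common (illustrative copy).
static bool is_pow2(uint64_t x) { return x && (x & (x - 1)) == 0; }

// POSIX branch of CheckAlignedAllocAlignmentAndSize, restated:
static bool aligned_alloc_ok(uint64_t alignment, uint64_t size) {
  return is_pow2(alignment) && (size & (alignment - 1)) == 0;
}
// aligned_alloc_ok(16, 48) == true   (48 = 3 * 16)
// aligned_alloc_ok(16, 40) == false  (40 % 16 == 8)
// aligned_alloc_ok(24, 48) == false  (24 is not a power of two)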
+INLINE bool CheckPosixMemalignAlignment(uptr alignment) { + return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0; // NOLINT +} + +// Returns true if calloc(size, n) call overflows on size*n calculation. +INLINE bool CheckForCallocOverflow(uptr size, uptr n) { + if (!size) + return false; + uptr max = (uptr)-1L; + return (max / size) < n; +} + +} // namespace __sanitizer + +#endif // SANITIZER_ALLOCATOR_CHECKS_H diff --git a/lib/sanitizer_common/sanitizer_errno.h b/lib/sanitizer_common/sanitizer_errno.h index c405307ba8ec..7872b89c227c 100644 --- a/lib/sanitizer_common/sanitizer_errno.h +++ b/lib/sanitizer_common/sanitizer_errno.h @@ -1,35 +1,37 @@ //===-- sanitizer_errno.h ---------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between sanitizers run-time libraries. // // Defines errno to avoid including errno.h and its dependencies into sensitive // files (e.g. interceptors are not supposed to include any system headers). // It's ok to use errno.h directly when your file already depend on other system // includes though. // //===----------------------------------------------------------------------===// #ifndef SANITIZER_ERRNO_H #define SANITIZER_ERRNO_H #include "sanitizer_errno_codes.h" #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_MAC # define __errno_location __error #elif SANITIZER_ANDROID # define __errno_location __errno +#elif SANITIZER_WINDOWS +# define __errno_location _errno #endif extern "C" int *__errno_location(); #define errno (*__errno_location()) #endif // SANITIZER_ERRNO_H diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc index a79a2a155db9..8c3c1e5d6a5d 100644 --- a/lib/sanitizer_common/sanitizer_linux.cc +++ b/lib/sanitizer_common/sanitizer_linux.cc @@ -1,1708 +1,1707 @@ //===-- sanitizer_linux.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries and implements linux-specific functions from // sanitizer_libc.h. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" #include "sanitizer_linux.h" #include "sanitizer_mutex.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" #include "sanitizer_symbolizer.h" #if !SANITIZER_FREEBSD #include #endif // For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat' // format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To // access stat from asm/stat.h, without conflicting with definition in // sys/stat.h, we use this trick. 
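CheckForCallocOverflow moves into this header unchanged from sanitizer_allocator.cc; it rejects count*size products that would wrap around uptr. Restated with explicit 64-bit types and worked values:

#include <cstdint>

// Same logic as CheckForCallocOverflow above, on explicit 64-bit types.
static bool calloc_overflows(uint64_t size, uint64_t n) {
  if (!size) return false;
  uint64_t max = ~uint64_t(0);
  return (max / size) < n;
}
// calloc_overflows(1ull << 32, 1ull << 32) == true
//   (the product 2^64 wraps to 0 in 64 bits)
// calloc_overflows(1ull << 32, (1ull << 32) - 1) == false
//   (2^64 - 2^32 still fits in a 64-bit uptr)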
#if defined(__mips64) #include #include #define stat kernel_stat #include #undef stat #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if SANITIZER_LINUX #include #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID #include #endif #if SANITIZER_FREEBSD #include #include #include extern "C" { // must be included after and on // FreeBSD 9.2 and 10.0. #include } extern char **environ; // provided by crt1 #endif // SANITIZER_FREEBSD #if !SANITIZER_ANDROID #include #endif #ifndef __GLIBC_PREREQ #define __GLIBC_PREREQ(x, y) 0 #endif #if SANITIZER_LINUX && __GLIBC_PREREQ(2, 16) # define SANITIZER_USE_GETAUXVAL 1 #else # define SANITIZER_USE_GETAUXVAL 0 #endif #if SANITIZER_USE_GETAUXVAL #include #endif #if SANITIZER_LINUX // struct kernel_timeval { long tv_sec; long tv_usec; }; // is broken on some linux distributions. const int FUTEX_WAIT = 0; const int FUTEX_WAKE = 1; #endif // SANITIZER_LINUX // Are we using 32-bit or 64-bit Linux syscalls? // x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32 // but it still needs to use 64-bit syscalls. #if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \ SANITIZER_WORDSIZE == 64) # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1 #else # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0 #endif #if defined(__x86_64__) || SANITIZER_MIPS64 extern "C" { extern void internal_sigreturn(); } #endif namespace __sanitizer { #if SANITIZER_LINUX && defined(__x86_64__) #include "sanitizer_syscall_linux_x86_64.inc" #elif SANITIZER_LINUX && defined(__aarch64__) #include "sanitizer_syscall_linux_aarch64.inc" #else #include "sanitizer_syscall_generic.inc" #endif // --------------- sanitizer_libc.h #if !SANITIZER_S390 uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd, OFF_T offset) { #if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd, offset); #else // mmap2 specifies file offset in 4096-byte units. 
CHECK(IsAligned(offset, 4096)); return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd, offset / 4096); #endif } #endif // !SANITIZER_S390 uptr internal_munmap(void *addr, uptr length) { return internal_syscall(SYSCALL(munmap), (uptr)addr, length); } int internal_mprotect(void *addr, uptr length, int prot) { return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot); } uptr internal_close(fd_t fd) { return internal_syscall(SYSCALL(close), fd); } uptr internal_open(const char *filename, int flags) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags); #else return internal_syscall(SYSCALL(open), (uptr)filename, flags); #endif } uptr internal_open(const char *filename, int flags, u32 mode) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags, mode); #else return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode); #endif } uptr internal_read(fd_t fd, void *buf, uptr count) { sptr res; HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf, count)); return res; } uptr internal_write(fd_t fd, const void *buf, uptr count) { sptr res; HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf, count)); return res; } uptr internal_ftruncate(fd_t fd, uptr size) { sptr res; HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd, (OFF_T)size)); return res; } #if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD static void stat64_to_stat(struct stat64 *in, struct stat *out) { internal_memset(out, 0, sizeof(*out)); out->st_dev = in->st_dev; out->st_ino = in->st_ino; out->st_mode = in->st_mode; out->st_nlink = in->st_nlink; out->st_uid = in->st_uid; out->st_gid = in->st_gid; out->st_rdev = in->st_rdev; out->st_size = in->st_size; out->st_blksize = in->st_blksize; out->st_blocks = in->st_blocks; out->st_atime = in->st_atime; out->st_mtime = in->st_mtime; out->st_ctime = in->st_ctime; } #endif #if defined(__mips64) static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) { internal_memset(out, 0, sizeof(*out)); out->st_dev = in->st_dev; out->st_ino = in->st_ino; out->st_mode = in->st_mode; out->st_nlink = in->st_nlink; out->st_uid = in->st_uid; out->st_gid = in->st_gid; out->st_rdev = in->st_rdev; out->st_size = in->st_size; out->st_blksize = in->st_blksize; out->st_blocks = in->st_blocks; out->st_atime = in->st_atime_nsec; out->st_mtime = in->st_mtime_nsec; out->st_ctime = in->st_ctime_nsec; } #endif uptr internal_stat(const char *path, void *buf) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS # if defined(__mips64) // For mips64, stat syscall fills buffer in the format of kernel_stat struct kernel_stat kbuf; int res = internal_syscall(SYSCALL(stat), path, &kbuf); kernel_stat_to_stat(&kbuf, (struct stat *)buf); return res; # else return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf); # endif #else struct stat64 buf64; int res = internal_syscall(SYSCALL(stat64), path, &buf64); stat64_to_stat(&buf64, (struct stat *)buf); return res; #endif } uptr internal_lstat(const char *path, void *buf) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, AT_SYMLINK_NOFOLLOW); #elif 
SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
  return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                          AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
# if SANITIZER_MIPS64
  // For mips64, lstat syscall fills buffer in the format of kernel_stat
  struct kernel_stat kbuf;
  int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
  kernel_stat_to_stat(&kbuf, (struct stat *)buf);
  return res;
# else
  return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
# endif
#else
  struct stat64 buf64;
  int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
  stat64_to_stat(&buf64, (struct stat *)buf);
  return res;
#endif
}

uptr internal_fstat(fd_t fd, void *buf) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
# if SANITIZER_MIPS64
  // For mips64, fstat syscall fills buffer in the format of kernel_stat
  struct kernel_stat kbuf;
  int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
  kernel_stat_to_stat(&kbuf, (struct stat *)buf);
  return res;
# else
  return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
# endif
#else
  struct stat64 buf64;
  int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
  stat64_to_stat(&buf64, (struct stat *)buf);
  return res;
#endif
}

uptr internal_filesize(fd_t fd) {
  struct stat st;
  if (internal_fstat(fd, &st))
    return -1;
  return (uptr)st.st_size;
}

uptr internal_dup2(int oldfd, int newfd) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
  return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
#else
  return internal_syscall(SYSCALL(dup2), oldfd, newfd);
#endif
}

uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
  return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
                          bufsize);
#else
  return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
#endif
}

uptr internal_unlink(const char *path) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
  return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
#else
  return internal_syscall(SYSCALL(unlink), (uptr)path);
#endif
}

uptr internal_rename(const char *oldpath, const char *newpath) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
  return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
                          (uptr)newpath);
#else
  return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
#endif
}

uptr internal_sched_yield() {
  return internal_syscall(SYSCALL(sched_yield));
}

void internal__exit(int exitcode) {
#if SANITIZER_FREEBSD
  internal_syscall(SYSCALL(exit), exitcode);
#else
  internal_syscall(SYSCALL(exit_group), exitcode);
#endif
  Die();  // Unreachable.
}

unsigned int internal_sleep(unsigned int seconds) {
  struct timespec ts;
  ts.tv_sec = seconds;
  ts.tv_nsec = 0;
  int res = internal_syscall(SYSCALL(nanosleep), &ts, &ts);
  if (res) return ts.tv_sec;
  return 0;
}

uptr internal_execve(const char *filename, char *const argv[],
                     char *const envp[]) {
  return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
                          (uptr)envp);
}

// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
  struct stat st;
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
  if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
#else
  if (internal_stat(filename, &st))
#endif
    return false;
  // Sanity check: filename is a regular file.
return S_ISREG(st.st_mode); } tid_t GetTid() { #if SANITIZER_FREEBSD return (uptr)pthread_self(); #else return internal_syscall(SYSCALL(gettid)); #endif } u64 NanoTime() { #if SANITIZER_FREEBSD timeval tv; #else kernel_timeval tv; #endif internal_memset(&tv, 0, sizeof(tv)); internal_syscall(SYSCALL(gettimeofday), (uptr)&tv, 0); return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000; } // Like getenv, but reads env directly from /proc (on Linux) or parses the // 'environ' array (on FreeBSD) and does not use libc. This function should be // called first inside __asan_init. const char *GetEnv(const char *name) { #if SANITIZER_FREEBSD if (::environ != 0) { uptr NameLen = internal_strlen(name); for (char **Env = ::environ; *Env != 0; Env++) { if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=') return (*Env) + NameLen + 1; } } return 0; // Not found. #elif SANITIZER_LINUX static char *environ; static uptr len; static bool inited; if (!inited) { inited = true; uptr environ_size; if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len)) environ = nullptr; } if (!environ || len == 0) return nullptr; uptr namelen = internal_strlen(name); const char *p = environ; while (*p != '\0') { // will happen at the \0\0 that terminates the buffer // proc file has the format NAME=value\0NAME=value\0NAME=value\0... const char* endp = (char*)internal_memchr(p, '\0', len - (p - environ)); if (!endp) // this entry isn't NUL terminated return nullptr; else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match. return p + namelen + 1; // point after = p = endp + 1; } return nullptr; // Not found. #else #error "Unsupported platform" #endif } #if !SANITIZER_FREEBSD extern "C" { SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end; } #endif #if !SANITIZER_GO && !SANITIZER_FREEBSD static void ReadNullSepFileToArray(const char *path, char ***arr, int arr_size) { char *buff; uptr buff_size; uptr buff_len; *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray"); if (!ReadFileToBuffer(path, &buff, &buff_size, &buff_len, 1024 * 1024)) { (*arr)[0] = nullptr; return; } (*arr)[0] = buff; int count, i; for (count = 1, i = 1; ; i++) { if (buff[i] == 0) { if (buff[i+1] == 0) break; (*arr)[count] = &buff[i+1]; CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible. count++; } } (*arr)[count] = nullptr; } #endif static void GetArgsAndEnv(char ***argv, char ***envp) { #if !SANITIZER_FREEBSD #if !SANITIZER_GO if (&__libc_stack_end) { #endif uptr* stack_end = (uptr*)__libc_stack_end; int argc = *stack_end; *argv = (char**)(stack_end + 1); *envp = (char**)(stack_end + argc + 2); #if !SANITIZER_GO } else { static const int kMaxArgv = 2000, kMaxEnvp = 2000; ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv); ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp); } #endif #else // On FreeBSD, retrieving the argument and environment arrays is done via the // kern.ps_strings sysctl, which returns a pointer to a structure containing // this information. See also the ps_strings declaration in <sys/exec.h>.
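// Aside: a minimal standalone sketch (illustration only, FreeBSD) of the
// kern.ps_strings lookup used just below. It assumes struct ps_strings
// (ps_argvstr/ps_nargvstr) as declared in <sys/exec.h>.
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/exec.h>
#include <stdio.h>
int main() {
  struct ps_strings *pss;
  size_t sz = sizeof(pss);
  // The sysctl hands back a pointer to the kernel-populated ps_strings block.
  if (sysctlbyname("kern.ps_strings", &pss, &sz, NULL, 0) == -1) return 1;
  for (int i = 0; i < pss->ps_nargvstr; i++)
    printf("argv[%d] = %s\n", i, pss->ps_argvstr[i]);
  return 0;
}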
ps_strings *pss; size_t sz = sizeof(pss); if (sysctlbyname("kern.ps_strings", &pss, &sz, NULL, 0) == -1) { Printf("sysctl kern.ps_strings failed\n"); Die(); } *argv = pss->ps_argvstr; *envp = pss->ps_envstr; #endif } char **GetArgv() { char **argv, **envp; GetArgsAndEnv(&argv, &envp); return argv; } void ReExec() { char **argv, **envp; GetArgsAndEnv(&argv, &envp); uptr rv = internal_execve("/proc/self/exe", argv, envp); int rverrno; CHECK_EQ(internal_iserror(rv, &rverrno), true); Printf("execve failed, errno %d\n", rverrno); Die(); } enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 }; BlockingMutex::BlockingMutex() { internal_memset(this, 0, sizeof(*this)); } void BlockingMutex::Lock() { CHECK_EQ(owner_, 0); atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked) return; while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) { #if SANITIZER_FREEBSD _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0); #else internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0); #endif } } void BlockingMutex::Unlock() { atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release); CHECK_NE(v, MtxUnlocked); if (v == MtxSleeping) { #if SANITIZER_FREEBSD _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0); #else internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE, 1, 0, 0, 0); #endif } } void BlockingMutex::CheckLocked() { atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed)); } // ----------------- sanitizer_linux.h // The actual size of this structure is specified by d_reclen. // Note that getdents64 uses a different structure format. We only provide the // 32-bit syscall here. struct linux_dirent { #if SANITIZER_X32 || defined(__aarch64__) u64 d_ino; u64 d_off; #else unsigned long d_ino; unsigned long d_off; #endif unsigned short d_reclen; #ifdef __aarch64__ unsigned char d_type; #endif char d_name[256]; }; // Syscall wrappers.
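// Aside: a standalone sketch (not part of the runtime) of listing
// /proc/self/task with the raw getdents64 syscall -- the same technique
// ThreadLister below applies via internal_getdents. Linux-only; it assumes
// glibc's struct dirent64 matches the kernel's linux_dirent64 layout.
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <dirent.h>
#include <cstdio>

int main() {
  int fd = open("/proc/self/task", O_RDONLY | O_DIRECTORY);
  if (fd < 0) return 1;
  char buf[4096];
  long n;
  while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
    for (long off = 0; off < n;) {
      auto *d = reinterpret_cast<struct dirent64 *>(buf + off);
      if (d->d_name[0] >= '0' && d->d_name[0] <= '9')  // numeric name => a tid
        printf("tid: %s\n", d->d_name);
      off += d->d_reclen;  // entries are variable-length
    }
  }
  close(fd);
  return 0;
}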
uptr internal_ptrace(int request, int pid, void *addr, void *data) { return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr, (uptr)data); } uptr internal_waitpid(int pid, int *status, int options) { return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options, 0 /* rusage */); } uptr internal_getpid() { return internal_syscall(SYSCALL(getpid)); } uptr internal_getppid() { return internal_syscall(SYSCALL(getppid)); } uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL); #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count); #else return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count); #endif } uptr internal_lseek(fd_t fd, OFF_T offset, int whence) { return internal_syscall(SYSCALL(lseek), fd, offset, whence); } #if SANITIZER_LINUX uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) { return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5); } #endif -uptr internal_sigaltstack(const struct sigaltstack *ss, - struct sigaltstack *oss) { +uptr internal_sigaltstack(const void *ss, void *oss) { return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss); } int internal_fork() { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(clone), SIGCHLD, 0); #else return internal_syscall(SYSCALL(fork)); #endif } #if SANITIZER_LINUX #define SA_RESTORER 0x04000000 // Doesn't set sa_restorer if the caller did not set it, so use with caution //(see below). int internal_sigaction_norestorer(int signum, const void *act, void *oldact) { __sanitizer_kernel_sigaction_t k_act, k_oldact; internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t)); internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t)); const __sanitizer_sigaction *u_act = (const __sanitizer_sigaction *)act; __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact; if (u_act) { k_act.handler = u_act->handler; k_act.sigaction = u_act->sigaction; internal_memcpy(&k_act.sa_mask, &u_act->sa_mask, sizeof(__sanitizer_kernel_sigset_t)); // Without SA_RESTORER kernel ignores the calls (probably returns EINVAL). k_act.sa_flags = u_act->sa_flags | SA_RESTORER; // FIXME: most often sa_restorer is unset, however the kernel requires it // to point to a valid signal restorer that calls the rt_sigreturn syscall. // If sa_restorer passed to the kernel is NULL, the program may crash upon // signal delivery or fail to unwind the stack in the signal handler. // libc implementation of sigaction() passes its own restorer to // rt_sigaction, so we need to do the same (we'll need to reimplement the // restorers; for x86_64 the restorer address can be obtained from // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact). #if !SANITIZER_ANDROID || !SANITIZER_MIPS32 k_act.sa_restorer = u_act->sa_restorer; #endif } uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum, (uptr)(u_act ? &k_act : nullptr), (uptr)(u_oldact ? 
&k_oldact : nullptr), (uptr)sizeof(__sanitizer_kernel_sigset_t)); if ((result == 0) && u_oldact) { u_oldact->handler = k_oldact.handler; u_oldact->sigaction = k_oldact.sigaction; internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask, sizeof(__sanitizer_kernel_sigset_t)); u_oldact->sa_flags = k_oldact.sa_flags; #if !SANITIZER_ANDROID || !SANITIZER_MIPS32 u_oldact->sa_restorer = k_oldact.sa_restorer; #endif } return result; } // Invokes sigaction via a raw syscall with a restorer, but does not support // all platforms yet. // We disable this for Go simply because we have not yet added it to buildgo.sh. #if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO int internal_sigaction_syscall(int signum, const void *act, void *oldact) { if (act == nullptr) return internal_sigaction_norestorer(signum, act, oldact); __sanitizer_sigaction u_adjust; internal_memcpy(&u_adjust, act, sizeof(u_adjust)); #if !SANITIZER_ANDROID || !SANITIZER_MIPS32 if (u_adjust.sa_restorer == nullptr) { u_adjust.sa_restorer = internal_sigreturn; } #endif return internal_sigaction_norestorer(signum, (const void *)&u_adjust, oldact); } #endif // (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO #endif // SANITIZER_LINUX uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(sigprocmask), how, set, oldset); #else __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set; __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset; return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0], sizeof(__sanitizer_kernel_sigset_t)); #endif } void internal_sigfillset(__sanitizer_sigset_t *set) { internal_memset(set, 0xff, sizeof(*set)); } void internal_sigemptyset(__sanitizer_sigset_t *set) { internal_memset(set, 0, sizeof(*set)); } #if SANITIZER_LINUX void internal_sigdelset(__sanitizer_sigset_t *set, int signum) { signum -= 1; CHECK_GE(signum, 0); CHECK_LT(signum, sizeof(*set) * 8); __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set; const uptr idx = signum / (sizeof(k_set->sig[0]) * 8); const uptr bit = signum % (sizeof(k_set->sig[0]) * 8); k_set->sig[idx] &= ~(1 << bit); } bool internal_sigismember(__sanitizer_sigset_t *set, int signum) { signum -= 1; CHECK_GE(signum, 0); CHECK_LT(signum, sizeof(*set) * 8); __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set; const uptr idx = signum / (sizeof(k_set->sig[0]) * 8); const uptr bit = signum % (sizeof(k_set->sig[0]) * 8); return k_set->sig[idx] & (1 << bit); } #endif // SANITIZER_LINUX // ThreadLister implementation. ThreadLister::ThreadLister(int pid) : pid_(pid), descriptor_(-1), buffer_(4096), error_(true), entry_((struct linux_dirent *)buffer_.data()), bytes_read_(0) { char task_directory_path[80]; internal_snprintf(task_directory_path, sizeof(task_directory_path), "/proc/%d/task/", pid); uptr openrv = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY); if (internal_iserror(openrv)) { error_ = true; Report("Can't open /proc/%d/task for reading.\n", pid); } else { error_ = false; descriptor_ = openrv; } } int ThreadLister::GetNextTID() { int tid = -1; do { if (error_) return -1; if ((char *)entry_ >= &buffer_[bytes_read_] && !GetDirectoryEntries()) return -1; if (entry_->d_ino != 0 && entry_->d_name[0] >= '0' && entry_->d_name[0] <= '9') { // Found a valid tid.
tid = (int)internal_atoll(entry_->d_name); } entry_ = (struct linux_dirent *)(((char *)entry_) + entry_->d_reclen); } while (tid < 0); return tid; } void ThreadLister::Reset() { if (error_ || descriptor_ < 0) return; internal_lseek(descriptor_, 0, SEEK_SET); } ThreadLister::~ThreadLister() { if (descriptor_ >= 0) internal_close(descriptor_); } bool ThreadLister::error() { return error_; } bool ThreadLister::GetDirectoryEntries() { CHECK_GE(descriptor_, 0); CHECK_NE(error_, true); bytes_read_ = internal_getdents(descriptor_, (struct linux_dirent *)buffer_.data(), buffer_.size()); if (internal_iserror(bytes_read_)) { Report("Can't read directory entries from /proc/%d/task.\n", pid_); error_ = true; return false; } else if (bytes_read_ == 0) { return false; } entry_ = (struct linux_dirent *)buffer_.data(); return true; } #if SANITIZER_WORDSIZE == 32 // Take care of unusable kernel area in top gigabyte. static uptr GetKernelAreaSize() { #if SANITIZER_LINUX && !SANITIZER_X32 const uptr gbyte = 1UL << 30; // First check if there are writable segments // mapped to top gigabyte (e.g. stack). MemoryMappingLayout proc_maps(/*cache_enabled*/true); MemoryMappedSegment segment; while (proc_maps.Next(&segment)) { if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0; } #if !SANITIZER_ANDROID // Even if nothing is mapped, top Gb may still be accessible // if we are running on 64-bit kernel. // Uname may report misleading results if personality type // is modified (e.g. under schroot) so check this as well. struct utsname uname_info; int pers = personality(0xffffffffUL); if (!(pers & PER_MASK) && uname(&uname_info) == 0 && internal_strstr(uname_info.machine, "64")) return 0; #endif // !SANITIZER_ANDROID // Top gigabyte is reserved for kernel. return gbyte; #else return 0; #endif // SANITIZER_LINUX && !SANITIZER_X32 } #endif // SANITIZER_WORDSIZE == 32 uptr GetMaxVirtualAddress() { #if SANITIZER_WORDSIZE == 64 # if defined(__powerpc64__) || defined(__aarch64__) // On PowerPC64 we have two different address space layouts: 44- and 46-bit. // We somehow need to figure out which one we are using now and choose // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL. // Note that with 'ulimit -s unlimited' the stack is moved away from the top // of the address space, so simply checking the stack address is not enough. // This should (does) work for both PowerPC64 Endian modes. // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit. return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1; # elif defined(__mips64) return (1ULL << 40) - 1; // 0x000000ffffffffffUL; # elif defined(__s390x__) return (1ULL << 53) - 1; // 0x001fffffffffffffUL; # else return (1ULL << 47) - 1; // 0x00007fffffffffffUL; # endif #else // SANITIZER_WORDSIZE == 32 # if defined(__s390__) return (1ULL << 31) - 1; // 0x7fffffff; # else uptr res = (1ULL << 32) - 1; // 0xffffffff; if (!common_flags()->full_address_space) res -= GetKernelAreaSize(); CHECK_LT(reinterpret_cast<uptr>(&res), res); return res; # endif #endif // SANITIZER_WORDSIZE } uptr GetPageSize() { // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array. #if SANITIZER_ANDROID return 4096; #elif SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) return EXEC_PAGESIZE; #elif SANITIZER_USE_GETAUXVAL return getauxval(AT_PAGESZ); #else return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
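// Aside: a hedged, standalone illustration of the stack-address probe that
// GetMaxVirtualAddress above relies on for powerpc64/aarch64: take the most
// significant set bit of a current frame address and assume the usable VA
// span is the enclosing power of two. A heuristic only, as the comments
// above on 'ulimit -s unlimited' already caution.
#include <cstdint>
#include <cstdio>
int main() {
  int probe;  // any stack-resident local will do
  uintptr_t frame = reinterpret_cast<uintptr_t>(&probe);
  int msb = 0;
  for (int i = 0; i < 64; i++)
    if ((frame >> i) & 1) msb = i;  // index of the most significant set bit
  printf("max VA guess: 2^%d - 1\n", msb + 1);
  return 0;
}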
#endif } uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { #if SANITIZER_FREEBSD const int Mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 }; const char *default_module_name = "kern.proc.pathname"; size_t Size = buf_len; bool IsErr = (sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0); int readlink_error = IsErr ? errno : 0; uptr module_name_len = Size; #else const char *default_module_name = "/proc/self/exe"; uptr module_name_len = internal_readlink( default_module_name, buf, buf_len); int readlink_error; bool IsErr = internal_iserror(module_name_len, &readlink_error); #endif if (IsErr) { // We can't read binary name for some reason, assume it's unknown. Report("WARNING: reading executable name failed with errno %d, " "some stack frames may not be symbolized\n", readlink_error); module_name_len = internal_snprintf(buf, buf_len, "%s", default_module_name); CHECK_LT(module_name_len, buf_len); } return module_name_len; } uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) { #if SANITIZER_LINUX char *tmpbuf; uptr tmpsize; uptr tmplen; if (ReadFileToBuffer("/proc/self/cmdline", &tmpbuf, &tmpsize, &tmplen, 1024 * 1024)) { internal_strncpy(buf, tmpbuf, buf_len); UnmapOrDie(tmpbuf, tmpsize); return internal_strlen(buf); } #endif return ReadBinaryName(buf, buf_len); } // Match full names of the form /path/to/base_name{-,.}* bool LibraryNameIs(const char *full_name, const char *base_name) { const char *name = full_name; // Strip path. while (*name != '\0') name++; while (name > full_name && *name != '/') name--; if (*name == '/') name++; uptr base_name_length = internal_strlen(base_name); if (internal_strncmp(name, base_name, base_name_length)) return false; return (name[base_name_length] == '-' || name[base_name_length] == '.'); } #if !SANITIZER_ANDROID // Call cb for each region mapped by map. void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) { CHECK_NE(map, nullptr); #if !SANITIZER_FREEBSD typedef ElfW(Phdr) Elf_Phdr; typedef ElfW(Ehdr) Elf_Ehdr; #endif // !SANITIZER_FREEBSD char *base = (char *)map->l_addr; Elf_Ehdr *ehdr = (Elf_Ehdr *)base; char *phdrs = base + ehdr->e_phoff; char *phdrs_end = phdrs + ehdr->e_phnum * ehdr->e_phentsize; // Find the segment with the minimum base so we can "relocate" the p_vaddr // fields. Typically ET_DYN objects (DSOs) have base of zero and ET_EXEC // objects have a non-zero base. uptr preferred_base = (uptr)-1; for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) { Elf_Phdr *phdr = (Elf_Phdr *)iter; if (phdr->p_type == PT_LOAD && preferred_base > (uptr)phdr->p_vaddr) preferred_base = (uptr)phdr->p_vaddr; } // Compute the delta from the real base to get a relocation delta. sptr delta = (uptr)base - preferred_base; // Now we can figure out what the loader really mapped. for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) { Elf_Phdr *phdr = (Elf_Phdr *)iter; if (phdr->p_type == PT_LOAD) { uptr seg_start = phdr->p_vaddr + delta; uptr seg_end = seg_start + phdr->p_memsz; // None of these values are aligned. We consider the ragged edges of the // load command as defined, since they are mapped from the file. seg_start = RoundDownTo(seg_start, GetPageSizeCached()); seg_end = RoundUpTo(seg_end, GetPageSizeCached()); cb((void *)seg_start, seg_end - seg_start); } } } #endif #if defined(__x86_64__) && SANITIZER_LINUX // We cannot use glibc's clone wrapper, because it messes with the child // task's TLS. 
It writes the PID and TID of the child task to its thread // descriptor, but in our case the child task shares the thread descriptor with // the parent (because we don't know how to allocate a new thread // descriptor to keep glibc happy). So the stock version of clone(), when // used with CLONE_VM, would end up corrupting the parent's thread descriptor. uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long long res; if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ((unsigned long long *)child_stack)[0] = (uptr)fn; ((unsigned long long *)child_stack)[1] = (uptr)arg; register void *r8 __asm__("r8") = newtls; register int *r10 __asm__("r10") = child_tidptr; __asm__ __volatile__( /* %rax = syscall(%rax = SYSCALL(clone), * %rdi = flags, * %rsi = child_stack, * %rdx = parent_tidptr, * %r8 = new_tls, * %r10 = child_tidptr) */ "syscall\n" /* if (%rax != 0) * return; */ "testq %%rax,%%rax\n" "jnz 1f\n" /* In the child. Terminate unwind chain. */ // XXX: We should also terminate the CFI unwind chain // here. Unfortunately clang 3.2 doesn't support the // necessary CFI directives, so we skip that part. "xorq %%rbp,%%rbp\n" /* Call "fn(arg)". */ "popq %%rax\n" "popq %%rdi\n" "call *%%rax\n" /* Call _exit(%rax). */ "movq %%rax,%%rdi\n" "movq %2,%%rax\n" "syscall\n" /* Return to parent. */ "1:\n" : "=a" (res) : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "S"(child_stack), "D"(flags), "d"(parent_tidptr), "r"(r8), "r"(r10) : "rsp", "memory", "r11", "rcx"); return res; } #elif defined(__mips__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long long res; if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ((unsigned long long *)child_stack)[0] = (uptr)fn; ((unsigned long long *)child_stack)[1] = (uptr)arg; register void *a3 __asm__("$7") = newtls; register int *a4 __asm__("$8") = child_tidptr; // We don't have proper CFI directives here because it requires a lot of code // for very marginal benefits. __asm__ __volatile__( /* $v0 = syscall($v0 = __NR_clone, * $a0 = flags, * $a1 = child_stack, * $a2 = parent_tidptr, * $a3 = new_tls, * $a4 = child_tidptr) */ ".cprestore 16;\n" "move $4,%1;\n" "move $5,%2;\n" "move $6,%3;\n" "move $7,%4;\n" /* Store the fifth argument on stack * if we are using 32-bit abi. */ #if SANITIZER_WORDSIZE == 32 "lw %5,16($29);\n" #else "move $8,%5;\n" #endif "li $2,%6;\n" "syscall;\n" /* if ($v0 != 0) * return; */ "bnez $2,1f;\n" /* Call "fn(arg)". */ #if SANITIZER_WORDSIZE == 32 #ifdef __BIG_ENDIAN__ "lw $25,4($29);\n" "lw $4,12($29);\n" #else "lw $25,0($29);\n" "lw $4,8($29);\n" #endif #else "ld $25,0($29);\n" "ld $4,8($29);\n" #endif "jal $25;\n" /* Call _exit($v0). */ "move $4,$2;\n" "li $2,%7;\n" "syscall;\n" /* Return to parent.
*/ "1:\n" : "=r" (res) : "r"(flags), "r"(child_stack), "r"(parent_tidptr), "r"(a3), "r"(a4), "i"(__NR_clone), "i"(__NR_exit) : "memory", "$29" ); return res; } #elif defined(__aarch64__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long long res; if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ((unsigned long long *)child_stack)[0] = (uptr)fn; ((unsigned long long *)child_stack)[1] = (uptr)arg; register int (*__fn)(void *) __asm__("x0") = fn; register void *__stack __asm__("x1") = child_stack; register int __flags __asm__("x2") = flags; register void *__arg __asm__("x3") = arg; register int *__ptid __asm__("x4") = parent_tidptr; register void *__tls __asm__("x5") = newtls; register int *__ctid __asm__("x6") = child_tidptr; __asm__ __volatile__( "mov x0,x2\n" /* flags */ "mov x2,x4\n" /* ptid */ "mov x3,x5\n" /* tls */ "mov x4,x6\n" /* ctid */ "mov x8,%9\n" /* clone */ "svc 0x0\n" /* if (%r0 != 0) * return %r0; */ "cmp x0, #0\n" "bne 1f\n" /* In the child, now. Call "fn(arg)". */ "ldp x1, x0, [sp], #16\n" "blr x1\n" /* Call _exit(%r0). */ "mov x8, %10\n" "svc 0x0\n" "1:\n" : "=r" (res) : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg), "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit) : "x30", "memory"); return res; } #elif defined(__powerpc64__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long long res; // Stack frame structure. #if SANITIZER_PPC64V1 // Back chain == 0 (SP + 112) // Frame (112 bytes): // Parameter save area (SP + 48), 8 doublewords // TOC save area (SP + 40) // Link editor doubleword (SP + 32) // Compiler doubleword (SP + 24) // LR save area (SP + 16) // CR save area (SP + 8) // Back chain (SP + 0) # define FRAME_SIZE 112 # define FRAME_TOC_SAVE_OFFSET 40 #elif SANITIZER_PPC64V2 // Back chain == 0 (SP + 32) // Frame (32 bytes): // TOC save area (SP + 24) // LR save area (SP + 16) // CR save area (SP + 8) // Back chain (SP + 0) # define FRAME_SIZE 32 # define FRAME_TOC_SAVE_OFFSET 24 #else # error "Unsupported PPC64 ABI" #endif if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); register int (*__fn)(void *) __asm__("r3") = fn; register void *__cstack __asm__("r4") = child_stack; register int __flags __asm__("r5") = flags; register void *__arg __asm__("r6") = arg; register int *__ptidptr __asm__("r7") = parent_tidptr; register void *__newtls __asm__("r8") = newtls; register int *__ctidptr __asm__("r9") = child_tidptr; __asm__ __volatile__( /* fn and arg are saved across the syscall */ "mr 28, %5\n\t" "mr 27, %8\n\t" /* syscall r0 == __NR_clone r3 == flags r4 == child_stack r5 == parent_tidptr r6 == newtls r7 == child_tidptr */ "mr 3, %7\n\t" "mr 5, %9\n\t" "mr 6, %10\n\t" "mr 7, %11\n\t" "li 0, %3\n\t" "sc\n\t" /* Test if syscall was successful */ "cmpdi cr1, 3, 0\n\t" "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t" "bne- cr1, 1f\n\t" /* Set up stack frame */ "li 29, 0\n\t" "stdu 29, -8(1)\n\t" "stdu 1, -%12(1)\n\t" /* Do the function call */ "std 2, %13(1)\n\t" #if SANITIZER_PPC64V1 "ld 0, 0(28)\n\t" "ld 2, 8(28)\n\t" "mtctr 0\n\t" #elif SANITIZER_PPC64V2 "mr 12, 28\n\t" "mtctr 12\n\t" #else # error "Unsupported PPC64 ABI" #endif "mr 3, 27\n\t" "bctrl\n\t" "ld 2, %13(1)\n\t" /* Call _exit(r3) */ "li 0, %4\n\t" "sc\n\t" /* Return to parent */ 
"1:\n\t" "mr %0, 3\n\t" : "=r" (res) : "0" (-1), "i" (EINVAL), "i" (__NR_clone), "i" (__NR_exit), "r" (__fn), "r" (__cstack), "r" (__flags), "r" (__arg), "r" (__ptidptr), "r" (__newtls), "r" (__ctidptr), "i" (FRAME_SIZE), "i" (FRAME_TOC_SAVE_OFFSET) : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29"); return res; } #elif defined(__i386__) && SANITIZER_LINUX uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { int res; if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); child_stack = (char *)child_stack - 7 * sizeof(unsigned int); ((unsigned int *)child_stack)[0] = (uptr)flags; ((unsigned int *)child_stack)[1] = (uptr)0; ((unsigned int *)child_stack)[2] = (uptr)fn; ((unsigned int *)child_stack)[3] = (uptr)arg; __asm__ __volatile__( /* %eax = syscall(%eax = SYSCALL(clone), * %ebx = flags, * %ecx = child_stack, * %edx = parent_tidptr, * %esi = new_tls, * %edi = child_tidptr) */ /* Obtain flags */ "movl (%%ecx), %%ebx\n" /* Do the system call */ "pushl %%ebx\n" "pushl %%esi\n" "pushl %%edi\n" /* Remember the flag value. */ "movl %%ebx, (%%ecx)\n" "int $0x80\n" "popl %%edi\n" "popl %%esi\n" "popl %%ebx\n" /* if (%eax != 0) * return; */ "test %%eax,%%eax\n" "jnz 1f\n" /* terminate the stack frame */ "xorl %%ebp,%%ebp\n" /* Call FN. */ "call *%%ebx\n" #ifdef PIC "call here\n" "here:\n" "popl %%ebx\n" "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n" #endif /* Call exit */ "movl %%eax, %%ebx\n" "movl %2, %%eax\n" "int $0x80\n" "1:\n" : "=a" (res) : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "c"(child_stack), "d"(parent_tidptr), "S"(newtls), "D"(child_tidptr) : "memory"); return res; } #elif defined(__arm__) && SANITIZER_LINUX uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { unsigned int res; if (!fn || !child_stack) return -EINVAL; child_stack = (char *)child_stack - 2 * sizeof(unsigned int); ((unsigned int *)child_stack)[0] = (uptr)fn; ((unsigned int *)child_stack)[1] = (uptr)arg; register int r0 __asm__("r0") = flags; register void *r1 __asm__("r1") = child_stack; register int *r2 __asm__("r2") = parent_tidptr; register void *r3 __asm__("r3") = newtls; register int *r4 __asm__("r4") = child_tidptr; register int r7 __asm__("r7") = __NR_clone; #if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__) # define ARCH_HAS_BX #endif #if __ARM_ARCH > 4 # define ARCH_HAS_BLX #endif #ifdef ARCH_HAS_BX # ifdef ARCH_HAS_BLX # define BLX(R) "blx " #R "\n" # else # define BLX(R) "mov lr, pc; bx " #R "\n" # endif #else # define BLX(R) "mov lr, pc; mov pc," #R "\n" #endif __asm__ __volatile__( /* %r0 = syscall(%r7 = SYSCALL(clone), * %r0 = flags, * %r1 = child_stack, * %r2 = parent_tidptr, * %r3 = new_tls, * %r4 = child_tidptr) */ /* Do the system call */ "swi 0x0\n" /* if (%r0 != 0) * return %r0; */ "cmp r0, #0\n" "bne 1f\n" /* In the child, now. Call "fn(arg)". */ "ldr r0, [sp, #4]\n" "ldr ip, [sp], #8\n" BLX(ip) /* Call _exit(%r0). 
*/ "mov r7, %7\n" "swi 0x0\n" "1:\n" "mov %0, r0\n" : "=r"(res) : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7), "i"(__NR_exit) : "memory"); return res; } #endif // defined(__x86_64__) && SANITIZER_LINUX #if SANITIZER_ANDROID #if __ANDROID_API__ < 21 extern "C" __attribute__((weak)) int dl_iterate_phdr( int (*)(struct dl_phdr_info *, size_t, void *), void *); #endif static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size, void *data) { // Any name starting with "lib" indicates a bug in L where library base names // are returned instead of paths. if (info->dlpi_name && info->dlpi_name[0] == 'l' && info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') { *(bool *)data = true; return 1; } return 0; } static atomic_uint32_t android_api_level; static AndroidApiLevel AndroidDetectApiLevel() { if (!&dl_iterate_phdr) return ANDROID_KITKAT; // K or lower bool base_name_seen = false; dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen); if (base_name_seen) return ANDROID_LOLLIPOP_MR1; // L MR1 return ANDROID_POST_LOLLIPOP; // post-L // Plain L (API level 21) is completely broken wrt ASan and not very // interesting to detect. } AndroidApiLevel AndroidGetApiLevel() { AndroidApiLevel level = (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed); if (level) return level; level = AndroidDetectApiLevel(); atomic_store(&android_api_level, level, memory_order_relaxed); return level; } #endif static HandleSignalMode GetHandleSignalModeImpl(int signum) { switch (signum) { case SIGABRT: return common_flags()->handle_abort; case SIGILL: return common_flags()->handle_sigill; case SIGFPE: return common_flags()->handle_sigfpe; case SIGSEGV: return common_flags()->handle_segv; case SIGBUS: return common_flags()->handle_sigbus; } return kHandleSignalNo; } HandleSignalMode GetHandleSignalMode(int signum) { HandleSignalMode result = GetHandleSignalModeImpl(signum); if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler) return kHandleSignalExclusive; return result; } #if !SANITIZER_GO void *internal_start_thread(void(*func)(void *arg), void *arg) { // Start the thread with signals blocked, otherwise it can steal user signals. __sanitizer_sigset_t set, old; internal_sigfillset(&set); #if SANITIZER_LINUX && !SANITIZER_ANDROID // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked // on any thread, setuid call hangs (see test/tsan/setuid.c). internal_sigdelset(&set, 33); #endif internal_sigprocmask(SIG_SETMASK, &set, &old); void *th; real_pthread_create(&th, nullptr, (void*(*)(void *arg))func, arg); internal_sigprocmask(SIG_SETMASK, &old, nullptr); return th; } void internal_join_thread(void *th) { real_pthread_join(th, nullptr); } #else void *internal_start_thread(void (*func)(void *), void *arg) { return 0; } void internal_join_thread(void *th) {} #endif #if defined(__aarch64__) // Android headers in the older NDK releases miss this definition. 
struct __sanitizer_esr_context { struct _aarch64_ctx head; uint64_t esr; }; static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) { static const u32 kEsrMagic = 0x45535201; u8 *aux = ucontext->uc_mcontext.__reserved; while (true) { _aarch64_ctx *ctx = (_aarch64_ctx *)aux; if (ctx->size == 0) break; if (ctx->magic == kEsrMagic) { *esr = ((__sanitizer_esr_context *)ctx)->esr; return true; } aux += ctx->size; } return false; } #endif SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) { ucontext_t *ucontext = (ucontext_t *)context; #if defined(__x86_64__) || defined(__i386__) static const uptr PF_WRITE = 1U << 1; #if SANITIZER_FREEBSD uptr err = ucontext->uc_mcontext.mc_err; #else uptr err = ucontext->uc_mcontext.gregs[REG_ERR]; #endif return err & PF_WRITE ? WRITE : READ; #elif defined(__arm__) static const uptr FSR_WRITE = 1U << 11; uptr fsr = ucontext->uc_mcontext.error_code; return fsr & FSR_WRITE ? WRITE : READ; #elif defined(__aarch64__) static const u64 ESR_ELx_WNR = 1U << 6; u64 esr; if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN; return esr & ESR_ELx_WNR ? WRITE : READ; #else (void)ucontext; return UNKNOWN; // FIXME: Implement. #endif } void SignalContext::DumpAllRegisters(void *context) { // FIXME: Implement this. } void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { #if defined(__arm__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.arm_pc; *bp = ucontext->uc_mcontext.arm_fp; *sp = ucontext->uc_mcontext.arm_sp; #elif defined(__aarch64__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.pc; *bp = ucontext->uc_mcontext.regs[29]; *sp = ucontext->uc_mcontext.sp; #elif defined(__hppa__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.sc_iaoq[0]; /* GCC uses %r3 whenever a frame pointer is needed. */ *bp = ucontext->uc_mcontext.sc_gr[3]; *sp = ucontext->uc_mcontext.sc_gr[30]; #elif defined(__x86_64__) # if SANITIZER_FREEBSD ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.mc_rip; *bp = ucontext->uc_mcontext.mc_rbp; *sp = ucontext->uc_mcontext.mc_rsp; # else ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.gregs[REG_RIP]; *bp = ucontext->uc_mcontext.gregs[REG_RBP]; *sp = ucontext->uc_mcontext.gregs[REG_RSP]; # endif #elif defined(__i386__) # if SANITIZER_FREEBSD ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.mc_eip; *bp = ucontext->uc_mcontext.mc_ebp; *sp = ucontext->uc_mcontext.mc_esp; # else ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.gregs[REG_EIP]; *bp = ucontext->uc_mcontext.gregs[REG_EBP]; *sp = ucontext->uc_mcontext.gregs[REG_ESP]; # endif #elif defined(__powerpc__) || defined(__powerpc64__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.regs->nip; *sp = ucontext->uc_mcontext.regs->gpr[PT_R1]; // The powerpc{,64}-linux ABIs do not specify r31 as the frame // pointer, but GCC always uses r31 when we need a frame pointer. 
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31]; #elif defined(__sparc__) ucontext_t *ucontext = (ucontext_t*)context; uptr *stk_ptr; # if defined (__arch64__) *pc = ucontext->uc_mcontext.mc_gregs[MC_PC]; *sp = ucontext->uc_mcontext.mc_gregs[MC_O6]; stk_ptr = (uptr *) (*sp + 2047); *bp = stk_ptr[15]; # else *pc = ucontext->uc_mcontext.gregs[REG_PC]; *sp = ucontext->uc_mcontext.gregs[REG_O6]; stk_ptr = (uptr *) *sp; *bp = stk_ptr[15]; # endif #elif defined(__mips__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.pc; *bp = ucontext->uc_mcontext.gregs[30]; *sp = ucontext->uc_mcontext.gregs[29]; #elif defined(__s390__) ucontext_t *ucontext = (ucontext_t*)context; # if defined(__s390x__) *pc = ucontext->uc_mcontext.psw.addr; # else *pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff; # endif *bp = ucontext->uc_mcontext.gregs[11]; *sp = ucontext->uc_mcontext.gregs[15]; #else # error "Unsupported arch" #endif } void MaybeReexec() { // No need to re-exec on Linux. } void PrintModuleMap() { } void CheckNoDeepBind(const char *filename, int flag) { #ifdef RTLD_DEEPBIND if (flag & RTLD_DEEPBIND) { Report( "You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag" " which is incompatible with the sanitizer runtime " "(see https://github.com/google/sanitizers/issues/611 for details" "). If you want to run %s library under sanitizers please remove " "RTLD_DEEPBIND from dlopen flags.\n", filename, filename); Die(); } #endif } uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, uptr *largest_gap_found) { UNREACHABLE("FindAvailableMemoryRange is not available"); return 0; } bool GetRandom(void *buffer, uptr length) { if (!buffer || !length || length > 256) return false; #if defined(__NR_getrandom) static atomic_uint8_t skip_getrandom_syscall; if (!atomic_load_relaxed(&skip_getrandom_syscall)) { // Up to 256 bytes, getrandom will not be interrupted. uptr res = internal_syscall(SYSCALL(getrandom), buffer, length, 0); int rverrno = 0; if (internal_iserror(res, &rverrno) && rverrno == ENOSYS) atomic_store_relaxed(&skip_getrandom_syscall, 1); else if (res == length) return true; } #endif uptr fd = internal_open("/dev/urandom", O_RDONLY); if (internal_iserror(fd)) return false; // internal_read deals with EINTR. uptr res = internal_read(fd, buffer, length); if (internal_iserror(res)) return false; internal_close(fd); return true; } } // namespace __sanitizer #endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/lib/sanitizer_common/sanitizer_linux.h b/lib/sanitizer_common/sanitizer_linux.h index ee336f7ddff3..11cad6b80933 100644 --- a/lib/sanitizer_common/sanitizer_linux.h +++ b/lib/sanitizer_common/sanitizer_linux.h @@ -1,134 +1,132 @@ //===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Linux-specific syscall wrappers and classes. // //===----------------------------------------------------------------------===// #ifndef SANITIZER_LINUX_H #define SANITIZER_LINUX_H #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_posix.h" #include "sanitizer_platform_limits_posix.h" struct link_map; // Opaque type returned by dlopen().
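// Aside: a small standalone sketch (not part of this header) of walking
// PT_LOAD program headers with dl_iterate_phdr -- the same information
// ForEachMappedRegion (declared further down) extracts manually from a
// link_map. Linux/glibc assumed; illustration only.
#include <link.h>
#include <cstdio>

static int visit(struct dl_phdr_info *info, size_t, void *) {
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const ElfW(Phdr) *ph = &info->dlpi_phdr[i];
    if (ph->p_type != PT_LOAD) continue;
    // dlpi_addr is the load bias; p_vaddr is the link-time address.
    printf("%s: [%#lx, %#lx)\n", info->dlpi_name,
           (unsigned long)(info->dlpi_addr + ph->p_vaddr),
           (unsigned long)(info->dlpi_addr + ph->p_vaddr + ph->p_memsz));
  }
  return 0;  // keep iterating over all loaded objects
}

int main() { return dl_iterate_phdr(visit, nullptr); }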
-struct sigaltstack; namespace __sanitizer { // Dirent structure for getdents(). Note that this structure is different from // the one in <dirent.h>, which is used by readdir(). struct linux_dirent; // Syscall wrappers. uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count); -uptr internal_sigaltstack(const struct sigaltstack* ss, - struct sigaltstack* oss); +uptr internal_sigaltstack(const void* ss, void* oss); uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset); // Linux-only syscalls. #if SANITIZER_LINUX uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5); // Used only by sanitizer_stoptheworld. Signal handlers that are actually used // (like the process-wide error reporting SEGV handler) must use // internal_sigaction instead. int internal_sigaction_norestorer(int signum, const void *act, void *oldact); #if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO // Uses a raw system call to avoid interceptors. int internal_sigaction_syscall(int signum, const void *act, void *oldact); #endif void internal_sigdelset(__sanitizer_sigset_t *set, int signum); #if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \ || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \ || defined(__arm__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr); #endif #endif // SANITIZER_LINUX // This class reads thread IDs from /proc/<pid>/task using only syscalls. class ThreadLister { public: explicit ThreadLister(int pid); ~ThreadLister(); // GetNextTID returns -1 if the list of threads is exhausted, or if there has // been an error. int GetNextTID(); void Reset(); bool error(); private: bool GetDirectoryEntries(); int pid_; int descriptor_; InternalScopedBuffer<char> buffer_; bool error_; struct linux_dirent* entry_; int bytes_read_; }; // Exposed for testing. uptr ThreadDescriptorSize(); uptr ThreadSelf(); uptr ThreadSelfOffset(); // Matches a library's file name against a base name (stripping path and version // information). bool LibraryNameIs(const char *full_name, const char *base_name); // Call cb for each region mapped by map. void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)); #if SANITIZER_ANDROID #if defined(__aarch64__) # define __get_tls() \ ({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; }) #elif defined(__arm__) # define __get_tls() \ ({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; }) #elif defined(__mips__) // On mips32r1, this goes via a kernel illegal instruction trap that's // optimized for v1. # define __get_tls() \ ({ register void** __v asm("v1"); \ __asm__(".set push\n" \ ".set mips32r2\n" \ "rdhwr %0,$29\n" \ ".set pop\n" : "=r"(__v)); \ __v; }) #elif defined(__i386__) # define __get_tls() \ ({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; }) #elif defined(__x86_64__) # define __get_tls() \ ({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; }) #else #error "Unsupported architecture." #endif // The Android Bionic team has allocated a TLS slot for TSan starting with N, // given that Android currently doesn't support ELF TLS. It is used to store // the sanitizers' thread-specific data.
static const int TLS_SLOT_TSAN = 8; ALWAYS_INLINE uptr *get_android_tls_ptr() { return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_TSAN]); } #endif // SANITIZER_ANDROID } // namespace __sanitizer #endif // SANITIZER_FREEBSD || SANITIZER_LINUX #endif // SANITIZER_LINUX_H diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc index 8df01815f9f7..1edd4157fd6b 100644 --- a/lib/sanitizer_common/sanitizer_mac.cc +++ b/lib/sanitizer_common/sanitizer_mac.cc @@ -1,979 +1,1000 @@ //===-- sanitizer_mac.cc --------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between various sanitizers' runtime libraries and // implements OSX-specific functions. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_MAC #include "sanitizer_mac.h" // Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so // the clients will most certainly use 64-bit ones as well. #ifndef _DARWIN_USE_64_BIT_INODE #define _DARWIN_USE_64_BIT_INODE 1 #endif #include <stdio.h> #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" #include "sanitizer_placement_new.h" #include "sanitizer_platform_limits_posix.h" #include "sanitizer_procmaps.h" #if !SANITIZER_IOS #include <crt_externs.h> // for _NSGetEnviron #else extern char **environ; #endif #if defined(__has_include) && __has_include(<os/trace.h>) #define SANITIZER_OS_TRACE 1 #include <os/trace.h> #else #define SANITIZER_OS_TRACE 0 #endif #if !SANITIZER_IOS #include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron #else extern "C" { extern char ***_NSGetArgv(void); } #endif #include <asl.h> #include <dlfcn.h> // for dladdr() #include <errno.h> #include <fcntl.h> #include <libkern/OSAtomic.h> #include <mach-o/dyld.h> #include <mach/mach.h> #include <mach/mach_time.h> #include <mach/vm_statistics.h> #include <pthread.h> #include <sched.h> #include <signal.h> #include <stdlib.h> #include <sys/mman.h> #include <sys/resource.h> #include <sys/stat.h> #include <sys/sysctl.h> #include <sys/types.h> #include <unistd.h> #include <util.h> // From <crt_externs.h>, but we don't have that file on iOS. extern "C" { extern char ***_NSGetArgv(void); extern char ***_NSGetEnviron(void); } // From <mach/mach_vm.h>, but we don't have that file on iOS. extern "C" { extern kern_return_t mach_vm_region_recurse( vm_map_t target_task, mach_vm_address_t *address, mach_vm_size_t *size, natural_t *nesting_depth, vm_region_recurse_info_t info, mach_msg_type_number_t *infoCnt); } namespace __sanitizer { #include "sanitizer_syscall_generic.inc" // Direct syscalls, don't call libmalloc hooks (but not available on 10.6).
extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off) SANITIZER_WEAK_ATTRIBUTE; extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE; // ---------------------- sanitizer_libc.h uptr internal_mmap(void *addr, size_t length, int prot, int flags, int fd, u64 offset) { if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL); if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset); return (uptr)mmap(addr, length, prot, flags, fd, offset); } uptr internal_munmap(void *addr, uptr length) { if (&__munmap) return __munmap(addr, length); return munmap(addr, length); } int internal_mprotect(void *addr, uptr length, int prot) { return mprotect(addr, length, prot); } uptr internal_close(fd_t fd) { return close(fd); } uptr internal_open(const char *filename, int flags) { return open(filename, flags); } uptr internal_open(const char *filename, int flags, u32 mode) { return open(filename, flags, mode); } uptr internal_read(fd_t fd, void *buf, uptr count) { return read(fd, buf, count); } uptr internal_write(fd_t fd, const void *buf, uptr count) { return write(fd, buf, count); } uptr internal_stat(const char *path, void *buf) { return stat(path, (struct stat *)buf); } uptr internal_lstat(const char *path, void *buf) { return lstat(path, (struct stat *)buf); } uptr internal_fstat(fd_t fd, void *buf) { return fstat(fd, (struct stat *)buf); } uptr internal_filesize(fd_t fd) { struct stat st; if (internal_fstat(fd, &st)) return -1; return (uptr)st.st_size; } uptr internal_dup2(int oldfd, int newfd) { return dup2(oldfd, newfd); } uptr internal_readlink(const char *path, char *buf, uptr bufsize) { return readlink(path, buf, bufsize); } uptr internal_unlink(const char *path) { return unlink(path); } uptr internal_sched_yield() { return sched_yield(); } void internal__exit(int exitcode) { _exit(exitcode); } unsigned int internal_sleep(unsigned int seconds) { return sleep(seconds); } uptr internal_getpid() { return getpid(); } int internal_sigaction(int signum, const void *act, void *oldact) { return sigaction(signum, (struct sigaction *)act, (struct sigaction *)oldact); } void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); } uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) { // Don't use sigprocmask here, because it affects all threads. return pthread_sigmask(how, set, oldset); } // Doesn't call pthread_atfork() handlers (but not available on 10.6). extern "C" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE; int internal_fork() { if (&__fork) return __fork(); return fork(); } int internal_forkpty(int *amaster) { int master, slave; if (openpty(&master, &slave, nullptr, nullptr, nullptr) == -1) return -1; int pid = internal_fork(); if (pid == -1) { close(master); close(slave); return -1; } if (pid == 0) { close(master); if (login_tty(slave) != 0) { // We already forked, there's not much we can do. Let's quit. 
Report("login_tty failed (errno %d)\n", errno); internal__exit(1); } } else { *amaster = master; close(slave); } return pid; } uptr internal_rename(const char *oldpath, const char *newpath) { return rename(oldpath, newpath); } uptr internal_ftruncate(fd_t fd, uptr size) { return ftruncate(fd, size); } uptr internal_execve(const char *filename, char *const argv[], char *const envp[]) { return execve(filename, argv, envp); } uptr internal_waitpid(int pid, int *status, int options) { return waitpid(pid, status, options); } // ----------------- sanitizer_common.h bool FileExists(const char *filename) { struct stat st; if (stat(filename, &st)) return false; // Sanity check: filename is a regular file. return S_ISREG(st.st_mode); } tid_t GetTid() { tid_t tid; pthread_threadid_np(nullptr, &tid); return tid; } void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom) { CHECK(stack_top); CHECK(stack_bottom); uptr stacksize = pthread_get_stacksize_np(pthread_self()); // pthread_get_stacksize_np() returns an incorrect stack size for the main // thread on Mavericks. See // https://github.com/google/sanitizers/issues/261 if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization && stacksize == (1 << 19)) { struct rlimit rl; CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0); // Most often rl.rlim_cur will be the desired 8M. if (rl.rlim_cur < kMaxThreadStackSize) { stacksize = rl.rlim_cur; } else { stacksize = kMaxThreadStackSize; } } void *stackaddr = pthread_get_stackaddr_np(pthread_self()); *stack_top = (uptr)stackaddr; *stack_bottom = *stack_top - stacksize; } char **GetEnviron() { #if !SANITIZER_IOS char ***env_ptr = _NSGetEnviron(); if (!env_ptr) { Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is " "called after libSystem_initializer().\n"); CHECK(env_ptr); } char **environ = *env_ptr; #endif CHECK(environ); return environ; } const char *GetEnv(const char *name) { char **env = GetEnviron(); uptr name_len = internal_strlen(name); while (*env != 0) { uptr len = internal_strlen(*env); if (len > name_len) { const char *p = *env; if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { // Match. return *env + name_len + 1; // String starting after =. } } env++; } return 0; } uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { CHECK_LE(kMaxPathLength, buf_len); // On OS X the executable path is saved to the stack by dyld. Reading it // from there is much faster than calling dladdr, especially for large // binaries with symbols. 
InternalScopedString exe_path(kMaxPathLength); uint32_t size = exe_path.size(); if (_NSGetExecutablePath(exe_path.data(), &size) == 0 && realpath(exe_path.data(), buf) != 0) { return internal_strlen(buf); } return 0; } uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) { return ReadBinaryName(buf, buf_len); } void ReExec() { UNIMPLEMENTED(); } uptr GetPageSize() { return sysconf(_SC_PAGESIZE); } BlockingMutex::BlockingMutex() { internal_memset(this, 0, sizeof(*this)); } void BlockingMutex::Lock() { CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_)); CHECK_EQ(OS_SPINLOCK_INIT, 0); CHECK_EQ(owner_, 0); OSSpinLockLock((OSSpinLock*)&opaque_storage_); } void BlockingMutex::Unlock() { OSSpinLockUnlock((OSSpinLock*)&opaque_storage_); } void BlockingMutex::CheckLocked() { CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0); } u64 NanoTime() { return 0; } uptr GetTlsSize() { return 0; } void InitTlsSize() { } uptr TlsBaseAddr() { uptr segbase = 0; #if defined(__x86_64__) asm("movq %%gs:0,%0" : "=r"(segbase)); #elif defined(__i386__) asm("movl %%gs:0,%0" : "=r"(segbase)); #endif return segbase; } // The size of the tls on darwin does not appear to be well documented, // however the vm memory map suggests that it is 1024 uptrs in size, // with a size of 0x2000 bytes on x86_64 and 0x1000 bytes on i386. uptr TlsSize() { #if defined(__x86_64__) || defined(__i386__) return 1024 * sizeof(uptr); #else return 0; #endif } void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size) { #if !SANITIZER_GO uptr stack_top, stack_bottom; GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); *stk_addr = stack_bottom; *stk_size = stack_top - stack_bottom; *tls_addr = TlsBaseAddr(); *tls_size = TlsSize(); #else *stk_addr = 0; *stk_size = 0; *tls_addr = 0; *tls_size = 0; #endif } void ListOfModules::init() { clear(); MemoryMappingLayout memory_mapping(false); memory_mapping.DumpListOfModules(&modules_); } static HandleSignalMode GetHandleSignalModeImpl(int signum) { switch (signum) { case SIGABRT: return common_flags()->handle_abort; case SIGILL: return common_flags()->handle_sigill; case SIGFPE: return common_flags()->handle_sigfpe; case SIGSEGV: return common_flags()->handle_segv; case SIGBUS: return common_flags()->handle_sigbus; } return kHandleSignalNo; } HandleSignalMode GetHandleSignalMode(int signum) { // Handling fatal signals on watchOS and tvOS devices is disallowed. if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM)) return kHandleSignalNo; HandleSignalMode result = GetHandleSignalModeImpl(signum); if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler) return kHandleSignalExclusive; return result; } MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED; MacosVersion GetMacosVersionInternal() { int mib[2] = { CTL_KERN, KERN_OSRELEASE }; char version[100]; uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]); for (uptr i = 0; i < maxlen; i++) version[i] = '\0'; // Get the version length. 
CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1); CHECK_LT(len, maxlen); CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1); switch (version[0]) { case '9': return MACOS_VERSION_LEOPARD; case '1': { switch (version[1]) { case '0': return MACOS_VERSION_SNOW_LEOPARD; case '1': return MACOS_VERSION_LION; case '2': return MACOS_VERSION_MOUNTAIN_LION; case '3': return MACOS_VERSION_MAVERICKS; case '4': return MACOS_VERSION_YOSEMITE; default: if (IsDigit(version[1])) return MACOS_VERSION_UNKNOWN_NEWER; else return MACOS_VERSION_UNKNOWN; } } default: return MACOS_VERSION_UNKNOWN; } } MacosVersion GetMacosVersion() { atomic_uint32_t *cache = reinterpret_cast<atomic_uint32_t*>(&cached_macos_version); MacosVersion result = static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire)); if (result == MACOS_VERSION_UNINITIALIZED) { result = GetMacosVersionInternal(); atomic_store(cache, result, memory_order_release); } return result; } bool PlatformHasDifferentMemcpyAndMemmove() { // On OS X 10.7 memcpy() and memmove() are both resolved // into memmove$VARIANT$sse42. // See also https://github.com/google/sanitizers/issues/34. // TODO(glider): need to check dynamically that memcpy() and memmove() are // actually the same function. return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD; } uptr GetRSS() { struct task_basic_info info; unsigned count = TASK_BASIC_INFO_COUNT; kern_return_t result = task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count); if (UNLIKELY(result != KERN_SUCCESS)) { Report("Cannot get task info. Error: %d\n", result); Die(); } return info.resident_size; } void *internal_start_thread(void(*func)(void *arg), void *arg) { // Start the thread with signals blocked, otherwise it can steal user signals. __sanitizer_sigset_t set, old; internal_sigfillset(&set); internal_sigprocmask(SIG_SETMASK, &set, &old); pthread_t th; pthread_create(&th, 0, (void*(*)(void *arg))func, arg); internal_sigprocmask(SIG_SETMASK, &old, 0); return th; } void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); } #if !SANITIZER_GO static BlockingMutex syslog_lock(LINKER_INITIALIZED); #endif void WriteOneLineToSyslog(const char *s) { #if !SANITIZER_GO syslog_lock.CheckLocked(); asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s); #endif } void LogMessageOnPrintf(const char *str) { // Log all printf output to CrashLog. if (common_flags()->abort_on_error) CRAppendCrashLogMessage(str); } void LogFullErrorReport(const char *buffer) { #if !SANITIZER_GO // Log with os_trace. This will make it into the crash log. #if SANITIZER_OS_TRACE if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) { // os_trace requires the message (format parameter) to be a string literal. if (internal_strncmp(SanitizerToolName, "AddressSanitizer", sizeof("AddressSanitizer") - 1) == 0) os_trace("Address Sanitizer reported a failure."); else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer", sizeof("UndefinedBehaviorSanitizer") - 1) == 0) os_trace("Undefined Behavior Sanitizer reported a failure."); else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer", sizeof("ThreadSanitizer") - 1) == 0) os_trace("Thread Sanitizer reported a failure."); else os_trace("Sanitizer tool reported a failure."); if (common_flags()->log_to_syslog) os_trace("Consult syslog for more information."); } #endif // Log to syslog. // The logging on OS X may call pthread_create so we need the threading // environment to be fully initialized. Also, this should never be called when // holding the thread registry lock since that may result in a deadlock.
If // the reporting thread holds the thread registry mutex, and asl_log waits // for GCD to dispatch a new thread, the process will deadlock, because the // pthread_create wrapper needs to acquire the lock as well. BlockingMutexLock l(&syslog_lock); if (common_flags()->log_to_syslog) WriteToSyslog(buffer); // The report is added to CrashLog as part of logging all of Printf output. #endif } SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) { #if defined(__x86_64__) || defined(__i386__) ucontext_t *ucontext = static_cast<ucontext_t*>(context); return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ; #else return UNKNOWN; #endif } void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { ucontext_t *ucontext = (ucontext_t*)context; # if defined(__aarch64__) *pc = ucontext->uc_mcontext->__ss.__pc; # if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0 *bp = ucontext->uc_mcontext->__ss.__fp; # else *bp = ucontext->uc_mcontext->__ss.__lr; # endif *sp = ucontext->uc_mcontext->__ss.__sp; # elif defined(__x86_64__) *pc = ucontext->uc_mcontext->__ss.__rip; *bp = ucontext->uc_mcontext->__ss.__rbp; *sp = ucontext->uc_mcontext->__ss.__rsp; # elif defined(__arm__) *pc = ucontext->uc_mcontext->__ss.__pc; *bp = ucontext->uc_mcontext->__ss.__r[7]; *sp = ucontext->uc_mcontext->__ss.__sp; # elif defined(__i386__) *pc = ucontext->uc_mcontext->__ss.__eip; *bp = ucontext->uc_mcontext->__ss.__ebp; *sp = ucontext->uc_mcontext->__ss.__esp; # else # error "Unknown architecture" # endif } #if !SANITIZER_GO static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES"; LowLevelAllocator allocator_for_env; // Change the value of the env var |name|, leaking the original value. // If |name_value| is NULL, the variable is deleted from the environment, // otherwise the corresponding "NAME=value" string is replaced with // |name_value|. void LeakyResetEnv(const char *name, const char *name_value) { char **env = GetEnviron(); uptr name_len = internal_strlen(name); while (*env != 0) { uptr len = internal_strlen(*env); if (len > name_len) { const char *p = *env; if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { // Match. if (name_value) { // Replace the old value with the new one. *env = const_cast<char *>(name_value); } else { // Shift the subsequent pointers back. char **del = env; do { del[0] = del[1]; } while (*del++); } } } env++; } } SANITIZER_WEAK_CXX_DEFAULT_IMPL bool ReexecDisabled() { return false; } extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber; static const double kMinDyldVersionWithAutoInterposition = 360.0; bool DyldNeedsEnvVariable() { // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users // still may want to use them on older systems. On older Darwin platforms, dyld // doesn't export the dyldVersionNumber symbol and we simply return true. if (!&dyldVersionNumber) return true; // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via // GetMacosVersion() doesn't work for the simulator. Let's instead check // `dyldVersionNumber`, which is exported by dyld, against a known version // number from the first OS release where this appeared. return dyldVersionNumber < kMinDyldVersionWithAutoInterposition; } void MaybeReexec() { if (ReexecDisabled()) return; // Make sure the dynamic runtime library is preloaded so that the // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec // ourselves.
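// Aside: a hedged sketch of the core environment check MaybeReexec performs
// below; the dylib name is a hypothetical example, not the value the runtime
// computes (it derives the real name via dladdr on its own code).
#include <cstdlib>
#include <cstring>
#include <cstdio>
int main() {
  const char *env = getenv("DYLD_INSERT_LIBRARIES");
  const char *dylib = "libclang_rt.asan_osx_dynamic.dylib";  // example only
  if (env && strstr(env, dylib))
    printf("runtime already preloaded\n");
  else
    printf("would setenv() the dylib and execv() the program again\n");
  return 0;
}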
Dl_info info; RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info)); char *dyld_insert_libraries = const_cast<char*>(GetEnv(kDyldInsertLibraries)); uptr old_env_len = dyld_insert_libraries ? internal_strlen(dyld_insert_libraries) : 0; uptr fname_len = internal_strlen(info.dli_fname); const char *dylib_name = StripModuleName(info.dli_fname); uptr dylib_name_len = internal_strlen(dylib_name); bool lib_is_in_env = dyld_insert_libraries && internal_strstr(dyld_insert_libraries, dylib_name); if (DyldNeedsEnvVariable() && !lib_is_in_env) { // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime // library. InternalScopedString program_name(1024); uint32_t buf_size = program_name.size(); _NSGetExecutablePath(program_name.data(), &buf_size); char *new_env = const_cast<char*>(info.dli_fname); if (dyld_insert_libraries) { // Append the runtime dylib name to the existing value of // DYLD_INSERT_LIBRARIES. new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2); internal_strncpy(new_env, dyld_insert_libraries, old_env_len); new_env[old_env_len] = ':'; // Copy fname_len bytes and add a trailing zero. internal_strncpy(new_env + old_env_len + 1, info.dli_fname, fname_len + 1); // Ok to use setenv() since the wrappers don't depend on the value of // asan_inited. setenv(kDyldInsertLibraries, new_env, /*overwrite*/1); } else { // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name. setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0); } VReport(1, "exec()-ing the program with\n"); VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env); VReport(1, "to enable wrappers.\n"); execv(program_name.data(), *_NSGetArgv()); // We get here only if execv() failed. Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, " "which is required for the sanitizer to work. We tried to set the " "environment variable and re-execute ourselves, but execv() failed, " "possibly because of sandbox restrictions. Make sure to launch the " "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env); RAW_CHECK("execv failed" && 0); } // Verify that interceptors really work. We'll use dlsym to locate // "pthread_create": if interceptors are working, it should really point to // "wrap_pthread_create" within our own dylib. Dl_info info_pthread_create; void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create"); RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create)); if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) { Report( "ERROR: Interceptors are not working. This may be because %s is " "loaded too late (e.g. via dlopen). Please launch the executable " "with:\n%s=%s\n", SanitizerToolName, kDyldInsertLibraries, info.dli_fname); RAW_CHECK("interceptors not installed" && 0); } if (!lib_is_in_env) return; // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove // the dylib from the environment variable, because interceptors are installed // and we don't want our children to inherit the variable. uptr env_name_len = internal_strlen(kDyldInsertLibraries); // Allocate memory to hold the previous env var name, its value, the '=' // sign and the '\0' char. char *new_env = (char*)allocator_for_env.Allocate( old_env_len + 2 + env_name_len); RAW_CHECK(new_env); internal_memset(new_env, '\0', old_env_len + 2 + env_name_len); internal_strncpy(new_env, kDyldInsertLibraries, env_name_len); new_env[env_name_len] = '='; char *new_env_pos = new_env + env_name_len + 1; // Iterate over colon-separated pieces of |dyld_insert_libraries|.
char *piece_start = dyld_insert_libraries; char *piece_end = NULL; char *old_env_end = dyld_insert_libraries + old_env_len; do { if (piece_start[0] == ':') piece_start++; piece_end = internal_strchr(piece_start, ':'); if (!piece_end) piece_end = dyld_insert_libraries + old_env_len; if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break; uptr piece_len = piece_end - piece_start; char *filename_start = (char *)internal_memrchr(piece_start, '/', piece_len); uptr filename_len = piece_len; if (filename_start) { filename_start += 1; filename_len = piece_len - (filename_start - piece_start); } else { filename_start = piece_start; } // If the current piece isn't the runtime library name, // append it to new_env. if ((dylib_name_len != filename_len) || (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) { if (new_env_pos != new_env + env_name_len + 1) { new_env_pos[0] = ':'; new_env_pos++; } internal_strncpy(new_env_pos, piece_start, piece_len); new_env_pos += piece_len; } // Move on to the next piece. piece_start = piece_end; } while (piece_start < old_env_end); // Can't use setenv() here, because it requires the allocator to be // initialized. // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in // a separate function called after InitializeAllocator(). if (new_env_pos == new_env + env_name_len + 1) new_env = NULL; LeakyResetEnv(kDyldInsertLibraries, new_env); } #endif // SANITIZER_GO char **GetArgv() { return *_NSGetArgv(); } #if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM // The task_vm_info struct is normally provided by the macOS SDK, but we need // fields only available in 10.12+. Declare the struct manually to be able to // build against older SDKs. struct __sanitizer_task_vm_info { - uptr _unused[(SANITIZER_WORDSIZE == 32) ? 
20 : 19]; - uptr min_address; - uptr max_address; + mach_vm_size_t virtual_size; + integer_t region_count; + integer_t page_size; + mach_vm_size_t resident_size; + mach_vm_size_t resident_size_peak; + mach_vm_size_t device; + mach_vm_size_t device_peak; + mach_vm_size_t internal; + mach_vm_size_t internal_peak; + mach_vm_size_t external; + mach_vm_size_t external_peak; + mach_vm_size_t reusable; + mach_vm_size_t reusable_peak; + mach_vm_size_t purgeable_volatile_pmap; + mach_vm_size_t purgeable_volatile_resident; + mach_vm_size_t purgeable_volatile_virtual; + mach_vm_size_t compressed; + mach_vm_size_t compressed_peak; + mach_vm_size_t compressed_lifetime; + mach_vm_size_t phys_footprint; + mach_vm_address_t min_address; + mach_vm_address_t max_address; }; +#define __SANITIZER_TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(__sanitizer_task_vm_info) / sizeof(natural_t))) uptr GetTaskInfoMaxAddress() { - __sanitizer_task_vm_info vm_info = {{0}, 0, 0}; - mach_msg_type_number_t count = sizeof(vm_info) / sizeof(int); + __sanitizer_task_vm_info vm_info = {}; + mach_msg_type_number_t count = __SANITIZER_TASK_VM_INFO_COUNT; int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count); if (err == 0) { return vm_info.max_address - 1; } else { // xnu cannot provide the vm address limit return 0x200000000 - 1; } } #endif uptr GetMaxVirtualAddress() { #if SANITIZER_WORDSIZE == 64 # if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM // Get the maximum VM address static uptr max_vm = GetTaskInfoMaxAddress(); CHECK(max_vm); return max_vm; # else return (1ULL << 47) - 1; // 0x00007fffffffffffUL; # endif #else // SANITIZER_WORDSIZE == 32 return (1ULL << 32) - 1; // 0xffffffff; #endif // SANITIZER_WORDSIZE } uptr FindAvailableMemoryRange(uptr shadow_size, uptr alignment, uptr left_padding, uptr *largest_gap_found) { typedef vm_region_submap_short_info_data_64_t RegionInfo; enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 }; // Start searching for an available memory region past PAGEZERO, which is // 4KB on 32-bit and 4GB on 64-bit. mach_vm_address_t start_address = (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000; mach_vm_address_t address = start_address; mach_vm_address_t free_begin = start_address; kern_return_t kr = KERN_SUCCESS; if (largest_gap_found) *largest_gap_found = 0; while (kr == KERN_SUCCESS) { mach_vm_size_t vmsize = 0; natural_t depth = 0; RegionInfo vminfo; mach_msg_type_number_t count = kRegionInfoSize; kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth, (vm_region_info_t)&vminfo, &count); if (free_begin != address) { // We found a free region [free_begin..address-1]. uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment); uptr gap_end = RoundDownTo((uptr)address, alignment); uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0; if (shadow_size < gap_size) { return gap_start; } if (largest_gap_found && *largest_gap_found < gap_size) { *largest_gap_found = gap_size; } } // Move on to the next region. address += vmsize; free_begin = address; } // We looked at all free regions and could not find one large enough. return 0; }
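// Usage sketch (hypothetical caller; shadow placement in ASan is the real
// client): request a gap large enough for the shadow, and report the largest
// gap found when the request cannot be satisfied.
#if 0
static uptr PlaceShadow(uptr shadow_size, uptr alignment) {
  uptr largest_gap = 0;
  uptr addr = FindAvailableMemoryRange(shadow_size, alignment,
                                       /*left_padding=*/0, &largest_gap);
  if (!addr)
    Report("No gap of 0x%zx bytes; largest free gap is 0x%zx bytes.\n",
           shadow_size, largest_gap);
  return addr;
}
#endif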
// FIXME: implement on this platform. void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { } void SignalContext::DumpAllRegisters(void *context) { Report("Register values:\n"); ucontext_t *ucontext = (ucontext_t*)context; # define DUMPREG64(r) \ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r); # define DUMPREG32(r) \ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r); # define DUMPREG_(r) Printf(" "); DUMPREG(r); # define DUMPREG__(r) Printf(" "); DUMPREG(r); # define DUMPREG___(r) Printf(" "); DUMPREG(r); # if defined(__x86_64__) # define DUMPREG(r) DUMPREG64(r) DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n"); DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n"); DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n"); DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n"); # elif defined(__i386__) # define DUMPREG(r) DUMPREG32(r) DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n"); DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n"); # elif defined(__aarch64__) # define DUMPREG(r) DUMPREG64(r) DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n"); DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n"); DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n"); DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n"); DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n"); DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n"); DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n"); DUMPREG(x[28]); DUMPREG___(fp); DUMPREG___(lr); DUMPREG___(sp); Printf("\n"); # elif defined(__arm__) # define DUMPREG(r) DUMPREG32(r) DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n"); DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n"); DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n"); DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n"); # else # error "Unknown architecture" # endif # undef DUMPREG64 # undef DUMPREG32 # undef DUMPREG_ # undef DUMPREG__ # undef DUMPREG___ # undef DUMPREG } static inline bool CompareBaseAddress(const LoadedModule &a, const LoadedModule &b) { return a.base_address() < b.base_address(); } void FormatUUID(char *out, uptr size, const u8 *uuid) { internal_snprintf(out, size, "<%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-" "%02X%02X%02X%02X%02X%02X>", uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]); } void PrintModuleMap() { Printf("Process module map:\n"); MemoryMappingLayout memory_mapping(false); InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128); memory_mapping.DumpListOfModules(&modules); InternalSort(&modules, modules.size(), CompareBaseAddress); for (uptr i = 0; i < modules.size(); ++i) { char uuid_str[128]; FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid()); Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(), modules[i].max_executable_address(), modules[i].full_name(), ModuleArchToString(modules[i].arch()), uuid_str); } Printf("End of module map.\n"); } void CheckNoDeepBind(const char *filename, int flag) { // Do nothing. } // FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length) { UNIMPLEMENTED(); } } // namespace __sanitizer #endif // SANITIZER_MAC diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h index 49732aa32323..396f7c9346d6 100644 --- a/lib/sanitizer_common/sanitizer_platform.h +++ b/lib/sanitizer_common/sanitizer_platform.h @@ -1,273 +1,280 @@ //===-- sanitizer_platform.h ------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Common platform macros. //===----------------------------------------------------------------------===// #ifndef SANITIZER_PLATFORM_H #define SANITIZER_PLATFORM_H -#if !defined(__linux__) && !defined(__FreeBSD__) && \ +#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \ !defined(__APPLE__) && !defined(_WIN32) # error "This operating system is not supported" #endif #if defined(__linux__) # define SANITIZER_LINUX 1 #else # define SANITIZER_LINUX 0 #endif #if defined(__FreeBSD__) # define SANITIZER_FREEBSD 1 #else # define SANITIZER_FREEBSD 0 #endif +#if defined(__NetBSD__) +# define SANITIZER_NETBSD 1 +#else +# define SANITIZER_NETBSD 0 +#endif + #if defined(__APPLE__) # define SANITIZER_MAC 1 # include <TargetConditionals.h> # if TARGET_OS_IPHONE # define SANITIZER_IOS 1 # else # define SANITIZER_IOS 0 # endif # if TARGET_IPHONE_SIMULATOR # define SANITIZER_IOSSIM 1 # else # define SANITIZER_IOSSIM 0 # endif #else # define SANITIZER_MAC 0 # define SANITIZER_IOS 0 # define SANITIZER_IOSSIM 0 #endif #if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH # define SANITIZER_WATCHOS 1 #else # define SANITIZER_WATCHOS 0 #endif #if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV # define SANITIZER_TVOS 1 #else # define SANITIZER_TVOS 0 #endif #if defined(_WIN32) # define SANITIZER_WINDOWS 1 #else # define SANITIZER_WINDOWS 0 #endif #if defined(_WIN64) # define SANITIZER_WINDOWS64 1 #else # define SANITIZER_WINDOWS64 0 #endif #if defined(__ANDROID__) # define SANITIZER_ANDROID 1 #else # define SANITIZER_ANDROID 0 #endif -#define SANITIZER_POSIX (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC) +#define SANITIZER_POSIX \ + (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || SANITIZER_NETBSD) #if __LP64__ || defined(_WIN64) # define SANITIZER_WORDSIZE 64 #else # define SANITIZER_WORDSIZE 32 #endif #if SANITIZER_WORDSIZE == 64 # define FIRST_32_SECOND_64(a, b) (b) #else # define FIRST_32_SECOND_64(a, b) (a) #endif #if defined(__x86_64__) && !defined(_LP64) # define SANITIZER_X32 1 #else # define SANITIZER_X32 0 #endif #if defined(__mips__) # define SANITIZER_MIPS 1 # if defined(__mips64) # define SANITIZER_MIPS32 0 # define SANITIZER_MIPS64 1 # else # define SANITIZER_MIPS32 1 # define SANITIZER_MIPS64 0 # endif #else # define SANITIZER_MIPS 0 # define SANITIZER_MIPS32 0 # define SANITIZER_MIPS64 0 #endif #if defined(__s390__) # define SANITIZER_S390 1 # if defined(__s390x__) # define SANITIZER_S390_31 0 # define SANITIZER_S390_64 1 # else # define SANITIZER_S390_31 1 # define SANITIZER_S390_64 0 # endif #else # define SANITIZER_S390 0 # define SANITIZER_S390_31 0 # define SANITIZER_S390_64 0 #endif #if defined(__powerpc__) # define SANITIZER_PPC 1 # if defined(__powerpc64__) # define SANITIZER_PPC32 0 # define SANITIZER_PPC64 1 // 64-bit PPC has two ABIs (v1 and v2).
The old powerpc64 target is // big-endian, and uses the v1 ABI (known for its function descriptors), // while the new powerpc64le target is little-endian and uses v2. // In theory, you could convince gcc to compile for their evil twins // (e.g. big-endian v2), but you won't find such combinations in the wild // (it'd require bootstrapping a whole system, which would be quite painful // - there's no target triple for that). LLVM doesn't support them either. # if _CALL_ELF == 2 # define SANITIZER_PPC64V1 0 # define SANITIZER_PPC64V2 1 # else # define SANITIZER_PPC64V1 1 # define SANITIZER_PPC64V2 0 # endif # else # define SANITIZER_PPC32 1 # define SANITIZER_PPC64 0 # define SANITIZER_PPC64V1 0 # define SANITIZER_PPC64V2 0 # endif #else # define SANITIZER_PPC 0 # define SANITIZER_PPC32 0 # define SANITIZER_PPC64 0 # define SANITIZER_PPC64V1 0 # define SANITIZER_PPC64V2 0 #endif #if defined(__arm__) # define SANITIZER_ARM 1 #else # define SANITIZER_ARM 0 #endif // By default we allow the use of SizeClassAllocator64 on 64-bit platforms. // But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64 // does not work well and we need to fall back to SizeClassAllocator32. // For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or // change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here. #ifndef SANITIZER_CAN_USE_ALLOCATOR64 # if SANITIZER_ANDROID && defined(__aarch64__) # define SANITIZER_CAN_USE_ALLOCATOR64 1 # elif defined(__mips64) || defined(__aarch64__) # define SANITIZER_CAN_USE_ALLOCATOR64 0 # else # define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) # endif #endif // The range of addresses which can be returned by mmap. // FIXME: this value should be different on different platforms. Larger values // will still work but will consume more memory for TwoLevelByteMap. #if defined(__mips__) # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) #elif defined(__aarch64__) # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48) #else # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) #endif // The AArch64 linux port uses the canonical syscall set as mandated by // the upstream linux community for all new ports. Other ports may still // use legacy syscalls. #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS # if defined(__aarch64__) && SANITIZER_LINUX # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1 # else # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0 # endif #endif // uid16 syscalls can only be used when the following conditions are // met: // * target is one of arm32, x86-32, sparc32, sh or m68k // * libc version is libc5, glibc-2.0, glibc-2.1 or glibc-2.2 to 2.15 // built against > linux-2.2 kernel headers // Since we don't want to include libc headers here, we check the // target only. #if defined(__arm__) || SANITIZER_X32 || defined(__sparc__) #define SANITIZER_USES_UID16_SYSCALLS 1 #else #define SANITIZER_USES_UID16_SYSCALLS 0 #endif #if defined(__mips__) # define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10) #else # define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12) #endif // Assume obsolete RPC headers are available by default #if !defined(HAVE_RPC_XDR_H) && !defined(HAVE_TIRPC_RPC_XDR_H) # define HAVE_RPC_XDR_H (SANITIZER_LINUX && !SANITIZER_ANDROID) # define HAVE_TIRPC_RPC_XDR_H 0 #endif /// \macro MSC_PREREQ /// \brief Is the compiler MSVC of at least the specified version?
/// The common \param version values to check for are: /// * 1800: Microsoft Visual Studio 2013 / 12.0 /// * 1900: Microsoft Visual Studio 2015 / 14.0 #ifdef _MSC_VER # define MSC_PREREQ(version) (_MSC_VER >= (version)) #else # define MSC_PREREQ(version) 0 #endif #if defined(__arm64__) && SANITIZER_IOS # define SANITIZER_NON_UNIQUE_TYPEINFO 1 #else # define SANITIZER_NON_UNIQUE_TYPEINFO 0 #endif // On linux, some architectures had an ABI transition from 64-bit long double // (i.e. same as double) to 128-bit long double. On those, glibc symbols // involving long doubles come in two versions, and we need to pass the // correct one to dlvsym when intercepting them. #if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1) #define SANITIZER_NLDBL_VERSION "GLIBC_2.4" #endif #if SANITIZER_GO == 0 # define SANITIZER_GO 0 #endif // On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks. // pthread_exit() performs unwinding that leads to dlopen'ing libgcc_s.so. // dlopen mallocs a "libgcc_s.so" string, which confuses LSan: it fails to realize // that this allocation happens in the dynamic linker and should be ignored. #if SANITIZER_PPC || defined(__thumb__) # define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1 #else # define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0 #endif #endif // SANITIZER_PLATFORM_H diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h index 1bc43e817230..0380cee92a00 100644 --- a/lib/sanitizer_common/sanitizer_platform_interceptors.h +++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h @@ -1,359 +1,381 @@ //===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines macros telling whether sanitizer tools can/should intercept // given library functions on a given platform.
// //===----------------------------------------------------------------------===// #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H #define SANITIZER_PLATFORM_INTERCEPTORS_H #include "sanitizer_internal_defs.h" #if !SANITIZER_WINDOWS # define SI_WINDOWS 0 # define SI_NOT_WINDOWS 1 # include "sanitizer_platform_limits_posix.h" #else # define SI_WINDOWS 1 # define SI_NOT_WINDOWS 0 #endif #if SANITIZER_POSIX # define SI_POSIX 1 #else # define SI_POSIX 0 #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID # define SI_LINUX_NOT_ANDROID 1 #else # define SI_LINUX_NOT_ANDROID 0 #endif #if SANITIZER_ANDROID # define SI_ANDROID 1 #else # define SI_ANDROID 0 #endif #if SANITIZER_FREEBSD # define SI_FREEBSD 1 #else # define SI_FREEBSD 0 #endif +#if SANITIZER_NETBSD +# define SI_NETBSD 1 +#else +# define SI_NETBSD 0 +#endif + #if SANITIZER_LINUX # define SI_LINUX 1 #else # define SI_LINUX 0 #endif #if SANITIZER_MAC # define SI_MAC 1 # define SI_NOT_MAC 0 #else # define SI_MAC 0 # define SI_NOT_MAC 1 #endif #if SANITIZER_IOS # define SI_IOS 1 #else # define SI_IOS 0 #endif #if !SANITIZER_WINDOWS && !SANITIZER_MAC # define SI_UNIX_NOT_MAC 1 #else # define SI_UNIX_NOT_MAC 0 #endif #if SANITIZER_LINUX && !SANITIZER_FREEBSD # define SI_LINUX_NOT_FREEBSD 1 # else # define SI_LINUX_NOT_FREEBSD 0 #endif #define SANITIZER_INTERCEPT_STRLEN 1 #define SANITIZER_INTERCEPT_STRNLEN SI_NOT_MAC #define SANITIZER_INTERCEPT_STRCMP 1 #define SANITIZER_INTERCEPT_STRSTR 1 #define SANITIZER_INTERCEPT_STRCASESTR SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_STRTOK 1 #define SANITIZER_INTERCEPT_STRCHR 1 #define SANITIZER_INTERCEPT_STRCHRNUL SI_UNIX_NOT_MAC #define SANITIZER_INTERCEPT_STRRCHR 1 #define SANITIZER_INTERCEPT_STRSPN 1 #define SANITIZER_INTERCEPT_STRPBRK 1 #define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_MEMSET 1 #define SANITIZER_INTERCEPT_MEMMOVE 1 #define SANITIZER_INTERCEPT_MEMCPY 1 #define SANITIZER_INTERCEPT_MEMCMP 1 #define SANITIZER_INTERCEPT_STRNDUP SI_POSIX #define SANITIZER_INTERCEPT___STRNDUP SI_LINUX_NOT_FREEBSD #if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070 # define SI_MAC_DEPLOYMENT_BELOW_10_7 1 #else # define SI_MAC_DEPLOYMENT_BELOW_10_7 0 #endif // memmem on Darwin doesn't exist on 10.6 // FIXME: enable memmem on Windows. 
#define SANITIZER_INTERCEPT_MEMMEM \ - SI_NOT_WINDOWS && !SI_MAC_DEPLOYMENT_BELOW_10_7 + (SI_NOT_WINDOWS && !SI_MAC_DEPLOYMENT_BELOW_10_7) #define SANITIZER_INTERCEPT_MEMCHR 1 -#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX +#define SANITIZER_INTERCEPT_MEMRCHR (SI_FREEBSD || SI_LINUX || SI_NETBSD) #define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_FREAD SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_FWRITE SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_READV SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_WRITEV SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_PREADV SI_FREEBSD || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_PREADV \ + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PRCTL SI_LINUX #define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID #ifndef SANITIZER_INTERCEPT_PRINTF # define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS -# define SANITIZER_INTERCEPT_PRINTF_L SI_FREEBSD +# define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD) # define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID #endif #define SANITIZER_INTERCEPT_FREXP 1 #define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_GETPWENT \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_GETPWENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_FREEBSD || SI_LINUX +#define SANITIZER_INTERCEPT_GETPWENT_R \ + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) +#define SANITIZER_INTERCEPT_SETPWENT (SI_MAC || SI_LINUX_NOT_ANDROID) +#define SANITIZER_INTERCEPT_CLOCK_GETTIME (SI_FREEBSD || SI_NETBSD || SI_LINUX) #define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_FREEBSD || SI_LINUX -#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_FREEBSD || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_FREEBSD || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID +#define 
SANITIZER_INTERCEPT_GETHOSTBYNAME_R (SI_FREEBSD || SI_LINUX) +#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R \ + (SI_FREEBSD || SI_LINUX_NOT_ANDROID) +#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID) +#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SENDMSG SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SYSINFO SI_LINUX #define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID #if SI_LINUX_NOT_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ defined(__s390__)) #define SANITIZER_INTERCEPT_PTRACE 1 #else #define SANITIZER_INTERCEPT_PTRACE 0 #endif #define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_MBSNRTOWCS (SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_WCSNRTOMBS \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_WCRTOMB \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_CONFSTR \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SCANDIR \ - SI_FREEBSD || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_WORDEXP \ - SI_FREEBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SIGSETOPS \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS 
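// How these flags are consumed, in sketch form (modeled on
// sanitizer_common_interceptors.inc and simplified; not the exact code):
#if 0
#if SANITIZER_INTERCEPT_MEMRCHR
INTERCEPTOR(void *, memrchr, const void *s, int c, SIZE_T n) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, memrchr, s, c, n);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, n);  // report reads of [s, s+n)
  return REAL(memrchr)(s, c, n);
}
#endif
#endif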
-#define SANITIZER_INTERCEPT_BACKTRACE SI_FREEBSD || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_BACKTRACE \ + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX #define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_STATFS SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_STATFS \ + (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_STATFS64 \ - (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_STATVFS SI_FREEBSD || SI_LINUX_NOT_ANDROID + ((SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID) +#define SANITIZER_INTERCEPT_STATVFS \ + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_ETHER_HOST \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_ETHER_R SI_FREEBSD || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_SHMCTL \ - ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) + (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID) +#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID) +#define SANITIZER_INTERCEPT_SHMCTL \ + (SI_NETBSD || ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && \ + SANITIZER_WORDSIZE == 64)) // NOLINT #define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \ - SI_MAC || SI_LINUX_NOT_ANDROID + (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \ - SI_MAC || SI_LINUX_NOT_ANDROID + (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_TTYNAME_R SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SINCOS SI_LINUX #define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_LGAMMA_R SI_FREEBSD || SI_LINUX +#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX) #define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_RAND_R \ - SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_ICONV SI_FREEBSD || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_MAC || 
SI_LINUX_NOT_ANDROID) +#define SANITIZER_INTERCEPT_ICONV \ + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS // FIXME: getline seems to be available on OSX 10.7 -#define SANITIZER_INTERCEPT_GETLINE SI_FREEBSD || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_GETLINE \ + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) -#define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD || SI_MAC +#define SANITIZER_INTERCEPT__EXIT \ + (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC) #define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \ - SI_FREEBSD || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_TLS_GET_ADDR \ - SI_FREEBSD || SI_LINUX_NOT_ANDROID + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX #define SANITIZER_INTERCEPT_GETXATTR SI_LINUX #define SANITIZER_INTERCEPT_GETRESID SI_LINUX #define SANITIZER_INTERCEPT_GETIFADDRS \ - SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC) #define SANITIZER_INTERCEPT_IF_INDEXTONAME \ - SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC) #define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID #if SI_LINUX && defined(__arm__) #define SANITIZER_INTERCEPT_AEABI_MEM 1 #else #define SANITIZER_INTERCEPT_AEABI_MEM 0 #endif #define SANITIZER_INTERCEPT___BZERO SI_MAC -#define SANITIZER_INTERCEPT_FTIME !SI_FREEBSD && SI_NOT_WINDOWS +#define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_NOT_WINDOWS) #define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_TSEARCH SI_LINUX_NOT_ANDROID || SI_MAC +#define SANITIZER_INTERCEPT_TSEARCH \ + (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD) #define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_FOPEN SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM (SI_LINUX_NOT_ANDROID || SI_NETBSD) #define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS #ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE #define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \ - SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC + (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC) #endif -#define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC +#define SANITIZER_INTERCEPT_GETPASS \ + (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD) #define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_MLOCKX SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_FOPENCOOKIE SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_SEM SI_LINUX || SI_FREEBSD +#define SANITIZER_INTERCEPT_SEM (SI_LINUX || SI_FREEBSD || SI_NETBSD) #define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_MINCORE SI_LINUX +#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD) #define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX -#define SANITIZER_INTERCEPT_CTERMID SI_LINUX || SI_MAC || SI_FREEBSD -#define SANITIZER_INTERCEPT_CTERMID_R SI_MAC || SI_FREEBSD +#define SANITIZER_INTERCEPT_CTERMID \ + (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD) +#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD) -#define SANITIZER_INTERCEPTOR_HOOKS SI_LINUX 
|| SI_MAC || SI_WINDOWS +#define SANITIZER_INTERCEPTOR_HOOKS (SI_LINUX || SI_MAC || SI_WINDOWS) #define SANITIZER_INTERCEPT_RECV_RECVFROM SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SEND_SENDTO SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX -#define SANITIZER_INTERCEPT_STAT (SI_FREEBSD || SI_MAC || SI_ANDROID) -#define SANITIZER_INTERCEPT___XSTAT !SANITIZER_INTERCEPT_STAT && SI_NOT_WINDOWS +#define SANITIZER_INTERCEPT_STAT \ + (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD) +#define SANITIZER_INTERCEPT___XSTAT \ + (!SANITIZER_INTERCEPT_STAT && SI_NOT_WINDOWS) #define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT #define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_UTMP SI_NOT_WINDOWS && !SI_MAC && !SI_FREEBSD -#define SANITIZER_INTERCEPT_UTMPX SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD +#define SANITIZER_INTERCEPT_UTMP (SI_NOT_WINDOWS && !SI_MAC && !SI_FREEBSD) +#define SANITIZER_INTERCEPT_UTMPX (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD) #define SANITIZER_INTERCEPT_GETLOADAVG \ - SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD + (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD) -#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (!SI_FREEBSD && !SI_MAC) -#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC) -#define SANITIZER_INTERCEPT_PVALLOC (!SI_FREEBSD && !SI_MAC) -#define SANITIZER_INTERCEPT_CFREE (!SI_FREEBSD && !SI_MAC) +#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \ + (!SI_FREEBSD && !SI_MAC && !SI_NETBSD) +#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD) +#define SANITIZER_INTERCEPT_PVALLOC (!SI_FREEBSD && !SI_MAC && !SI_NETBSD) +#define SANITIZER_INTERCEPT_CFREE (!SI_FREEBSD && !SI_MAC && !SI_NETBSD) #define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC) #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC) #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_WCSCAT SI_NOT_WINDOWS #endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc index 03f73ae88308..d7fa5f6451d1 100644 --- a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc +++ b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc @@ -1,566 +1,566 @@ //===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // See sanitizer_stoptheworld.h for details. // This implementation was inspired by Markus Gutschke's linuxthreads.cc. 
// //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \ defined(__aarch64__) || defined(__powerpc64__) || \ defined(__s390__) || defined(__i386__) || \ defined(__arm__)) #include "sanitizer_stoptheworld.h" #include "sanitizer_platform_limits_posix.h" #include "sanitizer_atomic.h" #include <errno.h> #include <sched.h> // for CLONE_* definitions #include <stddef.h> #include <sys/prctl.h> // for PR_* definitions #include <sys/ptrace.h> // for PTRACE_* definitions #include <sys/types.h> // for pid_t #include <sys/uio.h> // for iovec #include <elf.h> // for NT_PRSTATUS #if defined(__aarch64__) && !SANITIZER_ANDROID // GLIBC 2.20+ sys/user does not include asm/ptrace.h # include <asm/ptrace.h> #endif #include <sys/user.h> // for user_regs_struct #if SANITIZER_ANDROID && SANITIZER_MIPS # include <asm/reg.h> // for mips SP register in sys/user.h #endif #include <signal.h> // for signal-related stuff #ifdef sa_handler # undef sa_handler #endif #ifdef sa_sigaction # undef sa_sigaction #endif #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_libc.h" #include "sanitizer_linux.h" #include "sanitizer_mutex.h" #include "sanitizer_placement_new.h" // This module works by spawning a Linux task which then attaches to every // thread in the caller process with ptrace. This suspends the threads, and // PTRACE_GETREGS can then be used to obtain their register state. The callback // supplied to StopTheWorld() is run in the tracer task while the threads are // suspended. // The tracer task must be placed in a different thread group for ptrace to // work, so it cannot be spawned as a pthread. Instead, we use the low-level // clone() interface (we want to share the address space with the caller // process, so we prefer clone() over fork()). // // We don't use any libc functions, relying instead on direct syscalls. There // are two reasons for this: // 1. calling a library function while threads are suspended could cause a // deadlock, if one of the threads happens to be holding a libc lock; // 2. it's generally not safe to call libc functions from the tracer task, // because clone() does not set up thread-local storage for it. Any // thread-local variables used by libc will be shared between the tracer task // and the thread which spawned it. namespace __sanitizer { class SuspendedThreadsListLinux : public SuspendedThreadsList { public: SuspendedThreadsListLinux() : thread_ids_(1024) {} tid_t GetThreadID(uptr index) const; uptr ThreadCount() const; bool ContainsTid(tid_t thread_id) const; void Append(tid_t tid); PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer, uptr *sp) const; uptr RegisterCount() const; private: InternalMmapVector<tid_t> thread_ids_; }; // Structure for passing arguments into the tracer thread. struct TracerThreadArgument { StopTheWorldCallback callback; void *callback_argument; // The tracer thread waits on this mutex while the parent finishes its // preparations. BlockingMutex mutex; // Tracer thread signals its completion by setting done. atomic_uintptr_t done; uptr parent_pid; };
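// A minimal client, in sketch form (hypothetical caller; the leak checker in
// LSan is a real one): the callback runs in the tracer task while every other
// thread is frozen, so it must not allocate or take locks that a suspended
// thread might hold.
#if 0
static void CountThreadsCallback(const SuspendedThreadsList &suspended,
                                 void *argument) {
  *static_cast<uptr *>(argument) = suspended.ThreadCount();
}
// Usage: uptr n = 0; StopTheWorld(CountThreadsCallback, &n);
#endif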
// This class handles thread suspending/unsuspending in the tracer thread. class ThreadSuspender { public: explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg) : arg(arg) , pid_(pid) { CHECK_GE(pid, 0); } bool SuspendAllThreads(); void ResumeAllThreads(); void KillAllThreads(); SuspendedThreadsListLinux &suspended_threads_list() { return suspended_threads_list_; } TracerThreadArgument *arg; private: SuspendedThreadsListLinux suspended_threads_list_; pid_t pid_; bool SuspendThread(tid_t thread_id); }; bool ThreadSuspender::SuspendThread(tid_t tid) { // Are we already attached to this thread? // Currently this check takes linear time, however the number of threads is // usually small. if (suspended_threads_list_.ContainsTid(tid)) return false; int pterrno; if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr), &pterrno)) { // Either the thread is dead, or something prevented us from attaching. // Log this event and move on. VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid, pterrno); return false; } else { VReport(2, "Attached to thread %zu.\n", (uptr)tid); // The thread is not guaranteed to stop before ptrace returns, so we must // wait on it. Note: if the thread receives a signal concurrently, // we can get notification about the signal before notification about stop. // In such a case we need to forward the signal to the thread, otherwise // the signal will be missed (as we do PTRACE_DETACH with arg=0) and // any logic relying on signals will break. After forwarding we need to // continue to wait for stopping, because the thread is not stopped yet. // We do ignore delivery of SIGSTOP, because we want to make stop-the-world // as invisible as possible. for (;;) { int status; uptr waitpid_status; HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL)); int wperrno; if (internal_iserror(waitpid_status, &wperrno)) { // Got an ECHILD error. I don't think this situation is possible, but it // doesn't hurt to report it. VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n", (uptr)tid, wperrno); internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr); return false; } if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) { internal_ptrace(PTRACE_CONT, tid, nullptr, (void*)(uptr)WSTOPSIG(status)); continue; } break; } suspended_threads_list_.Append(tid); return true; } } void ThreadSuspender::ResumeAllThreads() { for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) { pid_t tid = suspended_threads_list_.GetThreadID(i); int pterrno; if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr), &pterrno)) { VReport(2, "Detached from thread %d.\n", tid); } else { // Either the thread is dead, or we are already detached. // The latter case is possible, for instance, if this function was called // from a signal handler. VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno); } } } void ThreadSuspender::KillAllThreads() { for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i), nullptr, nullptr); } bool ThreadSuspender::SuspendAllThreads() { ThreadLister thread_lister(pid_); bool added_threads; bool first_iteration = true; do { // Run through the directory entries once. added_threads = false; pid_t tid = thread_lister.GetNextTID(); while (tid >= 0) { if (SuspendThread(tid)) added_threads = true; tid = thread_lister.GetNextTID(); } if (thread_lister.error() || (first_iteration && !added_threads)) { // Detach threads and fail.
ResumeAllThreads(); return false; } thread_lister.Reset(); first_iteration = false; } while (added_threads); return true; } // Pointer to the ThreadSuspender instance for use in the signal handler. static ThreadSuspender *thread_suspender_instance = nullptr; // Synchronous signals that should not be blocked. static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGXCPU, SIGXFSZ }; static void TracerThreadDieCallback() { // Generally a call to Die() in the tracer thread should be fatal to the // parent process as well, because they share the address space. // This really only works correctly if all the threads are suspended at this // point. So we correctly handle calls to Die() from within the callback, but // not those that happen before or after the callback. Hopefully there aren't // a lot of opportunities for that to happen... ThreadSuspender *inst = thread_suspender_instance; if (inst && stoptheworld_tracer_pid == internal_getpid()) { inst->KillAllThreads(); thread_suspender_instance = nullptr; } } // Signal handler to wake up suspended threads when the tracer thread dies. static void TracerThreadSignalHandler(int signum, void *siginfo, void *uctx) { SignalContext ctx = SignalContext::Create(siginfo, uctx); Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum, ctx.addr, ctx.pc, ctx.sp); ThreadSuspender *inst = thread_suspender_instance; if (inst) { if (signum == SIGABRT) inst->KillAllThreads(); else inst->ResumeAllThreads(); RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback)); thread_suspender_instance = nullptr; atomic_store(&inst->arg->done, 1, memory_order_relaxed); } internal__exit((signum == SIGABRT) ? 1 : 2); } // Size of alternative stack for signal handlers in the tracer thread. static const int kHandlerStackSize = 4096; // This function will be run as a cloned task. static int TracerThread(void* argument) { TracerThreadArgument *tracer_thread_argument = (TracerThreadArgument *)argument; internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0); // Check if the parent is already dead. if (internal_getppid() != tracer_thread_argument->parent_pid) internal__exit(4); // Wait for the parent thread to finish preparations. tracer_thread_argument->mutex.Lock(); tracer_thread_argument->mutex.Unlock(); RAW_CHECK(AddDieCallback(TracerThreadDieCallback)); ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument); // Global pointer for the signal handler. thread_suspender_instance = &thread_suspender; // Alternate stack for signal handling. InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize); - struct sigaltstack handler_stack; + stack_t handler_stack; internal_memset(&handler_stack, 0, sizeof(handler_stack)); handler_stack.ss_sp = handler_stack_memory.data(); handler_stack.ss_size = kHandlerStackSize; internal_sigaltstack(&handler_stack, nullptr);
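// For orientation, the same alternate-stack-plus-handler setup with plain
// POSIX calls (a sketch only; the tracer must stick to the internal_*
// wrappers because libc is off-limits in this context):
#if 0
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
static void OnSyncSignal(int, siginfo_t *, void *) { _exit(2); }
static void InstallSketchHandler(int sig) {
  stack_t ss = {};
  ss.ss_sp = malloc(SIGSTKSZ);   // handler runs on this dedicated stack
  ss.ss_size = SIGSTKSZ;
  sigaltstack(&ss, nullptr);
  struct sigaction sa = {};
  sa.sa_sigaction = OnSyncSignal;
  sa.sa_flags = SA_ONSTACK | SA_SIGINFO;
  sigaction(sig, &sa, nullptr);
}
#endif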
// Install our handler for synchronous signals. Other signals should be // blocked by the mask we inherited from the parent thread. for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) { __sanitizer_sigaction act; internal_memset(&act, 0, sizeof(act)); act.sigaction = TracerThreadSignalHandler; act.sa_flags = SA_ONSTACK | SA_SIGINFO; internal_sigaction_norestorer(kSyncSignals[i], &act, 0); } int exit_code = 0; if (!thread_suspender.SuspendAllThreads()) { VReport(1, "Failed suspending threads.\n"); exit_code = 3; } else { tracer_thread_argument->callback(thread_suspender.suspended_threads_list(), tracer_thread_argument->callback_argument); thread_suspender.ResumeAllThreads(); exit_code = 0; } RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback)); thread_suspender_instance = nullptr; atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed); return exit_code; } class ScopedStackSpaceWithGuard { public: explicit ScopedStackSpaceWithGuard(uptr stack_size) { stack_size_ = stack_size; guard_size_ = GetPageSizeCached(); // FIXME: Omitting MAP_STACK here works in current kernels but might break // in the future. guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_, "ScopedStackWithGuard"); CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_)); } ~ScopedStackSpaceWithGuard() { UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_); } void *Bottom() const { return (void *)(guard_start_ + stack_size_ + guard_size_); } private: uptr stack_size_; uptr guard_size_; uptr guard_start_; }; // We have a limitation on the stack frame size, so some stuff had to be moved // into globals. static __sanitizer_sigset_t blocked_sigset; static __sanitizer_sigset_t old_sigset; class StopTheWorldScope { public: StopTheWorldScope() { // Make this process dumpable. Processes that are not dumpable cannot be // attached to. process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0); if (!process_was_dumpable_) internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); } ~StopTheWorldScope() { // Restore the dumpable flag. if (!process_was_dumpable_) internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0); } private: int process_was_dumpable_; }; // When sanitizer output is being redirected to a file (i.e. by using // log_path), the tracer should write to the parent's log instead of trying to // open a new file. Alert the logging code to the fact that we have a tracer. struct ScopedSetTracerPID { explicit ScopedSetTracerPID(uptr tracer_pid) { stoptheworld_tracer_pid = tracer_pid; stoptheworld_tracer_ppid = internal_getpid(); } ~ScopedSetTracerPID() { stoptheworld_tracer_pid = 0; stoptheworld_tracer_ppid = 0; } }; void StopTheWorld(StopTheWorldCallback callback, void *argument) { StopTheWorldScope in_stoptheworld; // Prepare the arguments for TracerThread. struct TracerThreadArgument tracer_thread_argument; tracer_thread_argument.callback = callback; tracer_thread_argument.callback_argument = argument; tracer_thread_argument.parent_pid = internal_getpid(); atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed); const uptr kTracerStackSize = 2 * 1024 * 1024; ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize); // Block the execution of TracerThread until after we have set ptrace // permissions. tracer_thread_argument.mutex.Lock();
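// The block-spawn-restore pattern used below, in miniature with plain
// pthreads (a sketch only; the real code must use internal_sigprocmask()
// and clone(), for the reasons the following comment explains):
#if 0
#include <pthread.h>
#include <signal.h>
static void SpawnWithSignalsBlocked(void *(*worker)(void *), void *arg) {
  sigset_t all, old;
  sigfillset(&all);
  pthread_sigmask(SIG_SETMASK, &all, &old);
  pthread_t th;
  pthread_create(&th, nullptr, worker, arg);  // inherits the full block mask
  pthread_sigmask(SIG_SETMASK, &old, nullptr);
}
#endif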
// Signal handling story. // We don't want async signals to be delivered to the tracer thread, // so we block all async signals before creating the thread. An async signal // handler can temporarily modify errno, which is shared with this thread. // We ought to use pthread_sigmask here, because sigprocmask has undefined // behavior in multithreaded programs. However, on linux sigprocmask is // equivalent to pthread_sigmask, with the exception that pthread_sigmask // does not allow blocking some signals used internally in the pthread // implementation. We are fine with blocking them here, as we are really not // going to pthread_cancel the thread. // The tracer thread should not raise any synchronous signals. But in case it // does, we set up a special handler for sync signals that properly kills the // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers // in the tracer thread won't interfere with the user program. Double note: if // a user does something along the lines of 'kill -11 pid', that can kill the // process even if the user set up their own handler for SEGV. // Thing to watch out for: this code should not change the behavior of user // code in any observable way. In particular it should not override user signal // handlers. internal_sigfillset(&blocked_sigset); for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) internal_sigdelset(&blocked_sigset, kSyncSignals[i]); int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset); CHECK_EQ(rv, 0); uptr tracer_pid = internal_clone( TracerThread, tracer_stack.Bottom(), CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED, &tracer_thread_argument, nullptr /* parent_tidptr */, nullptr /* newtls */, nullptr /* child_tidptr */); internal_sigprocmask(SIG_SETMASK, &old_sigset, 0); int local_errno = 0; if (internal_iserror(tracer_pid, &local_errno)) { VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno); tracer_thread_argument.mutex.Unlock(); } else { ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid); // On some systems we have to explicitly declare that we want to be traced // by the tracer thread. #ifdef PR_SET_PTRACER internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0); #endif // Allow the tracer thread to start. tracer_thread_argument.mutex.Unlock(); // NOTE: errno is shared between this thread and the tracer thread. // internal_waitpid() may call syscall() which can access/spoil errno, // so we can't call it now. Instead we wait for the tracer thread to finish, // using the spin loop below. The man page for sched_yield() says "In the // Linux implementation, sched_yield() always succeeds", so let's hope it // does not spoil errno. Note that this spin loop runs only for brief periods // before the tracer thread has suspended us and when it starts unblocking // threads. while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0) sched_yield(); // Now the tracer thread is about to exit and does not touch errno, // wait for it. for (;;) { uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL); if (!internal_iserror(waitpid_status, &local_errno)) break; if (local_errno == EINTR) continue; VReport(1, "Waiting on the tracer thread failed (errno %d).\n", local_errno); break; } } } // Platform-specific methods from SuspendedThreadsList.
#if SANITIZER_ANDROID && defined(__arm__) typedef pt_regs regs_struct; #define REG_SP ARM_sp #elif SANITIZER_LINUX && defined(__arm__) typedef user_regs regs_struct; #define REG_SP uregs[13] #elif defined(__i386__) || defined(__x86_64__) typedef user_regs_struct regs_struct; #if defined(__i386__) #define REG_SP esp #else #define REG_SP rsp #endif #elif defined(__powerpc__) || defined(__powerpc64__) typedef pt_regs regs_struct; #define REG_SP gpr[PT_R1] #elif defined(__mips__) typedef struct user regs_struct; # if SANITIZER_ANDROID # define REG_SP regs[EF_R29] # else # define REG_SP regs[EF_REG29] # endif #elif defined(__aarch64__) typedef struct user_pt_regs regs_struct; #define REG_SP sp #define ARCH_IOVEC_FOR_GETREGSET #elif defined(__s390__) typedef _user_regs_struct regs_struct; #define REG_SP gprs[15] #define ARCH_IOVEC_FOR_GETREGSET #else #error "Unsupported architecture" #endif // SANITIZER_ANDROID && defined(__arm__) tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const { CHECK_LT(index, thread_ids_.size()); return thread_ids_[index]; } uptr SuspendedThreadsListLinux::ThreadCount() const { return thread_ids_.size(); } bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const { for (uptr i = 0; i < thread_ids_.size(); i++) { if (thread_ids_[i] == thread_id) return true; } return false; } void SuspendedThreadsListLinux::Append(tid_t tid) { thread_ids_.push_back(tid); } PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP( uptr index, uptr *buffer, uptr *sp) const { pid_t tid = GetThreadID(index); regs_struct regs; int pterrno; #ifdef ARCH_IOVEC_FOR_GETREGSET struct iovec regset_io; regset_io.iov_base = ®s; regset_io.iov_len = sizeof(regs_struct); bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid, (void*)NT_PRSTATUS, (void*)®set_io), &pterrno); #else bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr, ®s), &pterrno); #endif if (isErr) { VReport(1, "Could not get registers from thread %d (errno %d).\n", tid, pterrno); // ESRCH means that the given thread is not suspended or already dead. // Therefore it's unsafe to inspect its data (e.g. walk through stack) and // we should notify caller about this. return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL : REGISTERS_UNAVAILABLE; } *sp = regs.REG_SP; internal_memcpy(buffer, ®s, sizeof(regs)); return REGISTERS_AVAILABLE; } uptr SuspendedThreadsListLinux::RegisterCount() const { return sizeof(regs_struct) / sizeof(uptr); } } // namespace __sanitizer #endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) // || defined(__aarch64__) || defined(__powerpc64__) // || defined(__s390__) || defined(__i386__) || defined(__arm__) diff --git a/lib/sanitizer_common/tests/sanitizer_bitvector_test.cc b/lib/sanitizer_common/tests/sanitizer_bitvector_test.cc index dec5459b2515..669365b80ecb 100644 --- a/lib/sanitizer_common/tests/sanitizer_bitvector_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_bitvector_test.cc @@ -1,178 +1,178 @@ //===-- sanitizer_bitvector_test.cc ---------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of Sanitizer runtime. // Tests for sanitizer_bitvector.h. 
// //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_bitvector.h" #include "sanitizer_test_utils.h" #include "gtest/gtest.h" #include <algorithm> #include <vector> #include <random> #include <set> using namespace __sanitizer; using namespace std; // Check the 'bv' == 's' and that the indexes go in increasing order. // Also check the BV::Iterator template <class BV> static void CheckBV(const BV &bv, const set<uptr> &s) { BV t; t.copyFrom(bv); set<uptr> t_s(s); uptr last_idx = bv.size(); uptr count = 0; for (typename BV::Iterator it(bv); it.hasNext();) { uptr idx = it.next(); count++; if (last_idx != bv.size()) EXPECT_LT(last_idx, idx); last_idx = idx; EXPECT_TRUE(s.count(idx)); } EXPECT_EQ(count, s.size()); last_idx = bv.size(); while (!t.empty()) { uptr idx = t.getAndClearFirstOne(); if (last_idx != bv.size()) EXPECT_LT(last_idx, idx); last_idx = idx; EXPECT_TRUE(t_s.erase(idx)); } EXPECT_TRUE(t_s.empty()); } template <class BV> void Print(const BV &bv) { BV t; t.copyFrom(bv); while (!t.empty()) { uptr idx = t.getAndClearFirstOne(); - fprintf(stderr, "%zd ", idx); + fprintf(stderr, "%lu ", idx); } fprintf(stderr, "\n"); } void Print(const set<uptr> &s) { for (set<uptr>::iterator it = s.begin(); it != s.end(); ++it) { - fprintf(stderr, "%zd ", *it); + fprintf(stderr, "%lu ", *it); } fprintf(stderr, "\n"); } template <class BV> void TestBitVector(uptr expected_size) { std::mt19937 r; BV bv, bv1, t_bv; EXPECT_EQ(expected_size, BV::kSize); bv.clear(); EXPECT_TRUE(bv.empty()); bv.setBit(5); EXPECT_FALSE(bv.empty()); EXPECT_FALSE(bv.getBit(4)); EXPECT_FALSE(bv.getBit(6)); EXPECT_TRUE(bv.getBit(5)); bv.clearBit(5); EXPECT_FALSE(bv.getBit(5)); // test random bits bv.clear(); set<uptr> s; for (uptr it = 0; it < 1000; it++) { uptr bit = ((uptr)my_rand() % bv.size()); EXPECT_EQ(bv.getBit(bit), s.count(bit) == 1); switch (my_rand() % 2) { case 0: EXPECT_EQ(bv.setBit(bit), s.insert(bit).second); break; case 1: size_t old_size = s.size(); s.erase(bit); EXPECT_EQ(bv.clearBit(bit), old_size > s.size()); break; } EXPECT_EQ(bv.getBit(bit), s.count(bit) == 1); } vector<uptr> bits(bv.size()); // Test setUnion, setIntersection, setDifference, // intersectsWith, and getAndClearFirstOne.
for (uptr it = 0; it < 30; it++) { // iota for (size_t j = 0; j < bits.size(); j++) bits[j] = j; std::shuffle(bits.begin(), bits.end(), r); set<uptr> s, s1, t_s; bv.clear(); bv1.clear(); uptr n_bits = ((uptr)my_rand() % bv.size()) + 1; uptr n_bits1 = (uptr)my_rand() % (bv.size() / 2); EXPECT_TRUE(n_bits > 0 && n_bits <= bv.size()); EXPECT_TRUE(n_bits1 < bv.size() / 2); for (uptr i = 0; i < n_bits; i++) { bv.setBit(bits[i]); s.insert(bits[i]); } CheckBV(bv, s); for (uptr i = 0; i < n_bits1; i++) { bv1.setBit(bits[bv.size() / 2 + i]); s1.insert(bits[bv.size() / 2 + i]); } CheckBV(bv1, s1); vector<uptr> vec; set_intersection(s.begin(), s.end(), s1.begin(), s1.end(), back_insert_iterator<vector<uptr> >(vec)); EXPECT_EQ(bv.intersectsWith(bv1), !vec.empty()); // setUnion t_s = s; t_bv.copyFrom(bv); t_s.insert(s1.begin(), s1.end()); EXPECT_EQ(t_bv.setUnion(bv1), s.size() != t_s.size()); CheckBV(t_bv, t_s); // setIntersection t_s = set<uptr>(vec.begin(), vec.end()); t_bv.copyFrom(bv); EXPECT_EQ(t_bv.setIntersection(bv1), s.size() != t_s.size()); CheckBV(t_bv, t_s); // setDifference vec.clear(); set_difference(s.begin(), s.end(), s1.begin(), s1.end(), back_insert_iterator<vector<uptr> >(vec)); t_s = set<uptr>(vec.begin(), vec.end()); t_bv.copyFrom(bv); EXPECT_EQ(t_bv.setDifference(bv1), s.size() != t_s.size()); CheckBV(t_bv, t_s); } } TEST(SanitizerCommon, BasicBitVector) { TestBitVector<BasicBitVector<u8> >(8); TestBitVector<BasicBitVector<u16> >(16); TestBitVector<BasicBitVector<> >(SANITIZER_WORDSIZE); } TEST(SanitizerCommon, TwoLevelBitVector) { uptr ws = SANITIZER_WORDSIZE; TestBitVector<TwoLevelBitVector<1, BasicBitVector<u8> > >(8 * 8); TestBitVector<TwoLevelBitVector<> >(ws * ws); TestBitVector<TwoLevelBitVector<2> >(ws * ws * 2); TestBitVector<TwoLevelBitVector<3> >(ws * ws * 3); TestBitVector<TwoLevelBitVector<3, BasicBitVector<u16> > >(16 * 16 * 3); } diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp index ec9132f90a4f..6f30ee987513 100644 --- a/lib/scudo/scudo_allocator.cpp +++ b/lib/scudo/scudo_allocator.cpp @@ -1,723 +1,744 @@ //===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Scudo Hardened Allocator implementation. /// It uses the sanitizer_common allocator as a base and aims at mitigating /// heap corruption vulnerabilities. It provides a checksum-guarded chunk /// header, a delayed free list, and additional sanity checks. /// //===----------------------------------------------------------------------===// #include "scudo_allocator.h" #include "scudo_crc32.h" #include "scudo_tls.h" #include "scudo_utils.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_quarantine.h" -#include <errno.h> #include <string.h> namespace __scudo { // Global static cookie, initialized at start-up. static uptr Cookie; // We default to software CRC32 if the alternatives are not supported, either // at compilation or at runtime. static atomic_uint8_t HashAlgorithm = { CRC32Software }; INLINE u32 computeCRC32(uptr Crc, uptr Value, uptr *Array, uptr ArraySize) { // If the hardware CRC32 feature is defined here, it was enabled everywhere, // as opposed to only for scudo_crc32.cpp. This means that other hardware // specific instructions were likely emitted at other places, and as a // result there is no reason to not use it here.
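As a toy illustration of that dispatch, the sketch below folds a value and an array through one CRC32 routine chosen at startup; the bitwise CRC32C stands in for Scudo's computeSoftwareCRC32, and a real hardware path would wrap the SSE4.2 or ARMv8 intrinsics behind the same signature. This is an assumption-laden sketch, not the Scudo implementation itself.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*Crc32Fn)(uint32_t Crc, uint64_t Data);

// Bitwise CRC32C (polynomial 0x82F63B78), standing in for the software
// fallback; _mm_crc32_u64 / __crc32cd would back the hardware variant.
static uint32_t SoftCRC32C(uint32_t Crc, uint64_t Data) {
  for (int i = 0; i < 64; i++) {
    Crc ^= (uint32_t)((Data >> i) & 1);
    Crc = (Crc >> 1) ^ (0x82F63B78u & (0u - (Crc & 1u)));
  }
  return Crc;
}

// Mirrors the shape of computeCRC32 above: fold a value, then an array,
// through whichever implementation was selected once at startup.
static uint32_t ChecksumChunk(Crc32Fn Fn, uint32_t Crc, uint64_t Value,
                              const uint64_t *Array, size_t ArraySize) {
  Crc = Fn(Crc, Value);
  for (size_t i = 0; i < ArraySize; i++)
    Crc = Fn(Crc, Array[i]);
  return Crc;
}

int main(void) {
  // Startup code would probe CPUID/hwcaps and pick the hardware routine if
  // present; only the software fallback exists in this sketch.
  Crc32Fn Fn = SoftCRC32C;
  uint64_t Header[2] = {0x1234, 0x5678};
  printf("checksum = 0x%08x\n", ChecksumChunk(Fn, 0, 0xdeadbeefu, Header, 2));
  return 0;
}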
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) Crc = CRC32_INTRINSIC(Crc, Value); for (uptr i = 0; i < ArraySize; i++) Crc = CRC32_INTRINSIC(Crc, Array[i]); return Crc; #else if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) { Crc = computeHardwareCRC32(Crc, Value); for (uptr i = 0; i < ArraySize; i++) Crc = computeHardwareCRC32(Crc, Array[i]); return Crc; } Crc = computeSoftwareCRC32(Crc, Value); for (uptr i = 0; i < ArraySize; i++) Crc = computeSoftwareCRC32(Crc, Array[i]); return Crc; #endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) } static ScudoBackendAllocator &getBackendAllocator(); struct ScudoChunk : UnpackedHeader { // We can't use the offset member of the chunk itself, as we would double // fetch it without any guarantee that it wouldn't have been tampered with. To // prevent this, we work with a local copy of the header. void *getAllocBeg(UnpackedHeader *Header) { return reinterpret_cast<void *>( reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog)); } // Returns the usable size for a chunk, meaning the number of bytes from the // beginning of the user data to the end of the backend allocated chunk. uptr getUsableSize(UnpackedHeader *Header) { uptr Size = - getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header), + getBackendAllocator().getActuallyAllocatedSize(getAllocBeg(Header), Header->FromPrimary); if (Size == 0) return 0; return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog); } // Compute the checksum of the Chunk pointer and its ChunkHeader. u16 computeChecksum(UnpackedHeader *Header) const { UnpackedHeader ZeroChecksumHeader = *Header; ZeroChecksumHeader.Checksum = 0; uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)]; memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder)); u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HeaderHolder, ARRAY_SIZE(HeaderHolder)); return static_cast<u16>(Crc); } // Checks the validity of a chunk by verifying its checksum. It doesn't // incur termination in the event of an invalid chunk. bool isValid() { UnpackedHeader NewUnpackedHeader; const AtomicPackedHeader *AtomicHeader = reinterpret_cast<const AtomicPackedHeader *>(this); PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader); NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader); return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader)); } // Nulls out a chunk header. When returning the chunk to the backend, there // is no need to store a valid ChunkAvailable header, as this would be // computationally expensive. Zeroing out serves the same purpose by making // the header invalid. In the extremely rare event where 0 would be a valid // checksum for the chunk, the state of the chunk is ChunkAvailable anyway. COMPILER_CHECK(ChunkAvailable == 0); void eraseHeader() { PackedHeader NullPackedHeader = 0; AtomicPackedHeader *AtomicHeader = reinterpret_cast<AtomicPackedHeader *>(this); atomic_store_relaxed(AtomicHeader, NullPackedHeader); } // Loads and unpacks the header, verifying the checksum in the process. void loadHeader(UnpackedHeader *NewUnpackedHeader) const { const AtomicPackedHeader *AtomicHeader = reinterpret_cast<const AtomicPackedHeader *>(this); PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader); *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader); if (UNLIKELY(NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader))) { dieWithMessage("ERROR: corrupted chunk header at address %p\n", this); } } // Packs and stores the header, computing the checksum in the process.
void storeHeader(UnpackedHeader *NewUnpackedHeader) { NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader); PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); AtomicPackedHeader *AtomicHeader = reinterpret_cast<AtomicPackedHeader *>(this); atomic_store_relaxed(AtomicHeader, NewPackedHeader); } // Packs and stores the header, computing the checksum in the process. We // compare the current header with the expected provided one to ensure that // we are not being raced by a corruption occurring in another thread. void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader, UnpackedHeader *OldUnpackedHeader) { NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader); PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader); AtomicPackedHeader *AtomicHeader = reinterpret_cast<AtomicPackedHeader *>(this); if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader, &OldPackedHeader, NewPackedHeader, memory_order_relaxed))) { dieWithMessage("ERROR: race on chunk header at address %p\n", this); } } }; ScudoChunk *getScudoChunk(uptr UserBeg) { return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize); } struct AllocatorOptions { u32 QuarantineSizeMb; u32 ThreadLocalQuarantineSizeKb; bool MayReturnNull; s32 ReleaseToOSIntervalMs; bool DeallocationTypeMismatch; bool DeleteSizeMismatch; bool ZeroContents; void setFrom(const Flags *f, const CommonFlags *cf); void copyTo(Flags *f, CommonFlags *cf) const; }; void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) { MayReturnNull = cf->allocator_may_return_null; ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms; QuarantineSizeMb = f->QuarantineSizeMb; ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb; DeallocationTypeMismatch = f->DeallocationTypeMismatch; DeleteSizeMismatch = f->DeleteSizeMismatch; ZeroContents = f->ZeroContents; } void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const { cf->allocator_may_return_null = MayReturnNull; cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs; f->QuarantineSizeMb = QuarantineSizeMb; f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb; f->DeallocationTypeMismatch = DeallocationTypeMismatch; f->DeleteSizeMismatch = DeleteSizeMismatch; f->ZeroContents = ZeroContents; } static void initScudoInternal(const AllocatorOptions &Options); static bool ScudoInitIsRunning = false; void initScudo() { SanitizerToolName = "Scudo"; CHECK(!ScudoInitIsRunning && "Scudo init calls itself!"); ScudoInitIsRunning = true; // Check if hardware CRC32 is supported in the binary and by the platform, if // so, opt for the CRC32 hardware version of the checksum. if (computeHardwareCRC32 && testCPUFeature(CRC32CPUFeature)) atomic_store_relaxed(&HashAlgorithm, CRC32Hardware); initFlags(); AllocatorOptions Options; Options.setFrom(getFlags(), common_flags()); initScudoInternal(Options); // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use. ScudoInitIsRunning = false; } struct QuarantineCallback { explicit QuarantineCallback(AllocatorCache *Cache) : Cache_(Cache) {} // Chunk recycling function, returns a quarantined chunk to the backend, // first making sure it hasn't been tampered with.
void Recycle(ScudoChunk *Chunk) { UnpackedHeader Header; Chunk->loadHeader(&Header); if (UNLIKELY(Header.State != ChunkQuarantine)) { dieWithMessage("ERROR: invalid chunk state when recycling address %p\n", Chunk); } Chunk->eraseHeader(); void *Ptr = Chunk->getAllocBeg(&Header); - getBackendAllocator().Deallocate(Cache_, Ptr, Header.FromPrimary); + if (Header.FromPrimary) + getBackendAllocator().deallocatePrimary(Cache_, Ptr); + else + getBackendAllocator().deallocateSecondary(Ptr); } // Internal quarantine allocation and deallocation functions. We first check // that the batches are indeed serviced by the Primary. // TODO(kostyak): figure out the best way to protect the batches. COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize); void *Allocate(uptr Size) { - return getBackendAllocator().Allocate(Cache_, Size, MinAlignment, true); + return getBackendAllocator().allocatePrimary(Cache_, Size); } void Deallocate(void *Ptr) { - getBackendAllocator().Deallocate(Cache_, Ptr, true); + getBackendAllocator().deallocatePrimary(Cache_, Ptr); } AllocatorCache *Cache_; }; typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine; typedef ScudoQuarantine::Cache ScudoQuarantineCache; COMPILER_CHECK(sizeof(ScudoQuarantineCache) <= sizeof(ScudoThreadContext::QuarantineCachePlaceHolder)); AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) { return &ThreadContext->Cache; } ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) { return reinterpret_cast< ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder); } ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) { return &ThreadContext->Prng; } struct ScudoAllocator { static const uptr MaxAllowedMallocSize = FIRST_32_SECOND_64(2UL << 30, 1ULL << 40); typedef ReturnNullOrDieOnFailure FailureHandler; ScudoBackendAllocator BackendAllocator; ScudoQuarantine AllocatorQuarantine; + StaticSpinMutex GlobalPrngMutex; + ScudoPrng GlobalPrng; + // The fallback caches are used when the thread local caches have been // 'destroyed' on thread tear-down. They are protected by a Mutex as they can // be accessed by different threads. StaticSpinMutex FallbackMutex; AllocatorCache FallbackAllocatorCache; ScudoQuarantineCache FallbackQuarantineCache; ScudoPrng FallbackPrng; bool DeallocationTypeMismatch; bool ZeroContents; bool DeleteSizeMismatch; explicit ScudoAllocator(LinkerInitialized) : AllocatorQuarantine(LINKER_INITIALIZED), FallbackQuarantineCache(LINKER_INITIALIZED) {} void init(const AllocatorOptions &Options) { // Verify that the header offset field can hold the maximum offset. In the // case of the Secondary allocator, it takes care of alignment and the // offset will always be 0. In the case of the Primary, the worst case // scenario happens in the last size class, when the backend allocation // would already be aligned on the requested alignment, which would happen // to be the maximum alignment that would fit in that size class. As a // result, the maximum offset will be at most the maximum alignment for the // last size class minus the header size, in multiples of MinAlignment.
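To put concrete numbers on that worst case, here is a small self-contained check; the parameter values are assumptions for illustration and need not match Scudo's configuration on any given platform. The real verification code follows right after.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  // Assumed example parameters.
  const uint64_t kMaxSize = 1 << 17;           // Largest primary size class.
  const uint64_t MinAlignment = 1 << 4;        // 16-byte minimum alignment.
  const uint64_t MinAlignmentLog = 4;
  const uint64_t AlignedChunkHeaderSize = 16;  // Header rounded to alignment.

  // Largest power of two that still fits in the last size class: the largest
  // alignment a Primary-backed allocation can be asked to honor.
  uint64_t MaxPrimaryAlignment = 1;
  while (MaxPrimaryAlignment * 2 <= kMaxSize - MinAlignment)
    MaxPrimaryAlignment *= 2;

  // Worst-case distance from the backend pointer to the user pointer, in
  // MinAlignment units, which is exactly how the header stores the offset.
  uint64_t MaxOffset =
      (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
  printf("MaxPrimaryAlignment=%llu MaxOffset=%llu\n",
         (unsigned long long)MaxPrimaryAlignment,
         (unsigned long long)MaxOffset);
  return 0;
}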
UnpackedHeader Header = {}; - uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex( - SizeClassMap::kMaxSize - MinAlignment); - uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> - MinAlignmentLog; + uptr MaxPrimaryAlignment = + 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment); + uptr MaxOffset = + (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog; Header.Offset = MaxOffset; if (Header.Offset != MaxOffset) { dieWithMessage("ERROR: the maximum possible offset doesn't fit in the " "header\n"); } // Verify that we can fit the maximum size or amount of unused bytes in the // header. Given that the Secondary fits the allocation to a page, the worst // case scenario happens in the Primary. It will depend on the second to // last and last class sizes, as well as the dynamic base for the Primary. // The following is an over-approximation that works for our needs. uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1; Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes; if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) { dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in " "the header\n"); } DeallocationTypeMismatch = Options.DeallocationTypeMismatch; DeleteSizeMismatch = Options.DeleteSizeMismatch; ZeroContents = Options.ZeroContents; SetAllocatorMayReturnNull(Options.MayReturnNull); - BackendAllocator.Init(Options.ReleaseToOSIntervalMs); + BackendAllocator.init(Options.ReleaseToOSIntervalMs); AllocatorQuarantine.Init( static_cast<uptr>(Options.QuarantineSizeMb) << 20, static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10); - BackendAllocator.InitCache(&FallbackAllocatorCache); + GlobalPrng.init(); + Cookie = GlobalPrng.getU64(); + BackendAllocator.initCache(&FallbackAllocatorCache); FallbackPrng.init(); - Cookie = FallbackPrng.getU64(); } // Helper function that checks for a valid Scudo chunk. nullptr isn't. bool isValidPointer(const void *UserPtr) { initThreadMaybe(); if (UNLIKELY(!UserPtr)) return false; uptr UserBeg = reinterpret_cast<uptr>(UserPtr); if (!IsAligned(UserBeg, MinAlignment)) return false; return getScudoChunk(UserBeg)->isValid(); } // Allocates a chunk. void *allocate(uptr Size, uptr Alignment, AllocType Type, bool ForceZeroContents = false) { initThreadMaybe(); if (UNLIKELY(Alignment > MaxAlignment)) return FailureHandler::OnBadRequest(); if (UNLIKELY(Alignment < MinAlignment)) Alignment = MinAlignment; if (UNLIKELY(Size >= MaxAllowedMallocSize)) return FailureHandler::OnBadRequest(); if (UNLIKELY(Size == 0)) Size = 1; uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize; uptr AlignedSize = (Alignment > MinAlignment) ? NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize; if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) return FailureHandler::OnBadRequest(); // Primary and Secondary backed allocations have a different treatment. We // deal with alignment requirements of Primary serviced allocations here, // but the Secondary will take care of its own alignment needs. bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment); void *Ptr; u8 Salt; - uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize; - uptr AllocationAlignment = FromPrimary ?
MinAlignment : Alignment; - ScudoThreadContext *ThreadContext = getThreadContextAndLock(); - if (LIKELY(ThreadContext)) { - Salt = getPrng(ThreadContext)->getU8(); - Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext), - AllocationSize, AllocationAlignment, - FromPrimary); - ThreadContext->unlock(); + uptr AllocSize; + if (FromPrimary) { + AllocSize = AlignedSize; + ScudoThreadContext *ThreadContext = getThreadContextAndLock(); + if (LIKELY(ThreadContext)) { + Salt = getPrng(ThreadContext)->getU8(); + Ptr = BackendAllocator.allocatePrimary(getAllocatorCache(ThreadContext), + AllocSize); + ThreadContext->unlock(); + } else { + SpinMutexLock l(&FallbackMutex); + Salt = FallbackPrng.getU8(); + Ptr = BackendAllocator.allocatePrimary(&FallbackAllocatorCache, + AllocSize); + } } else { - SpinMutexLock l(&FallbackMutex); - Salt = FallbackPrng.getU8(); - Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize, - AllocationAlignment, FromPrimary); + { + SpinMutexLock l(&GlobalPrngMutex); + Salt = GlobalPrng.getU8(); + } + AllocSize = NeededSize; + Ptr = BackendAllocator.allocateSecondary(AllocSize, Alignment); } if (UNLIKELY(!Ptr)) return FailureHandler::OnOOM(); // If requested, we will zero out the entire contents of the returned chunk. if ((ForceZeroContents || ZeroContents) && FromPrimary) - memset(Ptr, 0, - BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary)); + memset(Ptr, 0, BackendAllocator.getActuallyAllocatedSize( + Ptr, /*FromPrimary=*/true)); UnpackedHeader Header = {}; uptr AllocBeg = reinterpret_cast<uptr>(Ptr); uptr UserBeg = AllocBeg + AlignedChunkHeaderSize; if (UNLIKELY(!IsAligned(UserBeg, Alignment))) { // Since the Secondary takes care of alignment, a non-aligned pointer // means it is from the Primary. It is also the only case where the offset // field of the header would be non-zero. CHECK(FromPrimary); UserBeg = RoundUpTo(UserBeg, Alignment); uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg; Header.Offset = Offset >> MinAlignmentLog; } - CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize); + CHECK_LE(UserBeg + Size, AllocBeg + AllocSize); Header.State = ChunkAllocated; Header.AllocType = Type; if (FromPrimary) { - Header.FromPrimary = FromPrimary; + Header.FromPrimary = 1; Header.SizeOrUnusedBytes = Size; } else { // The secondary fits the allocations to a page, so the amount of unused // bytes is the difference between the end of the user allocation and the // next page boundary. uptr PageSize = GetPageSizeCached(); uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1); if (TrailingBytes) Header.SizeOrUnusedBytes = PageSize - TrailingBytes; } - Header.Salt = static_cast<u8>(Salt); + Header.Salt = Salt; getScudoChunk(UserBeg)->storeHeader(&Header); void *UserPtr = reinterpret_cast<void *>(UserBeg); // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size); return UserPtr; } // Place a chunk in the quarantine. In the event of a zero-sized quarantine, // we directly deallocate the chunk, otherwise the flow would lead to the // chunk being loaded (and checked) twice, and stored (and checksummed) once, // with no additional security value.
void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header, uptr Size) { bool FromPrimary = Header->FromPrimary; bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0); if (BypassQuarantine) { Chunk->eraseHeader(); void *Ptr = Chunk->getAllocBeg(Header); - ScudoThreadContext *ThreadContext = getThreadContextAndLock(); - if (LIKELY(ThreadContext)) { - getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr, - FromPrimary); - ThreadContext->unlock(); + if (FromPrimary) { + ScudoThreadContext *ThreadContext = getThreadContextAndLock(); + if (LIKELY(ThreadContext)) { + getBackendAllocator().deallocatePrimary( + getAllocatorCache(ThreadContext), Ptr); + ThreadContext->unlock(); + } else { + SpinMutexLock Lock(&FallbackMutex); + getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr); + } } else { - SpinMutexLock Lock(&FallbackMutex); - getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr, - FromPrimary); + getBackendAllocator().deallocateSecondary(Ptr); } } else { UnpackedHeader NewHeader = *Header; NewHeader.State = ChunkQuarantine; Chunk->compareExchangeHeader(&NewHeader, Header); ScudoThreadContext *ThreadContext = getThreadContextAndLock(); if (LIKELY(ThreadContext)) { AllocatorQuarantine.Put(getQuarantineCache(ThreadContext), QuarantineCallback( getAllocatorCache(ThreadContext)), Chunk, Size); ThreadContext->unlock(); } else { SpinMutexLock l(&FallbackMutex); AllocatorQuarantine.Put(&FallbackQuarantineCache, QuarantineCallback(&FallbackAllocatorCache), Chunk, Size); } } } // Deallocates a Chunk, which means adding it to the delayed free list (or // Quarantine). void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) { initThreadMaybe(); // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr); if (UNLIKELY(!UserPtr)) return; uptr UserBeg = reinterpret_cast<uptr>(UserPtr); if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) { dieWithMessage("ERROR: attempted to deallocate a chunk not properly " "aligned at address %p\n", UserPtr); } ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader OldHeader; Chunk->loadHeader(&OldHeader); if (UNLIKELY(OldHeader.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when deallocating address " "%p\n", UserPtr); } if (DeallocationTypeMismatch) { // The deallocation type has to match the allocation one. if (OldHeader.AllocType != Type) { // With the exception of memalign'd chunks, which can still be free'd. if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) { dieWithMessage("ERROR: allocation type mismatch on address %p\n", UserPtr); } } } uptr Size = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes : Chunk->getUsableSize(&OldHeader) - OldHeader.SizeOrUnusedBytes; if (DeleteSizeMismatch) { if (DeleteSize && DeleteSize != Size) { dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n", UserPtr); } } // If a small memory amount was allocated with a larger alignment, we want // to take that into account. Otherwise the Quarantine would be filled with // tiny chunks, taking a lot of VA memory. This is an approximation of the // usable size, that allows us to not call GetActuallyAllocatedSize. uptr LiableSize = Size + (OldHeader.Offset << MinAlignment); quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize); } // Reallocates a chunk. We can save on a new allocation if the new requested // size still fits in the chunk.
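Distilled to its essentials, the in-place test that reallocate() below applies looks like this; an illustrative sketch with simplified names, not the actual Scudo routine:

#include <stdio.h>

// Sketch of the "new size still fits" check used by reallocate() below;
// kMaxSize stands in for SizeClassMap::kMaxSize.
static int CanReallocInPlace(unsigned long UsableSize, unsigned long NewSize,
                             unsigned long kMaxSize) {
  // Stay in place unless more than half of the largest size class would be
  // wasted; otherwise a fresh allocation plus copy is performed.
  return NewSize <= UsableSize && (UsableSize - NewSize) < (kMaxSize / 2);
}

int main(void) {
  const unsigned long kMaxSize = 1UL << 17;  // Assumed largest size class.
  printf("%d\n", CanReallocInPlace(4096, 4000, kMaxSize));     // 1: fits.
  printf("%d\n", CanReallocInPlace(1UL << 20, 64, kMaxSize));  // 0: wasteful.
  return 0;
}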
void *reallocate(void *OldPtr, uptr NewSize) { initThreadMaybe(); uptr UserBeg = reinterpret_cast<uptr>(OldPtr); if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) { dieWithMessage("ERROR: attempted to reallocate a chunk not properly " "aligned at address %p\n", OldPtr); } ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader OldHeader; Chunk->loadHeader(&OldHeader); if (UNLIKELY(OldHeader.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when reallocating address " "%p\n", OldPtr); } if (UNLIKELY(OldHeader.AllocType != FromMalloc)) { dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n", OldPtr); } uptr UsableSize = Chunk->getUsableSize(&OldHeader); // The new size still fits in the current chunk, and the size difference // is reasonable. if (NewSize <= UsableSize && (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) { UnpackedHeader NewHeader = OldHeader; NewHeader.SizeOrUnusedBytes = OldHeader.FromPrimary ? NewSize : UsableSize - NewSize; Chunk->compareExchangeHeader(&NewHeader, &OldHeader); return OldPtr; } // Otherwise, we have to allocate a new chunk and copy the contents of the // old one. void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); if (NewPtr) { uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes : UsableSize - OldHeader.SizeOrUnusedBytes; memcpy(NewPtr, OldPtr, Min(NewSize, OldSize)); quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize); } return NewPtr; } // Helper function that returns the actual usable size of a chunk. uptr getUsableSize(const void *Ptr) { initThreadMaybe(); if (UNLIKELY(!Ptr)) return 0; uptr UserBeg = reinterpret_cast<uptr>(Ptr); ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader Header; Chunk->loadHeader(&Header); // Getting the usable size of a chunk only makes sense if it's allocated.
if (UNLIKELY(Header.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when sizing address %p\n", Ptr); } return Chunk->getUsableSize(&Header); } void *calloc(uptr NMemB, uptr Size) { initThreadMaybe(); - if (CheckForCallocOverflow(NMemB, Size)) + if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) return FailureHandler::OnBadRequest(); return allocate(NMemB * Size, MinAlignment, FromMalloc, true); } void commitBack(ScudoThreadContext *ThreadContext) { AllocatorCache *Cache = getAllocatorCache(ThreadContext); AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext), QuarantineCallback(Cache)); - BackendAllocator.DestroyCache(Cache); + BackendAllocator.destroyCache(Cache); } uptr getStats(AllocatorStat StatType) { initThreadMaybe(); uptr stats[AllocatorStatCount]; - BackendAllocator.GetStats(stats); + BackendAllocator.getStats(stats); return stats[StatType]; } }; static ScudoAllocator Instance(LINKER_INITIALIZED); static ScudoBackendAllocator &getBackendAllocator() { return Instance.BackendAllocator; } static void initScudoInternal(const AllocatorOptions &Options) { Instance.init(Options); } void ScudoThreadContext::init() { - getBackendAllocator().InitCache(&Cache); + getBackendAllocator().initCache(&Cache); Prng.init(); memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder)); } void ScudoThreadContext::commitBack() { Instance.commitBack(this); } void *scudoMalloc(uptr Size, AllocType Type) { - return Instance.allocate(Size, MinAlignment, Type); + return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type)); } void scudoFree(void *Ptr, AllocType Type) { Instance.deallocate(Ptr, 0, Type); } void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) { Instance.deallocate(Ptr, Size, Type); } void *scudoRealloc(void *Ptr, uptr Size) { if (!Ptr) - return Instance.allocate(Size, MinAlignment, FromMalloc); + return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc)); if (Size == 0) { Instance.deallocate(Ptr, 0, FromMalloc); return nullptr; } - return Instance.reallocate(Ptr, Size); + return SetErrnoOnNull(Instance.reallocate(Ptr, Size)); } void *scudoCalloc(uptr NMemB, uptr Size) { - return Instance.calloc(NMemB, Size); + return SetErrnoOnNull(Instance.calloc(NMemB, Size)); } void *scudoValloc(uptr Size) { - return Instance.allocate(Size, GetPageSizeCached(), FromMemalign); + return SetErrnoOnNull( + Instance.allocate(Size, GetPageSizeCached(), FromMemalign)); } void *scudoPvalloc(uptr Size) { uptr PageSize = GetPageSizeCached(); - Size = RoundUpTo(Size, PageSize); - if (Size == 0) { - // pvalloc(0) should allocate one page. - Size = PageSize; - } - return Instance.allocate(Size, PageSize, FromMemalign); + // pvalloc(0) should allocate one page. + Size = Size ? 
RoundUpTo(Size, PageSize) : PageSize; + return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign)); } void *scudoMemalign(uptr Alignment, uptr Size) { - if (UNLIKELY(!IsPowerOfTwo(Alignment))) + if (UNLIKELY(!IsPowerOfTwo(Alignment))) { + errno = errno_EINVAL; return ScudoAllocator::FailureHandler::OnBadRequest(); - return Instance.allocate(Size, Alignment, FromMemalign); + } + return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign)); } int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) { - if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Alignment % sizeof(void *)) != 0)) { - *MemPtr = ScudoAllocator::FailureHandler::OnBadRequest(); - return EINVAL; + if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) { + ScudoAllocator::FailureHandler::OnBadRequest(); + return errno_EINVAL; } - *MemPtr = Instance.allocate(Size, Alignment, FromMemalign); - if (!*MemPtr) - return ENOMEM; + void *Ptr = Instance.allocate(Size, Alignment, FromMemalign); + if (UNLIKELY(!Ptr)) + return errno_ENOMEM; + *MemPtr = Ptr; return 0; } void *scudoAlignedAlloc(uptr Alignment, uptr Size) { - // Alignment must be a power of 2, Size must be a multiple of Alignment. - if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Size & (Alignment - 1)) != 0)) + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) { + errno = errno_EINVAL; return ScudoAllocator::FailureHandler::OnBadRequest(); - return Instance.allocate(Size, Alignment, FromMalloc); + } + return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc)); } uptr scudoMallocUsableSize(void *Ptr) { return Instance.getUsableSize(Ptr); } } // namespace __scudo using namespace __scudo; // MallocExtension helper functions uptr __sanitizer_get_current_allocated_bytes() { return Instance.getStats(AllocatorStatAllocated); } uptr __sanitizer_get_heap_size() { return Instance.getStats(AllocatorStatMapped); } uptr __sanitizer_get_free_bytes() { return 1; } uptr __sanitizer_get_unmapped_bytes() { return 1; } uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } int __sanitizer_get_ownership(const void *Ptr) { return Instance.isValidPointer(Ptr); } uptr __sanitizer_get_allocated_size(const void *Ptr) { return Instance.getUsableSize(Ptr); } diff --git a/lib/scudo/scudo_allocator_combined.h b/lib/scudo/scudo_allocator_combined.h index 818272868880..7599c12abb6d 100644 --- a/lib/scudo/scudo_allocator_combined.h +++ b/lib/scudo/scudo_allocator_combined.h @@ -1,70 +1,76 @@ //===-- scudo_allocator_combined.h ------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Scudo Combined Allocator, dispatches allocation & deallocation requests to /// the Primary or the Secondary backend allocators. /// //===----------------------------------------------------------------------===// #ifndef SCUDO_ALLOCATOR_COMBINED_H_ #define SCUDO_ALLOCATOR_COMBINED_H_ #ifndef SCUDO_ALLOCATOR_H_ #error "This file must be included inside scudo_allocator.h." 
#endif template <class PrimaryAllocator, class AllocatorCache, class SecondaryAllocator> class ScudoCombinedAllocator { public: - void Init(s32 ReleaseToOSIntervalMs) { + void init(s32 ReleaseToOSIntervalMs) { Primary.Init(ReleaseToOSIntervalMs); Secondary.Init(); Stats.Init(); } - void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment, - bool FromPrimary) { - if (FromPrimary) - return Cache->Allocate(&Primary, Primary.ClassID(Size)); + // Primary allocations are always MinAlignment aligned, and as such do not + // require an Alignment parameter. + void *allocatePrimary(AllocatorCache *Cache, uptr Size) { + return Cache->Allocate(&Primary, Primary.ClassID(Size)); + } + + // Secondary allocations do not require a Cache, but do require an Alignment + // parameter. + void *allocateSecondary(uptr Size, uptr Alignment) { return Secondary.Allocate(&Stats, Size, Alignment); } - void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) { - if (FromPrimary) - Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr); - else - Secondary.Deallocate(&Stats, Ptr); + void deallocatePrimary(AllocatorCache *Cache, void *Ptr) { + Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr); + } + + void deallocateSecondary(void *Ptr) { + Secondary.Deallocate(&Stats, Ptr); } - uptr GetActuallyAllocatedSize(void *Ptr, bool FromPrimary) { + uptr getActuallyAllocatedSize(void *Ptr, bool FromPrimary) { if (FromPrimary) return PrimaryAllocator::ClassIdToSize(Primary.GetSizeClass(Ptr)); return Secondary.GetActuallyAllocatedSize(Ptr); } - void InitCache(AllocatorCache *Cache) { + void initCache(AllocatorCache *Cache) { Cache->Init(&Stats); } - void DestroyCache(AllocatorCache *Cache) { + void destroyCache(AllocatorCache *Cache) { Cache->Destroy(&Primary, &Stats); } - void GetStats(AllocatorStatCounters StatType) const { + void getStats(AllocatorStatCounters StatType) const { Stats.Get(StatType); } private: PrimaryAllocator Primary; SecondaryAllocator Secondary; AllocatorGlobalStats Stats; }; #endif // SCUDO_ALLOCATOR_COMBINED_H_ diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc index 9ee9104283f8..ef984a45cd9d 100644 --- a/lib/tsan/rtl/tsan_clock.cc +++ b/lib/tsan/rtl/tsan_clock.cc @@ -1,426 +1,598 @@ //===-- tsan_clock.cc -----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #include "tsan_clock.h" #include "tsan_rtl.h" #include "sanitizer_common/sanitizer_placement_new.h" // SyncClock and ThreadClock implement vector clocks for sync variables // (mutexes, atomic variables, file descriptors, etc) and threads, respectively. // ThreadClock contains fixed-size vector clock for maximum number of threads. // SyncClock contains growable vector clock for currently necessary number of // threads.
// Together they implement a very simple model of operations, namely: // // void ThreadClock::acquire(const SyncClock *src) { // for (int i = 0; i < kMaxThreads; i++) // clock[i] = max(clock[i], src->clock[i]); // } // // void ThreadClock::release(SyncClock *dst) const { // for (int i = 0; i < kMaxThreads; i++) // dst->clock[i] = max(dst->clock[i], clock[i]); // } // // void ThreadClock::ReleaseStore(SyncClock *dst) const { // for (int i = 0; i < kMaxThreads; i++) // dst->clock[i] = clock[i]; // } // // void ThreadClock::acq_rel(SyncClock *dst) { // acquire(dst); // release(dst); // } // // Conformance to this model is extensively verified in tsan_clock_test.cc. // However, the implementation is significantly more complex. The complexity // allows implementing important classes of use cases in O(1) instead of O(N). // // The use cases are: // 1. Singleton/once atomic that has a single release-store operation followed // by zillions of acquire-loads (the acquire-load is O(1)). // 2. Thread-local mutex (both lock and unlock can be O(1)). // 3. Leaf mutex (unlock is O(1)). // 4. A mutex shared by 2 threads (both lock and unlock can be O(1)). // 5. An atomic with a single writer (writes can be O(1)). // The implementation dynamically adapts to the workload. So if an atomic is in // read-only phase, these reads will be O(1); if it later switches to read/write // phase, the implementation will correctly handle that by switching to O(N). // // Thread-safety note: all const operations on SyncClock's are conducted under // a shared lock; all non-const operations on SyncClock's are conducted under // an exclusive lock; ThreadClock's are private to respective threads and so // do not need any protection. // -// Description of ThreadClock state: -// clk_ - fixed size vector clock. -// nclk_ - effective size of the vector clock (the rest is zeros). -// tid_ - index of the thread associated with he clock ("current thread"). -// last_acquire_ - current thread time when it acquired something from -// other threads. -// // Description of SyncClock state: // clk_ - variable size vector clock, low kClkBits hold timestamp, // the remaining bits hold "acquired" flag (the actual value is thread's // reused counter); // if acquired == thr->reused_, then the respective thread has already -// acquired this clock (except possibly dirty_tids_). -// dirty_tids_ - holds up to two indeces in the vector clock that other threads +// acquired this clock (except possibly for dirty elements). +// dirty_ - holds up to two indices in the vector clock that other threads // need to acquire regardless of "acquired" flag value; // release_store_tid_ - denotes that the clock state is a result of // release-store operation by the thread with release_store_tid_ index. // release_store_reused_ - reuse count of release_store_tid_. // We don't have ThreadState in these methods, so this is an ugly hack that // works only in C++. #if !SANITIZER_GO # define CPP_STAT_INC(typ) StatInc(cur_thread(), typ) #else # define CPP_STAT_INC(typ) (void)0 #endif namespace __tsan { +static atomic_uint32_t *ref_ptr(ClockBlock *cb) { + return reinterpret_cast<atomic_uint32_t *>(&cb->table[ClockBlock::kRefIdx]); +} + +// Drop reference to the first level block idx.
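Before the reference-counted machinery that follows (starting with the UnrefClockBlock helper announced above), the naive O(N) model from the comment block can be written out directly. This demo shrinks kMaxThreads to 4 and uses plain C++ types, purely for illustration:

#include <algorithm>
#include <cstdint>
#include <cstdio>

const int kMaxThreads = 4;

struct NaiveSyncClock { uint64_t clock[kMaxThreads] = {}; };

struct NaiveThreadClock {
  uint64_t clock[kMaxThreads] = {};
  // acquire: pull every component up to the sync object's value.
  void acquire(const NaiveSyncClock *src) {
    for (int i = 0; i < kMaxThreads; i++)
      clock[i] = std::max(clock[i], src->clock[i]);
  }
  // release: push every component of this thread into the sync object.
  void release(NaiveSyncClock *dst) const {
    for (int i = 0; i < kMaxThreads; i++)
      dst->clock[i] = std::max(dst->clock[i], clock[i]);
  }
};

int main() {
  NaiveThreadClock t0, t1;
  NaiveSyncClock mutex;
  t0.clock[0] = 5;     // Thread 0 has reached epoch 5...
  t0.release(&mutex);  // ...and releases the mutex.
  t1.acquire(&mutex);  // Thread 1 acquires and learns thread 0's epoch.
  std::printf("t1 sees t0 at epoch %llu\n", (unsigned long long)t1.clock[0]);
  return 0;
}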
+static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) { + ClockBlock *cb = ctx->clock_alloc.Map(idx); + atomic_uint32_t *ref = ref_ptr(cb); + u32 v = atomic_load(ref, memory_order_acquire); + for (;;) { + CHECK_GT(v, 0); + if (v == 1) + break; + if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel)) + return; + } + // First level block owns second level blocks, so free them as well. + for (uptr i = 0; i < blocks; i++) + ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]); + ctx->clock_alloc.Free(c, idx); +} + ThreadClock::ThreadClock(unsigned tid, unsigned reused) : tid_(tid) - , reused_(reused + 1) { // 0 has special meaning + , reused_(reused + 1) // 0 has special meaning + , cached_idx_() + , cached_size_() + , cached_blocks_() { CHECK_LT(tid, kMaxTidInClock); CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits); nclk_ = tid_ + 1; last_acquire_ = 0; internal_memset(clk_, 0, sizeof(clk_)); - clk_[tid_].reused = reused_; } void ThreadClock::ResetCached(ClockCache *c) { + if (cached_idx_) { + UnrefClockBlock(c, cached_idx_, cached_blocks_); + cached_idx_ = 0; + cached_size_ = 0; + cached_blocks_ = 0; + } } -void ThreadClock::acquire(ClockCache *c, const SyncClock *src) { +void ThreadClock::acquire(ClockCache *c, SyncClock *src) { DCHECK_LE(nclk_, kMaxTid); DCHECK_LE(src->size_, kMaxTid); CPP_STAT_INC(StatClockAcquire); // Check if it's empty -> no need to do anything. const uptr nclk = src->size_; if (nclk == 0) { CPP_STAT_INC(StatClockAcquireEmpty); return; } - // Check if we've already acquired src after the last release operation on src bool acquired = false; - if (nclk > tid_) { - if (src->elem(tid_).reused == reused_) { - for (unsigned i = 0; i < kDirtyTids; i++) { - unsigned tid = src->dirty_tids_[i]; - if (tid != kInvalidTid) { - u64 epoch = src->elem(tid).epoch; - if (clk_[tid].epoch < epoch) { - clk_[tid].epoch = epoch; - acquired = true; - } - } - } - if (acquired) { - CPP_STAT_INC(StatClockAcquiredSomething); - last_acquire_ = clk_[tid_].epoch; + for (unsigned i = 0; i < kDirtyTids; i++) { + SyncClock::Dirty dirty = src->dirty_[i]; + unsigned tid = dirty.tid; + if (tid != kInvalidTid) { + if (clk_[tid] < dirty.epoch) { + clk_[tid] = dirty.epoch; + acquired = true; } - return; } } - // O(N) acquire. - CPP_STAT_INC(StatClockAcquireFull); - nclk_ = max(nclk_, nclk); - for (uptr i = 0; i < nclk; i++) { - u64 epoch = src->elem(i).epoch; - if (clk_[i].epoch < epoch) { - clk_[i].epoch = epoch; - acquired = true; + // Check if we've already acquired src after the last release operation on src + if (tid_ >= nclk || src->elem(tid_).reused != reused_) { + // O(N) acquire. + CPP_STAT_INC(StatClockAcquireFull); + nclk_ = max(nclk_, nclk); + u64 *dst_pos = &clk_[0]; + for (ClockElem &src_elem : *src) { + u64 epoch = src_elem.epoch; + if (*dst_pos < epoch) { + *dst_pos = epoch; + acquired = true; + } + dst_pos++; } - } - // Remember that this thread has acquired this clock. - if (nclk > tid_) - src->elem(tid_).reused = reused_; + // Remember that this thread has acquired this clock.
+ if (nclk > tid_) + src->elem(tid_).reused = reused_; + } if (acquired) { CPP_STAT_INC(StatClockAcquiredSomething); - last_acquire_ = clk_[tid_].epoch; + last_acquire_ = clk_[tid_]; + ResetCached(c); } } -void ThreadClock::release(ClockCache *c, SyncClock *dst) const { +void ThreadClock::release(ClockCache *c, SyncClock *dst) { DCHECK_LE(nclk_, kMaxTid); DCHECK_LE(dst->size_, kMaxTid); if (dst->size_ == 0) { // ReleaseStore will correctly set release_store_tid_, // which can be important for future operations. ReleaseStore(c, dst); return; } CPP_STAT_INC(StatClockRelease); // Check if we need to resize dst. if (dst->size_ < nclk_) dst->Resize(c, nclk_); // Check if we had not acquired anything from other threads // since the last release on dst. If so, we need to update // only dst->elem(tid_). if (dst->elem(tid_).epoch > last_acquire_) { - UpdateCurrentThread(dst); + UpdateCurrentThread(c, dst); if (dst->release_store_tid_ != tid_ || dst->release_store_reused_ != reused_) dst->release_store_tid_ = kInvalidTid; return; } // O(N) release. CPP_STAT_INC(StatClockReleaseFull); + dst->Unshare(c); // First, remember whether we've acquired dst. bool acquired = IsAlreadyAcquired(dst); if (acquired) CPP_STAT_INC(StatClockReleaseAcquired); // Update dst->clk_. - for (uptr i = 0; i < nclk_; i++) { - ClockElem &ce = dst->elem(i); - ce.epoch = max(ce.epoch, clk_[i].epoch); + dst->FlushDirty(); + uptr i = 0; + for (ClockElem &ce : *dst) { + ce.epoch = max(ce.epoch, clk_[i]); ce.reused = 0; + i++; } // Clear 'acquired' flag in the remaining elements. if (nclk_ < dst->size_) CPP_STAT_INC(StatClockReleaseClearTail); for (uptr i = nclk_; i < dst->size_; i++) dst->elem(i).reused = 0; - for (unsigned i = 0; i < kDirtyTids; i++) - dst->dirty_tids_[i] = kInvalidTid; dst->release_store_tid_ = kInvalidTid; dst->release_store_reused_ = 0; // If we've acquired dst, remember this fact, // so that we don't need to acquire it on next acquire. if (acquired) dst->elem(tid_).reused = reused_; } -void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const { +void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) { DCHECK_LE(nclk_, kMaxTid); DCHECK_LE(dst->size_, kMaxTid); CPP_STAT_INC(StatClockStore); + if (dst->size_ == 0 && cached_idx_ != 0) { + // Reuse the cached clock. + // Note: we could reuse/cache the cached clock in more cases: + // we could update the existing clock and cache it, or replace it with the + // currently cached clock and release the old one. And for a shared + // existing clock, we could replace it with the currently cached; + // or unshare, update and cache. But, for simplicity, we currently reuse + // cached clock only when the target clock is empty. + dst->tab_ = ctx->clock_alloc.Map(cached_idx_); + dst->tab_idx_ = cached_idx_; + dst->size_ = cached_size_; + dst->blocks_ = cached_blocks_; + CHECK_EQ(dst->dirty_[0].tid, kInvalidTid); + // The cached clock is shared (immutable), + // so this is where we store the current clock. + dst->dirty_[0].tid = tid_; + dst->dirty_[0].epoch = clk_[tid_]; + dst->release_store_tid_ = tid_; + dst->release_store_reused_ = reused_; + // Remember that we don't need to acquire it in the future. + dst->elem(tid_).reused = reused_; + // Grab a reference. + atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed); + return; + } + // Check if we need to resize dst.
if (dst->size_ < nclk_) dst->Resize(c, nclk_); if (dst->release_store_tid_ == tid_ && dst->release_store_reused_ == reused_ && dst->elem(tid_).epoch > last_acquire_) { CPP_STAT_INC(StatClockStoreFast); - UpdateCurrentThread(dst); + UpdateCurrentThread(c, dst); return; } // O(N) release-store. CPP_STAT_INC(StatClockStoreFull); - for (uptr i = 0; i < nclk_; i++) { - ClockElem &ce = dst->elem(i); - ce.epoch = clk_[i].epoch; + dst->Unshare(c); + // Note: dst can be larger than this ThreadClock. + // This is fine since clk_ beyond size is all zeros. + uptr i = 0; + for (ClockElem &ce : *dst) { + ce.epoch = clk_[i]; ce.reused = 0; + i++; } - // Clear the tail of dst->clk_. - if (nclk_ < dst->size_) { - for (uptr i = nclk_; i < dst->size_; i++) { - ClockElem &ce = dst->elem(i); - ce.epoch = 0; - ce.reused = 0; - } - CPP_STAT_INC(StatClockStoreTail); - } - for (unsigned i = 0; i < kDirtyTids; i++) - dst->dirty_tids_[i] = kInvalidTid; + for (uptr i = 0; i < kDirtyTids; i++) + dst->dirty_[i].tid = kInvalidTid; dst->release_store_tid_ = tid_; dst->release_store_reused_ = reused_; // Remember that we don't need to acquire it in the future. dst->elem(tid_).reused = reused_; + + // If the resulting clock is cachable, cache it for future release operations. + // The clock is always cachable if we released to an empty sync object. + if (cached_idx_ == 0 && dst->Cachable()) { + // Grab a reference to the ClockBlock. + atomic_uint32_t *ref = ref_ptr(dst->tab_); + if (atomic_load(ref, memory_order_acquire) == 1) + atomic_store_relaxed(ref, 2); + else + atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed); + cached_idx_ = dst->tab_idx_; + cached_size_ = dst->size_; + cached_blocks_ = dst->blocks_; + } } void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) { CPP_STAT_INC(StatClockAcquireRelease); acquire(c, dst); ReleaseStore(c, dst); } // Updates only single element related to the current thread in dst->clk_. -void ThreadClock::UpdateCurrentThread(SyncClock *dst) const { +void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const { // Update the threads time, but preserve 'acquired' flag. - dst->elem(tid_).epoch = clk_[tid_].epoch; for (unsigned i = 0; i < kDirtyTids; i++) { - if (dst->dirty_tids_[i] == tid_) { + SyncClock::Dirty *dirty = &dst->dirty_[i]; + const unsigned tid = dirty->tid; + if (tid == tid_ || tid == kInvalidTid) { CPP_STAT_INC(StatClockReleaseFast); - return; - } - if (dst->dirty_tids_[i] == kInvalidTid) { - CPP_STAT_INC(StatClockReleaseFast); - dst->dirty_tids_[i] = tid_; + dirty->tid = tid_; + dirty->epoch = clk_[tid_]; return; } } // Reset all 'acquired' flags, O(N). + // We are going to touch dst elements, so we need to unshare it. + dst->Unshare(c); CPP_STAT_INC(StatClockReleaseSlow); + dst->elem(tid_).epoch = clk_[tid_]; for (uptr i = 0; i < dst->size_; i++) dst->elem(i).reused = 0; - for (unsigned i = 0; i < kDirtyTids; i++) - dst->dirty_tids_[i] = kInvalidTid; + dst->FlushDirty(); } -// Checks whether the current threads has already acquired src. +// Checks whether the current thread has already acquired src.
bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const { if (src->elem(tid_).reused != reused_) return false; for (unsigned i = 0; i < kDirtyTids; i++) { - unsigned tid = src->dirty_tids_[i]; - if (tid != kInvalidTid) { - if (clk_[tid].epoch < src->elem(tid).epoch) + SyncClock::Dirty dirty = src->dirty_[i]; + if (dirty.tid != kInvalidTid) { + if (clk_[dirty.tid] < dirty.epoch) return false; } } return true; } // Sets a single element in the vector clock. // This function is called only from weird places like AcquireGlobal. void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) { DCHECK_LT(tid, kMaxTid); - DCHECK_GE(v, clk_[tid].epoch); - clk_[tid].epoch = v; + DCHECK_GE(v, clk_[tid]); + clk_[tid] = v; if (nclk_ <= tid) nclk_ = tid + 1; - last_acquire_ = clk_[tid_].epoch; + last_acquire_ = clk_[tid_]; + ResetCached(c); } void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) { printf("clock=["); for (uptr i = 0; i < nclk_; i++) - printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch); - printf("] reused=["); - for (uptr i = 0; i < nclk_; i++) - printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused); - printf("] tid=%u/%u last_acq=%llu", - tid_, reused_, last_acquire_); + printf("%s%llu", i == 0 ? "" : ",", clk_[i]); + printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_); } SyncClock::SyncClock() { ResetImpl(); } SyncClock::~SyncClock() { // Reset must be called before dtor. CHECK_EQ(size_, 0); + CHECK_EQ(blocks_, 0); CHECK_EQ(tab_, 0); CHECK_EQ(tab_idx_, 0); } void SyncClock::Reset(ClockCache *c) { - if (size_ == 0) { - // nothing - } else if (size_ <= ClockBlock::kClockCount) { - // One-level table. - ctx->clock_alloc.Free(c, tab_idx_); - } else { - // Two-level table. - for (uptr i = 0; i < size_; i += ClockBlock::kClockCount) - ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]); - ctx->clock_alloc.Free(c, tab_idx_); - } + if (size_) + UnrefClockBlock(c, tab_idx_, blocks_); ResetImpl(); } void SyncClock::ResetImpl() { tab_ = 0; tab_idx_ = 0; size_ = 0; + blocks_ = 0; release_store_tid_ = kInvalidTid; release_store_reused_ = 0; for (uptr i = 0; i < kDirtyTids; i++) - dirty_tids_[i] = kInvalidTid; + dirty_[i].tid = kInvalidTid; } void SyncClock::Resize(ClockCache *c, uptr nclk) { CPP_STAT_INC(StatClockReleaseResize); - if (RoundUpTo(nclk, ClockBlock::kClockCount) <= - RoundUpTo(size_, ClockBlock::kClockCount)) { - // Growing within the same block. + Unshare(c); + if (nclk <= capacity()) { // Memory is already allocated, just increase the size. size_ = nclk; return; } - if (nclk <= ClockBlock::kClockCount) { + if (size_ == 0) { // Grow from 0 to one-level table. CHECK_EQ(size_, 0); + CHECK_EQ(blocks_, 0); CHECK_EQ(tab_, 0); CHECK_EQ(tab_idx_, 0); - size_ = nclk; - tab_idx_ = ctx->clock_alloc.Alloc(c); - tab_ = ctx->clock_alloc.Map(tab_idx_); - internal_memset(tab_, 0, sizeof(*tab_)); - return; - } - // Growing two-level table. - if (size_ == 0) { - // Allocate first level table. - tab_idx_ = ctx->clock_alloc.Alloc(c); - tab_ = ctx->clock_alloc.Map(tab_idx_); - internal_memset(tab_, 0, sizeof(*tab_)); - } else if (size_ <= ClockBlock::kClockCount) { - // Transform one-level table to two-level table. 
- u32 old = tab_idx_; tab_idx_ = ctx->clock_alloc.Alloc(c); tab_ = ctx->clock_alloc.Map(tab_idx_); internal_memset(tab_, 0, sizeof(*tab_)); - tab_->table[0] = old; + atomic_store_relaxed(ref_ptr(tab_), 1); + size_ = 1; + } else if (size_ > blocks_ * ClockBlock::kClockCount) { + u32 idx = ctx->clock_alloc.Alloc(c); + ClockBlock *new_cb = ctx->clock_alloc.Map(idx); + uptr top = size_ - blocks_ * ClockBlock::kClockCount; + CHECK_LT(top, ClockBlock::kClockCount); + const uptr move = top * sizeof(tab_->clock[0]); + internal_memcpy(&new_cb->clock[0], tab_->clock, move); + internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move); + internal_memset(tab_->clock, 0, move); + append_block(idx); } - // At this point we have first level table allocated. + // At this point we have first level table allocated and all clock elements + // are evacuated from it to a second level block. // Add second level tables as necessary. - for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount); - i < nclk; i += ClockBlock::kClockCount) { + while (nclk > capacity()) { u32 idx = ctx->clock_alloc.Alloc(c); ClockBlock *cb = ctx->clock_alloc.Map(idx); internal_memset(cb, 0, sizeof(*cb)); - CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0); - tab_->table[i/ClockBlock::kClockCount] = idx; + append_block(idx); } size_ = nclk; } -ClockElem &SyncClock::elem(unsigned tid) const { +// Flushes all dirty elements into the main clock array. +void SyncClock::FlushDirty() { + for (unsigned i = 0; i < kDirtyTids; i++) { + Dirty *dirty = &dirty_[i]; + if (dirty->tid != kInvalidTid) { + CHECK_LT(dirty->tid, size_); + elem(dirty->tid).epoch = dirty->epoch; + dirty->tid = kInvalidTid; + } + } +} + +bool SyncClock::IsShared() const { + if (size_ == 0) + return false; + atomic_uint32_t *ref = ref_ptr(tab_); + u32 v = atomic_load(ref, memory_order_acquire); + CHECK_GT(v, 0); + return v > 1; +} + +// Unshares the current clock if it's shared. +// Shared clocks are immutable, so they need to be unshared before any updates. +// Note: this does not apply to dirty entries as they are not shared. +void SyncClock::Unshare(ClockCache *c) { + if (!IsShared()) + return; + // First, copy current state into old. + SyncClock old; + old.tab_ = tab_; + old.tab_idx_ = tab_idx_; + old.size_ = size_; + old.blocks_ = blocks_; + old.release_store_tid_ = release_store_tid_; + old.release_store_reused_ = release_store_reused_; + for (unsigned i = 0; i < kDirtyTids; i++) + old.dirty_[i] = dirty_[i]; + // Then, clear current object. + ResetImpl(); + // Allocate brand new clock in the current object. + Resize(c, old.size_); + // Now copy state back into this object. + Iter old_iter(&old); + for (ClockElem &ce : *this) { + ce = *old_iter; + ++old_iter; + } + release_store_tid_ = old.release_store_tid_; + release_store_reused_ = old.release_store_reused_; + for (unsigned i = 0; i < kDirtyTids; i++) + dirty_[i] = old.dirty_[i]; + // Drop reference to old and delete if necessary. + old.Reset(c); +} + +// Can we cache this clock for future release operations? +ALWAYS_INLINE bool SyncClock::Cachable() const { + if (size_ == 0) + return false; + for (unsigned i = 0; i < kDirtyTids; i++) { + if (dirty_[i].tid != kInvalidTid) + return false; + } + return atomic_load_relaxed(ref_ptr(tab_)) == 1; +} + +// elem linearizes the two-level structure into linear array. +// Note: this is used only for one time accesses, vector operations use +// the iterator as it is much faster. 
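Before the real elem() below, the two-level index split is easy to sanity-check in isolation; the sketch assumes ClockBlock::kClockCount == 64, matching the 512-byte block layout described in the header:

#include <stdio.h>

int main(void) {
  const unsigned kClockCount = 64;  // Assumed ClockElem slots per block.
  const unsigned blocks = 2;        // Two full second-level blocks in use.
  const unsigned tids[] = {3, 63, 64, 130};
  for (unsigned i = 0; i < sizeof(tids) / sizeof(tids[0]); i++) {
    unsigned tid = tids[i];
    unsigned block = tid / kClockCount;  // Which block holds the element.
    unsigned slot = tid % kClockCount;   // Position inside that block.
    // block == blocks means the element lives in the first-level block's
    // spare clock slots (clk128, clk129, ... in the diagram).
    printf("tid %3u -> block %u slot %2u%s\n", tid, block, slot,
           block == blocks ? " (first-level spare slots)" : "");
  }
  return 0;
}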
+ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const { DCHECK_LT(tid, size_); - if (size_ <= ClockBlock::kClockCount) + const uptr block = tid / ClockBlock::kClockCount; + DCHECK_LE(block, blocks_); + tid %= ClockBlock::kClockCount; + if (block == blocks_) return tab_->clock[tid]; - u32 idx = tab_->table[tid / ClockBlock::kClockCount]; + u32 idx = get_block(block); ClockBlock *cb = ctx->clock_alloc.Map(idx); - return cb->clock[tid % ClockBlock::kClockCount]; + return cb->clock[tid]; +} + +ALWAYS_INLINE uptr SyncClock::capacity() const { + if (size_ == 0) + return 0; + uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]); + // How many clock elements we can fit into the first level block. + // +1 for ref counter. + uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio; + return blocks_ * ClockBlock::kClockCount + top; +} + +ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const { + DCHECK(size_); + DCHECK_LT(bi, blocks_); + return tab_->table[ClockBlock::kBlockIdx - bi]; +} + +ALWAYS_INLINE void SyncClock::append_block(u32 idx) { + uptr bi = blocks_++; + CHECK_EQ(get_block(bi), 0); + tab_->table[ClockBlock::kBlockIdx - bi] = idx; +} + +// Used only by tests. +u64 SyncClock::get(unsigned tid) const { + for (unsigned i = 0; i < kDirtyTids; i++) { + Dirty dirty = dirty_[i]; + if (dirty.tid == tid) + return dirty.epoch; + } + return elem(tid).epoch; +} + +// Used only by Iter test. +u64 SyncClock::get_clean(unsigned tid) const { + return elem(tid).epoch; } void SyncClock::DebugDump(int(*printf)(const char *s, ...)) { printf("clock=["); for (uptr i = 0; i < size_; i++) printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch); printf("] reused=["); for (uptr i = 0; i < size_; i++) printf("%s%llu", i == 0 ? "" : ",", elem(i).reused); - printf("] release_store_tid=%d/%d dirty_tids=%d/%d", + printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]", release_store_tid_, release_store_reused_, - dirty_tids_[0], dirty_tids_[1]); + dirty_[0].tid, dirty_[0].epoch, + dirty_[1].tid, dirty_[1].epoch); +} + +void SyncClock::Iter::Next() { + // Finished with the current block, move on to the next one. + block_++; + if (block_ < parent_->blocks_) { + // Iterate over the next second level block. + u32 idx = parent_->get_block(block_); + ClockBlock *cb = ctx->clock_alloc.Map(idx); + pos_ = &cb->clock[0]; + end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount, + ClockBlock::kClockCount); + return; + } + if (block_ == parent_->blocks_ && + parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) { + // Iterate over elements in the first level block. + pos_ = &parent_->tab_->clock[0]; + end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount, + ClockBlock::kClockCount); + return; + } + parent_ = nullptr; // denotes end } } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_clock.h b/lib/tsan/rtl/tsan_clock.h index 378b550fd11b..a891d7bbd889 100644 --- a/lib/tsan/rtl/tsan_clock.h +++ b/lib/tsan/rtl/tsan_clock.h @@ -1,131 +1,226 @@ //===-- tsan_clock.h --------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
 //
 //===----------------------------------------------------------------------===//
 #ifndef TSAN_CLOCK_H
 #define TSAN_CLOCK_H
 
 #include "tsan_defs.h"
 #include "tsan_dense_alloc.h"
 
 namespace __tsan {
 
-struct ClockElem {
-  u64 epoch : kClkBits;
-  u64 reused : 64 - kClkBits;
-};
-
-struct ClockBlock {
-  static const uptr kSize = 512;
-  static const uptr kTableSize = kSize / sizeof(u32);
-  static const uptr kClockCount = kSize / sizeof(ClockElem);
-
-  union {
-    u32 table[kTableSize];
-    ClockElem clock[kClockCount];
-  };
-
-  ClockBlock() {
-  }
-};
-
 typedef DenseSlabAlloc ClockAlloc;
 typedef DenseSlabAllocCache ClockCache;
 
 // The clock that lives in sync variables (mutexes, atomics, etc).
 class SyncClock {
  public:
   SyncClock();
   ~SyncClock();
 
-  uptr size() const {
-    return size_;
-  }
+  uptr size() const;
 
-  u64 get(unsigned tid) const {
-    return elem(tid).epoch;
-  }
+  // These are used only in tests.
+  u64 get(unsigned tid) const;
+  u64 get_clean(unsigned tid) const;
 
   void Resize(ClockCache *c, uptr nclk);
   void Reset(ClockCache *c);
 
   void DebugDump(int(*printf)(const char *s, ...));
 
+  // Clock element iterator.
+  // Note: it iterates only over the table without regard to dirty entries.
+  class Iter {
+   public:
+    explicit Iter(SyncClock* parent);
+    Iter& operator++();
+    bool operator!=(const Iter& other);
+    ClockElem &operator*();
+
+   private:
+    SyncClock *parent_;
+    // [pos_, end_) is the current continuous range of clock elements.
+    ClockElem *pos_;
+    ClockElem *end_;
+    int block_;  // Index of the current second-level block.
+
+    NOINLINE void Next();
+  };
+
+  Iter begin();
+  Iter end();
+
  private:
-  friend struct ThreadClock;
+  friend class ThreadClock;
+  friend class Iter;
   static const uptr kDirtyTids = 2;
+
+  struct Dirty {
+    u64 epoch : kClkBits;
+    u64 tid : 64 - kClkBits;  // kInvalidTid if not active
+  };
+
   unsigned release_store_tid_;
   unsigned release_store_reused_;
-  unsigned dirty_tids_[kDirtyTids];
-  // tab_ contains indirect pointer to a 512b block using DenseSlabAlloc.
-  // If size_ <= 64, then tab_ points to an array with 64 ClockElem's.
-  // Otherwise, tab_ points to an array with 128 u32 elements,
+  Dirty dirty_[kDirtyTids];
+  // If size_ is 0, tab_ is nullptr.
+  // If size_ <= 64 (kClockCount), tab_ contains a pointer to an array with
+  // 64 ClockElem's (ClockBlock::clock).
+  // Otherwise, tab_ points to an array with up to 127 u32 elements,
+  // each pointing to a second-level 512b block with 64 ClockElem's.
+  // Unused space in the first-level ClockBlock is used to store additional
+  // clock elements.
+  // The last u32 element in the first-level ClockBlock is always used as
+  // a reference counter.
+  //
+  // See the following scheme for details.
+  // All memory blocks are 512 bytes (allocated from ClockAlloc).
+  // Clock (clk) elements are 64 bits.
+  // Idx and ref are 32 bits.
+  //
+  // tab_
+  //  |
+  //  \/
+  //  +----------------------------------------------------+
+  //  | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
+  //  +----------------------------------------------------+
+  //                                        |      |
+  //                                        |      \/
+  //                                        |      +----------------+
+  //                                        |      | clk0 ... clk63 |
+  //                                        |      +----------------+
+  //                                        \/
+  //                                        +------------------+
+  //                                        | clk64 ... clk127 |
+  //                                        +------------------+
+  //
+  // Note: dirty entries, if active, always override what's stored in the clock.
   ClockBlock *tab_;
   u32 tab_idx_;
-  u32 size_;
+  u16 size_;
+  u16 blocks_;  // Number of second-level blocks.
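The diagram pins down the arithmetic that capacity() implements: every second-level index costs half a ClockElem slot of the first-level block (a u32 against a u64 ClockElem), and the reference counter costs another half. A worked check of the formula, assuming sizeof(ClockElem) == 8 and sizeof(u32) == 4 as stated above:

    // Reproduces SyncClock::capacity() for small block counts (sketch only).
    #include <cstdio>

    int main() {
      const unsigned kClockCount = 64;  // 512 / sizeof(ClockElem)
      const unsigned ratio = 8 / 4;     // ClockElem's per u32 slot
      for (unsigned blocks = 0; blocks <= 2; blocks++) {
        // blocks indices plus 1 ref counter, rounded up to whole ClockElem slots.
        unsigned used = ((blocks + 1) + ratio - 1) / ratio;
        unsigned top = kClockCount - used;  // inline elements that still fit
        printf("blocks=%u capacity=%u\n", blocks, blocks * kClockCount + top);
      }
      return 0;
    }

This prints capacities 63, 127 and 190 for 0, 1 and 2 second-level blocks, which is why sizes around 62-66, 124-130 and 188-193 appear as the boundary cases in the unit tests below.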
+  void Unshare(ClockCache *c);
+  bool IsShared() const;
+  bool Cachable() const;
   void ResetImpl();
+  void FlushDirty();
+  uptr capacity() const;
+  u32 get_block(uptr bi) const;
+  void append_block(u32 idx);
   ClockElem &elem(unsigned tid) const;
 };
 
 // The clock that lives in threads.
-struct ThreadClock {
+class ThreadClock {
  public:
   typedef DenseSlabAllocCache Cache;
 
   explicit ThreadClock(unsigned tid, unsigned reused = 0);
 
-  u64 get(unsigned tid) const {
-    DCHECK_LT(tid, kMaxTidInClock);
-    return clk_[tid].epoch;
-  }
-
+  u64 get(unsigned tid) const;
   void set(ClockCache *c, unsigned tid, u64 v);
+  void set(u64 v);
+  void tick();
+  uptr size() const;
 
-  void set(u64 v) {
-    DCHECK_GE(v, clk_[tid_].epoch);
-    clk_[tid_].epoch = v;
-  }
-
-  void tick() {
-    clk_[tid_].epoch++;
-  }
-
-  uptr size() const {
-    return nclk_;
-  }
-
-  void acquire(ClockCache *c, const SyncClock *src);
-  void release(ClockCache *c, SyncClock *dst) const;
+  void acquire(ClockCache *c, SyncClock *src);
+  void release(ClockCache *c, SyncClock *dst);
   void acq_rel(ClockCache *c, SyncClock *dst);
-  void ReleaseStore(ClockCache *c, SyncClock *dst) const;
+  void ReleaseStore(ClockCache *c, SyncClock *dst);
   void ResetCached(ClockCache *c);
 
   void DebugReset();
   void DebugDump(int(*printf)(const char *s, ...));
 
  private:
   static const uptr kDirtyTids = SyncClock::kDirtyTids;
+  // Index of the thread associated with the clock ("current thread").
   const unsigned tid_;
-  const unsigned reused_;
+  const unsigned reused_;  // tid_ reuse count.
+  // Current thread time when it acquired something from other threads.
   u64 last_acquire_;
+
+  // Cached SyncClock (without dirty entries and release_store_tid_).
+  // We reuse it for subsequent store-release operations without intervening
+  // acquire operations. Since it is shared (and thus constant), clock value
+  // for the current thread is then stored in dirty entries in the SyncClock.
+  // We hold a reference to the table while it is cached here.
+  u32 cached_idx_;
+  u16 cached_size_;
+  u16 cached_blocks_;
+
+  // Number of active elements in the clk_ table (the rest is zeros).
   uptr nclk_;
-  ClockElem clk_[kMaxTidInClock];
+  u64 clk_[kMaxTidInClock];  // Fixed size vector clock.
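For orientation, ThreadClock and SyncClock together implement the classic vector-clock operations; the two-level storage, dirty entries, sharing and caching above are all optimizations over that model. A deliberately naive fixed-size rendition (the unit tests below define SimpleThreadClock/SimpleSyncClock in the same spirit; kThreads is an arbitrary illustrative bound):

    // Reference semantics only; not the optimized implementation.
    const int kThreads = 4;

    struct NaiveClock {
      unsigned long long clk[kThreads] = {};

      // acquire: pull in everything the sync object has observed.
      void acquire(const NaiveClock &src) {
        for (int i = 0; i < kThreads; i++)
          if (src.clk[i] > clk[i]) clk[i] = src.clk[i];
      }
      // release: merge everything this thread has observed into the sync object.
      void release(NaiveClock *dst) const {
        for (int i = 0; i < kThreads; i++)
          if (clk[i] > dst->clk[i]) dst->clk[i] = clk[i];
      }
      // release-store: overwrite rather than merge.
      void release_store(NaiveClock *dst) const {
        for (int i = 0; i < kThreads; i++) dst->clk[i] = clk[i];
      }
    };

acq_rel is then acquire followed by release. Component i of a clock records, roughly, how much of thread i's history the clock's owner has observed.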
bool IsAlreadyAcquired(const SyncClock *src) const; - void UpdateCurrentThread(SyncClock *dst) const; + void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const; }; +ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const { + DCHECK_LT(tid, kMaxTidInClock); + return clk_[tid]; +} + +ALWAYS_INLINE void ThreadClock::set(u64 v) { + DCHECK_GE(v, clk_[tid_]); + clk_[tid_] = v; +} + +ALWAYS_INLINE void ThreadClock::tick() { + clk_[tid_]++; +} + +ALWAYS_INLINE uptr ThreadClock::size() const { + return nclk_; +} + +ALWAYS_INLINE SyncClock::Iter SyncClock::begin() { + return Iter(this); +} + +ALWAYS_INLINE SyncClock::Iter SyncClock::end() { + return Iter(nullptr); +} + +ALWAYS_INLINE uptr SyncClock::size() const { + return size_; +} + +ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent) + : parent_(parent) + , pos_(nullptr) + , end_(nullptr) + , block_(-1) { + if (parent) + Next(); +} + +ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() { + pos_++; + if (UNLIKELY(pos_ >= end_)) + Next(); + return *this; +} + +ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) { + return parent_ != other.parent_; +} + +ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() { + return *pos_; +} } // namespace __tsan #endif // TSAN_CLOCK_H diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h index 8977fea7c552..3c775debfb09 100644 --- a/lib/tsan/rtl/tsan_defs.h +++ b/lib/tsan/rtl/tsan_defs.h @@ -1,171 +1,196 @@ //===-- tsan_defs.h ---------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #ifndef TSAN_DEFS_H #define TSAN_DEFS_H #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_libc.h" #include "tsan_stat.h" #include "ubsan/ubsan_platform.h" // Setup defaults for compile definitions. #ifndef TSAN_NO_HISTORY # define TSAN_NO_HISTORY 0 #endif #ifndef TSAN_COLLECT_STATS # define TSAN_COLLECT_STATS 0 #endif #ifndef TSAN_CONTAINS_UBSAN # if CAN_SANITIZE_UB && !SANITIZER_GO # define TSAN_CONTAINS_UBSAN 1 # else # define TSAN_CONTAINS_UBSAN 0 # endif #endif namespace __tsan { +const int kClkBits = 42; +const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1; + +struct ClockElem { + u64 epoch : kClkBits; + u64 reused : 64 - kClkBits; // tid reuse count +}; + +struct ClockBlock { + static const uptr kSize = 512; + static const uptr kTableSize = kSize / sizeof(u32); + static const uptr kClockCount = kSize / sizeof(ClockElem); + static const uptr kRefIdx = kTableSize - 1; + static const uptr kBlockIdx = kTableSize - 2; + + union { + u32 table[kTableSize]; + ClockElem clock[kClockCount]; + }; + + ClockBlock() { + } +}; + const int kTidBits = 13; -const unsigned kMaxTid = 1 << kTidBits; +// Reduce kMaxTid by kClockCount because one slot in ClockBlock table is +// occupied by reference counter, so total number of elements we can store +// in SyncClock is kClockCount * (kTableSize - 1). +const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount; #if !SANITIZER_GO const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit. #else const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory. 
#endif -const int kClkBits = 42; -const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1; const uptr kShadowStackSize = 64 * 1024; // Count of shadow values in a shadow cell. const uptr kShadowCnt = 4; // That many user bytes are mapped onto a single shadow cell. const uptr kShadowCell = 8; // Size of a single shadow value (u64). const uptr kShadowSize = 8; // Shadow memory is kShadowMultiplier times larger than user memory. const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell; // That many user bytes are mapped onto a single meta shadow cell. // Must be less or equal to minimal memory allocator alignment. const uptr kMetaShadowCell = 8; // Size of a single meta shadow value (u32). const uptr kMetaShadowSize = 4; #if TSAN_NO_HISTORY const bool kCollectHistory = false; #else const bool kCollectHistory = true; #endif -const unsigned kInvalidTid = (unsigned)-1; +const u16 kInvalidTid = kMaxTid + 1; // The following "build consistency" machinery ensures that all source files // are built in the same configuration. Inconsistent builds lead to // hard to debug crashes. #if SANITIZER_DEBUG void build_consistency_debug(); #else void build_consistency_release(); #endif #if TSAN_COLLECT_STATS void build_consistency_stats(); #else void build_consistency_nostats(); #endif static inline void USED build_consistency() { #if SANITIZER_DEBUG build_consistency_debug(); #else build_consistency_release(); #endif #if TSAN_COLLECT_STATS build_consistency_stats(); #else build_consistency_nostats(); #endif } template T min(T a, T b) { return a < b ? a : b; } template T max(T a, T b) { return a > b ? a : b; } template T RoundUp(T p, u64 align) { DCHECK_EQ(align & (align - 1), 0); return (T)(((u64)p + align - 1) & ~(align - 1)); } template T RoundDown(T p, u64 align) { DCHECK_EQ(align & (align - 1), 0); return (T)((u64)p & ~(align - 1)); } // Zeroizes high part, returns 'bits' lsb bits. template T GetLsb(T v, int bits) { return (T)((u64)v & ((1ull << bits) - 1)); } struct MD5Hash { u64 hash[2]; bool operator==(const MD5Hash &other) const; }; MD5Hash md5_hash(const void *data, uptr size); struct Processor; struct ThreadState; class ThreadContext; struct Context; struct ReportStack; class ReportDesc; class RegionAlloc; // Descriptor of user's memory block. struct MBlock { u64 siz : 48; u64 tag : 16; u32 stk; u16 tid; }; COMPILER_CHECK(sizeof(MBlock) == 16); enum ExternalTag : uptr { kExternalTagNone = 0, kExternalTagSwiftModifyingAccess = 1, kExternalTagFirstUserAvailable = 2, kExternalTagMax = 1024, // Don't set kExternalTagMax over 65,536, since MBlock only stores tags // as 16-bit values, see tsan_defs.h. }; } // namespace __tsan #endif // TSAN_DEFS_H diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc index 1434cf688ce9..f79dccddba9f 100644 --- a/lib/tsan/rtl/tsan_mman.cc +++ b/lib/tsan/rtl/tsan_mman.cc @@ -1,303 +1,304 @@ //===-- tsan_mman.cc ------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
// //===----------------------------------------------------------------------===// +#include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_mman.h" #include "tsan_rtl.h" #include "tsan_report.h" #include "tsan_flags.h" // May be overriden by front-end. SANITIZER_WEAK_DEFAULT_IMPL void __sanitizer_malloc_hook(void *ptr, uptr size) { (void)ptr; (void)size; } SANITIZER_WEAK_DEFAULT_IMPL void __sanitizer_free_hook(void *ptr) { (void)ptr; } namespace __tsan { struct MapUnmapCallback { void OnMap(uptr p, uptr size) const { } void OnUnmap(uptr p, uptr size) const { // We are about to unmap a chunk of user memory. // Mark the corresponding shadow memory as not needed. DontNeedShadowFor(p, size); // Mark the corresponding meta shadow memory as not needed. // Note the block does not contain any meta info at this point // (this happens after free). const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize; const uptr kPageSize = GetPageSizeCached() * kMetaRatio; // Block came from LargeMmapAllocator, so must be large. // We rely on this in the calculations below. CHECK_GE(size, 2 * kPageSize); uptr diff = RoundUp(p, kPageSize) - p; if (diff != 0) { p += diff; size -= diff; } diff = p + size - RoundDown(p + size, kPageSize); if (diff != 0) size -= diff; uptr p_meta = (uptr)MemToMeta(p); ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio); } }; static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64); Allocator *allocator() { return reinterpret_cast(&allocator_placeholder); } struct GlobalProc { Mutex mtx; Processor *proc; GlobalProc() : mtx(MutexTypeGlobalProc, StatMtxGlobalProc) , proc(ProcCreate()) { } }; static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64); GlobalProc *global_proc() { return reinterpret_cast(&global_proc_placeholder); } ScopedGlobalProcessor::ScopedGlobalProcessor() { GlobalProc *gp = global_proc(); ThreadState *thr = cur_thread(); if (thr->proc()) return; // If we don't have a proc, use the global one. // There are currently only two known case where this path is triggered: // __interceptor_free // __nptl_deallocate_tsd // start_thread // clone // and: // ResetRange // __interceptor_munmap // __deallocate_stack // start_thread // clone // Ideally, we destroy thread state (and unwire proc) when a thread actually // exits (i.e. when we join/wait it). 
Then we would not need the global proc gp->mtx.Lock(); ProcWire(gp->proc, thr); } ScopedGlobalProcessor::~ScopedGlobalProcessor() { GlobalProc *gp = global_proc(); ThreadState *thr = cur_thread(); if (thr->proc() != gp->proc) return; ProcUnwire(gp->proc, thr); gp->mtx.Unlock(); } void InitializeAllocator() { SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); allocator()->Init(common_flags()->allocator_release_to_os_interval_ms); } void InitializeAllocatorLate() { new(global_proc()) GlobalProc(); } void AllocatorProcStart(Processor *proc) { allocator()->InitCache(&proc->alloc_cache); internal_allocator()->InitCache(&proc->internal_alloc_cache); } void AllocatorProcFinish(Processor *proc) { allocator()->DestroyCache(&proc->alloc_cache); internal_allocator()->DestroyCache(&proc->internal_alloc_cache); } void AllocatorPrintStats() { allocator()->PrintStats(); } static void SignalUnsafeCall(ThreadState *thr, uptr pc) { if (atomic_load_relaxed(&thr->in_signal_handler) == 0 || !flags()->report_signal_unsafe) return; VarSizeStackTrace stack; ObtainCurrentStack(thr, pc, &stack); if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack)) return; ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeSignalUnsafe); rep.AddStack(stack, true); OutputReport(thr, rep); } void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) { if ((sz >= (1ull << 40)) || (align >= (1ull << 40))) return Allocator::FailureHandler::OnBadRequest(); void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align); if (p == 0) return 0; if (ctx && ctx->initialized) OnUserAlloc(thr, pc, (uptr)p, sz, true); if (signal) SignalUnsafeCall(thr, pc); return p; } void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) { if (CheckForCallocOverflow(size, n)) return Allocator::FailureHandler::OnBadRequest(); void *p = user_alloc(thr, pc, n * size); if (p) internal_memset(p, 0, n * size); return p; } void user_free(ThreadState *thr, uptr pc, void *p, bool signal) { ScopedGlobalProcessor sgp; if (ctx && ctx->initialized) OnUserFree(thr, pc, (uptr)p, true); allocator()->Deallocate(&thr->proc()->alloc_cache, p); if (signal) SignalUnsafeCall(thr, pc); } void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) { DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p); ctx->metamap.AllocBlock(thr, pc, p, sz); if (write && thr->ignore_reads_and_writes == 0) MemoryRangeImitateWrite(thr, pc, (uptr)p, sz); else MemoryResetRange(thr, pc, (uptr)p, sz); } void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) { CHECK_NE(p, (void*)0); uptr sz = ctx->metamap.FreeBlock(thr->proc(), p); DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz); if (write && thr->ignore_reads_and_writes == 0) MemoryRangeFreed(thr, pc, (uptr)p, sz); } void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) { // FIXME: Handle "shrinking" more efficiently, // it seems that some software actually does this. void *p2 = user_alloc(thr, pc, sz); if (p2 == 0) return 0; if (p) { uptr oldsz = user_alloc_usable_size(p); internal_memcpy(p2, p, min(oldsz, sz)); user_free(thr, pc, p); } return p2; } uptr user_alloc_usable_size(const void *p) { if (p == 0) return 0; MBlock *b = ctx->metamap.GetBlock((uptr)p); if (!b) return 0; // Not a valid pointer. if (b->siz == 0) return 1; // Zero-sized allocations are actually 1 byte. 
return b->siz; } void invoke_malloc_hook(void *ptr, uptr size) { ThreadState *thr = cur_thread(); if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors) return; __sanitizer_malloc_hook(ptr, size); RunMallocHooks(ptr, size); } void invoke_free_hook(void *ptr) { ThreadState *thr = cur_thread(); if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors) return; __sanitizer_free_hook(ptr); RunFreeHooks(ptr); } void *internal_alloc(MBlockType typ, uptr sz) { ThreadState *thr = cur_thread(); if (thr->nomalloc) { thr->nomalloc = 0; // CHECK calls internal_malloc(). CHECK(0); } return InternalAlloc(sz, &thr->proc()->internal_alloc_cache); } void internal_free(void *p) { ThreadState *thr = cur_thread(); if (thr->nomalloc) { thr->nomalloc = 0; // CHECK calls internal_malloc(). CHECK(0); } InternalFree(p, &thr->proc()->internal_alloc_cache); } } // namespace __tsan using namespace __tsan; extern "C" { uptr __sanitizer_get_current_allocated_bytes() { uptr stats[AllocatorStatCount]; allocator()->GetStats(stats); return stats[AllocatorStatAllocated]; } uptr __sanitizer_get_heap_size() { uptr stats[AllocatorStatCount]; allocator()->GetStats(stats); return stats[AllocatorStatMapped]; } uptr __sanitizer_get_free_bytes() { return 1; } uptr __sanitizer_get_unmapped_bytes() { return 1; } uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } int __sanitizer_get_ownership(const void *p) { return allocator()->GetBlockBegin(p) != 0; } uptr __sanitizer_get_allocated_size(const void *p) { return user_alloc_usable_size(p); } void __tsan_on_thread_idle() { ThreadState *thr = cur_thread(); thr->clock.ResetCached(&thr->proc()->clock_cache); thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache); allocator()->SwallowCache(&thr->proc()->alloc_cache); internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache); ctx->metamap.OnProcIdle(thr->proc()); } } // extern "C" diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc index 0ba01babe69a..ead1e5704989 100644 --- a/lib/tsan/rtl/tsan_platform_linux.cc +++ b/lib/tsan/rtl/tsan_platform_linux.cc @@ -1,404 +1,404 @@ //===-- tsan_platform_linux.cc --------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Linux- and FreeBSD-specific code. 
//===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_LINUX || SANITIZER_FREEBSD #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_flags.h" #include #include #include #include #include #include #include #include #if SANITIZER_LINUX #include #include #endif #include #include #include #include #include #include #include #include #include #if SANITIZER_LINUX #define __need_res_state #include #endif #ifdef sa_handler # undef sa_handler #endif #ifdef sa_sigaction # undef sa_sigaction #endif #if SANITIZER_FREEBSD extern "C" void *__libc_stack_end; void *__libc_stack_end = 0; #endif #if SANITIZER_LINUX && defined(__aarch64__) void InitializeGuardPtr() __attribute__((visibility("hidden"))); #endif namespace __tsan { #ifdef TSAN_RUNTIME_VMA // Runtime detected VMA size. uptr vmaSize; #endif enum { MemTotal = 0, MemShadow = 1, MemMeta = 2, MemFile = 3, MemMmap = 4, MemTrace = 5, MemHeap = 6, MemOther = 7, MemCount = 8, }; void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem, uptr stats_size) { mem[MemTotal] += rss; if (p >= ShadowBeg() && p < ShadowEnd()) mem[MemShadow] += rss; else if (p >= MetaShadowBeg() && p < MetaShadowEnd()) mem[MemMeta] += rss; #if !SANITIZER_GO else if (p >= HeapMemBeg() && p < HeapMemEnd()) mem[MemHeap] += rss; else if (p >= LoAppMemBeg() && p < LoAppMemEnd()) mem[file ? MemFile : MemMmap] += rss; else if (p >= HiAppMemBeg() && p < HiAppMemEnd()) mem[file ? MemFile : MemMmap] += rss; #else else if (p >= AppMemBeg() && p < AppMemEnd()) mem[file ? MemFile : MemMmap] += rss; #endif else if (p >= TraceMemBeg() && p < TraceMemEnd()) mem[MemTrace] += rss; else mem[MemOther] += rss; } void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { uptr mem[MemCount]; internal_memset(mem, 0, sizeof(mem[0]) * MemCount); __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7); StackDepotStats *stacks = StackDepotGetStats(); internal_snprintf(buf, buf_size, "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd" " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n", mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20, mem[MemOther] >> 20, stacks->allocated >> 20, stacks->n_uniq_ids, nlive, nthread); } #if SANITIZER_LINUX void FlushShadowMemoryCallback( const SuspendedThreadsList &suspended_threads_list, void *argument) { ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd()); } #endif void FlushShadowMemory() { #if SANITIZER_LINUX StopTheWorld(FlushShadowMemoryCallback, 0); #endif } #if !SANITIZER_GO // Mark shadow for .rodata sections with the special kShadowRodata marker. // Accesses to .rodata can't race, so this saves time, memory and trace space. static void MapRodata() { // First create temp file. 
const char *tmpdir = GetEnv("TMPDIR"); if (tmpdir == 0) tmpdir = GetEnv("TEST_TMPDIR"); #ifdef P_tmpdir if (tmpdir == 0) tmpdir = P_tmpdir; #endif if (tmpdir == 0) return; char name[256]; internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d", tmpdir, (int)internal_getpid()); uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600); if (internal_iserror(openrv)) return; internal_unlink(name); // Unlink it now, so that we can reuse the buffer. fd_t fd = openrv; // Fill the file with kShadowRodata. const uptr kMarkerSize = 512 * 1024 / sizeof(u64); InternalScopedBuffer marker(kMarkerSize); // volatile to prevent insertion of memset for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++) *p = kShadowRodata; internal_write(fd, marker.data(), marker.size()); // Map the file into memory. uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, fd, 0); if (internal_iserror(page)) { internal_close(fd); return; } // Map the file into shadow of .rodata sections. MemoryMappingLayout proc_maps(/*cache_enabled*/true); // Reusing the buffer 'name'. MemoryMappedSegment segment(name, ARRAY_SIZE(name)); while (proc_maps.Next(&segment)) { if (segment.filename[0] != 0 && segment.filename[0] != '[' && segment.IsReadable() && segment.IsExecutable() && !segment.IsWritable() && IsAppMem(segment.start)) { // Assume it's .rodata char *shadow_start = (char *)MemToShadow(segment.start); char *shadow_end = (char *)MemToShadow(segment.end); for (char *p = shadow_start; p < shadow_end; p += marker.size()) { internal_mmap(p, Min(marker.size(), shadow_end - p), PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0); } } } internal_close(fd); } void InitializeShadowMemoryPlatform() { MapRodata(); } #endif // #if !SANITIZER_GO void InitializePlatformEarly() { #ifdef TSAN_RUNTIME_VMA vmaSize = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); #if defined(__aarch64__) if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize); Die(); } #elif defined(__powerpc64__) if (vmaSize != 44 && vmaSize != 46) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); Printf("FATAL: Found %d - Supported 44 and 46\n", vmaSize); Die(); } #endif #endif } void InitializePlatform() { DisableCoreDumperIfNecessary(); // Go maps shadow memory lazily and works fine with limited address space. // Unlimited stack is not a problem as well, because the executable // is not compiled with -pie. if (!SANITIZER_GO) { bool reexec = false; // TSan doesn't play well with unlimited stack size (as stack // overlaps with shadow memory). If we detect unlimited stack size, // we re-exec the program with limited stack size as a best effort. if (StackSizeIsUnlimited()) { const uptr kMaxStackSize = 32 * 1024 * 1024; VReport(1, "Program is run with unlimited stack size, which wouldn't " "work with ThreadSanitizer.\n" "Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize); SetStackSizeLimitInBytes(kMaxStackSize); reexec = true; } if (!AddressSpaceIsUnlimited()) { Report("WARNING: Program is run with limited virtual address space," " which wouldn't work with ThreadSanitizer.\n"); Report("Re-execing with unlimited virtual address space.\n"); SetAddressSpaceUnlimited(); reexec = true; } #if SANITIZER_LINUX && defined(__aarch64__) // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." 
is introduced in // linux kernel, the random gap between stack and mapped area is increased // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover // this big range, we should disable randomized virtual space on aarch64. int old_personality = personality(0xffffffff); if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) { VReport(1, "WARNING: Program is run with randomized virtual address " "space, which wouldn't work with ThreadSanitizer.\n" "Re-execing with fixed virtual address space.\n"); CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); reexec = true; } // Initialize the guard pointer used in {sig}{set,long}jump. InitializeGuardPtr(); #endif if (reexec) ReExec(); } #if !SANITIZER_GO CheckAndProtect(); InitTlsSize(); #endif } #if !SANITIZER_GO // Extract file descriptors passed to glibc internal __res_iclose function. // This is required to properly "close" the fds, because we do not see internal // closes within glibc. The code is a pure hack. int ExtractResolvFDs(void *state, int *fds, int nfd) { #if SANITIZER_LINUX && !SANITIZER_ANDROID int cnt = 0; - __res_state *statp = (__res_state*)state; + struct __res_state *statp = (struct __res_state*)state; for (int i = 0; i < MAXNS && cnt < nfd; i++) { if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1) fds[cnt++] = statp->_u._ext.nssocks[i]; } return cnt; #else return 0; #endif } // Extract file descriptors passed via UNIX domain sockets. // This is requried to properly handle "open" of these fds. // see 'man recvmsg' and 'man 3 cmsg'. int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) { int res = 0; msghdr *msg = (msghdr*)msgp; struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) continue; int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]); for (int i = 0; i < n; i++) { fds[res++] = ((int*)CMSG_DATA(cmsg))[i]; if (res == nfd) return res; } } return res; } void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) { // Check that the thr object is in tls; const uptr thr_beg = (uptr)thr; const uptr thr_end = (uptr)thr + sizeof(*thr); CHECK_GE(thr_beg, tls_addr); CHECK_LE(thr_beg, tls_addr + tls_size); CHECK_GE(thr_end, tls_addr); CHECK_LE(thr_end, tls_addr + tls_size); // Since the thr object is huge, skip it. MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr); MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end, tls_addr + tls_size - thr_end); } // Note: this function runs with async signals enabled, // so it must not touch any tsan state. int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg) { // pthread_cleanup_push/pop are hardcore macros mess. // We can't intercept nor call them w/o including pthread.h. int res; pthread_cleanup_push(cleanup, arg); res = fn(c, m, abstime); pthread_cleanup_pop(0); return res; } #endif #if !SANITIZER_GO void ReplaceSystemMalloc() { } #endif #if !SANITIZER_GO #if SANITIZER_ANDROID // On Android, one thread can call intercepted functions after // DestroyThreadState(), so add a fake thread state for "dead" threads. 
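The Android-only code below keeps a permanently mapped ThreadState sentinel and points late callers at it; the sentinel is made read-only so that any write through a "dead" thread state faults immediately instead of silently corrupting the runtime. The idiom, in a hypothetical standalone form:

    // Sketch of the read-only sentinel idiom; not the actual TSan code.
    #include <sys/mman.h>
    #include <cassert>

    struct State { int ignore_interceptors; };

    static State *MakeDeadState() {
      void *p = mmap(nullptr, sizeof(State), PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(p != MAP_FAILED);
      static_cast<State *>(p)->ignore_interceptors = 1;  // init while writable
      int rc = mprotect(p, sizeof(State), PROT_READ);    // then seal read-only
      assert(rc == 0);
      (void)rc;
      return static_cast<State *>(p);
    }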
static ThreadState *dead_thread_state = nullptr; ThreadState *cur_thread() { ThreadState* thr = reinterpret_cast(*get_android_tls_ptr()); if (thr == nullptr) { __sanitizer_sigset_t emptyset; internal_sigfillset(&emptyset); __sanitizer_sigset_t oldset; CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset)); thr = reinterpret_cast(*get_android_tls_ptr()); if (thr == nullptr) { thr = reinterpret_cast(MmapOrDie(sizeof(ThreadState), "ThreadState")); *get_android_tls_ptr() = reinterpret_cast(thr); if (dead_thread_state == nullptr) { dead_thread_state = reinterpret_cast( MmapOrDie(sizeof(ThreadState), "ThreadState")); dead_thread_state->fast_state.SetIgnoreBit(); dead_thread_state->ignore_interceptors = 1; dead_thread_state->is_dead = true; *const_cast(&dead_thread_state->tid) = -1; CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState), PROT_READ)); } } CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr)); } return thr; } void cur_thread_finalize() { __sanitizer_sigset_t emptyset; internal_sigfillset(&emptyset); __sanitizer_sigset_t oldset; CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset)); ThreadState* thr = reinterpret_cast(*get_android_tls_ptr()); if (thr != dead_thread_state) { *get_android_tls_ptr() = reinterpret_cast(dead_thread_state); UnmapOrDie(thr, sizeof(ThreadState)); } CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr)); } #endif // SANITIZER_ANDROID #endif // if !SANITIZER_GO } // namespace __tsan #endif // SANITIZER_LINUX || SANITIZER_FREEBSD diff --git a/lib/tsan/tests/unit/tsan_clock_test.cc b/lib/tsan/tests/unit/tsan_clock_test.cc index 73104dd6b9d4..f6230e1be565 100644 --- a/lib/tsan/tests/unit/tsan_clock_test.cc +++ b/lib/tsan/tests/unit/tsan_clock_test.cc @@ -1,471 +1,494 @@ //===-- tsan_clock_test.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
// //===----------------------------------------------------------------------===// #include "tsan_clock.h" #include "tsan_rtl.h" #include "gtest/gtest.h" #include #include namespace __tsan { ClockCache cache; TEST(Clock, VectorBasic) { ThreadClock clk(0); ASSERT_EQ(clk.size(), 1U); clk.tick(); ASSERT_EQ(clk.size(), 1U); ASSERT_EQ(clk.get(0), 1U); clk.set(&cache, 3, clk.get(3) + 1); ASSERT_EQ(clk.size(), 4U); ASSERT_EQ(clk.get(0), 1U); ASSERT_EQ(clk.get(1), 0U); ASSERT_EQ(clk.get(2), 0U); ASSERT_EQ(clk.get(3), 1U); clk.set(&cache, 3, clk.get(3) + 1); ASSERT_EQ(clk.get(3), 2U); } TEST(Clock, ChunkedBasic) { ThreadClock vector(0); SyncClock chunked; ASSERT_EQ(vector.size(), 1U); ASSERT_EQ(chunked.size(), 0U); vector.acquire(&cache, &chunked); ASSERT_EQ(vector.size(), 1U); ASSERT_EQ(chunked.size(), 0U); vector.release(&cache, &chunked); ASSERT_EQ(vector.size(), 1U); ASSERT_EQ(chunked.size(), 1U); vector.acq_rel(&cache, &chunked); ASSERT_EQ(vector.size(), 1U); ASSERT_EQ(chunked.size(), 1U); chunked.Reset(&cache); } +static const uptr interesting_sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66, + 100, 124, 125, 126, 127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254, + 255}; + +TEST(Clock, Iter) { + const uptr n = ARRAY_SIZE(interesting_sizes); + for (uptr fi = 0; fi < n; fi++) { + const uptr size = interesting_sizes[fi]; + SyncClock sync; + ThreadClock vector(0); + for (uptr i = 0; i < size; i++) + vector.set(&cache, i, i + 1); + if (size != 0) + vector.release(&cache, &sync); + uptr i = 0; + for (ClockElem &ce : sync) { + ASSERT_LT(i, size); + ASSERT_EQ(sync.get_clean(i), ce.epoch); + i++; + } + ASSERT_EQ(i, size); + sync.Reset(&cache); + } +} + TEST(Clock, AcquireRelease) { ThreadClock vector1(100); vector1.tick(); SyncClock chunked; vector1.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 101U); ThreadClock vector2(0); vector2.acquire(&cache, &chunked); ASSERT_EQ(vector2.size(), 101U); ASSERT_EQ(vector2.get(0), 0U); ASSERT_EQ(vector2.get(1), 0U); ASSERT_EQ(vector2.get(99), 0U); ASSERT_EQ(vector2.get(100), 1U); chunked.Reset(&cache); } TEST(Clock, RepeatedAcquire) { ThreadClock thr1(1); thr1.tick(); ThreadClock thr2(2); thr2.tick(); SyncClock sync; thr1.ReleaseStore(&cache, &sync); thr2.acquire(&cache, &sync); thr2.acquire(&cache, &sync); sync.Reset(&cache); } TEST(Clock, ManyThreads) { SyncClock chunked; for (unsigned i = 0; i < 200; i++) { ThreadClock vector(0); vector.tick(); vector.set(&cache, i, i + 1); vector.release(&cache, &chunked); ASSERT_EQ(i + 1, chunked.size()); vector.acquire(&cache, &chunked); ASSERT_EQ(i + 1, vector.size()); } for (unsigned i = 0; i < 200; i++) { printf("i=%d\n", i); ASSERT_EQ(i + 1, chunked.get(i)); } ThreadClock vector(1); vector.acquire(&cache, &chunked); ASSERT_EQ(200U, vector.size()); for (unsigned i = 0; i < 200; i++) ASSERT_EQ(i + 1, vector.get(i)); chunked.Reset(&cache); } TEST(Clock, DifferentSizes) { { ThreadClock vector1(10); vector1.tick(); ThreadClock vector2(20); vector2.tick(); { SyncClock chunked; vector1.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 11U); vector2.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 21U); chunked.Reset(&cache); } { SyncClock chunked; vector2.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 21U); vector1.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 21U); chunked.Reset(&cache); } { SyncClock chunked; vector1.release(&cache, &chunked); vector2.acquire(&cache, &chunked); ASSERT_EQ(vector2.size(), 21U); chunked.Reset(&cache); } { SyncClock chunked; vector2.release(&cache, &chunked); 
vector1.acquire(&cache, &chunked); ASSERT_EQ(vector1.size(), 21U); chunked.Reset(&cache); } } } TEST(Clock, Growth) { { ThreadClock vector(10); vector.tick(); vector.set(&cache, 5, 42); SyncClock sync; vector.release(&cache, &sync); ASSERT_EQ(sync.size(), 11U); ASSERT_EQ(sync.get(0), 0ULL); ASSERT_EQ(sync.get(1), 0ULL); ASSERT_EQ(sync.get(5), 42ULL); ASSERT_EQ(sync.get(9), 0ULL); ASSERT_EQ(sync.get(10), 1ULL); sync.Reset(&cache); } { ThreadClock vector1(10); vector1.tick(); ThreadClock vector2(20); vector2.tick(); SyncClock sync; vector1.release(&cache, &sync); vector2.release(&cache, &sync); ASSERT_EQ(sync.size(), 21U); ASSERT_EQ(sync.get(0), 0ULL); ASSERT_EQ(sync.get(10), 1ULL); ASSERT_EQ(sync.get(19), 0ULL); ASSERT_EQ(sync.get(20), 1ULL); sync.Reset(&cache); } { ThreadClock vector(100); vector.tick(); vector.set(&cache, 5, 42); vector.set(&cache, 90, 84); SyncClock sync; vector.release(&cache, &sync); ASSERT_EQ(sync.size(), 101U); ASSERT_EQ(sync.get(0), 0ULL); ASSERT_EQ(sync.get(1), 0ULL); ASSERT_EQ(sync.get(5), 42ULL); ASSERT_EQ(sync.get(60), 0ULL); ASSERT_EQ(sync.get(70), 0ULL); ASSERT_EQ(sync.get(90), 84ULL); ASSERT_EQ(sync.get(99), 0ULL); ASSERT_EQ(sync.get(100), 1ULL); sync.Reset(&cache); } { ThreadClock vector1(10); vector1.tick(); ThreadClock vector2(100); vector2.tick(); SyncClock sync; vector1.release(&cache, &sync); vector2.release(&cache, &sync); ASSERT_EQ(sync.size(), 101U); ASSERT_EQ(sync.get(0), 0ULL); ASSERT_EQ(sync.get(10), 1ULL); ASSERT_EQ(sync.get(99), 0ULL); ASSERT_EQ(sync.get(100), 1ULL); sync.Reset(&cache); } } TEST(Clock, Growth2) { // Test clock growth for every pair of sizes: - const uptr sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66, 100, 124, 125, 126, - 127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254, 255}; - const uptr n = sizeof(sizes) / sizeof(sizes[0]); + const uptr n = ARRAY_SIZE(interesting_sizes); for (uptr fi = 0; fi < n; fi++) { for (uptr ti = fi + 1; ti < n; ti++) { - const uptr from = sizes[fi]; - const uptr to = sizes[ti]; + const uptr from = interesting_sizes[fi]; + const uptr to = interesting_sizes[ti]; SyncClock sync; ThreadClock vector(0); for (uptr i = 0; i < from; i++) vector.set(&cache, i, i + 1); if (from != 0) vector.release(&cache, &sync); ASSERT_EQ(sync.size(), from); for (uptr i = 0; i < from; i++) ASSERT_EQ(sync.get(i), i + 1); for (uptr i = 0; i < to; i++) vector.set(&cache, i, i + 1); vector.release(&cache, &sync); ASSERT_EQ(sync.size(), to); for (uptr i = 0; i < to; i++) ASSERT_EQ(sync.get(i), i + 1); vector.set(&cache, to + 1, to + 1); vector.release(&cache, &sync); ASSERT_EQ(sync.size(), to + 2); for (uptr i = 0; i < to; i++) ASSERT_EQ(sync.get(i), i + 1); ASSERT_EQ(sync.get(to), 0U); ASSERT_EQ(sync.get(to + 1), to + 1); sync.Reset(&cache); } } } const uptr kThreads = 4; const uptr kClocks = 4; // SimpleSyncClock and SimpleThreadClock implement the same thing as // SyncClock and ThreadClock, but in a very simple way. 
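The two structs below serve as an executable specification: the Fuzzer test at the end of this file drives the real clocks and these models with the same random operations and compares them after every step, rerunning the identical seed with logging enabled on failure. That seeded-replay skeleton, roughly:

    // Skeleton of the replay idiom used by TEST(Clock, Fuzzer) below.
    #include <cstdio>
    #include <cstdlib>

    // Placeholder: would apply N random ops to both the model and the real
    // implementation, returning false on the first divergence.
    static bool RunOps(bool printing) { (void)printing; return true; }

    static void FuzzOnce(int seed) {
      printf("seed=%d\n", seed);  // always log the seed for reproducibility
      srand(seed);
      if (!RunOps(false)) {  // quiet first pass
        srand(seed);         // same seed => identical operation sequence
        RunOps(true);        // replay with logging for a readable trace
        abort();             // then fail the test
      }
    }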
struct SimpleSyncClock { u64 clock[kThreads]; uptr size; SimpleSyncClock() { Reset(); } void Reset() { size = 0; for (uptr i = 0; i < kThreads; i++) clock[i] = 0; } bool verify(const SyncClock *other) const { for (uptr i = 0; i < min(size, other->size()); i++) { if (clock[i] != other->get(i)) return false; } for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) { if (i < size && clock[i] != 0) return false; if (i < other->size() && other->get(i) != 0) return false; } return true; } }; struct SimpleThreadClock { u64 clock[kThreads]; uptr size; unsigned tid; explicit SimpleThreadClock(unsigned tid) { this->tid = tid; size = tid + 1; for (uptr i = 0; i < kThreads; i++) clock[i] = 0; } void tick() { clock[tid]++; } void acquire(const SimpleSyncClock *src) { if (size < src->size) size = src->size; for (uptr i = 0; i < kThreads; i++) clock[i] = max(clock[i], src->clock[i]); } void release(SimpleSyncClock *dst) const { if (dst->size < size) dst->size = size; for (uptr i = 0; i < kThreads; i++) dst->clock[i] = max(dst->clock[i], clock[i]); } void acq_rel(SimpleSyncClock *dst) { acquire(dst); release(dst); } void ReleaseStore(SimpleSyncClock *dst) const { if (dst->size < size) dst->size = size; for (uptr i = 0; i < kThreads; i++) dst->clock[i] = clock[i]; } bool verify(const ThreadClock *other) const { for (uptr i = 0; i < min(size, other->size()); i++) { if (clock[i] != other->get(i)) return false; } for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) { if (i < size && clock[i] != 0) return false; if (i < other->size() && other->get(i) != 0) return false; } return true; } }; static bool ClockFuzzer(bool printing) { // Create kThreads thread clocks. SimpleThreadClock *thr0[kThreads]; ThreadClock *thr1[kThreads]; unsigned reused[kThreads]; for (unsigned i = 0; i < kThreads; i++) { reused[i] = 0; thr0[i] = new SimpleThreadClock(i); thr1[i] = new ThreadClock(i, reused[i]); } // Create kClocks sync clocks. SimpleSyncClock *sync0[kClocks]; SyncClock *sync1[kClocks]; for (unsigned i = 0; i < kClocks; i++) { sync0[i] = new SimpleSyncClock(); sync1[i] = new SyncClock(); } // Do N random operations (acquire, release, etc) and compare results // for SimpleThread/SyncClock and real Thread/SyncClock. 
for (int i = 0; i < 10000; i++) { unsigned tid = rand() % kThreads; unsigned cid = rand() % kClocks; thr0[tid]->tick(); thr1[tid]->tick(); switch (rand() % 6) { case 0: if (printing) printf("acquire thr%d <- clk%d\n", tid, cid); thr0[tid]->acquire(sync0[cid]); thr1[tid]->acquire(&cache, sync1[cid]); break; case 1: if (printing) printf("release thr%d -> clk%d\n", tid, cid); thr0[tid]->release(sync0[cid]); thr1[tid]->release(&cache, sync1[cid]); break; case 2: if (printing) printf("acq_rel thr%d <> clk%d\n", tid, cid); thr0[tid]->acq_rel(sync0[cid]); thr1[tid]->acq_rel(&cache, sync1[cid]); break; case 3: if (printing) printf("rel_str thr%d >> clk%d\n", tid, cid); thr0[tid]->ReleaseStore(sync0[cid]); thr1[tid]->ReleaseStore(&cache, sync1[cid]); break; case 4: if (printing) printf("reset clk%d\n", cid); sync0[cid]->Reset(); sync1[cid]->Reset(&cache); break; case 5: if (printing) printf("reset thr%d\n", tid); u64 epoch = thr0[tid]->clock[tid] + 1; reused[tid]++; delete thr0[tid]; thr0[tid] = new SimpleThreadClock(tid); thr0[tid]->clock[tid] = epoch; delete thr1[tid]; thr1[tid] = new ThreadClock(tid, reused[tid]); thr1[tid]->set(epoch); break; } if (printing) { for (unsigned i = 0; i < kThreads; i++) { printf("thr%d: ", i); thr1[i]->DebugDump(printf); printf("\n"); } for (unsigned i = 0; i < kClocks; i++) { printf("clk%d: ", i); sync1[i]->DebugDump(printf); printf("\n"); } printf("\n"); } if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) { if (!printing) return false; printf("differs with model:\n"); for (unsigned i = 0; i < kThreads; i++) { printf("thr%d: clock=[", i); for (uptr j = 0; j < thr0[i]->size; j++) printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]); printf("]\n"); } for (unsigned i = 0; i < kClocks; i++) { printf("clk%d: clock=[", i); for (uptr j = 0; j < sync0[i]->size; j++) printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]); printf("]\n"); } return false; } } for (unsigned i = 0; i < kClocks; i++) { sync1[i]->Reset(&cache); } return true; } TEST(Clock, Fuzzer) { struct timeval tv; gettimeofday(&tv, NULL); int seed = tv.tv_sec + tv.tv_usec; printf("seed=%d\n", seed); srand(seed); if (!ClockFuzzer(false)) { // Redo the test with the same seed, but logging operations. srand(seed); ClockFuzzer(true); ASSERT_TRUE(false); } } } // namespace __tsan diff --git a/lib/ubsan/ubsan_handlers.cc b/lib/ubsan/ubsan_handlers.cc index 185752719aff..75a4490a1843 100644 --- a/lib/ubsan/ubsan_handlers.cc +++ b/lib/ubsan/ubsan_handlers.cc @@ -1,659 +1,664 @@ //===-- ubsan_handlers.cc -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Error logging entry points for the UBSan runtime. // //===----------------------------------------------------------------------===// #include "ubsan_platform.h" #if CAN_SANITIZE_UB #include "ubsan_handlers.h" #include "ubsan_diag.h" #include "sanitizer_common/sanitizer_common.h" using namespace __sanitizer; using namespace __ubsan; namespace __ubsan { bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET) { // We are not allowed to skip error report: if we are in unrecoverable // handler, we have to terminate the program right now, and therefore // have to print some diagnostic. 
// // Even if source location is disabled, it doesn't mean that we have // already report an error to the user: some concurrently running // thread could have acquired it, but not yet printed the report. if (Opts.FromUnrecoverableHandler) return false; return SLoc.isDisabled() || IsPCSuppressed(ET, Opts.pc, SLoc.getFilename()); } const char *TypeCheckKinds[] = { "load of", "store to", "reference binding to", "member access within", "member call on", "constructor call on", "downcast of", "downcast of", "upcast of", "cast to virtual base of", "_Nonnull binding to"}; } static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, ReportOptions Opts) { Location Loc = Data->Loc.acquire(); uptr Alignment = (uptr)1 << Data->LogAlignment; ErrorType ET; if (!Pointer) ET = ErrorType::NullPointerUse; else if (Pointer & (Alignment - 1)) ET = ErrorType::MisalignedPointerUse; else ET = ErrorType::InsufficientObjectSize; // Use the SourceLocation from Data to track deduplication, even if it's // invalid. if (ignoreReport(Loc.getSourceLocation(), Opts, ET)) return; SymbolizedStackHolder FallbackLoc; if (Data->Loc.isInvalid()) { FallbackLoc.reset(getCallerLocation(Opts.pc)); Loc = FallbackLoc; } ScopedReport R(Opts, Loc, ET); switch (ET) { case ErrorType::NullPointerUse: Diag(Loc, DL_Error, "%0 null pointer of type %1") << TypeCheckKinds[Data->TypeCheckKind] << Data->Type; break; case ErrorType::MisalignedPointerUse: Diag(Loc, DL_Error, "%0 misaligned address %1 for type %3, " "which requires %2 byte alignment") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Alignment << Data->Type; break; case ErrorType::InsufficientObjectSize: Diag(Loc, DL_Error, "%0 address %1 with insufficient space " "for an object of type %2") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Data->Type; break; default: UNREACHABLE("unexpected error type!"); } if (Pointer) Diag(Pointer, DL_Note, "pointer points here"); } void __ubsan::__ubsan_handle_type_mismatch_v1(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(false); handleTypeMismatchImpl(Data, Pointer, Opts); } void __ubsan::__ubsan_handle_type_mismatch_v1_abort(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(true); handleTypeMismatchImpl(Data, Pointer, Opts); Die(); } /// \brief Common diagnostic emission for various forms of integer overflow. template static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS, const char *Operator, T RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); bool IsSigned = Data->Type.isSignedIntegerTy(); ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow : ErrorType::UnsignedIntegerOverflow; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "%0 integer overflow: " "%1 %2 %3 cannot be represented in type %4") << (IsSigned ? 
"signed" : "unsigned") << Value(Data->Type, LHS) << Operator << RHS << Data->Type; } #define UBSAN_OVERFLOW_HANDLER(handler_name, op, unrecoverable) \ void __ubsan::handler_name(OverflowData *Data, ValueHandle LHS, \ ValueHandle RHS) { \ GET_REPORT_OPTIONS(unrecoverable); \ handleIntegerOverflowImpl(Data, LHS, op, Value(Data->Type, RHS), Opts); \ if (unrecoverable) \ Die(); \ } UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow_abort, "+", true) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow, "-", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow_abort, "-", true) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow, "*", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow_abort, "*", true) static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); bool IsSigned = Data->Type.isSignedIntegerTy(); ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow : ErrorType::UnsignedIntegerOverflow; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); if (IsSigned) Diag(Loc, DL_Error, "negation of %0 cannot be represented in type %1; " "cast to an unsigned type to negate this value to itself") << Value(Data->Type, OldVal) << Data->Type; else Diag(Loc, DL_Error, "negation of %0 cannot be represented in type %1") << Value(Data->Type, OldVal) << Data->Type; } void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data, ValueHandle OldVal) { GET_REPORT_OPTIONS(false); handleNegateOverflowImpl(Data, OldVal, Opts); } void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data, ValueHandle OldVal) { GET_REPORT_OPTIONS(true); handleNegateOverflowImpl(Data, OldVal, Opts); Die(); } static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS, ValueHandle RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); Value LHSVal(Data->Type, LHS); Value RHSVal(Data->Type, RHS); ErrorType ET; if (RHSVal.isMinusOne()) ET = ErrorType::SignedIntegerOverflow; else if (Data->Type.isIntegerTy()) ET = ErrorType::IntegerDivideByZero; else ET = ErrorType::FloatDivideByZero; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); switch (ET) { case ErrorType::SignedIntegerOverflow: Diag(Loc, DL_Error, "division of %0 by -1 cannot be represented in type %1") << LHSVal << Data->Type; break; default: Diag(Loc, DL_Error, "division by zero"); break; } } void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(false); handleDivremOverflowImpl(Data, LHS, RHS, Opts); } void __ubsan::__ubsan_handle_divrem_overflow_abort(OverflowData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(true); handleDivremOverflowImpl(Data, LHS, RHS, Opts); Die(); } static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); Value LHSVal(Data->LHSType, LHS); Value RHSVal(Data->RHSType, RHS); ErrorType ET; if (RHSVal.isNegative() || RHSVal.getPositiveIntValue() >= Data->LHSType.getIntegerBitWidth()) ET = ErrorType::InvalidShiftExponent; else ET = ErrorType::InvalidShiftBase; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); if (ET == ErrorType::InvalidShiftExponent) { if (RHSVal.isNegative()) Diag(Loc, DL_Error, "shift exponent %0 is negative") << RHSVal; else Diag(Loc, DL_Error, "shift exponent %0 is too large for %1-bit type 
%2") << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType; } else { if (LHSVal.isNegative()) Diag(Loc, DL_Error, "left shift of negative value %0") << LHSVal; else Diag(Loc, DL_Error, "left shift of %0 by %1 places cannot be represented in type %2") << LHSVal << RHSVal << Data->LHSType; } } void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(false); handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts); } void __ubsan::__ubsan_handle_shift_out_of_bounds_abort( ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(true); handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts); Die(); } static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::OutOfBoundsIndex; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Value IndexVal(Data->IndexType, Index); Diag(Loc, DL_Error, "index %0 out of bounds for type %1") << IndexVal << Data->ArrayType; } void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data, ValueHandle Index) { GET_REPORT_OPTIONS(false); handleOutOfBoundsImpl(Data, Index, Opts); } void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data, ValueHandle Index) { GET_REPORT_OPTIONS(true); handleOutOfBoundsImpl(Data, Index, Opts); Die(); } static void handleBuiltinUnreachableImpl(UnreachableData *Data, ReportOptions Opts) { ScopedReport R(Opts, Data->Loc, ErrorType::UnreachableCall); Diag(Data->Loc, DL_Error, "execution reached a __builtin_unreachable() call"); } void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) { GET_REPORT_OPTIONS(true); handleBuiltinUnreachableImpl(Data, Opts); Die(); } static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) { ScopedReport R(Opts, Data->Loc, ErrorType::MissingReturn); Diag(Data->Loc, DL_Error, "execution reached the end of a value-returning function " "without returning a value"); } void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) { GET_REPORT_OPTIONS(true); handleMissingReturnImpl(Data, Opts); Die(); } static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::NonPositiveVLAIndex; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "variable length array bound evaluates to " "non-positive value %0") << Value(Data->Type, Bound); } void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data, ValueHandle Bound) { GET_REPORT_OPTIONS(false); handleVLABoundNotPositive(Data, Bound, Opts); } void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data, ValueHandle Bound) { GET_REPORT_OPTIONS(true); handleVLABoundNotPositive(Data, Bound, Opts); Die(); } static bool looksLikeFloatCastOverflowDataV1(void *Data) { // First field is either a pointer to filename or a pointer to a // TypeDescriptor. u8 *FilenameOrTypeDescriptor; internal_memcpy(&FilenameOrTypeDescriptor, Data, sizeof(FilenameOrTypeDescriptor)); // Heuristic: For float_cast_overflow, the TypeKind will be either TK_Integer // (0x0), TK_Float (0x1) or TK_Unknown (0xff). If both types are known, // adding both bytes will be 0 or 1 (for BE or LE). If it were a filename, // adding two printable characters will not yield such a value. Otherwise, // if one of them is 0xff, this is most likely TK_Unknown type descriptor. 
u16 MaybeFromTypeKind = FilenameOrTypeDescriptor[0] + FilenameOrTypeDescriptor[1]; return MaybeFromTypeKind < 2 || FilenameOrTypeDescriptor[0] == 0xff || FilenameOrTypeDescriptor[1] == 0xff; } static void handleFloatCastOverflow(void *DataPtr, ValueHandle From, ReportOptions Opts) { SymbolizedStackHolder CallerLoc; Location Loc; const TypeDescriptor *FromType, *ToType; ErrorType ET = ErrorType::FloatCastOverflow; if (looksLikeFloatCastOverflowDataV1(DataPtr)) { auto Data = reinterpret_cast(DataPtr); CallerLoc.reset(getCallerLocation(Opts.pc)); Loc = CallerLoc; FromType = &Data->FromType; ToType = &Data->ToType; } else { auto Data = reinterpret_cast(DataPtr); SourceLocation SLoc = Data->Loc.acquire(); if (ignoreReport(SLoc, Opts, ET)) return; Loc = SLoc; FromType = &Data->FromType; ToType = &Data->ToType; } ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "%0 is outside the range of representable values of type %2") << Value(*FromType, From) << *FromType << *ToType; } void __ubsan::__ubsan_handle_float_cast_overflow(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(false); handleFloatCastOverflow(Data, From, Opts); } void __ubsan::__ubsan_handle_float_cast_overflow_abort(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(true); handleFloatCastOverflow(Data, From, Opts); Die(); } static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); // This check could be more precise if we used different handlers for // -fsanitize=bool and -fsanitize=enum. bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")) || (0 == internal_strncmp(Data->Type.getTypeName(), "'BOOL'", 6)); ErrorType ET = IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "load of value %0, which is not a valid value for type %1") << Value(Data->Type, Val) << Data->Type; } void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data, ValueHandle Val) { GET_REPORT_OPTIONS(false); handleLoadInvalidValue(Data, Val, Opts); } void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data, ValueHandle Val) { GET_REPORT_OPTIONS(true); handleLoadInvalidValue(Data, Val, Opts); Die(); } static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data, ValueHandle Function, ReportOptions Opts) { SourceLocation CallLoc = Data->Loc.acquire(); ErrorType ET = ErrorType::FunctionTypeMismatch; if (ignoreReport(CallLoc, Opts, ET)) return; ScopedReport R(Opts, CallLoc, ET); SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); const char *FName = FLoc.get()->info.function; if (!FName) FName = "(unknown)"; Diag(CallLoc, DL_Error, "call to function %0 through pointer to incorrect function type %1") << FName << Data->Type; Diag(FLoc, DL_Note, "%0 defined here") << FName; } void __ubsan::__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data, ValueHandle Function) { GET_REPORT_OPTIONS(false); handleFunctionTypeMismatch(Data, Function, Opts); } void __ubsan::__ubsan_handle_function_type_mismatch_abort( FunctionTypeMismatchData *Data, ValueHandle Function) { GET_REPORT_OPTIONS(true); handleFunctionTypeMismatch(Data, Function, Opts); Die(); } static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr, ReportOptions Opts, bool IsAttr) { if (!LocPtr) UNREACHABLE("source location pointer is null!"); SourceLocation Loc = LocPtr->acquire(); ErrorType ET = ErrorType::InvalidNullReturn; if 
 static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val,
                                    ReportOptions Opts) {
   SourceLocation Loc = Data->Loc.acquire();
   // This check could be more precise if we used different handlers for
   // -fsanitize=bool and -fsanitize=enum.
   bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")) ||
                 (0 == internal_strncmp(Data->Type.getTypeName(), "'BOOL'", 6));
   ErrorType ET =
       IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad;
 
   if (ignoreReport(Loc, Opts, ET))
     return;
 
   ScopedReport R(Opts, Loc, ET);
 
   Diag(Loc, DL_Error,
        "load of value %0, which is not a valid value for type %1")
       << Value(Data->Type, Val) << Data->Type;
 }
 
 void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data,
                                                 ValueHandle Val) {
   GET_REPORT_OPTIONS(false);
   handleLoadInvalidValue(Data, Val, Opts);
 }
 void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data,
                                                       ValueHandle Val) {
   GET_REPORT_OPTIONS(true);
   handleLoadInvalidValue(Data, Val, Opts);
   Die();
 }
 
 static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
                                        ValueHandle Function,
                                        ReportOptions Opts) {
   SourceLocation CallLoc = Data->Loc.acquire();
   ErrorType ET = ErrorType::FunctionTypeMismatch;
 
   if (ignoreReport(CallLoc, Opts, ET))
     return;
 
   ScopedReport R(Opts, CallLoc, ET);
 
   SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
   const char *FName = FLoc.get()->info.function;
   if (!FName)
     FName = "(unknown)";
 
   Diag(CallLoc, DL_Error,
        "call to function %0 through pointer to incorrect function type %1")
       << FName << Data->Type;
   Diag(FLoc, DL_Note, "%0 defined here") << FName;
 }
 
 void __ubsan::__ubsan_handle_function_type_mismatch(
     FunctionTypeMismatchData *Data, ValueHandle Function) {
   GET_REPORT_OPTIONS(false);
   handleFunctionTypeMismatch(Data, Function, Opts);
 }
 void __ubsan::__ubsan_handle_function_type_mismatch_abort(
     FunctionTypeMismatchData *Data, ValueHandle Function) {
   GET_REPORT_OPTIONS(true);
   handleFunctionTypeMismatch(Data, Function, Opts);
   Die();
 }
 
 static void handleNonNullReturn(NonNullReturnData *Data,
                                 SourceLocation *LocPtr, ReportOptions Opts,
                                 bool IsAttr) {
   if (!LocPtr)
     UNREACHABLE("source location pointer is null!");
 
   SourceLocation Loc = LocPtr->acquire();
   ErrorType ET = ErrorType::InvalidNullReturn;
 
   if (ignoreReport(Loc, Opts, ET))
     return;
 
   ScopedReport R(Opts, Loc, ET);
 
   Diag(Loc, DL_Error, "null pointer returned from function declared to never "
                       "return null");
   if (!Data->AttrLoc.isInvalid())
     Diag(Data->AttrLoc, DL_Note, "%0 specified here")
         << (IsAttr ? "returns_nonnull attribute"
                    : "_Nonnull return type annotation");
 }
 
 void __ubsan::__ubsan_handle_nonnull_return_v1(NonNullReturnData *Data,
                                                SourceLocation *LocPtr) {
   GET_REPORT_OPTIONS(false);
   handleNonNullReturn(Data, LocPtr, Opts, true);
 }
 void __ubsan::__ubsan_handle_nonnull_return_v1_abort(NonNullReturnData *Data,
                                                      SourceLocation *LocPtr) {
   GET_REPORT_OPTIONS(true);
   handleNonNullReturn(Data, LocPtr, Opts, true);
   Die();
 }
 void __ubsan::__ubsan_handle_nullability_return_v1(NonNullReturnData *Data,
                                                    SourceLocation *LocPtr) {
   GET_REPORT_OPTIONS(false);
   handleNonNullReturn(Data, LocPtr, Opts, false);
 }
 void __ubsan::__ubsan_handle_nullability_return_v1_abort(
     NonNullReturnData *Data, SourceLocation *LocPtr) {
   GET_REPORT_OPTIONS(true);
   handleNonNullReturn(Data, LocPtr, Opts, false);
   Die();
 }
 
 static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts,
                              bool IsAttr) {
   SourceLocation Loc = Data->Loc.acquire();
   ErrorType ET = ErrorType::InvalidNullArgument;
 
   if (ignoreReport(Loc, Opts, ET))
     return;
 
   ScopedReport R(Opts, Loc, ET);
 
   Diag(Loc, DL_Error,
        "null pointer passed as argument %0, which is declared to "
        "never be null")
       << Data->ArgIndex;
   if (!Data->AttrLoc.isInvalid())
     Diag(Data->AttrLoc, DL_Note, "%0 specified here")
         << (IsAttr ? "nonnull attribute" : "_Nonnull type annotation");
 }
 
 void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) {
   GET_REPORT_OPTIONS(false);
   handleNonNullArg(Data, Opts, true);
 }
 void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) {
   GET_REPORT_OPTIONS(true);
   handleNonNullArg(Data, Opts, true);
   Die();
 }
 void __ubsan::__ubsan_handle_nullability_arg(NonNullArgData *Data) {
   GET_REPORT_OPTIONS(false);
   handleNonNullArg(Data, Opts, false);
 }
 void __ubsan::__ubsan_handle_nullability_arg_abort(NonNullArgData *Data) {
   GET_REPORT_OPTIONS(true);
   handleNonNullArg(Data, Opts, false);
   Die();
 }
 
 static void handlePointerOverflowImpl(PointerOverflowData *Data,
                                       ValueHandle Base, ValueHandle Result,
                                       ReportOptions Opts) {
   SourceLocation Loc = Data->Loc.acquire();
   ErrorType ET = ErrorType::PointerOverflow;
 
   if (ignoreReport(Loc, Opts, ET))
     return;
 
   ScopedReport R(Opts, Loc, ET);
 
-  if ((sptr(Base) >= 0) == (sptr(Result) >= 0))
-    Diag(Loc, DL_Error, "unsigned pointer index expression result is %0, "
-                        "preceding its base %1")
-        << (void *)Result << (void *)Base;
-  else
+  if ((sptr(Base) >= 0) == (sptr(Result) >= 0)) {
+    if (Base > Result)
+      Diag(Loc, DL_Error, "addition of unsigned offset to %0 overflowed to %1")
+          << (void *)Base << (void *)Result;
+    else
+      Diag(Loc, DL_Error,
+           "subtraction of unsigned offset from %0 overflowed to %1")
+          << (void *)Base << (void *)Result;
+  } else {
     Diag(Loc, DL_Error,
          "pointer index expression with base %0 overflowed to %1")
         << (void *)Base << (void *)Result;
+  }
 }
 
 void __ubsan::__ubsan_handle_pointer_overflow(PointerOverflowData *Data,
                                               ValueHandle Base,
                                               ValueHandle Result) {
   GET_REPORT_OPTIONS(false);
   handlePointerOverflowImpl(Data, Base, Result, Opts);
 }
 void __ubsan::__ubsan_handle_pointer_overflow_abort(PointerOverflowData *Data,
                                                     ValueHandle Base,
                                                     ValueHandle Result) {
   GET_REPORT_OPTIONS(true);
   handlePointerOverflowImpl(Data, Base, Result, Opts);
   Die();
 }
 
 static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function,
                               ReportOptions Opts) {
   if (Data->CheckKind != CFITCK_ICall)
     Die();
 
   SourceLocation Loc = Data->Loc.acquire();
   ErrorType ET = ErrorType::CFIBadType;
 
   if (ignoreReport(Loc, Opts, ET))
     return;
 
   ScopedReport R(Opts, Loc, ET);
 
   Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during "
                       "indirect function call")
       << Data->Type;
 
   SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
   const char *FName = FLoc.get()->info.function;
   if (!FName)
     FName = "(unknown)";
   Diag(FLoc, DL_Note, "%0 defined here") << FName;
 }
 
 namespace __ubsan {
 #ifdef UBSAN_CAN_USE_CXXABI
 SANITIZER_WEAK_ATTRIBUTE
 void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable,
                       bool ValidVtable, ReportOptions Opts);
 #else
 static void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable,
                              bool ValidVtable, ReportOptions Opts) {
   Die();
 }
 #endif
 }  // namespace __ubsan
 
 void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
                                             ValueHandle Value,
                                             uptr ValidVtable) {
   GET_REPORT_OPTIONS(false);
   if (Data->CheckKind == CFITCK_ICall)
     handleCFIBadIcall(Data, Value, Opts);
   else
     HandleCFIBadType(Data, Value, ValidVtable, Opts);
 }
 void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data,
                                                   ValueHandle Value,
                                                   uptr ValidVtable) {
   GET_REPORT_OPTIONS(true);
   if (Data->CheckKind == CFITCK_ICall)
     handleCFIBadIcall(Data, Value, Opts);
   else
     HandleCFIBadType(Data, Value, ValidVtable, Opts);
   Die();
 }
 
 #endif  // CAN_SANITIZE_UB
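The replacement hunk in handlePointerOverflowImpl above distinguishes three cases by comparing the sign bits of Base and Result viewed as signed pointers. When both addresses lie in the same half of the address space, an unsigned offset can only move the pointer towards higher addresses, so a Result below Base must come from an addition that wrapped, and a Result above it from a wrapped subtraction. A minimal standalone sketch of that decision, with intptr_t standing in for the sanitizer-internal sptr:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the message selection only; the real handler also threads
    // through the source location, report options, and deduplication.
    static const char *classify(uintptr_t Base, uintptr_t Result) {
      if (((intptr_t)Base >= 0) == ((intptr_t)Result >= 0)) {
        return Base > Result ? "addition of unsigned offset overflowed"
                             : "subtraction of unsigned offset overflowed";
      }
      return "pointer index expression overflowed";
    }

    int main() {
      char c;
      uintptr_t p = (uintptr_t)&c;
      // p + (uintptr_t)-1 wraps around to p - 1, so this reports as an
      // addition overflow, matching the first new diagnostic above.
      std::puts(classify(p, p + (uintptr_t)-1));
      return 0;
    }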
diff --git a/test/asan/TestCases/allocator_returns_null.cc b/test/asan/TestCases/allocator_returns_null.cc
index 90e25b55e727..8ce002f04d61 100644
--- a/test/asan/TestCases/allocator_returns_null.cc
+++ b/test/asan/TestCases/allocator_returns_null.cc
@@ -1,121 +1,131 @@
 // Test the behavior of malloc/calloc/realloc/new when the allocation size is
 // more than ASan allocator's max allowed one.
 // By default (allocator_may_return_null=0) the process should crash.
 // With allocator_may_return_null=1 the allocator should return 0, except the
 // operator new(), which should crash anyway (operator new(std::nothrow) should
 // return nullptr, indeed).
 //
 // RUN: %clangxx_asan -O0 %s -o %t
 // RUN: not %run %t malloc 2>&1 | FileCheck %s --check-prefix=CHECK-mCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=1 %run %t malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mNULL
 // RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t calloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-cCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=1 %run %t calloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-cNULL
 // RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t calloc-overflow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-coCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=1 %run %t calloc-overflow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-coNULL
 // RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t realloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-rCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=1 %run %t realloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-rNULL
 // RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t realloc-after-malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mrCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=1 %run %t realloc-after-malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mrNULL
 // RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t new 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=1 not %run %t new 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t new-nothrow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nnCRASH
 // RUN: %env_asan_opts=allocator_may_return_null=1 %run %t new-nothrow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nnNULL
+// UNSUPPORTED: win32
+
 #include <assert.h>
-#include <string.h>
+#include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <limits>
 #include <new>
 
 int main(int argc, char **argv) {
   // Disable stderr buffering. Needed on Windows.
   setvbuf(stderr, NULL, _IONBF, 0);
 
   assert(argc == 2);
   const char *action = argv[1];
   fprintf(stderr, "%s:\n", action);
 
   static const size_t kMaxAllowedMallocSizePlusOne =
 #if __LP64__ || defined(_WIN64)
       (1ULL << 40) + 1;
 #else
       (3UL << 30) + 1;
 #endif
 
   void *x = 0;
   if (!strcmp(action, "malloc")) {
     x = malloc(kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "calloc")) {
     x = calloc((kMaxAllowedMallocSizePlusOne / 4) + 1, 4);
   } else if (!strcmp(action, "calloc-overflow")) {
     volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
     size_t kArraySize = 4096;
     volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
     x = calloc(kArraySize, kArraySize2);
   } else if (!strcmp(action, "realloc")) {
     x = realloc(0, kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "realloc-after-malloc")) {
     char *t = (char*)malloc(100);
     *t = 42;
     x = realloc(t, kMaxAllowedMallocSizePlusOne);
     assert(*t == 42);
     free(t);
   } else if (!strcmp(action, "new")) {
     x = operator new(kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "new-nothrow")) {
     x = operator new(kMaxAllowedMallocSizePlusOne, std::nothrow);
   } else {
     assert(0);
   }
 
+  fprintf(stderr, "errno: %d\n", errno);
+
   // The NULL pointer is printed differently on different systems, while (long)0
   // is always the same.
   fprintf(stderr, "x: %lx\n", (long)x);
   free(x);
   return x != 0;
 }
 
 // CHECK-mCRASH: malloc:
 // CHECK-mCRASH: AddressSanitizer's allocator is terminating the process
 // CHECK-cCRASH: calloc:
 // CHECK-cCRASH: AddressSanitizer's allocator is terminating the process
 // CHECK-coCRASH: calloc-overflow:
 // CHECK-coCRASH: AddressSanitizer's allocator is terminating the process
 // CHECK-rCRASH: realloc:
 // CHECK-rCRASH: AddressSanitizer's allocator is terminating the process
 // CHECK-mrCRASH: realloc-after-malloc:
 // CHECK-mrCRASH: AddressSanitizer's allocator is terminating the process
 // CHECK-nCRASH: new:
 // CHECK-nCRASH: AddressSanitizer's allocator is terminating the process
 // CHECK-nnCRASH: new-nothrow:
 // CHECK-nnCRASH: AddressSanitizer's allocator is terminating the process
 // CHECK-mNULL: malloc:
+// CHECK-mNULL: errno: 12
 // CHECK-mNULL: x: 0
 // CHECK-cNULL: calloc:
+// CHECK-cNULL: errno: 12
 // CHECK-cNULL: x: 0
 // CHECK-coNULL: calloc-overflow:
+// CHECK-coNULL: errno: 12
 // CHECK-coNULL: x: 0
 // CHECK-rNULL: realloc:
+// CHECK-rNULL: errno: 12
 // CHECK-rNULL: x: 0
 // CHECK-mrNULL: realloc-after-malloc:
+// CHECK-mrNULL: errno: 12
 // CHECK-mrNULL: x: 0
 // CHECK-nnNULL: new-nothrow:
 // CHECK-nnNULL: x: 0
diff --git a/test/lsan/TestCases/allocator_returns_null.cc b/test/lsan/TestCases/allocator_returns_null.cc
index ab2c734e1e58..28dd696dc673 100644
--- a/test/lsan/TestCases/allocator_returns_null.cc
+++ b/test/lsan/TestCases/allocator_returns_null.cc
@@ -1,123 +1,131 @@
 // Test the behavior of malloc/calloc/realloc/new when the allocation size is
 // more than LSan allocator's max allowed one.
 // By default (allocator_may_return_null=0) the process should crash.
 // With allocator_may_return_null=1 the allocator should return 0, except the
 // operator new(), which should crash anyway (operator new(std::nothrow) should
 // return nullptr, indeed).
 //
 // RUN: %clangxx_lsan -O0 %s -o %t
 // RUN: not %run %t malloc 2>&1 | FileCheck %s --check-prefix=CHECK-mCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=0 not %run %t malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=1 %run %t malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mNULL
 // RUN: %env_lsan_opts=allocator_may_return_null=0 not %run %t calloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-cCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=1 %run %t calloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-cNULL
 // RUN: %env_lsan_opts=allocator_may_return_null=0 not %run %t calloc-overflow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-coCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=1 %run %t calloc-overflow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-coNULL
 // RUN: %env_lsan_opts=allocator_may_return_null=0 not %run %t realloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-rCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=1 %run %t realloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-rNULL
 // RUN: %env_lsan_opts=allocator_may_return_null=0 not %run %t realloc-after-malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mrCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=1 %run %t realloc-after-malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mrNULL
 // RUN: %env_lsan_opts=allocator_may_return_null=0 not %run %t new 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=1 not %run %t new 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=0 not %run %t new-nothrow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nnCRASH
 // RUN: %env_lsan_opts=allocator_may_return_null=1 %run %t new-nothrow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nnNULL
 
 #include <assert.h>
-#include <string.h>
+#include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <limits>
 #include <new>
 
 int main(int argc, char **argv) {
   // Disable stderr buffering. Needed on Windows.
   setvbuf(stderr, NULL, _IONBF, 0);
 
   assert(argc == 2);
   const char *action = argv[1];
   fprintf(stderr, "%s:\n", action);
 
   // Use max of ASan and LSan allocator limits to cover both "lsan" and
   // "lsan + asan" configs.
   static const size_t kMaxAllowedMallocSizePlusOne =
 #if __LP64__ || defined(_WIN64)
       (1ULL << 40) + 1;
 #else
       (3UL << 30) + 1;
 #endif
 
   void *x = 0;
   if (!strcmp(action, "malloc")) {
     x = malloc(kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "calloc")) {
     x = calloc((kMaxAllowedMallocSizePlusOne / 4) + 1, 4);
   } else if (!strcmp(action, "calloc-overflow")) {
     volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
     size_t kArraySize = 4096;
     volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
     x = calloc(kArraySize, kArraySize2);
   } else if (!strcmp(action, "realloc")) {
     x = realloc(0, kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "realloc-after-malloc")) {
     char *t = (char*)malloc(100);
     *t = 42;
     x = realloc(t, kMaxAllowedMallocSizePlusOne);
     assert(*t == 42);
     free(t);
   } else if (!strcmp(action, "new")) {
     x = operator new(kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "new-nothrow")) {
     x = operator new(kMaxAllowedMallocSizePlusOne, std::nothrow);
   } else {
     assert(0);
   }
 
+  fprintf(stderr, "errno: %d\n", errno);
+
   // The NULL pointer is printed differently on different systems, while (long)0
   // is always the same.
   fprintf(stderr, "x: %zu\n", (size_t)x);
   free(x);
   return x != 0;
 }
 
 // CHECK-mCRASH: malloc:
 // CHECK-mCRASH: Sanitizer's allocator is terminating the process
 // CHECK-cCRASH: calloc:
 // CHECK-cCRASH: Sanitizer's allocator is terminating the process
 // CHECK-coCRASH: calloc-overflow:
 // CHECK-coCRASH: Sanitizer's allocator is terminating the process
 // CHECK-rCRASH: realloc:
 // CHECK-rCRASH: Sanitizer's allocator is terminating the process
 // CHECK-mrCRASH: realloc-after-malloc:
 // CHECK-mrCRASH: Sanitizer's allocator is terminating the process
 // CHECK-nCRASH: new:
 // CHECK-nCRASH: Sanitizer's allocator is terminating the process
 // CHECK-nnCRASH: new-nothrow:
 // CHECK-nnCRASH: Sanitizer's allocator is terminating the process
 // CHECK-mNULL: malloc:
+// CHECK-mNULL: errno: 12
 // CHECK-mNULL: x: 0
 // CHECK-cNULL: calloc:
+// CHECK-cNULL: errno: 12
 // CHECK-cNULL: x: 0
 // CHECK-coNULL: calloc-overflow:
+// CHECK-coNULL: errno: 12
 // CHECK-coNULL: x: 0
 // CHECK-rNULL: realloc:
+// CHECK-rNULL: errno: 12
 // CHECK-rNULL: x: 0
 // CHECK-mrNULL: realloc-after-malloc:
+// CHECK-mrNULL: errno: 12
 // CHECK-mrNULL: x: 0
 // CHECK-nnNULL: new-nothrow:
 // CHECK-nnNULL: x: 0
diff --git a/test/msan/allocator_returns_null.cc b/test/msan/allocator_returns_null.cc
index 2c7c32d404fc..583b5b4f76be 100644
--- a/test/msan/allocator_returns_null.cc
+++ b/test/msan/allocator_returns_null.cc
@@ -1,122 +1,131 @@
 // Test the behavior of malloc/calloc/realloc/new when the allocation size is
 // more than MSan allocator's max allowed one.
 // By default (allocator_may_return_null=0) the process should crash.
 // With allocator_may_return_null=1 the allocator should return 0, except the
 // operator new(), which should crash anyway (operator new(std::nothrow) should
 // return nullptr, indeed).
 //
 // RUN: %clangxx_msan -O0 %s -o %t
 // RUN: not %run %t malloc 2>&1 | FileCheck %s --check-prefix=CHECK-mCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=0 not %run %t malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=1 %run %t malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mNULL
 // RUN: MSAN_OPTIONS=allocator_may_return_null=0 not %run %t calloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-cCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=1 %run %t calloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-cNULL
 // RUN: MSAN_OPTIONS=allocator_may_return_null=0 not %run %t calloc-overflow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-coCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=1 %run %t calloc-overflow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-coNULL
 // RUN: MSAN_OPTIONS=allocator_may_return_null=0 not %run %t realloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-rCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=1 %run %t realloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-rNULL
 // RUN: MSAN_OPTIONS=allocator_may_return_null=0 not %run %t realloc-after-malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mrCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=1 %run %t realloc-after-malloc 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-mrNULL
 // RUN: MSAN_OPTIONS=allocator_may_return_null=0 not %run %t new 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=1 not %run %t new 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=0 not %run %t new-nothrow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nnCRASH
 // RUN: MSAN_OPTIONS=allocator_may_return_null=1 %run %t new-nothrow 2>&1 \
 // RUN:   | FileCheck %s --check-prefix=CHECK-nnNULL
+// UNSUPPORTED: win32
 
 #include <assert.h>
-#include <string.h>
+#include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <limits>
 #include <new>
 
 int main(int argc, char **argv) {
   // Disable stderr buffering. Needed on Windows.
   setvbuf(stderr, NULL, _IONBF, 0);
 
   assert(argc == 2);
   const char *action = argv[1];
   fprintf(stderr, "%s:\n", action);
 
   static const size_t kMaxAllowedMallocSizePlusOne =
 #if __LP64__ || defined(_WIN64)
       (8UL << 30) + 1;
 #else
       (2UL << 30) + 1;
 #endif
 
   void *x = 0;
   if (!strcmp(action, "malloc")) {
     x = malloc(kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "calloc")) {
     x = calloc((kMaxAllowedMallocSizePlusOne / 4) + 1, 4);
   } else if (!strcmp(action, "calloc-overflow")) {
     volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
     size_t kArraySize = 4096;
     volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
     x = calloc(kArraySize, kArraySize2);
   } else if (!strcmp(action, "realloc")) {
     x = realloc(0, kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "realloc-after-malloc")) {
     char *t = (char*)malloc(100);
     *t = 42;
     x = realloc(t, kMaxAllowedMallocSizePlusOne);
     assert(*t == 42);
     free(t);
   } else if (!strcmp(action, "new")) {
     x = operator new(kMaxAllowedMallocSizePlusOne);
   } else if (!strcmp(action, "new-nothrow")) {
     x = operator new(kMaxAllowedMallocSizePlusOne, std::nothrow);
   } else {
     assert(0);
   }
 
+  fprintf(stderr, "errno: %d\n", errno);
+
   // The NULL pointer is printed differently on different systems, while (long)0
   // is always the same.
   fprintf(stderr, "x: %lx\n", (long)x);
   free(x);
   return x != 0;
 }
 
 // CHECK-mCRASH: malloc:
 // CHECK-mCRASH: MemorySanitizer's allocator is terminating the process
 // CHECK-cCRASH: calloc:
 // CHECK-cCRASH: MemorySanitizer's allocator is terminating the process
 // CHECK-coCRASH: calloc-overflow:
 // CHECK-coCRASH: MemorySanitizer's allocator is terminating the process
 // CHECK-rCRASH: realloc:
 // CHECK-rCRASH: MemorySanitizer's allocator is terminating the process
 // CHECK-mrCRASH: realloc-after-malloc:
 // CHECK-mrCRASH: MemorySanitizer's allocator is terminating the process
 // CHECK-nCRASH: new:
 // CHECK-nCRASH: MemorySanitizer's allocator is terminating the process
 // CHECK-nnCRASH: new-nothrow:
 // CHECK-nnCRASH: MemorySanitizer's allocator is terminating the process
 // CHECK-mNULL: malloc:
+// CHECK-mNULL: errno: 12
 // CHECK-mNULL: x: 0
 // CHECK-cNULL: calloc:
+// CHECK-cNULL: errno: 12
 // CHECK-cNULL: x: 0
 // CHECK-coNULL: calloc-overflow:
+// CHECK-coNULL: errno: 12
 // CHECK-coNULL: x: 0
 // CHECK-rNULL: realloc:
+// CHECK-rNULL: errno: 12
 // CHECK-rNULL: x: 0
 // CHECK-mrNULL: realloc-after-malloc:
+// CHECK-mrNULL: errno: 12
 // CHECK-mrNULL: x: 0
 // CHECK-nnNULL: new-nothrow:
 // CHECK-nnNULL: x: 0
diff --git a/test/scudo/memalign.cpp b/test/scudo/memalign.cpp
index 856128f2489f..82c54af8b0e4 100644
--- a/test/scudo/memalign.cpp
+++ b/test/scudo/memalign.cpp
@@ -1,80 +1,80 @@
 // RUN: %clang_scudo %s -o %t
 // RUN: %run %t valid 2>&1
 // RUN: %run %t invalid 2>&1
 
 // Tests that the various aligned allocation functions work as intended. Also
 // tests for the condition where the alignment is not a power of 2.
 
 #include <assert.h>
 #include <errno.h>
 #include <malloc.h>
 #include <stdlib.h>
 #include <string.h>
 
 // Reduce the size of the quarantine, or the test can run out of aligned memory
 // on 32-bit for the larger alignments.
 extern "C" const char *__scudo_default_options() {
   return "QuarantineSizeMb=1";
 }
 
 // Sometimes the headers may not have this...
 extern "C" void *aligned_alloc (size_t alignment, size_t size);
 
 int main(int argc, char **argv) {
   void *p = nullptr;
   size_t alignment = 1U << 12;
   size_t size = 1U << 12;
   int err;
 
   assert(argc == 2);
 
   if (!strcmp(argv[1], "valid")) {
     posix_memalign(&p, alignment, size);
     assert(p);
     free(p);
     p = aligned_alloc(alignment, size);
     assert(p);
     free(p);
     // Tests various combinations of alignment and sizes
     for (int i = (sizeof(void *) == 4) ? 3 : 4; i < 19; i++) {
       alignment = 1U << i;
       for (int j = 1; j < 33; j++) {
         size = 0x800 * j;
         for (int k = 0; k < 3; k++) {
           p = memalign(alignment, size - (2 * sizeof(void *) * k));
           assert(p);
           free(p);
         }
       }
     }
     // For larger alignment, reduce the number of allocations to avoid running
     // out of potential addresses (on 32-bit).
     for (int i = 19; i <= 24; i++) {
       for (int k = 0; k < 3; k++) {
         p = memalign(alignment, 0x1000 - (2 * sizeof(void *) * k));
         assert(p);
         free(p);
       }
     }
   }
   if (!strcmp(argv[1], "invalid")) {
     // Alignment is not a power of 2.
     p = memalign(alignment - 1, size);
     assert(!p);
     // Size is not a multiple of alignment.
     p = aligned_alloc(alignment, size >> 1);
     assert(!p);
-    p = (void *)0x42UL;
+    void *p_unchanged = (void *)0x42UL;
+    p = p_unchanged;
     // Alignment is not a power of 2.
     err = posix_memalign(&p, 3, size);
-    assert(!p);
+    assert(p == p_unchanged);
     assert(err == EINVAL);
-    p = (void *)0x42UL;
     // Alignment is a power of 2, but not a multiple of size(void *).
     err = posix_memalign(&p, 2, size);
-    assert(!p);
+    assert(p == p_unchanged);
     assert(err == EINVAL);
   }
   return 0;
 }
diff --git a/test/tsan/Linux/check_memcpy.cc b/test/tsan/Linux/check_memcpy.cc
index 8ad04c07cf51..b81efa42ad52 100644
--- a/test/tsan/Linux/check_memcpy.cc
+++ b/test/tsan/Linux/check_memcpy.cc
@@ -1,15 +1,17 @@
 // Test that verifies TSan runtime doesn't contain compiler-emitted
 // memcpy/memmove calls. It builds the binary with TSan and passes it to
 // check_memcpy.sh script.
 
 // RUN: %clangxx_tsan -O1 %s -o %t
 // RUN: llvm-objdump -d %t | FileCheck %s
 
+// REQUIRES: compiler-rt-optimized
+
 int main() {
   return 0;
 }
 
 // CHECK-NOT: callq {{.*<(__interceptor_)?mem(cpy|set)>}}
 // tail calls:
 // CHECK-NOT: jmpq {{.*<(__interceptor_)?mem(cpy|set)>}}
diff --git a/test/ubsan/TestCases/Pointer/unsigned-index-expression.cpp b/test/ubsan/TestCases/Pointer/unsigned-index-expression.cpp
index 991374b5a676..0002c713f866 100644
--- a/test/ubsan/TestCases/Pointer/unsigned-index-expression.cpp
+++ b/test/ubsan/TestCases/Pointer/unsigned-index-expression.cpp
@@ -1,13 +1,20 @@
-// RUN: %clangxx -fsanitize=pointer-overflow %s -o %t
+// RUN: %clangxx -std=c++11 -fsanitize=pointer-overflow %s -o %t
 // RUN: %t 2>&1 | FileCheck %s
 
 int main(int argc, char *argv[]) {
   char c;
   char *p = &c;
-  unsigned long long offset = -1;
+  unsigned long long neg_1 = -1;
 
-  // CHECK: unsigned-index-expression.cpp:[[@LINE+1]]:15: runtime error: unsigned pointer index expression result is 0x{{.*}}, preceding its base 0x{{.*}}
-  char *q = p + offset;
+  // CHECK: unsigned-index-expression.cpp:[[@LINE+1]]:15: runtime error: addition of unsigned offset to 0x{{.*}} overflowed to 0x{{.*}}
+  char *q = p + neg_1;
+
+  // CHECK: unsigned-index-expression.cpp:[[@LINE+1]]:16: runtime error: subtraction of unsigned offset from 0x{{.*}} overflowed to 0x{{.*}}
+  char *q1 = p - neg_1;
+
+  // CHECK: unsigned-index-expression.cpp:[[@LINE+2]]:16: runtime error: pointer index expression with base 0x{{0*}} overflowed to 0x{{.*}}
+  char *n = nullptr;
+  char *q2 = n - 1ULL;
 
   return 0;
 }