Index: vendor/compiler-rt/dist/CMakeLists.txt
===================================================================
--- vendor/compiler-rt/dist/CMakeLists.txt	(revision 320960)
+++ vendor/compiler-rt/dist/CMakeLists.txt	(revision 320961)
@@ -1,271 +1,271 @@
# CMake build for CompilerRT.
#
# This build assumes that CompilerRT is checked out into the
# 'projects/compiler-rt' or 'runtimes/compiler-rt' inside of an LLVM tree.
# Standalone build system for CompilerRT is not yet ready.
#
# An important constraint of the build is that it only produces libraries
# based on the ability of the host toolchain to target various platforms.

cmake_minimum_required(VERSION 3.4.3)

# Check if compiler-rt is built as a standalone project.
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR OR COMPILER_RT_STANDALONE_BUILD)
  project(CompilerRT C CXX ASM)
  set(COMPILER_RT_STANDALONE_BUILD TRUE)
endif()

# Add path for custom compiler-rt modules.
list(INSERT CMAKE_MODULE_PATH 0
  "${CMAKE_CURRENT_SOURCE_DIR}/cmake"
  "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules"
  )

include(base-config-ix)
include(CompilerRTUtils)

option(COMPILER_RT_BUILD_BUILTINS "Build builtins" ON)
mark_as_advanced(COMPILER_RT_BUILD_BUILTINS)
option(COMPILER_RT_BUILD_SANITIZERS "Build sanitizers" ON)
mark_as_advanced(COMPILER_RT_BUILD_SANITIZERS)
option(COMPILER_RT_BUILD_XRAY "Build xray" ON)
mark_as_advanced(COMPILER_RT_BUILD_XRAY)

set(COMPILER_RT_BAREMETAL_BUILD OFF CACHE BOOL "Build for a bare-metal target.")

if (COMPILER_RT_STANDALONE_BUILD)
  load_llvm_config()

  # Find Python interpreter.
  set(Python_ADDITIONAL_VERSIONS 2.7 2.6 2.5)
  include(FindPythonInterp)
  if(NOT PYTHONINTERP_FOUND)
    message(FATAL_ERROR "
      Unable to find Python interpreter required for testing.
      Please install Python or specify the PYTHON_EXECUTABLE CMake variable.")
  endif()

  # Define default arguments to lit.
  set(LIT_ARGS_DEFAULT "-sv")
  if (MSVC OR XCODE)
    set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
  endif()
  set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
endif()

construct_compiler_rt_default_triple()
if ("${COMPILER_RT_DEFAULT_TARGET_ABI}" STREQUAL "androideabi")
  set(ANDROID 1)
endif()

set(COMPILER_RT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(COMPILER_RT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})

# We support running instrumented tests when we're not cross-compiling
# and target a UNIX-like system or Windows.
# We can run tests on Android even when we are cross-compiling.
if(("${CMAKE_HOST_SYSTEM}" STREQUAL "${CMAKE_SYSTEM}" AND (UNIX OR WIN32)) OR ANDROID OR COMPILER_RT_EMULATOR)
  option(COMPILER_RT_CAN_EXECUTE_TESTS "Can we execute instrumented tests" ON)
else()
  option(COMPILER_RT_CAN_EXECUTE_TESTS "Can we execute instrumented tests" OFF)
endif()

option(COMPILER_RT_DEBUG "Build runtimes with full debug info" OFF)
option(COMPILER_RT_EXTERNALIZE_DEBUGINFO
  "Generate dSYM files and strip executables and libraries (Darwin Only)" OFF)
# COMPILER_RT_DEBUG_PYBOOL is used by lit.common.configured.in.
pythonize_bool(COMPILER_RT_DEBUG)

include(config-ix)

-if(APPLE AND SANITIZER_MIN_OSX_VERSION VERSION_LESS "10.9")
+if(APPLE AND SANITIZER_MIN_OSX_VERSION AND SANITIZER_MIN_OSX_VERSION VERSION_LESS "10.9")
  # Mac OS X prior to 10.9 had problems with exporting symbols from
  # libc++/libc++abi.
  set(use_cxxabi_default OFF)
elseif(MSVC)
  set(use_cxxabi_default OFF)
else()
  set(use_cxxabi_default ON)
endif()

option(SANITIZER_CAN_USE_CXXABI "Sanitizers can use cxxabi" ${use_cxxabi_default})
pythonize_bool(SANITIZER_CAN_USE_CXXABI)

#================================
# Setup Compiler Flags
#================================

if(MSVC)
  # Override any existing /W flags with /W4. This is what LLVM does. Failing to
  # remove other /W[0-4] flags will result in a warning about overriding a
  # previous flag.
  if (COMPILER_RT_HAS_W4_FLAG)
    string(REGEX REPLACE " /W[0-4]" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
    string(REGEX REPLACE " /W[0-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
    append_string_if(COMPILER_RT_HAS_W4_FLAG /W4 CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
  endif()
else()
  append_string_if(COMPILER_RT_HAS_WALL_FLAG -Wall CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
if(COMPILER_RT_ENABLE_WERROR)
  append_string_if(COMPILER_RT_HAS_WERROR_FLAG -Werror CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
  append_string_if(COMPILER_RT_HAS_WX_FLAG /WX CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()

append_string_if(COMPILER_RT_HAS_STD_CXX11_FLAG -std=c++11 CMAKE_CXX_FLAGS)

# Emulate C99 and C++11's __func__ for MSVC prior to 2013 CTP.
if(NOT COMPILER_RT_HAS_FUNC_SYMBOL)
  add_definitions(-D__func__=__FUNCTION__)
endif()

# Provide some common command-line flags for Sanitizer runtimes.
if(NOT WIN32)
  append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC SANITIZER_COMMON_CFLAGS)
endif()
append_list_if(COMPILER_RT_HAS_FNO_BUILTIN_FLAG -fno-builtin SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG -fno-exceptions SANITIZER_COMMON_CFLAGS)
if(NOT COMPILER_RT_DEBUG AND NOT APPLE)
  append_list_if(COMPILER_RT_HAS_FOMIT_FRAME_POINTER_FLAG -fomit-frame-pointer SANITIZER_COMMON_CFLAGS)
endif()
append_list_if(COMPILER_RT_HAS_FUNWIND_TABLES_FLAG -funwind-tables SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FNO_STACK_PROTECTOR_FLAG -fno-stack-protector SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FNO_SANITIZE_SAFE_STACK_FLAG -fno-sanitize=safe-stack SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG -fvisibility=hidden SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FVISIBILITY_INLINES_HIDDEN_FLAG -fvisibility-inlines-hidden SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto SANITIZER_COMMON_CFLAGS)

# The following is a workaround for powerpc64le. This is the only architecture
# that requires -fno-function-sections to work properly. If lacking, the ASan
# Linux test function-sections-are-bad.cc fails with the following error:
# 'undefined symbol: __sanitizer_unaligned_load32'.
if(DEFINED TARGET_powerpc64le_CFLAGS)
  append_list_if(COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG -fno-function-sections TARGET_powerpc64le_CFLAGS)
endif()

if(MSVC)
  # Replace the /M[DT][d] flags with /MT, and strip any definitions of _DEBUG,
  # which cause definition mismatches at link time.
  # FIXME: In fact, sanitizers should support both /MT and /MD, see PR20214.
  if(COMPILER_RT_HAS_MT_FLAG)
    foreach(flag_var
      CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
      CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO
      CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
      CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
      string(REGEX REPLACE "/M[DT]d" "/MT" ${flag_var} "${${flag_var}}")
      string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
      string(REGEX REPLACE "/D_DEBUG" "" ${flag_var} "${${flag_var}}")
    endforeach()
  endif()
  append_list_if(COMPILER_RT_HAS_Oy_FLAG /Oy- SANITIZER_COMMON_CFLAGS)
  append_list_if(COMPILER_RT_HAS_GS_FLAG /GS- SANITIZER_COMMON_CFLAGS)
  # VS 2015 (version 1900) added support for thread safe static initialization.
  # However, ASan interceptors run before CRT initialization, which causes the
  # new thread safe code to crash. Disable this feature for now.
  if (MSVC_VERSION GREATER 1899 OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
    list(APPEND SANITIZER_COMMON_CFLAGS /Zc:threadSafeInit-)
  endif()
endif()

append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 SANITIZER_COMMON_CFLAGS)

# Build with optimization, unless we're in debug mode. If we're using MSVC,
# always respect the optimization flags set by CMAKE_BUILD_TYPE instead.
if(NOT COMPILER_RT_DEBUG AND NOT MSVC)
  list(APPEND SANITIZER_COMMON_CFLAGS -O3)
endif()

# Determine if we should restrict stack frame sizes.
# Stack frames on PowerPC and MIPS and in debug builds can be much larger than
# anticipated.
# FIXME: Fix all sanitizers and add -Wframe-larger-than to
# SANITIZER_COMMON_FLAGS
if(COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG AND NOT COMPILER_RT_DEBUG
   AND NOT ${COMPILER_RT_DEFAULT_TARGET_ARCH} MATCHES "powerpc|mips")
  set(SANITIZER_LIMIT_FRAME_SIZE TRUE)
else()
  set(SANITIZER_LIMIT_FRAME_SIZE FALSE)
endif()

# Build sanitizer runtimes with debug info.
if(COMPILER_RT_HAS_GLINE_TABLES_ONLY_FLAG AND NOT COMPILER_RT_DEBUG)
  list(APPEND SANITIZER_COMMON_CFLAGS -gline-tables-only)
elseif(COMPILER_RT_HAS_G_FLAG)
  list(APPEND SANITIZER_COMMON_CFLAGS -g)
elseif(MSVC)
  # Use /Z7 instead of /Zi for the asan runtime. This avoids the LNK4099
  # warning from the MS linker complaining that it can't find the 'vc140.pdb'
  # file used by our object library compilations.
  list(APPEND SANITIZER_COMMON_CFLAGS /Z7)
  llvm_replace_compiler_option(CMAKE_CXX_FLAGS "/Z[i7I]" "/Z7")
  llvm_replace_compiler_option(CMAKE_CXX_FLAGS_DEBUG "/Z[i7I]" "/Z7")
  llvm_replace_compiler_option(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/Z[i7I]" "/Z7")
endif()

if(LLVM_ENABLE_MODULES)
  # Sanitizers cannot be built with -fmodules. The interceptors intentionally
  # don't include system headers, which is incompatible with modules.
  list(APPEND SANITIZER_COMMON_CFLAGS -fno-modules)
endif()

# Turn off several warnings.
append_list_if(COMPILER_RT_HAS_WGNU_FLAG -Wno-gnu SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG -Wno-variadic-macros SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WC99_EXTENSIONS_FLAG -Wno-c99-extensions SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WNON_VIRTUAL_DTOR_FLAG -Wno-non-virtual-dtor SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4146_FLAG /wd4146 SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4291_FLAG /wd4291 SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4391_FLAG /wd4391 SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4722_FLAG /wd4722 SANITIZER_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_WD4800_FLAG /wd4800 SANITIZER_COMMON_CFLAGS)

# Warnings to turn off for all libraries, not just sanitizers.
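The suppression applied below, like the appends above, goes through the append_string_if / append_list_if helpers from cmake/Modules/CompilerRTUtils.cmake (included near the top of this file). A minimal sketch of the idiom, where the example_-prefixed macros are illustrative stand-ins rather than the upstream helpers:

    # Illustrative sketch of the conditional-append idiom; the real helpers
    # live in cmake/Modules/CompilerRTUtils.cmake.
    macro(example_append_list_if condition value list)
      if(${condition})
        list(APPEND ${list} ${value})
      endif()
    endmacro()

    macro(example_append_string_if condition value)
      if(${condition})
        foreach(variable ${ARGN})
          set(${variable} "${${variable}} ${value}")
        endforeach()
      endif()
    endmacro()

With COMPILER_RT_HAS_WALL_FLAG set by a check_cxx_compiler_flag probe in config-ix.cmake, a call like example_append_string_if(COMPILER_RT_HAS_WALL_FLAG -Wall CMAKE_C_FLAGS CMAKE_CXX_FLAGS) mirrors the -Wall handling earlier in this file.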
append_string_if(COMPILER_RT_HAS_WUNUSED_PARAMETER_FLAG -Wno-unused-parameter CMAKE_C_FLAGS CMAKE_CXX_FLAGS)

if (CMAKE_LINKER MATCHES "link.exe$")
  # Silence MSVC linker warnings caused by empty object files. The
  # sanitizer libraries intentionally use ifdefs that result in empty
  # files, rather than skipping these files in the build system.
  # Ideally, we would pass this flag only for the libraries that need
  # it, but CMake doesn't seem to have a way to set linker flags for
  # individual static libraries, so we enable the suppression flag for
  # the whole compiler-rt project.
  append("/IGNORE:4221" CMAKE_STATIC_LINKER_FLAGS)
endif()

add_subdirectory(include)

set(COMPILER_RT_LIBCXX_PATH ${LLVM_MAIN_SRC_DIR}/projects/libcxx)
if(EXISTS ${COMPILER_RT_LIBCXX_PATH}/)
  set(COMPILER_RT_HAS_LIBCXX_SOURCES TRUE)
else()
  set(COMPILER_RT_LIBCXX_PATH ${LLVM_MAIN_SRC_DIR}/../libcxx)
  if(EXISTS ${COMPILER_RT_LIBCXX_PATH}/)
    set(COMPILER_RT_HAS_LIBCXX_SOURCES TRUE)
  else()
    set(COMPILER_RT_HAS_LIBCXX_SOURCES FALSE)
  endif()
endif()

set(COMPILER_RT_LLD_PATH ${LLVM_MAIN_SRC_DIR}/tools/lld)
if(EXISTS ${COMPILER_RT_LLD_PATH}/ AND LLVM_TOOL_LLD_BUILD)
  set(COMPILER_RT_HAS_LLD TRUE)
else()
  set(COMPILER_RT_LLD_PATH ${LLVM_MAIN_SRC_DIR}/../lld)
  if(EXISTS ${COMPILER_RT_LLD_PATH}/ AND LLVM_TOOL_LLD_BUILD)
    set(COMPILER_RT_HAS_LLD TRUE)
  else()
    set(COMPILER_RT_HAS_LLD FALSE)
  endif()
endif()
pythonize_bool(COMPILER_RT_HAS_LLD)

add_subdirectory(lib)

if(COMPILER_RT_INCLUDE_TESTS)
  add_subdirectory(unittests)
  add_subdirectory(test)
endif()

Index: vendor/compiler-rt/dist/cmake/Modules/CompilerRTDarwinUtils.cmake
===================================================================
--- vendor/compiler-rt/dist/cmake/Modules/CompilerRTDarwinUtils.cmake	(revision 320960)
+++ vendor/compiler-rt/dist/cmake/Modules/CompilerRTDarwinUtils.cmake	(revision 320961)
@@ -1,444 +1,454 @@
include(CMakeParseArguments)

# On OS X SDKs can be installed anywhere on the base system and xcode-select can
# set the default Xcode to use. This function finds the SDKs that are present in
# the current Xcode.
function(find_darwin_sdk_dir var sdk_name)
-  # Let's first try the internal SDK, otherwise use the public SDK.
-  execute_process(
-    COMMAND xcodebuild -version -sdk ${sdk_name}.internal Path
-    RESULT_VARIABLE result_process
-    OUTPUT_VARIABLE var_internal
-    OUTPUT_STRIP_TRAILING_WHITESPACE
-    ERROR_FILE /dev/null
-  )
+  set(DARWIN_${sdk_name}_CACHED_SYSROOT "" CACHE STRING "Darwin SDK path for SDK ${sdk_name}.")
+  set(DARWIN_PREFER_PUBLIC_SDK OFF CACHE BOOL "Prefer Darwin public SDK, even when an internal SDK is present.")
+
+  if(DARWIN_${sdk_name}_CACHED_SYSROOT)
+    set(${var} ${DARWIN_${sdk_name}_CACHED_SYSROOT} PARENT_SCOPE)
+    return()
+  endif()
+  if(NOT DARWIN_PREFER_PUBLIC_SDK)
+    # Let's first try the internal SDK, otherwise use the public SDK.
+    execute_process(
+      COMMAND xcodebuild -version -sdk ${sdk_name}.internal Path
+      RESULT_VARIABLE result_process
+      OUTPUT_VARIABLE var_internal
+      OUTPUT_STRIP_TRAILING_WHITESPACE
+      ERROR_FILE /dev/null
+    )
+  endif()
  if((NOT result_process EQUAL 0) OR "" STREQUAL "${var_internal}")
    execute_process(
      COMMAND xcodebuild -version -sdk ${sdk_name} Path
      RESULT_VARIABLE result_process
      OUTPUT_VARIABLE var_internal
      OUTPUT_STRIP_TRAILING_WHITESPACE
      ERROR_FILE /dev/null
    )
  else()
    set(${var}_INTERNAL ${var_internal} PARENT_SCOPE)
  endif()
  if(result_process EQUAL 0)
    set(${var} ${var_internal} PARENT_SCOPE)
  endif()
+  set(DARWIN_${sdk_name}_CACHED_SYSROOT ${var_internal} CACHE STRING "Darwin SDK path for SDK ${sdk_name}."
+      FORCE)
endfunction()

# There isn't a clear mapping of what architectures are supported with a given
# target platform, but ld's version output does list the architectures it can
# link for.
function(darwin_get_toolchain_supported_archs output_var)
  execute_process(
    COMMAND ld -v
    ERROR_VARIABLE LINKER_VERSION)

  string(REGEX MATCH "configured to support archs: ([^\n]+)"
         ARCHES_MATCHED "${LINKER_VERSION}")
  if(ARCHES_MATCHED)
    set(ARCHES "${CMAKE_MATCH_1}")
    message(STATUS "Got ld supported ARCHES: ${ARCHES}")
    string(REPLACE " " ";" ARCHES ${ARCHES})
  else()
    # If auto-detecting fails, fall back to a default set
    message(WARNING "Detecting supported architectures from 'ld -v' failed. Returning default set.")
    set(ARCHES "i386;x86_64;armv7;armv7s;arm64")
  endif()

  set(${output_var} ${ARCHES} PARENT_SCOPE)
endfunction()

# This function takes an OS and a list of architectures and identifies the
# subset of the architectures list that the installed toolchain can target.
function(darwin_test_archs os valid_archs)
  if(${valid_archs})
    message(STATUS "Using cached valid architectures for ${os}.")
    return()
  endif()

  set(archs ${ARGN})
  if(NOT TEST_COMPILE_ONLY)
    message(STATUS "Finding valid architectures for ${os}...")
    set(SIMPLE_C ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/src.c)
    file(WRITE ${SIMPLE_C} "#include <stdio.h>\nint main() { printf(__FILE__); return 0; }\n")

    set(os_linker_flags)
    foreach(flag ${DARWIN_${os}_LINK_FLAGS})
      set(os_linker_flags "${os_linker_flags} ${flag}")
    endforeach()
  endif()

  # The simple program will build for x86_64h on the simulator because it is
  # compatible with x86_64 libraries (mostly), but since x86_64h isn't actually
  # a valid or useful architecture for the iOS simulator we should drop it.
  if(${os} MATCHES "^(iossim|tvossim|watchossim)$")
    list(REMOVE_ITEM archs "x86_64h")
  endif()

  set(working_archs)
  foreach(arch ${archs})
    set(arch_linker_flags "-arch ${arch} ${os_linker_flags}")
    if(TEST_COMPILE_ONLY)
      try_compile_only(CAN_TARGET_${os}_${arch} -v -arch ${arch} ${DARWIN_${os}_CFLAGS})
    else()
      set(SAVED_CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS})
      set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${arch_linker_flags}")
      try_compile(CAN_TARGET_${os}_${arch} ${CMAKE_BINARY_DIR} ${SIMPLE_C}
                  COMPILE_DEFINITIONS "-v -arch ${arch}" ${DARWIN_${os}_CFLAGS}
                  OUTPUT_VARIABLE TEST_OUTPUT)
      set(CMAKE_EXE_LINKER_FLAGS ${SAVED_CMAKE_EXE_LINKER_FLAGS})
    endif()
    if(${CAN_TARGET_${os}_${arch}})
      list(APPEND working_archs ${arch})
    else()
      file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
        "Testing compiler for supporting ${os}-${arch}:\n"
        "${TEST_OUTPUT}\n")
    endif()
  endforeach()
  set(${valid_archs} ${working_archs}
      CACHE STRING "List of valid architectures for platform ${os}.")
endfunction()

# This function checks the host cpusubtype to see if it is post-haswell. Haswell
# and later machines can run x86_64h binaries. Haswell is cpusubtype 8.
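An aside on darwin_test_archs above, before the host-arch filter that the preceding comment introduces: each probe is just a try_compile of a trivial source file with "-arch <arch>" added. A standalone sketch, with the file name and the choice of arm64 purely illustrative:

    # Standalone sketch of one per-arch probe, mirroring darwin_test_archs;
    # the path and the arm64 example are illustrative only.
    set(probe_src ${CMAKE_BINARY_DIR}/arch_probe.c)
    file(WRITE ${probe_src}
      "#include <stdio.h>\nint main() { printf(__FILE__); return 0; }\n")
    try_compile(CAN_TARGET_osx_arm64 ${CMAKE_BINARY_DIR} ${probe_src}
                COMPILE_DEFINITIONS "-v -arch arm64"
                OUTPUT_VARIABLE probe_output)
    if(NOT CAN_TARGET_osx_arm64)
      message(STATUS "arm64 probe failed:\n${probe_output}")
    endif()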
function(darwin_filter_host_archs input output) list_intersect(tmp_var DARWIN_osx_ARCHS ${input}) execute_process( COMMAND sysctl hw.cpusubtype OUTPUT_VARIABLE SUBTYPE) string(REGEX MATCH "hw.cpusubtype: ([0-9]*)" SUBTYPE_MATCHED "${SUBTYPE}") set(HASWELL_SUPPORTED Off) if(SUBTYPE_MATCHED) if(${CMAKE_MATCH_1} GREATER 7) set(HASWELL_SUPPORTED On) endif() endif() if(NOT HASWELL_SUPPORTED) list(REMOVE_ITEM tmp_var x86_64h) endif() set(${output} ${tmp_var} PARENT_SCOPE) endfunction() # Read and process the exclude file into a list of symbols function(darwin_read_list_from_file output_var file) if(EXISTS ${file}) file(READ ${file} EXCLUDES) string(REPLACE "\n" ";" EXCLUDES ${EXCLUDES}) set(${output_var} ${EXCLUDES} PARENT_SCOPE) endif() endfunction() # this function takes an OS, architecture and minimum version and provides a # list of builtin functions to exclude function(darwin_find_excluded_builtins_list output_var) cmake_parse_arguments(LIB "" "OS;ARCH;MIN_VERSION" "" ${ARGN}) if(NOT LIB_OS OR NOT LIB_ARCH) message(FATAL_ERROR "Must specify OS and ARCH to darwin_find_excluded_builtins_list!") endif() darwin_read_list_from_file(${LIB_OS}_BUILTINS ${DARWIN_EXCLUDE_DIR}/${LIB_OS}.txt) darwin_read_list_from_file(${LIB_OS}_${LIB_ARCH}_BASE_BUILTINS ${DARWIN_EXCLUDE_DIR}/${LIB_OS}-${LIB_ARCH}.txt) if(LIB_MIN_VERSION) file(GLOB builtin_lists ${DARWIN_EXCLUDE_DIR}/${LIB_OS}*-${LIB_ARCH}.txt) foreach(builtin_list ${builtin_lists}) string(REGEX MATCH "${LIB_OS}([0-9\\.]*)-${LIB_ARCH}.txt" VERSION_MATCHED "${builtin_list}") if (VERSION_MATCHED AND NOT CMAKE_MATCH_1 VERSION_LESS LIB_MIN_VERSION) if(NOT smallest_version) set(smallest_version ${CMAKE_MATCH_1}) elseif(CMAKE_MATCH_1 VERSION_LESS smallest_version) set(smallest_version ${CMAKE_MATCH_1}) endif() endif() endforeach() if(smallest_version) darwin_read_list_from_file(${LIB_ARCH}_${LIB_OS}_BUILTINS ${DARWIN_EXCLUDE_DIR}/${LIB_OS}${smallest_version}-${LIB_ARCH}.txt) endif() endif() set(${output_var} ${${LIB_ARCH}_${LIB_OS}_BUILTINS} ${${LIB_OS}_${LIB_ARCH}_BASE_BUILTINS} ${${LIB_OS}_BUILTINS} PARENT_SCOPE) endfunction() # adds a single builtin library for a single OS & ARCH macro(darwin_add_builtin_library name suffix) cmake_parse_arguments(LIB "" "PARENT_TARGET;OS;ARCH" "SOURCES;CFLAGS;DEFS" ${ARGN}) set(libname "${name}.${suffix}_${LIB_ARCH}_${LIB_OS}") add_library(${libname} STATIC ${LIB_SOURCES}) if(DARWIN_${LIB_OS}_SYSROOT) set(sysroot_flag -isysroot ${DARWIN_${LIB_OS}_SYSROOT}) endif() set_target_compile_flags(${libname} ${sysroot_flag} ${DARWIN_${LIB_OS}_BUILTIN_MIN_VER_FLAG} ${LIB_CFLAGS}) set_property(TARGET ${libname} APPEND PROPERTY COMPILE_DEFINITIONS ${LIB_DEFS}) set_target_properties(${libname} PROPERTIES OUTPUT_NAME ${libname}${COMPILER_RT_OS_SUFFIX}) set_target_properties(${libname} PROPERTIES OSX_ARCHITECTURES ${LIB_ARCH}) if(LIB_PARENT_TARGET) add_dependencies(${LIB_PARENT_TARGET} ${libname}) endif() list(APPEND ${LIB_OS}_${suffix}_libs ${libname}) list(APPEND ${LIB_OS}_${suffix}_lipo_flags -arch ${arch} $) endmacro() function(darwin_lipo_libs name) cmake_parse_arguments(LIB "" "PARENT_TARGET;OUTPUT_DIR;INSTALL_DIR" "LIPO_FLAGS;DEPENDS" ${ARGN}) if(LIB_DEPENDS AND LIB_LIPO_FLAGS) add_custom_command(OUTPUT ${LIB_OUTPUT_DIR}/lib${name}.a COMMAND ${CMAKE_COMMAND} -E make_directory ${LIB_OUTPUT_DIR} COMMAND lipo -output ${LIB_OUTPUT_DIR}/lib${name}.a -create ${LIB_LIPO_FLAGS} DEPENDS ${LIB_DEPENDS} ) add_custom_target(${name} DEPENDS ${LIB_OUTPUT_DIR}/lib${name}.a) add_dependencies(${LIB_PARENT_TARGET} ${name}) install(FILES 
${LIB_OUTPUT_DIR}/lib${name}.a DESTINATION ${LIB_INSTALL_DIR}) else() message(WARNING "Not generating lipo target for ${name} because no input libraries exist.") endif() endfunction() # Filter out generic versions of routines that are re-implemented in # architecture specific manner. This prevents multiple definitions of the # same symbols, making the symbol selection non-deterministic. function(darwin_filter_builtin_sources output_var exclude_or_include excluded_list) if(exclude_or_include STREQUAL "EXCLUDE") set(filter_action GREATER) set(filter_value -1) elseif(exclude_or_include STREQUAL "INCLUDE") set(filter_action LESS) set(filter_value 0) else() message(FATAL_ERROR "darwin_filter_builtin_sources called without EXCLUDE|INCLUDE") endif() set(intermediate ${ARGN}) foreach (_file ${intermediate}) get_filename_component(_name_we ${_file} NAME_WE) list(FIND ${excluded_list} ${_name_we} _found) if(_found ${filter_action} ${filter_value}) list(REMOVE_ITEM intermediate ${_file}) elseif(${_file} MATCHES ".*/.*\\.S" OR ${_file} MATCHES ".*/.*\\.c") get_filename_component(_name ${_file} NAME) string(REPLACE ".S" ".c" _cname "${_name}") list(REMOVE_ITEM intermediate ${_cname}) endif () endforeach () set(${output_var} ${intermediate} PARENT_SCOPE) endfunction() # Generates builtin libraries for all operating systems specified in ARGN. Each # OS library is constructed by lipo-ing together single-architecture libraries. macro(darwin_add_builtin_libraries) set(DARWIN_EXCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Darwin-excludes) set(CFLAGS "-fPIC -O3 -fvisibility=hidden -DVISIBILITY_HIDDEN -Wall -fomit-frame-pointer") set(CMAKE_C_FLAGS "") set(CMAKE_CXX_FLAGS "") set(CMAKE_ASM_FLAGS "") set(PROFILE_SOURCES ../profile/InstrProfiling ../profile/InstrProfilingBuffer ../profile/InstrProfilingPlatformDarwin ../profile/InstrProfilingWriter) foreach (os ${ARGN}) list_intersect(DARWIN_BUILTIN_ARCHS DARWIN_${os}_ARCHS BUILTIN_SUPPORTED_ARCH) foreach (arch ${DARWIN_BUILTIN_ARCHS}) darwin_find_excluded_builtins_list(${arch}_${os}_EXCLUDED_BUILTINS OS ${os} ARCH ${arch} MIN_VERSION ${DARWIN_${os}_BUILTIN_MIN_VER}) darwin_filter_builtin_sources(filtered_sources EXCLUDE ${arch}_${os}_EXCLUDED_BUILTINS ${${arch}_SOURCES}) darwin_add_builtin_library(clang_rt builtins OS ${os} ARCH ${arch} SOURCES ${filtered_sources} CFLAGS ${CFLAGS} -arch ${arch} PARENT_TARGET builtins) endforeach() # Don't build cc_kext libraries for simulator platforms if(NOT DARWIN_${os}_SKIP_CC_KEXT) foreach (arch ${DARWIN_BUILTIN_ARCHS}) # By not specifying MIN_VERSION this only reads the OS and OS-arch lists. # We don't want to filter out the builtins that are present in libSystem # because kexts can't link libSystem. 
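Stepping back to darwin_lipo_libs above: it reduces to a make_directory plus a lipo -create custom command keyed on the per-arch archives. A minimal standalone sketch, with all target and file names hypothetical:

    # Minimal sketch of the universal-archive step darwin_lipo_libs wraps;
    # every name here is hypothetical.
    add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/lib/libexample.a
      COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_BINARY_DIR}/lib
      COMMAND lipo -output ${CMAKE_BINARY_DIR}/lib/libexample.a -create
              ${CMAKE_BINARY_DIR}/libexample_x86_64.a
              ${CMAKE_BINARY_DIR}/libexample_arm64.a
      DEPENDS example_x86_64 example_arm64)
    add_custom_target(example_universal
      DEPENDS ${CMAKE_BINARY_DIR}/lib/libexample.a)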
darwin_find_excluded_builtins_list(${arch}_${os}_EXCLUDED_BUILTINS OS ${os} ARCH ${arch}) darwin_filter_builtin_sources(filtered_sources EXCLUDE ${arch}_${os}_EXCLUDED_BUILTINS ${${arch}_SOURCES}) # In addition to the builtins cc_kext includes some profile sources darwin_add_builtin_library(clang_rt cc_kext OS ${os} ARCH ${arch} SOURCES ${filtered_sources} ${PROFILE_SOURCES} CFLAGS ${CFLAGS} -arch ${arch} -mkernel DEFS KERNEL_USE PARENT_TARGET builtins) endforeach() set(archive_name clang_rt.cc_kext_${os}) if(${os} STREQUAL "osx") set(archive_name clang_rt.cc_kext) endif() darwin_lipo_libs(${archive_name} PARENT_TARGET builtins LIPO_FLAGS ${${os}_cc_kext_lipo_flags} DEPENDS ${${os}_cc_kext_libs} OUTPUT_DIR ${COMPILER_RT_LIBRARY_OUTPUT_DIR} INSTALL_DIR ${COMPILER_RT_LIBRARY_INSTALL_DIR}) endif() endforeach() # We put the x86 sim slices into the archives for their base OS foreach (os ${ARGN}) if(NOT ${os} MATCHES ".*sim$") darwin_lipo_libs(clang_rt.${os} PARENT_TARGET builtins LIPO_FLAGS ${${os}_builtins_lipo_flags} ${${os}sim_builtins_lipo_flags} DEPENDS ${${os}_builtins_libs} ${${os}sim_builtins_libs} OUTPUT_DIR ${COMPILER_RT_LIBRARY_OUTPUT_DIR} INSTALL_DIR ${COMPILER_RT_LIBRARY_INSTALL_DIR}) endif() endforeach() darwin_add_embedded_builtin_libraries() endmacro() macro(darwin_add_embedded_builtin_libraries) # this is a hacky opt-out. If you can't target both intel and arm # architectures we bail here. set(DARWIN_SOFT_FLOAT_ARCHS armv6m armv7m armv7em armv7) set(DARWIN_HARD_FLOAT_ARCHS armv7em armv7) if(COMPILER_RT_SUPPORTED_ARCH MATCHES ".*armv.*") list(FIND COMPILER_RT_SUPPORTED_ARCH i386 i386_idx) if(i386_idx GREATER -1) list(APPEND DARWIN_HARD_FLOAT_ARCHS i386) endif() list(FIND COMPILER_RT_SUPPORTED_ARCH x86_64 x86_64_idx) if(x86_64_idx GREATER -1) list(APPEND DARWIN_HARD_FLOAT_ARCHS x86_64) endif() set(MACHO_SYM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/macho_embedded) set(CFLAGS "-Oz -Wall -fomit-frame-pointer -ffreestanding") set(CMAKE_C_FLAGS "") set(CMAKE_CXX_FLAGS "") set(CMAKE_ASM_FLAGS "") set(SOFT_FLOAT_FLAG -mfloat-abi=soft) set(HARD_FLOAT_FLAG -mfloat-abi=hard) set(ENABLE_PIC Off) set(PIC_FLAG -fPIC) set(STATIC_FLAG -static) set(DARWIN_macho_embedded_ARCHS armv6m armv7m armv7em armv7 i386 x86_64) set(DARWIN_macho_embedded_LIBRARY_OUTPUT_DIR ${COMPILER_RT_OUTPUT_DIR}/lib/macho_embedded) set(DARWIN_macho_embedded_LIBRARY_INSTALL_DIR ${COMPILER_RT_INSTALL_PATH}/lib/macho_embedded) set(CFLAGS_armv7 "-target thumbv7-apple-darwin-eabi") set(CFLAGS_i386 "-march=pentium") darwin_read_list_from_file(common_FUNCTIONS ${MACHO_SYM_DIR}/common.txt) darwin_read_list_from_file(thumb2_FUNCTIONS ${MACHO_SYM_DIR}/thumb2.txt) darwin_read_list_from_file(thumb2_64_FUNCTIONS ${MACHO_SYM_DIR}/thumb2-64.txt) darwin_read_list_from_file(arm_FUNCTIONS ${MACHO_SYM_DIR}/arm.txt) darwin_read_list_from_file(i386_FUNCTIONS ${MACHO_SYM_DIR}/i386.txt) set(armv6m_FUNCTIONS ${common_FUNCTIONS} ${arm_FUNCTIONS}) set(armv7m_FUNCTIONS ${common_FUNCTIONS} ${arm_FUNCTIONS} ${thumb2_FUNCTIONS}) set(armv7em_FUNCTIONS ${common_FUNCTIONS} ${arm_FUNCTIONS} ${thumb2_FUNCTIONS}) set(armv7_FUNCTIONS ${common_FUNCTIONS} ${arm_FUNCTIONS} ${thumb2_FUNCTIONS} ${thumb2_64_FUNCTIONS}) set(i386_FUNCTIONS ${common_FUNCTIONS} ${i386_FUNCTIONS}) set(x86_64_FUNCTIONS ${common_FUNCTIONS}) foreach(arch ${DARWIN_macho_embedded_ARCHS}) darwin_filter_builtin_sources(${arch}_filtered_sources INCLUDE ${arch}_FUNCTIONS ${${arch}_SOURCES}) if(NOT ${arch}_filtered_sources) message("${arch}_SOURCES: ${${arch}_SOURCES}") message("${arch}_FUNCTIONS: 
${${arch}_FUNCTIONS}") message(FATAL_ERROR "Empty filtered sources!") endif() endforeach() foreach(float_type SOFT HARD) foreach(type PIC STATIC) string(TOLOWER "${float_type}_${type}" lib_suffix) foreach(arch ${DARWIN_${float_type}_FLOAT_ARCHS}) set(DARWIN_macho_embedded_SYSROOT ${DARWIN_osx_SYSROOT}) set(float_flag) if(${arch} MATCHES "^arm") # x86 targets are hard float by default, but the complain about the # float ABI flag, so don't pass it unless we're targeting arm. set(float_flag ${${float_type}_FLOAT_FLAG}) endif() darwin_add_builtin_library(clang_rt ${lib_suffix} OS macho_embedded ARCH ${arch} SOURCES ${${arch}_filtered_sources} CFLAGS ${CFLAGS} -arch ${arch} ${${type}_FLAG} ${float_flag} ${CFLAGS_${arch}} PARENT_TARGET builtins) endforeach() foreach(lib ${macho_embedded_${lib_suffix}_libs}) set_target_properties(${lib} PROPERTIES LINKER_LANGUAGE C) endforeach() darwin_lipo_libs(clang_rt.${lib_suffix} PARENT_TARGET builtins LIPO_FLAGS ${macho_embedded_${lib_suffix}_lipo_flags} DEPENDS ${macho_embedded_${lib_suffix}_libs} OUTPUT_DIR ${DARWIN_macho_embedded_LIBRARY_OUTPUT_DIR} INSTALL_DIR ${DARWIN_macho_embedded_LIBRARY_INSTALL_DIR}) endforeach() endforeach() endif() endmacro() Index: vendor/compiler-rt/dist/cmake/config-ix.cmake =================================================================== --- vendor/compiler-rt/dist/cmake/config-ix.cmake (revision 320960) +++ vendor/compiler-rt/dist/cmake/config-ix.cmake (revision 320961) @@ -1,546 +1,545 @@ include(CMakePushCheckState) include(CheckCXXCompilerFlag) include(CheckLibraryExists) include(CheckSymbolExists) include(TestBigEndian) function(check_linker_flag flag out_var) cmake_push_check_state() set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${flag}") check_cxx_compiler_flag("" ${out_var}) cmake_pop_check_state() endfunction() # CodeGen options. check_cxx_compiler_flag(-fPIC COMPILER_RT_HAS_FPIC_FLAG) check_cxx_compiler_flag(-fPIE COMPILER_RT_HAS_FPIE_FLAG) check_cxx_compiler_flag(-fno-builtin COMPILER_RT_HAS_FNO_BUILTIN_FLAG) check_cxx_compiler_flag(-fno-exceptions COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG) check_cxx_compiler_flag(-fomit-frame-pointer COMPILER_RT_HAS_FOMIT_FRAME_POINTER_FLAG) check_cxx_compiler_flag(-funwind-tables COMPILER_RT_HAS_FUNWIND_TABLES_FLAG) check_cxx_compiler_flag(-fno-stack-protector COMPILER_RT_HAS_FNO_STACK_PROTECTOR_FLAG) check_cxx_compiler_flag(-fno-sanitize=safe-stack COMPILER_RT_HAS_FNO_SANITIZE_SAFE_STACK_FLAG) check_cxx_compiler_flag(-fvisibility=hidden COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG) check_cxx_compiler_flag(-frtti COMPILER_RT_HAS_FRTTI_FLAG) check_cxx_compiler_flag(-fno-rtti COMPILER_RT_HAS_FNO_RTTI_FLAG) check_cxx_compiler_flag(-ffreestanding COMPILER_RT_HAS_FFREESTANDING_FLAG) check_cxx_compiler_flag("-Werror -fno-function-sections" COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG) check_cxx_compiler_flag(-std=c++11 COMPILER_RT_HAS_STD_CXX11_FLAG) check_cxx_compiler_flag(-ftls-model=initial-exec COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC) check_cxx_compiler_flag(-fno-lto COMPILER_RT_HAS_FNO_LTO_FLAG) check_cxx_compiler_flag("-Werror -msse3" COMPILER_RT_HAS_MSSE3_FLAG) check_cxx_compiler_flag("-Werror -msse4.2" COMPILER_RT_HAS_MSSE4_2_FLAG) check_cxx_compiler_flag(--sysroot=. COMPILER_RT_HAS_SYSROOT_FLAG) check_cxx_compiler_flag("-Werror -mcrc" COMPILER_RT_HAS_MCRC_FLAG) if(NOT WIN32 AND NOT CYGWIN) # MinGW warns if -fvisibility-inlines-hidden is used. 
check_cxx_compiler_flag("-fvisibility-inlines-hidden" COMPILER_RT_HAS_FVISIBILITY_INLINES_HIDDEN_FLAG) endif() check_cxx_compiler_flag(/GR COMPILER_RT_HAS_GR_FLAG) check_cxx_compiler_flag(/GS COMPILER_RT_HAS_GS_FLAG) check_cxx_compiler_flag(/MT COMPILER_RT_HAS_MT_FLAG) check_cxx_compiler_flag(/Oy COMPILER_RT_HAS_Oy_FLAG) # Debug info flags. check_cxx_compiler_flag(-gline-tables-only COMPILER_RT_HAS_GLINE_TABLES_ONLY_FLAG) check_cxx_compiler_flag(-g COMPILER_RT_HAS_G_FLAG) check_cxx_compiler_flag(/Zi COMPILER_RT_HAS_Zi_FLAG) # Warnings. check_cxx_compiler_flag(-Wall COMPILER_RT_HAS_WALL_FLAG) check_cxx_compiler_flag(-Werror COMPILER_RT_HAS_WERROR_FLAG) check_cxx_compiler_flag("-Werror -Wframe-larger-than=512" COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG) check_cxx_compiler_flag("-Werror -Wglobal-constructors" COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG) check_cxx_compiler_flag("-Werror -Wc99-extensions" COMPILER_RT_HAS_WC99_EXTENSIONS_FLAG) check_cxx_compiler_flag("-Werror -Wgnu" COMPILER_RT_HAS_WGNU_FLAG) check_cxx_compiler_flag("-Werror -Wnon-virtual-dtor" COMPILER_RT_HAS_WNON_VIRTUAL_DTOR_FLAG) check_cxx_compiler_flag("-Werror -Wvariadic-macros" COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG) check_cxx_compiler_flag("-Werror -Wunused-parameter" COMPILER_RT_HAS_WUNUSED_PARAMETER_FLAG) check_cxx_compiler_flag("-Werror -Wcovered-switch-default" COMPILER_RT_HAS_WCOVERED_SWITCH_DEFAULT_FLAG) check_cxx_compiler_flag(/W4 COMPILER_RT_HAS_W4_FLAG) check_cxx_compiler_flag(/WX COMPILER_RT_HAS_WX_FLAG) check_cxx_compiler_flag(/wd4146 COMPILER_RT_HAS_WD4146_FLAG) check_cxx_compiler_flag(/wd4291 COMPILER_RT_HAS_WD4291_FLAG) check_cxx_compiler_flag(/wd4221 COMPILER_RT_HAS_WD4221_FLAG) check_cxx_compiler_flag(/wd4391 COMPILER_RT_HAS_WD4391_FLAG) check_cxx_compiler_flag(/wd4722 COMPILER_RT_HAS_WD4722_FLAG) check_cxx_compiler_flag(/wd4800 COMPILER_RT_HAS_WD4800_FLAG) # Symbols. check_symbol_exists(__func__ "" COMPILER_RT_HAS_FUNC_SYMBOL) # Libraries. check_library_exists(c fopen "" COMPILER_RT_HAS_LIBC) check_library_exists(dl dlopen "" COMPILER_RT_HAS_LIBDL) check_library_exists(rt shm_open "" COMPILER_RT_HAS_LIBRT) check_library_exists(m pow "" COMPILER_RT_HAS_LIBM) check_library_exists(pthread pthread_create "" COMPILER_RT_HAS_LIBPTHREAD) check_library_exists(stdc++ __cxa_throw "" COMPILER_RT_HAS_LIBSTDCXX) # Linker flags. if(ANDROID) check_linker_flag("-Wl,-z,global" COMPILER_RT_HAS_Z_GLOBAL) check_library_exists(log __android_log_write "" COMPILER_RT_HAS_LIBLOG) endif() # Architectures. # List of all architectures we can target. set(COMPILER_RT_SUPPORTED_ARCH) # Try to compile a very simple source file to ensure we can target the given # platform. We use the results of these tests to build only the various target # runtime libraries supported by our current compilers cross-compiling # abilities. set(SIMPLE_SOURCE ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/simple.cc) file(WRITE ${SIMPLE_SOURCE} "#include \n#include \nint main() { printf(\"hello, world\"); }\n") # Detect whether the current target platform is 32-bit or 64-bit, and setup # the correct commandline flags needed to attempt to target 32-bit and 64-bit. if (NOT CMAKE_SIZEOF_VOID_P EQUAL 4 AND NOT CMAKE_SIZEOF_VOID_P EQUAL 8) message(FATAL_ERROR "Please use architecture with 4 or 8 byte pointers.") endif() test_targets() # Returns a list of architecture specific target cflags in @out_var list. 
function(get_target_flags_for_arch arch out_var) list(FIND COMPILER_RT_SUPPORTED_ARCH ${arch} ARCH_INDEX) if(ARCH_INDEX EQUAL -1) message(FATAL_ERROR "Unsupported architecture: ${arch}") else() if (NOT APPLE) set(${out_var} ${TARGET_${arch}_CFLAGS} PARENT_SCOPE) else() # This is only called in constructing cflags for tests executing on the # host. This will need to all be cleaned up to support building tests # for cross-targeted hardware (i.e. iOS). set(${out_var} -arch ${arch} PARENT_SCOPE) endif() endif() endfunction() # Returns a compiler and CFLAGS that should be used to run tests for the # specific architecture. When cross-compiling, this is controled via # COMPILER_RT_TEST_COMPILER and COMPILER_RT_TEST_COMPILER_CFLAGS. macro(get_test_cc_for_arch arch cc_out cflags_out) if(ANDROID OR ${arch} MATCHES "arm|aarch64") # This is only true if we are cross-compiling. # Build all tests with host compiler and use host tools. set(${cc_out} ${COMPILER_RT_TEST_COMPILER}) set(${cflags_out} ${COMPILER_RT_TEST_COMPILER_CFLAGS}) else() get_target_flags_for_arch(${arch} ${cflags_out}) if(APPLE) list(APPEND ${cflags_out} ${DARWIN_osx_CFLAGS}) endif() string(REPLACE ";" " " ${cflags_out} "${${cflags_out}}") endif() endmacro() set(ARM64 aarch64) set(ARM32 arm armhf) set(X86 i386 i686) set(X86_64 x86_64) set(MIPS32 mips mipsel) set(MIPS64 mips64 mips64el) set(PPC64 powerpc64 powerpc64le) set(S390X s390x) set(WASM32 wasm32) set(WASM64 wasm64) if(APPLE) set(ARM64 arm64) set(ARM32 armv7 armv7s armv7k) set(X86_64 x86_64 x86_64h) endif() set(ALL_SANITIZER_COMMON_SUPPORTED_ARCH ${X86} ${X86_64} ${PPC64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X}) set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${PPC64} ${S390X}) set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64}) if(APPLE) set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64}) else() set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32}) endif() set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64}) set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC64} ${MIPS32} ${MIPS64} ${S390X}) set(ALL_TSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64}) set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${PPC64} ${S390X}) set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64}) set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64}) set(ALL_ESAN_SUPPORTED_ARCH ${X86_64} ${MIPS64}) set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64}) set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le) if(APPLE) include(CompilerRTDarwinUtils) find_darwin_sdk_dir(DARWIN_osx_SYSROOT macosx) find_darwin_sdk_dir(DARWIN_iossim_SYSROOT iphonesimulator) find_darwin_sdk_dir(DARWIN_ios_SYSROOT iphoneos) find_darwin_sdk_dir(DARWIN_watchossim_SYSROOT watchsimulator) find_darwin_sdk_dir(DARWIN_watchos_SYSROOT watchos) find_darwin_sdk_dir(DARWIN_tvossim_SYSROOT appletvsimulator) find_darwin_sdk_dir(DARWIN_tvos_SYSROOT appletvos) if(NOT DARWIN_osx_SYSROOT) if(EXISTS /usr/include) set(DARWIN_osx_SYSROOT /) else() message(ERROR "Could not detect OS X Sysroot. 
Either install Xcode or the Apple Command Line Tools") endif() endif() if(COMPILER_RT_ENABLE_IOS) list(APPEND DARWIN_EMBEDDED_PLATFORMS ios) set(DARWIN_ios_MIN_VER_FLAG -miphoneos-version-min) set(DARWIN_ios_SANITIZER_MIN_VER_FLAG ${DARWIN_ios_MIN_VER_FLAG}=8.0) endif() if(COMPILER_RT_ENABLE_WATCHOS) list(APPEND DARWIN_EMBEDDED_PLATFORMS watchos) set(DARWIN_watchos_MIN_VER_FLAG -mwatchos-version-min) set(DARWIN_watchos_SANITIZER_MIN_VER_FLAG ${DARWIN_watchos_MIN_VER_FLAG}=2.0) endif() if(COMPILER_RT_ENABLE_TVOS) list(APPEND DARWIN_EMBEDDED_PLATFORMS tvos) set(DARWIN_tvos_MIN_VER_FLAG -mtvos-version-min) set(DARWIN_tvos_SANITIZER_MIN_VER_FLAG ${DARWIN_tvos_MIN_VER_FLAG}=9.0) endif() # Note: In order to target x86_64h on OS X the minimum deployment target must # be 10.8 or higher. set(SANITIZER_COMMON_SUPPORTED_OS osx) set(PROFILE_SUPPORTED_OS osx) set(TSAN_SUPPORTED_OS osx) if(NOT SANITIZER_MIN_OSX_VERSION) string(REGEX MATCH "-mmacosx-version-min=([.0-9]+)" MACOSX_VERSION_MIN_FLAG "${CMAKE_CXX_FLAGS}") if(MACOSX_VERSION_MIN_FLAG) set(SANITIZER_MIN_OSX_VERSION "${CMAKE_MATCH_1}") elseif(CMAKE_OSX_DEPLOYMENT_TARGET) set(SANITIZER_MIN_OSX_VERSION ${CMAKE_OSX_DEPLOYMENT_TARGET}) else() set(SANITIZER_MIN_OSX_VERSION 10.9) endif() if(SANITIZER_MIN_OSX_VERSION VERSION_LESS "10.7") message(FATAL_ERROR "macOS deployment target '${SANITIZER_MIN_OSX_VERSION}' is too old.") endif() if(SANITIZER_MIN_OSX_VERSION VERSION_GREATER "10.9") message(WARNING "macOS deployment target '${SANITIZER_MIN_OSX_VERSION}' is too new, setting to '10.9' instead.") set(SANITIZER_MIN_OSX_VERSION 10.9) endif() endif() # We're setting the flag manually for each target OS set(CMAKE_OSX_DEPLOYMENT_TARGET "") set(DARWIN_COMMON_CFLAGS -stdlib=libc++) set(DARWIN_COMMON_LINK_FLAGS -stdlib=libc++ -lc++ -lc++abi) check_linker_flag("-fapplication-extension" COMPILER_RT_HAS_APP_EXTENSION) if(COMPILER_RT_HAS_APP_EXTENSION) list(APPEND DARWIN_COMMON_LINK_FLAGS "-fapplication-extension") endif() set(DARWIN_osx_CFLAGS ${DARWIN_COMMON_CFLAGS} -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION}) set(DARWIN_osx_LINK_FLAGS ${DARWIN_COMMON_LINK_FLAGS} -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION}) if(DARWIN_osx_SYSROOT) list(APPEND DARWIN_osx_CFLAGS -isysroot ${DARWIN_osx_SYSROOT}) list(APPEND DARWIN_osx_LINK_FLAGS -isysroot ${DARWIN_osx_SYSROOT}) endif() # Figure out which arches to use for each OS darwin_get_toolchain_supported_archs(toolchain_arches) message(STATUS "Toolchain supported arches: ${toolchain_arches}") if(NOT MACOSX_VERSION_MIN_FLAG) darwin_test_archs(osx DARWIN_osx_ARCHS ${toolchain_arches}) message(STATUS "OSX supported arches: ${DARWIN_osx_ARCHS}") foreach(arch ${DARWIN_osx_ARCHS}) list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch}) set(CAN_TARGET_${arch} 1) endforeach() foreach(platform ${DARWIN_EMBEDDED_PLATFORMS}) if(DARWIN_${platform}sim_SYSROOT) set(DARWIN_${platform}sim_CFLAGS ${DARWIN_COMMON_CFLAGS} ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG} -isysroot ${DARWIN_${platform}sim_SYSROOT}) set(DARWIN_${platform}sim_LINK_FLAGS ${DARWIN_COMMON_LINK_FLAGS} ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG} -isysroot ${DARWIN_${platform}sim_SYSROOT}) set(DARWIN_${platform}sim_SKIP_CC_KEXT On) darwin_test_archs(${platform}sim DARWIN_${platform}sim_ARCHS ${toolchain_arches}) message(STATUS "${platform} Simulator supported arches: ${DARWIN_${platform}sim_ARCHS}") if(DARWIN_${platform}sim_ARCHS) list(APPEND SANITIZER_COMMON_SUPPORTED_OS ${platform}sim) list(APPEND PROFILE_SUPPORTED_OS ${platform}sim) - 
if(DARWIN_${platform}_SYSROOT_INTERNAL) - list(APPEND TSAN_SUPPORTED_OS ${platform}sim) - endif() + list(APPEND TSAN_SUPPORTED_OS ${platform}sim) endif() foreach(arch ${DARWIN_${platform}sim_ARCHS}) list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch}) set(CAN_TARGET_${arch} 1) endforeach() endif() if(DARWIN_${platform}_SYSROOT) set(DARWIN_${platform}_CFLAGS ${DARWIN_COMMON_CFLAGS} ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG} -isysroot ${DARWIN_${platform}_SYSROOT}) set(DARWIN_${platform}_LINK_FLAGS ${DARWIN_COMMON_LINK_FLAGS} ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG} -isysroot ${DARWIN_${platform}_SYSROOT}) darwin_test_archs(${platform} DARWIN_${platform}_ARCHS ${toolchain_arches}) message(STATUS "${platform} supported arches: ${DARWIN_${platform}_ARCHS}") if(DARWIN_${platform}_ARCHS) list(APPEND SANITIZER_COMMON_SUPPORTED_OS ${platform}) list(APPEND PROFILE_SUPPORTED_OS ${platform}) + list(APPEND TSAN_SUPPORTED_OS ${platform}) endif() foreach(arch ${DARWIN_${platform}_ARCHS}) list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch}) set(CAN_TARGET_${arch} 1) endforeach() endif() endforeach() endif() # for list_intersect include(CompilerRTUtils) list_intersect(SANITIZER_COMMON_SUPPORTED_ARCH ALL_SANITIZER_COMMON_SUPPORTED_ARCH COMPILER_RT_SUPPORTED_ARCH ) set(LSAN_COMMON_SUPPORTED_ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}) set(UBSAN_COMMON_SUPPORTED_ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}) list_intersect(ASAN_SUPPORTED_ARCH ALL_ASAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(DFSAN_SUPPORTED_ARCH ALL_DFSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(LSAN_SUPPORTED_ARCH ALL_LSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(MSAN_SUPPORTED_ARCH ALL_MSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(PROFILE_SUPPORTED_ARCH ALL_PROFILE_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(TSAN_SUPPORTED_ARCH ALL_TSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(UBSAN_SUPPORTED_ARCH ALL_UBSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(SAFESTACK_SUPPORTED_ARCH ALL_SAFESTACK_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(CFI_SUPPORTED_ARCH ALL_CFI_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(ESAN_SUPPORTED_ARCH ALL_ESAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(SCUDO_SUPPORTED_ARCH ALL_SCUDO_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(XRAY_SUPPORTED_ARCH ALL_XRAY_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) else() # Architectures supported by compiler-rt libraries. filter_available_targets(SANITIZER_COMMON_SUPPORTED_ARCH ${ALL_SANITIZER_COMMON_SUPPORTED_ARCH}) # LSan and UBSan common files should be available on all architectures # supported by other sanitizers (even if they build into dummy object files). 
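filter_available_targets, used repeatedly just below, also comes from CompilerRTUtils.cmake. Assuming it follows the CAN_TARGET_<arch> convention that test_targets() establishes above, a plausible minimal sketch (an assumption, not the verbatim upstream helper) is:

    # Plausible sketch of filter_available_targets: keep only the
    # architectures whose CAN_TARGET_<arch> probe succeeded.
    function(example_filter_available_targets out_var)
      set(archs)
      foreach(arch ${ARGN})
        if(CAN_TARGET_${arch})
          list(APPEND archs ${arch})
        endif()
      endforeach()
      set(${out_var} ${archs} PARENT_SCOPE)
    endfunction()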
filter_available_targets(LSAN_COMMON_SUPPORTED_ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}) filter_available_targets(UBSAN_COMMON_SUPPORTED_ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}) filter_available_targets(ASAN_SUPPORTED_ARCH ${ALL_ASAN_SUPPORTED_ARCH}) filter_available_targets(DFSAN_SUPPORTED_ARCH ${ALL_DFSAN_SUPPORTED_ARCH}) filter_available_targets(LSAN_SUPPORTED_ARCH ${ALL_LSAN_SUPPORTED_ARCH}) filter_available_targets(MSAN_SUPPORTED_ARCH ${ALL_MSAN_SUPPORTED_ARCH}) filter_available_targets(PROFILE_SUPPORTED_ARCH ${ALL_PROFILE_SUPPORTED_ARCH}) filter_available_targets(TSAN_SUPPORTED_ARCH ${ALL_TSAN_SUPPORTED_ARCH}) filter_available_targets(UBSAN_SUPPORTED_ARCH ${ALL_UBSAN_SUPPORTED_ARCH}) filter_available_targets(SAFESTACK_SUPPORTED_ARCH ${ALL_SAFESTACK_SUPPORTED_ARCH}) filter_available_targets(CFI_SUPPORTED_ARCH ${ALL_CFI_SUPPORTED_ARCH}) filter_available_targets(ESAN_SUPPORTED_ARCH ${ALL_ESAN_SUPPORTED_ARCH}) filter_available_targets(SCUDO_SUPPORTED_ARCH ${ALL_SCUDO_SUPPORTED_ARCH}) filter_available_targets(XRAY_SUPPORTED_ARCH ${ALL_XRAY_SUPPORTED_ARCH}) endif() if (MSVC) # See if the DIA SDK is available and usable. set(MSVC_DIA_SDK_DIR "$ENV{VSINSTALLDIR}DIA SDK") if (IS_DIRECTORY ${MSVC_DIA_SDK_DIR}) set(CAN_SYMBOLIZE 1) else() set(CAN_SYMBOLIZE 0) endif() else() set(CAN_SYMBOLIZE 1) endif() find_program(GOLD_EXECUTABLE NAMES ${LLVM_DEFAULT_TARGET_TRIPLE}-ld.gold ld.gold ${LLVM_DEFAULT_TARGET_TRIPLE}-ld ld DOC "The gold linker") if(COMPILER_RT_SUPPORTED_ARCH) list(REMOVE_DUPLICATES COMPILER_RT_SUPPORTED_ARCH) endif() message(STATUS "Compiler-RT supported architectures: ${COMPILER_RT_SUPPORTED_ARCH}") if(ANDROID) set(OS_NAME "Android") else() set(OS_NAME "${CMAKE_SYSTEM_NAME}") endif() set(ALL_SANITIZERS asan;dfsan;msan;tsan;safestack;cfi;esan;scudo) set(COMPILER_RT_SANITIZERS_TO_BUILD ${ALL_SANITIZERS} CACHE STRING "sanitizers to build if supported on the target (all;${ALL_SANITIZERS})") list_replace(COMPILER_RT_SANITIZERS_TO_BUILD all "${ALL_SANITIZERS}") if (SANITIZER_COMMON_SUPPORTED_ARCH AND NOT LLVM_USE_SANITIZER AND (OS_NAME MATCHES "Android|Darwin|Linux|FreeBSD" OR (OS_NAME MATCHES "Windows" AND (NOT MINGW AND NOT CYGWIN)))) set(COMPILER_RT_HAS_SANITIZER_COMMON TRUE) else() set(COMPILER_RT_HAS_SANITIZER_COMMON FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON) set(COMPILER_RT_HAS_INTERCEPTION TRUE) else() set(COMPILER_RT_HAS_INTERCEPTION FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND ASAN_SUPPORTED_ARCH) set(COMPILER_RT_HAS_ASAN TRUE) else() set(COMPILER_RT_HAS_ASAN FALSE) endif() if (OS_NAME MATCHES "Linux|FreeBSD|Windows") set(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME TRUE) else() set(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME FALSE) endif() # TODO: Add builtins support. 
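The COMPILER_RT_HAS_<runtime> blocks that follow all share one shape: sanitizer-common support, a non-empty per-runtime arch list, and an OS-name regex. Purely as an illustration of that shape (upstream deliberately spells each block out), it could be captured as:

    # Illustration only: the repeated gating pattern of the blocks below,
    # folded into a hypothetical helper macro.
    macro(example_runtime_supported out_var arch_list os_regex)
      if(COMPILER_RT_HAS_SANITIZER_COMMON AND ${arch_list} AND
         OS_NAME MATCHES "${os_regex}")
        set(${out_var} TRUE)
      else()
        set(${out_var} FALSE)
      endif()
    endmacro()
    # e.g. example_runtime_supported(COMPILER_RT_HAS_MSAN MSAN_SUPPORTED_ARCH "Linux")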
if (COMPILER_RT_HAS_SANITIZER_COMMON AND DFSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_DFSAN TRUE) else() set(COMPILER_RT_HAS_DFSAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND LSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Darwin|Linux|FreeBSD") set(COMPILER_RT_HAS_LSAN TRUE) else() set(COMPILER_RT_HAS_LSAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND MSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_MSAN TRUE) else() set(COMPILER_RT_HAS_MSAN FALSE) endif() if (PROFILE_SUPPORTED_ARCH AND NOT LLVM_USE_SANITIZER AND OS_NAME MATCHES "Darwin|Linux|FreeBSD|Windows|Android") set(COMPILER_RT_HAS_PROFILE TRUE) else() set(COMPILER_RT_HAS_PROFILE FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND TSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Darwin|Linux|FreeBSD|Android") set(COMPILER_RT_HAS_TSAN TRUE) else() set(COMPILER_RT_HAS_TSAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND UBSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Darwin|Linux|FreeBSD|Windows|Android") set(COMPILER_RT_HAS_UBSAN TRUE) else() set(COMPILER_RT_HAS_UBSAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND SAFESTACK_SUPPORTED_ARCH AND OS_NAME MATCHES "Darwin|Linux|FreeBSD") set(COMPILER_RT_HAS_SAFESTACK TRUE) else() set(COMPILER_RT_HAS_SAFESTACK FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND CFI_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_CFI TRUE) else() set(COMPILER_RT_HAS_CFI FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND ESAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_ESAN TRUE) else() set(COMPILER_RT_HAS_ESAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND SCUDO_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_SCUDO TRUE) else() set(COMPILER_RT_HAS_SCUDO FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND XRAY_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_XRAY TRUE) else() set(COMPILER_RT_HAS_XRAY FALSE) endif() Index: vendor/compiler-rt/dist/lib/asan/asan_errors.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_errors.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/asan/asan_errors.cc (revision 320961) @@ -1,516 +1,515 @@ //===-- asan_errors.cc ------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // ASan implementation for error structures. 
//===----------------------------------------------------------------------===// #include "asan_errors.h" #include #include "asan_descriptions.h" #include "asan_mapping.h" #include "asan_report.h" #include "asan_stack.h" #include "sanitizer_common/sanitizer_stackdepot.h" namespace __asan { void ErrorStackOverflow::Print() { Decorator d; Printf("%s", d.Warning()); Report( "ERROR: AddressSanitizer: %s on address %p" " (pc %p bp %p sp %p T%d)\n", scariness.GetDescription(), (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid); Printf("%s", d.EndWarning()); scariness.Print(); BufferedStackTrace stack; GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, common_flags()->fast_unwind_on_fatal); stack.Print(); ReportErrorSummary(scariness.GetDescription(), &stack); } static void MaybeDumpInstructionBytes(uptr pc) { if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return; InternalScopedString str(1024); str.append("First 16 instruction bytes at pc: "); if (IsAccessibleMemoryRange(pc, 16)) { for (int i = 0; i < 16; ++i) { PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " "); } str.append("\n"); } else { str.append("unaccessible\n"); } Report("%s", str.data()); } static void MaybeDumpRegisters(void *context) { if (!flags()->dump_registers) return; SignalContext::DumpAllRegisters(context); } static void MaybeReportNonExecRegion(uptr pc) { #if SANITIZER_FREEBSD || SANITIZER_LINUX MemoryMappingLayout proc_maps(/*cache_enabled*/ true); - uptr start, end, protection; - while (proc_maps.Next(&start, &end, nullptr, nullptr, 0, &protection)) { - if (pc >= start && pc < end && - !(protection & MemoryMappingLayout::kProtectionExecute)) + MemoryMappedSegment segment; + while (proc_maps.Next(&segment)) { + if (pc >= segment.start && pc < segment.end && !segment.IsExecutable()) Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n"); } #endif } void ErrorDeadlySignal::Print() { Decorator d; Printf("%s", d.Warning()); const char *description = __sanitizer::DescribeSignalOrException(signo); Report( "ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p " "T%d)\n", description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid); Printf("%s", d.EndWarning()); if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n"); if (is_memory_access) { const char *access_type = write_flag == SignalContext::WRITE ? "WRITE" : (write_flag == SignalContext::READ ? 
"READ" : "UNKNOWN"); Report("The signal is caused by a %s memory access.\n", access_type); if (addr < GetPageSizeCached()) Report("Hint: address points to the zero page.\n"); } MaybeReportNonExecRegion(pc); scariness.Print(); BufferedStackTrace stack; GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, common_flags()->fast_unwind_on_fatal); stack.Print(); MaybeDumpInstructionBytes(pc); MaybeDumpRegisters(context); Printf("AddressSanitizer can not provide additional info.\n"); ReportErrorSummary(description, &stack); } void ErrorDoubleFree::Print() { Decorator d; Printf("%s", d.Warning()); char tname[128]; Report( "ERROR: AddressSanitizer: attempting %s on %p in " "thread T%d%s:\n", scariness.GetDescription(), addr_description.addr, tid, ThreadNameWithParenthesis(tid, tname, sizeof(tname))); Printf("%s", d.EndWarning()); scariness.Print(); GET_STACK_TRACE_FATAL(second_free_stack->trace[0], second_free_stack->top_frame_bp); stack.Print(); addr_description.Print(); ReportErrorSummary(scariness.GetDescription(), &stack); } void ErrorNewDeleteSizeMismatch::Print() { Decorator d; Printf("%s", d.Warning()); char tname[128]; Report( "ERROR: AddressSanitizer: %s on %p in thread " "T%d%s:\n", scariness.GetDescription(), addr_description.addr, tid, ThreadNameWithParenthesis(tid, tname, sizeof(tname))); Printf("%s object passed to delete has wrong type:\n", d.EndWarning()); Printf( " size of the allocated type: %zd bytes;\n" " size of the deallocated type: %zd bytes.\n", addr_description.chunk_access.chunk_size, delete_size); CHECK_GT(free_stack->size, 0); scariness.Print(); GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); stack.Print(); addr_description.Print(); ReportErrorSummary(scariness.GetDescription(), &stack); Report( "HINT: if you don't care about these errors you may set " "ASAN_OPTIONS=new_delete_type_mismatch=0\n"); } void ErrorFreeNotMalloced::Print() { Decorator d; Printf("%s", d.Warning()); char tname[128]; Report( "ERROR: AddressSanitizer: attempting free on address " "which was not malloc()-ed: %p in thread T%d%s\n", addr_description.Address(), tid, ThreadNameWithParenthesis(tid, tname, sizeof(tname))); Printf("%s", d.EndWarning()); CHECK_GT(free_stack->size, 0); scariness.Print(); GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); stack.Print(); addr_description.Print(); ReportErrorSummary(scariness.GetDescription(), &stack); } void ErrorAllocTypeMismatch::Print() { static const char *alloc_names[] = {"INVALID", "malloc", "operator new", "operator new []"}; static const char *dealloc_names[] = {"INVALID", "free", "operator delete", "operator delete []"}; CHECK_NE(alloc_type, dealloc_type); Decorator d; Printf("%s", d.Warning()); Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n", scariness.GetDescription(), alloc_names[alloc_type], dealloc_names[dealloc_type], addr_description.addr); Printf("%s", d.EndWarning()); CHECK_GT(dealloc_stack->size, 0); scariness.Print(); GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp); stack.Print(); addr_description.Print(); ReportErrorSummary(scariness.GetDescription(), &stack); Report( "HINT: if you don't care about these errors you may set " "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n"); } void ErrorMallocUsableSizeNotOwned::Print() { Decorator d; Printf("%s", d.Warning()); Report( "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for " "pointer which is not owned: %p\n", addr_description.Address()); Printf("%s", d.EndWarning()); stack->Print(); 
addr_description.Print(); ReportErrorSummary(scariness.GetDescription(), stack); } void ErrorSanitizerGetAllocatedSizeNotOwned::Print() { Decorator d; Printf("%s", d.Warning()); Report( "ERROR: AddressSanitizer: attempting to call " "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n", addr_description.Address()); Printf("%s", d.EndWarning()); stack->Print(); addr_description.Print(); ReportErrorSummary(scariness.GetDescription(), stack); } void ErrorStringFunctionMemoryRangesOverlap::Print() { Decorator d; char bug_type[100]; internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function); Printf("%s", d.Warning()); Report( "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) " "overlap\n", bug_type, addr1_description.Address(), addr1_description.Address() + length1, addr2_description.Address(), addr2_description.Address() + length2); Printf("%s", d.EndWarning()); scariness.Print(); stack->Print(); addr1_description.Print(); addr2_description.Print(); ReportErrorSummary(bug_type, stack); } void ErrorStringFunctionSizeOverflow::Print() { Decorator d; Printf("%s", d.Warning()); Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", scariness.GetDescription(), size); Printf("%s", d.EndWarning()); scariness.Print(); stack->Print(); addr_description.Print(); ReportErrorSummary(scariness.GetDescription(), stack); } void ErrorBadParamsToAnnotateContiguousContainer::Print() { Report( "ERROR: AddressSanitizer: bad parameters to " "__sanitizer_annotate_contiguous_container:\n" " beg : %p\n" " end : %p\n" " old_mid : %p\n" " new_mid : %p\n", beg, end, old_mid, new_mid); uptr granularity = SHADOW_GRANULARITY; if (!IsAligned(beg, granularity)) Report("ERROR: beg is not aligned by %d\n", granularity); stack->Print(); ReportErrorSummary(scariness.GetDescription(), stack); } void ErrorODRViolation::Print() { Decorator d; Printf("%s", d.Warning()); Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(), global1.beg); Printf("%s", d.EndWarning()); InternalScopedString g1_loc(256), g2_loc(256); PrintGlobalLocation(&g1_loc, global1); PrintGlobalLocation(&g2_loc, global2); Printf(" [1] size=%zd '%s' %s\n", global1.size, MaybeDemangleGlobalName(global1.name), g1_loc.data()); Printf(" [2] size=%zd '%s' %s\n", global2.size, MaybeDemangleGlobalName(global2.name), g2_loc.data()); if (stack_id1 && stack_id2) { Printf("These globals were registered at these points:\n"); Printf(" [1]:\n"); StackDepotGet(stack_id1).Print(); Printf(" [2]:\n"); StackDepotGet(stack_id2).Print(); } Report( "HINT: if you don't care about these errors you may set " "ASAN_OPTIONS=detect_odr_violation=0\n"); InternalScopedString error_msg(256); error_msg.append("%s: global '%s' at %s", scariness.GetDescription(), MaybeDemangleGlobalName(global1.name), g1_loc.data()); ReportErrorSummary(error_msg.data()); } void ErrorInvalidPointerPair::Print() { Decorator d; Printf("%s", d.Warning()); Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(), addr1_description.Address(), addr2_description.Address()); Printf("%s", d.EndWarning()); GET_STACK_TRACE_FATAL(pc, bp); stack.Print(); addr1_description.Print(); addr2_description.Print(); ReportErrorSummary(scariness.GetDescription(), &stack); } static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) { return s[-1] > 127 && s[1] > 127; } ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr, bool is_write_, uptr access_size_) : ErrorBase(tid), addr_description(addr, access_size_, 
/*shouldLockThreadRegistry=*/false), pc(pc_), bp(bp_), sp(sp_), access_size(access_size_), is_write(is_write_), shadow_val(0) { scariness.Clear(); if (access_size) { if (access_size <= 9) { char desr[] = "?-byte"; desr[0] = '0' + access_size; scariness.Scare(access_size + access_size / 2, desr); } else if (access_size >= 10) { scariness.Scare(15, "multi-byte"); } is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read"); // Determine the error type. bug_descr = "unknown-crash"; if (AddrIsInMem(addr)) { u8 *shadow_addr = (u8 *)MemToShadow(addr); // If we are accessing 16 bytes, look at the second shadow byte. if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++; // If we are in the partial right redzone, look at the next shadow byte. if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++; bool far_from_bounds = false; shadow_val = *shadow_addr; int bug_type_score = 0; // For use-after-frees reads are almost as bad as writes. int read_after_free_bonus = 0; switch (shadow_val) { case kAsanHeapLeftRedzoneMagic: case kAsanArrayCookieMagic: bug_descr = "heap-buffer-overflow"; bug_type_score = 10; far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); break; case kAsanHeapFreeMagic: bug_descr = "heap-use-after-free"; bug_type_score = 20; if (!is_write) read_after_free_bonus = 18; break; case kAsanStackLeftRedzoneMagic: bug_descr = "stack-buffer-underflow"; bug_type_score = 25; far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); break; case kAsanInitializationOrderMagic: bug_descr = "initialization-order-fiasco"; bug_type_score = 1; break; case kAsanStackMidRedzoneMagic: case kAsanStackRightRedzoneMagic: bug_descr = "stack-buffer-overflow"; bug_type_score = 25; far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); break; case kAsanStackAfterReturnMagic: bug_descr = "stack-use-after-return"; bug_type_score = 30; if (!is_write) read_after_free_bonus = 18; break; case kAsanUserPoisonedMemoryMagic: bug_descr = "use-after-poison"; bug_type_score = 20; break; case kAsanContiguousContainerOOBMagic: bug_descr = "container-overflow"; bug_type_score = 10; break; case kAsanStackUseAfterScopeMagic: bug_descr = "stack-use-after-scope"; bug_type_score = 10; break; case kAsanGlobalRedzoneMagic: bug_descr = "global-buffer-overflow"; bug_type_score = 10; far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); break; case kAsanIntraObjectRedzone: bug_descr = "intra-object-overflow"; bug_type_score = 10; break; case kAsanAllocaLeftMagic: case kAsanAllocaRightMagic: bug_descr = "dynamic-stack-buffer-overflow"; bug_type_score = 25; far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); break; } scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr); if (far_from_bounds) scariness.Scare(10, "far-from-bounds"); } } } static void PrintContainerOverflowHint() { Printf("HINT: if you don't care about these errors you may set " "ASAN_OPTIONS=detect_container_overflow=0.\n" "If you suspect a false positive see also: " "https://github.com/google/sanitizers/wiki/" "AddressSanitizerContainerOverflow.\n"); } static void PrintShadowByte(InternalScopedString *str, const char *before, u8 byte, const char *after = "\n") { PrintMemoryByte(str, before, byte, /*in_shadow*/true, after); } static void PrintLegend(InternalScopedString *str) { str->append( "Shadow byte legend (one shadow byte represents %d " "application bytes):\n", (int)SHADOW_GRANULARITY); PrintShadowByte(str, " Addressable: ", 0); str->append(" Partially 
addressable: "); for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " "); str->append("\n"); PrintShadowByte(str, " Heap left redzone: ", kAsanHeapLeftRedzoneMagic); PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic); PrintShadowByte(str, " Stack left redzone: ", kAsanStackLeftRedzoneMagic); PrintShadowByte(str, " Stack mid redzone: ", kAsanStackMidRedzoneMagic); PrintShadowByte(str, " Stack right redzone: ", kAsanStackRightRedzoneMagic); PrintShadowByte(str, " Stack after return: ", kAsanStackAfterReturnMagic); PrintShadowByte(str, " Stack use after scope: ", kAsanStackUseAfterScopeMagic); PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic); PrintShadowByte(str, " Global init order: ", kAsanInitializationOrderMagic); PrintShadowByte(str, " Poisoned by user: ", kAsanUserPoisonedMemoryMagic); PrintShadowByte(str, " Container overflow: ", kAsanContiguousContainerOOBMagic); PrintShadowByte(str, " Array cookie: ", kAsanArrayCookieMagic); PrintShadowByte(str, " Intra object redzone: ", kAsanIntraObjectRedzone); PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic); PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic); } static void PrintShadowBytes(InternalScopedString *str, const char *before, u8 *bytes, u8 *guilty, uptr n) { Decorator d; if (before) str->append("%s%p:", before, bytes); for (uptr i = 0; i < n; i++) { u8 *p = bytes + i; const char *before = p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " "; const char *after = p == guilty ? "]" : ""; PrintShadowByte(str, before, *p, after); } str->append("\n"); } static void PrintShadowMemoryForAddress(uptr addr) { if (!AddrIsInMem(addr)) return; uptr shadow_addr = MemToShadow(addr); const uptr n_bytes_per_row = 16; uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1); InternalScopedString str(4096 * 8); str.append("Shadow bytes around the buggy address:\n"); for (int i = -5; i <= 5; i++) { const char *prefix = (i == 0) ? "=>" : " "; PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row), (u8 *)shadow_addr, n_bytes_per_row); } if (flags()->print_legend) PrintLegend(&str); Printf("%s", str.data()); } void ErrorGeneric::Print() { Decorator d; Printf("%s", d.Warning()); uptr addr = addr_description.Address(); Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n", bug_descr, (void *)addr, pc, bp, sp); Printf("%s", d.EndWarning()); char tname[128]; Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(), access_size ? (is_write ? 
"WRITE" : "READ") : "ACCESS", access_size, (void *)addr, tid, ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess()); scariness.Print(); GET_STACK_TRACE_FATAL(pc, bp); stack.Print(); // Pass bug_descr because we have a special case for // initialization-order-fiasco addr_description.Print(bug_descr); if (shadow_val == kAsanContiguousContainerOOBMagic) PrintContainerOverflowHint(); ReportErrorSummary(bug_descr, &stack); PrintShadowMemoryForAddress(addr); } } // namespace __asan Index: vendor/compiler-rt/dist/lib/asan/asan_internal.h =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_internal.h (revision 320960) +++ vendor/compiler-rt/dist/lib/asan/asan_internal.h (revision 320961) @@ -1,148 +1,149 @@ //===-- asan_internal.h -----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // ASan-private header which defines various general utilities. //===----------------------------------------------------------------------===// #ifndef ASAN_INTERNAL_H #define ASAN_INTERNAL_H #include "asan_flags.h" #include "asan_interface_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_libc.h" #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) # error "The AddressSanitizer run-time should not be" " instrumented by AddressSanitizer" #endif // Build-time configuration options. // If set, asan will intercept C++ exception api call(s). #ifndef ASAN_HAS_EXCEPTIONS # define ASAN_HAS_EXCEPTIONS 1 #endif // If set, values like allocator chunk size, as well as defaults for some flags // will be changed towards less memory overhead. #ifndef ASAN_LOW_MEMORY # if SANITIZER_IOS || SANITIZER_ANDROID # define ASAN_LOW_MEMORY 1 # else # define ASAN_LOW_MEMORY 0 # endif #endif #ifndef ASAN_DYNAMIC # ifdef PIC # define ASAN_DYNAMIC 1 # else # define ASAN_DYNAMIC 0 # endif #endif // All internal functions in asan reside inside the __asan namespace // to avoid namespace collisions with the user programs. // Separate namespace also makes it simpler to distinguish the asan run-time // functions from the instrumented user code in a profile. namespace __asan { class AsanThread; using __sanitizer::StackTrace; void AsanInitFromRtl(); // asan_win.cc void InitializePlatformExceptionHandlers(); // Returns whether an address is a valid allocated system heap block. // 'addr' must point to the beginning of the block. bool IsSystemHeapAddress(uptr addr); // asan_rtl.cc void NORETURN ShowStatsAndAbort(); // asan_malloc_linux.cc / asan_malloc_mac.cc void ReplaceSystemMalloc(); // asan_linux.cc / asan_mac.cc / asan_win.cc +uptr FindDynamicShadowStart(); void *AsanDoesNotSupportStaticLinkage(); void AsanCheckDynamicRTPrereqs(); void AsanCheckIncompatibleRT(); // Support function for __asan_(un)register_image_globals. Searches for the // loaded image containing `needle' and then enumerates all global metadata // structures declared in that image, applying `op' (e.g., // __asan_(un)register_globals) to them. 
typedef void (*globals_op_fptr)(__asan_global *, uptr); void AsanApplyToGlobals(globals_op_fptr op, const void *needle); void AsanOnDeadlySignal(int, void *siginfo, void *context); void ReadContextStack(void *context, uptr *stack, uptr *ssize); void StopInitOrderChecking(); // Wrapper for TLS/TSD. void AsanTSDInit(void (*destructor)(void *tsd)); void *AsanTSDGet(); void AsanTSDSet(void *tsd); void PlatformTSDDtor(void *tsd); void AppendToErrorMessageBuffer(const char *buffer); void *AsanDlSymNext(const char *sym); void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name); // Add convenient macro for interface functions that may be represented as // weak hooks. #define ASAN_MALLOC_HOOK(ptr, size) \ do { \ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \ RunMallocHooks(ptr, size); \ } while (false) #define ASAN_FREE_HOOK(ptr) \ do { \ if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \ RunFreeHooks(ptr); \ } while (false) #define ASAN_ON_ERROR() \ if (&__asan_on_error) __asan_on_error() extern int asan_inited; // Used to avoid infinite recursion in __asan_init(). extern bool asan_init_is_running; extern void (*death_callback)(void); // These magic values are written to shadow for better error reporting. const int kAsanHeapLeftRedzoneMagic = 0xfa; const int kAsanHeapFreeMagic = 0xfd; const int kAsanStackLeftRedzoneMagic = 0xf1; const int kAsanStackMidRedzoneMagic = 0xf2; const int kAsanStackRightRedzoneMagic = 0xf3; const int kAsanStackAfterReturnMagic = 0xf5; const int kAsanInitializationOrderMagic = 0xf6; const int kAsanUserPoisonedMemoryMagic = 0xf7; const int kAsanContiguousContainerOOBMagic = 0xfc; const int kAsanStackUseAfterScopeMagic = 0xf8; const int kAsanGlobalRedzoneMagic = 0xf9; const int kAsanInternalHeapMagic = 0xfe; const int kAsanArrayCookieMagic = 0xac; const int kAsanIntraObjectRedzone = 0xbb; const int kAsanAllocaLeftMagic = 0xca; const int kAsanAllocaRightMagic = 0xcb; static const uptr kCurrentStackFrameMagic = 0x41B58AB3; static const uptr kRetiredStackFrameMagic = 0x45E0360E; } // namespace __asan #endif // ASAN_INTERNAL_H Index: vendor/compiler-rt/dist/lib/asan/asan_linux.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_linux.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/asan/asan_linux.cc (revision 320961) @@ -1,177 +1,182 @@ //===-- asan_linux.cc -----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Linux-specific details. 
//===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "asan_interceptors.h" #include "asan_internal.h" #include "asan_thread.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_freebsd.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_procmaps.h"
#include <sys/time.h> #include <sys/resource.h> #include <sys/mman.h> #include <sys/syscall.h>
#include <sys/types.h> #include <dlfcn.h> #include <fcntl.h> #include <limits.h> #include <pthread.h>
#include <stdio.h> #include <unistd.h> #include <unwind.h>
#if SANITIZER_FREEBSD #include <sys/link_elf.h> #endif
#if SANITIZER_ANDROID || SANITIZER_FREEBSD #include <ucontext.h> extern "C" void* _DYNAMIC;
#else #include <sys/ucontext.h> #include <link.h> #endif
// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in // 32-bit mode.
#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \ __FreeBSD_version <= 902001 // v9.2
#define ucontext_t xucontext_t #endif typedef enum { ASAN_RT_VERSION_UNDEFINED = 0, ASAN_RT_VERSION_DYNAMIC, ASAN_RT_VERSION_STATIC, } asan_rt_version_t; // FIXME: perhaps also store abi version here? extern "C" { SANITIZER_INTERFACE_ATTRIBUTE asan_rt_version_t __asan_rt_version; } namespace __asan { void InitializePlatformInterceptors() {} void InitializePlatformExceptionHandlers() {} bool IsSystemHeapAddress (uptr addr) { return false; } void *AsanDoesNotSupportStaticLinkage() { // This will fail to link with -static. return &_DYNAMIC; // defined in link.h } +uptr FindDynamicShadowStart() { + UNREACHABLE("FindDynamicShadowStart is not available"); + return 0; +} + void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { UNIMPLEMENTED(); } #if SANITIZER_ANDROID // FIXME: should we do anything for Android? void AsanCheckDynamicRTPrereqs() {} void AsanCheckIncompatibleRT() {} #else static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size, void *data) { // Continue until the first dynamic library is found if (!info->dlpi_name || info->dlpi_name[0] == 0) return 0; // Ignore vDSO if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0) return 0; *(const char **)data = info->dlpi_name; return 1; } static bool IsDynamicRTName(const char *libname) { return internal_strstr(libname, "libclang_rt.asan") || internal_strstr(libname, "libasan.so"); } static void ReportIncompatibleRT() { Report("Your application is linked against incompatible ASan runtimes.\n"); Die(); } void AsanCheckDynamicRTPrereqs() { if (!ASAN_DYNAMIC || !flags()->verify_asan_link_order) return; // Ensure that dynamic RT is the first DSO in the list const char *first_dso_name = nullptr; dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); if (first_dso_name && !IsDynamicRTName(first_dso_name)) { Report("ASan runtime does not come first in initial library list; " "you should either link runtime to your application or " "manually preload it with LD_PRELOAD.\n"); Die(); } } void AsanCheckIncompatibleRT() { if (ASAN_DYNAMIC) { if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { __asan_rt_version = ASAN_RT_VERSION_DYNAMIC; } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) { ReportIncompatibleRT(); } } else { if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { // Ensure that dynamic runtime is not present. We should detect it // as early as possible, otherwise ASan interceptors could bind to // the functions in dynamic ASan runtime instead of the functions in // system libraries, causing crashes later in ASan initialization.
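// Note: this revision also switches proc-maps iteration from the old
// multi-out-parameter Next() to a MemoryMappedSegment object, as the hunk
// below shows. A minimal sketch of the new pattern (only the `filename'
// member is demonstrated here):
#if 0
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  char buf[128];
  MemoryMappedSegment segment(buf, sizeof(buf));
  while (proc_maps.Next(&segment)) {
    // segment.filename holds the path of the mapped object, if any.
  }
#endif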
MemoryMappingLayout proc_maps(/*cache_enabled*/true); char filename[128]; - while (proc_maps.Next(nullptr, nullptr, nullptr, filename, - sizeof(filename), nullptr)) { - if (IsDynamicRTName(filename)) { + MemoryMappedSegment segment(filename, sizeof(filename)); + while (proc_maps.Next(&segment)) { + if (IsDynamicRTName(segment.filename)) { Report("Your application is linked against " "incompatible ASan runtimes.\n"); Die(); } } __asan_rt_version = ASAN_RT_VERSION_STATIC; } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) { ReportIncompatibleRT(); } } } #endif // SANITIZER_ANDROID #if !SANITIZER_ANDROID void ReadContextStack(void *context, uptr *stack, uptr *ssize) { ucontext_t *ucp = (ucontext_t*)context; *stack = (uptr)ucp->uc_stack.ss_sp; *ssize = ucp->uc_stack.ss_size; } #else void ReadContextStack(void *context, uptr *stack, uptr *ssize) { UNIMPLEMENTED(); } #endif void *AsanDlSymNext(const char *sym) { return dlsym(RTLD_NEXT, sym); } } // namespace __asan #endif // SANITIZER_FREEBSD || SANITIZER_LINUX Index: vendor/compiler-rt/dist/lib/asan/asan_mac.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_mac.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/asan/asan_mac.cc (revision 320961) @@ -1,289 +1,312 @@ //===-- asan_mac.cc -------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Mac-specific details. //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_MAC
#include "asan_interceptors.h" #include "asan_internal.h" #include "asan_mapping.h" #include "asan_stack.h" #include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_mac.h"
#include <dlfcn.h> #include <fcntl.h> #include <libkern/OSAtomic.h> #include <mach-o/dyld.h> #include <mach-o/getsect.h>
#include <mach-o/loader.h> #include <pthread.h> #include <stdlib.h> // for free() #include <sys/mman.h> #include <sys/resource.h>
#include <sys/sysctl.h> #include <sys/ucontext.h> #include <ucontext.h>
// from <crt_externs.h>, but we don't have that file on iOS extern "C" { extern char ***_NSGetArgv(void); extern char ***_NSGetEnviron(void); } namespace __asan { void InitializePlatformInterceptors() {} void InitializePlatformExceptionHandlers() {} bool IsSystemHeapAddress (uptr addr) { return false; } // No-op. Mac does not support static linkage anyway. void *AsanDoesNotSupportStaticLinkage() { return 0; } +uptr FindDynamicShadowStart() { + uptr granularity = GetMmapGranularity(); + uptr alignment = 8 * granularity; + uptr left_padding = granularity; + uptr space_size = kHighShadowEnd + left_padding; + + uptr largest_gap_found = 0; + uptr shadow_start = FindAvailableMemoryRange(space_size, alignment, + granularity, &largest_gap_found); + // If the shadow doesn't fit, restrict the address space to make it fit. + if (shadow_start == 0) { + uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment); + RestrictMemoryToMaxAddress(new_max_vm); + kHighMemEnd = new_max_vm - 1; + space_size = kHighShadowEnd + left_padding; + shadow_start = + FindAvailableMemoryRange(space_size, alignment, granularity, nullptr); + } + CHECK_NE((uptr)0, shadow_start); + CHECK(IsAligned(shadow_start, alignment)); + return shadow_start; +} + // No-op.
Mac does not support static linkage anyway. void AsanCheckDynamicRTPrereqs() {} // No-op. Mac does not support static linkage anyway. void AsanCheckIncompatibleRT() {} void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { // Find the Mach-O header for the image containing the needle Dl_info info; int err = dladdr(needle, &info); if (err == 0) return; #if __LP64__ const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase; #else const struct mach_header *mh = (struct mach_header *)info.dli_fbase; #endif // Look up the __asan_globals section in that image and register its globals unsigned long size = 0; __asan_global *globals = (__asan_global *)getsectiondata( mh, "__DATA", "__asan_globals", &size); if (!globals) return; if (size % sizeof(__asan_global) != 0) return; op(globals, size / sizeof(__asan_global)); } void ReadContextStack(void *context, uptr *stack, uptr *ssize) { UNIMPLEMENTED(); } // Support for the following functions from libdispatch on Mac OS: // dispatch_async_f() // dispatch_async() // dispatch_sync_f() // dispatch_sync() // dispatch_after_f() // dispatch_after() // dispatch_group_async_f() // dispatch_group_async() // TODO(glider): libdispatch API contains other functions that we don't support // yet. // // dispatch_sync() and dispatch_sync_f() are synchronous, although chances are // they can cause jobs to run on a thread different from the current one. // TODO(glider): if so, we need a test for this (otherwise we should remove // them). // // The following functions use dispatch_barrier_async_f() (which isn't a library // function but is exported) and are thus supported: // dispatch_source_set_cancel_handler_f() // dispatch_source_set_cancel_handler() // dispatch_source_set_event_handler_f() // dispatch_source_set_event_handler() // // The reference manual for Grand Central Dispatch is available at // http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html // The implementation details are at // http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c typedef void* dispatch_group_t; typedef void* dispatch_queue_t; typedef void* dispatch_source_t; typedef u64 dispatch_time_t; typedef void (*dispatch_function_t)(void *block); typedef void* (*worker_t)(void *block); // A wrapper for the ObjC blocks used to support libdispatch. typedef struct { void *block; dispatch_function_t func; u32 parent_tid; } asan_block_context_t; ALWAYS_INLINE void asan_register_worker_thread(int parent_tid, StackTrace *stack) { AsanThread *t = GetCurrentThread(); if (!t) { t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr, parent_tid, stack, /* detached */ true); t->Init(); asanThreadRegistry().StartThread(t->tid(), GetTid(), /* workerthread */ true, 0); SetCurrentThread(t); } } // For use by only those functions that allocated the context via // alloc_asan_context(). extern "C" void asan_dispatch_call_block_and_release(void *block) { GET_STACK_TRACE_THREAD; asan_block_context_t *context = (asan_block_context_t*)block; VReport(2, "asan_dispatch_call_block_and_release(): " "context: %p, pthread_self: %p\n", block, pthread_self()); asan_register_worker_thread(context->parent_tid, &stack); // Call the original dispatcher for the block. context->func(context->block); asan_free(context, &stack, FROM_MALLOC); } } // namespace __asan using namespace __asan; // NOLINT // Wrap |ctxt| and |func| into an asan_block_context_t. // The caller retains control of the allocated context. 
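// For illustration (not vendored code): every dispatch interceptor below
// follows the same wrap-and-forward shape -- stash the user's context and
// function together with the submitting thread's tid, then let
// asan_dispatch_call_block_and_release() register the worker thread before
// invoking the real callback:
#if 0
  asan_block_context_t *c = alloc_asan_context(ctxt, func, &stack);
  REAL(dispatch_async_f)(dq, c, asan_dispatch_call_block_and_release);
#endif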
extern "C" asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, BufferedStackTrace *stack) { asan_block_context_t *asan_ctxt = (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack); asan_ctxt->block = ctxt; asan_ctxt->func = func; asan_ctxt->parent_tid = GetCurrentTidOrInvalid(); return asan_ctxt; } // Define interceptor for dispatch_*_f function with the three most common // parameters: dispatch_queue_t, context, dispatch_function_t. #define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \ INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \ dispatch_function_t func) { \ GET_STACK_TRACE_THREAD; \ asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \ if (Verbosity() >= 2) { \ Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \ asan_ctxt, pthread_self()); \ PRINT_CURRENT_STACK(); \ } \ return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \ asan_dispatch_call_block_and_release); \ } INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f) INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f) INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { GET_STACK_TRACE_THREAD; asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); if (Verbosity() >= 2) { Report("dispatch_after_f: %p\n", asan_ctxt); PRINT_CURRENT_STACK(); } return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt, asan_dispatch_call_block_and_release); } INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { GET_STACK_TRACE_THREAD; asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); if (Verbosity() >= 2) { Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n", asan_ctxt, pthread_self()); PRINT_CURRENT_STACK(); } REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt, asan_dispatch_call_block_and_release); } #if !defined(MISSING_BLOCKS_SUPPORT) extern "C" { void dispatch_async(dispatch_queue_t dq, void(^work)(void)); void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)); void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)); void dispatch_source_set_cancel_handler(dispatch_source_t ds, void(^work)(void)); void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); } #define GET_ASAN_BLOCK(work) \ void (^asan_block)(void); \ int parent_tid = GetCurrentTidOrInvalid(); \ asan_block = ^(void) { \ GET_STACK_TRACE_THREAD; \ asan_register_worker_thread(parent_tid, &stack); \ work(); \ } INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void(^work)(void)) { ENABLE_FRAME_POINTER; GET_ASAN_BLOCK(work); REAL(dispatch_async)(dq, asan_block); } INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) { ENABLE_FRAME_POINTER; GET_ASAN_BLOCK(work); REAL(dispatch_group_async)(dg, dq, asan_block); } INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) { ENABLE_FRAME_POINTER; GET_ASAN_BLOCK(work); REAL(dispatch_after)(when, queue, asan_block); } INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds, void(^work)(void)) { if (!work) { REAL(dispatch_source_set_cancel_handler)(ds, work); return; } ENABLE_FRAME_POINTER; GET_ASAN_BLOCK(work); REAL(dispatch_source_set_cancel_handler)(ds, asan_block); } INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t 
ds, void(^work)(void)) { ENABLE_FRAME_POINTER; GET_ASAN_BLOCK(work); REAL(dispatch_source_set_event_handler)(ds, asan_block); } #endif #endif // SANITIZER_MAC Index: vendor/compiler-rt/dist/lib/asan/asan_new_delete.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_new_delete.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/asan/asan_new_delete.cc (revision 320961) @@ -1,201 +1,201 @@ //===-- asan_interceptors.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Interceptors for operators new and delete. //===----------------------------------------------------------------------===//
#include "asan_allocator.h" #include "asan_internal.h" #include "asan_stack.h" #include "interception/interception.h"
#include <stddef.h>
// C++ operators can't have dllexport attributes on Windows. We export them // anyway by passing extra -export flags to the linker, which is exactly what // dllexport would normally do. We need to export them in order to make the // VS2015 dynamic CRT (MD) work. #if SANITIZER_WINDOWS #define CXX_OPERATOR_ATTRIBUTE -#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:"##sym)) +#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym)) #ifdef _WIN64 COMMENT_EXPORT("??2@YAPEAX_K@Z") // operator new COMMENT_EXPORT("??2@YAPEAX_KAEBUnothrow_t@std@@@Z") // operator new nothrow COMMENT_EXPORT("??3@YAXPEAX@Z") // operator delete COMMENT_EXPORT("??3@YAXPEAX_K@Z") // sized operator delete COMMENT_EXPORT("??_U@YAPEAX_K@Z") // operator new[] COMMENT_EXPORT("??_V@YAXPEAX@Z") // operator delete[] #else COMMENT_EXPORT("??2@YAPAXI@Z") // operator new COMMENT_EXPORT("??2@YAPAXIABUnothrow_t@std@@@Z") // operator new nothrow COMMENT_EXPORT("??3@YAXPAX@Z") // operator delete COMMENT_EXPORT("??3@YAXPAXI@Z") // sized operator delete COMMENT_EXPORT("??_U@YAPAXI@Z") // operator new[] COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[] #endif #undef COMMENT_EXPORT #else #define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE #endif using namespace __asan; // NOLINT // FreeBSD prior to v9.2 has a wrong definition of 'size_t'. // http://svnweb.freebsd.org/base?view=revision&revision=232261 #if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 #include <sys/param.h> #if __FreeBSD_version <= 902001 // v9.2 #define size_t unsigned #endif // __FreeBSD_version #endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 // This code has issues on OSX. // See https://github.com/google/sanitizers/issues/131. // Fake std::nothrow_t and std::align_val_t to avoid including <new>. namespace std { struct nothrow_t {}; enum class align_val_t: size_t {}; } // namespace std // TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
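// Note on the COMMENT_EXPORT change above: `##' is token pasting, which is
// ill-formed when an operand is a string literal; writing the literals side
// by side relies on standard string-literal concatenation instead. A minimal
// (non-vendored) illustration:
#if 0
#define BAD(sym) "/export:"##sym   // error: pasting does not form a valid token
#define GOOD(sym) "/export:" sym   // "/export:" "??3@YAXPAX@Z" concatenates
                                   // to "/export:??3@YAXPAX@Z"
#endif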
#define OPERATOR_NEW_BODY(type, nothrow) \ GET_STACK_TRACE_MALLOC;\ void *res = asan_memalign(0, size, &stack, type);\ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ return res; #define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \ GET_STACK_TRACE_MALLOC;\ void *res = asan_memalign((uptr)align, size, &stack, type);\ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ return res; // On OS X it's not enough to just provide our own 'operator new' and // 'operator delete' implementations, because they're going to be in the // runtime dylib, and the main executable will depend on both the runtime // dylib and libstdc++, each of those'll have its implementation of new and // delete. // To make sure that C++ allocation/deallocation operators are overridden on // OS X we need to intercept them using their mangled names. #if !SANITIZER_MAC CXX_OPERATOR_ATTRIBUTE void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new(size_t size, std::align_val_t align) { OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new[](size_t size, std::align_val_t align) { OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) { OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) { OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); } #else // SANITIZER_MAC INTERCEPTOR(void *, _Znwm, size_t size) { OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); } INTERCEPTOR(void *, _Znam, size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); } INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); } INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); } #endif #define OPERATOR_DELETE_BODY(type) \ GET_STACK_TRACE_FREE;\ asan_free(ptr, &stack, type); #if !SANITIZER_MAC CXX_OPERATOR_ATTRIBUTE void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW); } CXX_OPERATOR_ATTRIBUTE void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW); } CXX_OPERATOR_ATTRIBUTE void operator delete[](void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE void operator delete(void *ptr, size_t size) NOEXCEPT { GET_STACK_TRACE_FREE; asan_sized_free(ptr, size, &stack, FROM_NEW); } CXX_OPERATOR_ATTRIBUTE void operator delete[](void *ptr, size_t size) NOEXCEPT { GET_STACK_TRACE_FREE; asan_sized_free(ptr, size, &stack, FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE void operator delete(void *ptr, std::align_val_t) NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW); } CXX_OPERATOR_ATTRIBUTE void operator delete[](void *ptr, std::align_val_t) NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE void operator delete(void 
*ptr, std::align_val_t, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW); } CXX_OPERATOR_ATTRIBUTE void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT { GET_STACK_TRACE_FREE; asan_sized_free(ptr, size, &stack, FROM_NEW); } CXX_OPERATOR_ATTRIBUTE void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT { GET_STACK_TRACE_FREE; asan_sized_free(ptr, size, &stack, FROM_NEW_BR); } #else // SANITIZER_MAC INTERCEPTOR(void, _ZdlPv, void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); } INTERCEPTOR(void, _ZdaPv, void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); } INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW); } INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW_BR); } #endif Index: vendor/compiler-rt/dist/lib/asan/asan_posix.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_posix.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/asan/asan_posix.cc (revision 320961) @@ -1,122 +1,122 @@ //===-- asan_posix.cc -----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Posix-specific details. //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_POSIX
#include "asan_internal.h" #include "asan_interceptors.h" #include "asan_mapping.h" #include "asan_report.h" #include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h"
#include <pthread.h> #include <signal.h> #include <stdlib.h> #include <sys/time.h> #include <sys/resource.h> #include <unistd.h>
namespace __asan { void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { ScopedDeadlySignal signal_scope(GetCurrentThread()); int code = (int)((siginfo_t*)siginfo)->si_code; // Write the first message using fd=2, just in case. // It may actually fail to write in case stderr is closed. internal_write(2, "ASAN:DEADLYSIGNAL\n", 18); SignalContext sig = SignalContext::Create(siginfo, context); // Access at a reasonable offset above SP, or slightly below it (to account // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is // probably a stack overflow. #ifdef __s390__ // On s390, the fault address in siginfo points to start of the page, not // to the precise word that was accessed. Mask off the low bits of sp to // take it into account. bool IsStackAccess = sig.addr >= (sig.sp & ~0xFFF) && sig.addr < sig.sp + 0xFFFF; #else bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF; #endif #if __powerpc__ // Large stack frames can be allocated with e.g. // lis r0,-10000 // stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000 // If the store faults then sp will not have been updated, so test above - // will not work, becase the fault address will be more than just "slightly" + // will not work, because the fault address will be more than just "slightly" // below sp.
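// Note (explanatory, not vendored): the decode below extracts the primary
// opcode (inst >> 26), the RA field (bits 11-15, i.e. (inst >> 16) & 0x1F)
// and, for primary opcode 31, the extended opcode XO. Worked example:
// `stdu r1,-10000(r1)' has opcd == 62 and ra == 1, so a fault on that store
// is classified as a stack-pointer update and hence a stack overflow.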
if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) { u32 inst = *(unsigned *)sig.pc; u32 ra = (inst >> 16) & 0x1F; u32 opcd = inst >> 26; u32 xo = (inst >> 1) & 0x3FF; // Check for store-with-update to sp. The instructions we accept are: // stbu rs,d(ra) stbux rs,ra,rb // sthu rs,d(ra) sthux rs,ra,rb // stwu rs,d(ra) stwux rs,ra,rb // stdu rs,ds(ra) stdux rs,ra,rb // where ra is r1 (the stack pointer). if (ra == 1 && (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 || (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181)))) IsStackAccess = true; } #endif // __powerpc__ // We also check si_code to filter out SEGV caused by something else other // than hitting the guard page or unmapped memory, like, for example, // unaligned memory access. if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR)) ReportStackOverflow(sig); else ReportDeadlySignal(signo, sig); } // ---------------------- TSD ---------------- {{{1 static pthread_key_t tsd_key; static bool tsd_key_inited = false; void AsanTSDInit(void (*destructor)(void *tsd)) { CHECK(!tsd_key_inited); tsd_key_inited = true; CHECK_EQ(0, pthread_key_create(&tsd_key, destructor)); } void *AsanTSDGet() { CHECK(tsd_key_inited); return pthread_getspecific(tsd_key); } void AsanTSDSet(void *tsd) { CHECK(tsd_key_inited); pthread_setspecific(tsd_key, tsd); } void PlatformTSDDtor(void *tsd) { AsanThreadContext *context = (AsanThreadContext*)tsd; if (context->destructor_iterations > 1) { context->destructor_iterations--; CHECK_EQ(0, pthread_setspecific(tsd_key, tsd)); return; } AsanThread::TSDDtor(tsd); } } // namespace __asan #endif // SANITIZER_POSIX Index: vendor/compiler-rt/dist/lib/asan/asan_rtl.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_rtl.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/asan/asan_rtl.cc (revision 320961) @@ -1,696 +1,688 @@ //===-- asan_rtl.cc -------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Main file of the ASan run-time library. //===----------------------------------------------------------------------===// #include "asan_activation.h" #include "asan_allocator.h" #include "asan_interceptors.h" #include "asan_interface_internal.h" #include "asan_internal.h" #include "asan_mapping.h" #include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" #include "asan_suppressions.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include "lsan/lsan_common.h" #include "ubsan/ubsan_init.h" #include "ubsan/ubsan_platform.h" uptr __asan_shadow_memory_dynamic_address; // Global interface symbol. int __asan_option_detect_stack_use_after_return; // Global interface symbol. uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan. namespace __asan { uptr AsanMappingProfile[kAsanMappingProfileSize]; static void AsanDie() { static atomic_uint32_t num_calls; if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) { // Don't die twice - run a busy loop.
while (1) { } } if (common_flags()->print_module_map >= 1) PrintModuleMap(); if (flags()->sleep_before_dying) { Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying); SleepForSeconds(flags()->sleep_before_dying); } if (flags()->unmap_shadow_on_exit) { if (kMidMemBeg) { UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg); UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd); } else { UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); } } } static void AsanCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) { Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, line, cond, (uptr)v1, (uptr)v2); // FIXME: check for infinite recursion without a thread-local counter here. PRINT_CURRENT_STACK_CHECK(); Die(); } // -------------------------- Globals --------------------- {{{1 int asan_inited; bool asan_init_is_running; #if !ASAN_FIXED_MAPPING uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; #endif // -------------------------- Misc ---------------- {{{1 void ShowStatsAndAbort() { __asan_print_accumulated_stats(); Die(); } // ---------------------- mmap -------------------- {{{1 // Reserve memory range [beg, end]. // We need to use inclusive range because end+1 may not be representable. void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { CHECK_EQ((beg % GetMmapGranularity()), 0); CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); uptr size = end - beg + 1; DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. void *res = MmapFixedNoReserve(beg, size, name); if (res != (void*)beg) { Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " "Perhaps you're using ulimit -v\n", size); Abort(); } if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size); if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size); } // --------------- LowLevelAllocateCallbac ---------- {{{1 static void OnLowLevelAllocate(uptr ptr, uptr size) { PoisonShadow(ptr, size, kAsanInternalHeapMagic); } // -------------------------- Run-time entry ------------------- {{{1 // exported functions #define ASAN_REPORT_ERROR(type, is_write, size) \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_report_ ## type ## size(uptr addr) { \ GET_CALLER_PC_BP_SP; \ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \ } \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \ GET_CALLER_PC_BP_SP; \ ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \ } \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_report_ ## type ## size ## _noabort(uptr addr) { \ GET_CALLER_PC_BP_SP; \ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \ } \ ASAN_REPORT_ERROR(load, false, 1) ASAN_REPORT_ERROR(load, false, 2) ASAN_REPORT_ERROR(load, false, 4) ASAN_REPORT_ERROR(load, false, 8) ASAN_REPORT_ERROR(load, false, 16) ASAN_REPORT_ERROR(store, true, 1) ASAN_REPORT_ERROR(store, true, 2) ASAN_REPORT_ERROR(store, true, 4) ASAN_REPORT_ERROR(store, true, 8) ASAN_REPORT_ERROR(store, true, 16) #define ASAN_REPORT_ERROR_N(type, is_write) \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_report_ ## type ## _n(uptr addr, uptr size) { \ GET_CALLER_PC_BP_SP; \ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \ } \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \ GET_CALLER_PC_BP_SP; \ ReportGenericError(pc, bp, sp, addr, is_write, 
size, exp, true); \ } \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_report_ ## type ## _n_noabort(uptr addr, uptr size) { \ GET_CALLER_PC_BP_SP; \ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \ } \ ASAN_REPORT_ERROR_N(load, false) ASAN_REPORT_ERROR_N(store, true) #define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \ uptr sp = MEM_TO_SHADOW(addr); \ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast(sp) \ : *reinterpret_cast(sp); \ if (UNLIKELY(s)) { \ if (UNLIKELY(size >= SHADOW_GRANULARITY || \ ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \ (s8)s)) { \ if (__asan_test_only_reported_buggy_pointer) { \ *__asan_test_only_reported_buggy_pointer = addr; \ } else { \ GET_CALLER_PC_BP_SP; \ ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \ fatal); \ } \ } \ } #define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_##type##size(uptr addr) { \ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, true) \ } \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_exp_##type##size(uptr addr, u32 exp) { \ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp, true) \ } \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ void __asan_##type##size ## _noabort(uptr addr) { \ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, false) \ } \ ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1) ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2) ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4) ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8) ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16) ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1) ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2) ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4) ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8) ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, false, size, 0, true); } } extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr addr, uptr size, u32 exp) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, false, size, exp, true); } } extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN_noabort(uptr addr, uptr size) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, false, size, 0, false); } } extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, true, size, 0, true); } } extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr addr, uptr size, u32 exp) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, true, size, exp, true); } } extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN_noabort(uptr addr, uptr size) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, true, size, 0, false); } } // Force the linker to keep the symbols for various ASan interface functions. // We want to keep those in the executable in order to let the instrumented // dynamic libraries access the symbol even if it is not used by the executable // itself. This should help if the build system is removing dead code at link // time. 
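// Note (explanatory, not vendored): the volatile read below is what defeats
// dead-code elimination -- the optimizer cannot prove any case unreachable,
// so every referenced interface symbol must be kept. The idiom in miniature,
// with a hypothetical keep_me():
#if 0
  volatile int fake = 0;
  switch (fake) {
    case 1: keep_me(); break;  // retained: `fake' could be 1 at runtime
  }
#endif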
static NOINLINE void force_interface_symbols() { volatile int fake_condition = 0; // prevent dead condition elimination. // __asan_report_* functions are noreturn, so we need a switch to prevent // the compiler from removing any of them. // clang-format off switch (fake_condition) { case 1: __asan_report_load1(0); break; case 2: __asan_report_load2(0); break; case 3: __asan_report_load4(0); break; case 4: __asan_report_load8(0); break; case 5: __asan_report_load16(0); break; case 6: __asan_report_load_n(0, 0); break; case 7: __asan_report_store1(0); break; case 8: __asan_report_store2(0); break; case 9: __asan_report_store4(0); break; case 10: __asan_report_store8(0); break; case 11: __asan_report_store16(0); break; case 12: __asan_report_store_n(0, 0); break; case 13: __asan_report_exp_load1(0, 0); break; case 14: __asan_report_exp_load2(0, 0); break; case 15: __asan_report_exp_load4(0, 0); break; case 16: __asan_report_exp_load8(0, 0); break; case 17: __asan_report_exp_load16(0, 0); break; case 18: __asan_report_exp_load_n(0, 0, 0); break; case 19: __asan_report_exp_store1(0, 0); break; case 20: __asan_report_exp_store2(0, 0); break; case 21: __asan_report_exp_store4(0, 0); break; case 22: __asan_report_exp_store8(0, 0); break; case 23: __asan_report_exp_store16(0, 0); break; case 24: __asan_report_exp_store_n(0, 0, 0); break; case 25: __asan_register_globals(nullptr, 0); break; case 26: __asan_unregister_globals(nullptr, 0); break; case 27: __asan_set_death_callback(nullptr); break; case 28: __asan_set_error_report_callback(nullptr); break; case 29: __asan_handle_no_return(); break; case 30: __asan_address_is_poisoned(nullptr); break; case 31: __asan_poison_memory_region(nullptr, 0); break; case 32: __asan_unpoison_memory_region(nullptr, 0); break; case 34: __asan_before_dynamic_init(nullptr); break; case 35: __asan_after_dynamic_init(); break; case 36: __asan_poison_stack_memory(0, 0); break; case 37: __asan_unpoison_stack_memory(0, 0); break; case 38: __asan_region_is_poisoned(0, 0); break; case 39: __asan_describe_address(0); break; case 40: __asan_set_shadow_00(0, 0); break; case 41: __asan_set_shadow_f1(0, 0); break; case 42: __asan_set_shadow_f2(0, 0); break; case 43: __asan_set_shadow_f3(0, 0); break; case 44: __asan_set_shadow_f5(0, 0); break; case 45: __asan_set_shadow_f8(0, 0); break; } // clang-format on } static void asan_atexit() { Printf("AddressSanitizer exit stats:\n"); __asan_print_accumulated_stats(); // Print AsanMappingProfile. for (uptr i = 0; i < kAsanMappingProfileSize; i++) { if (AsanMappingProfile[i] == 0) continue; Printf("asan_mapping.h:%zd -- %zd\n", i, AsanMappingProfile[i]); } } static void InitializeHighMemEnd() { #if !ASAN_FIXED_MAPPING kHighMemEnd = GetMaxVirtualAddress(); // Increase kHighMemEnd to make sure it's properly // aligned together with kHighMemBeg: kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1; #endif // !ASAN_FIXED_MAPPING CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0); } static void ProtectGap(uptr addr, uptr size) { if (!flags()->protect_shadow_gap) { // The shadow gap is unprotected, so there is a chance that someone // is actually using this memory. Which means it needs a shadow... 
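// Note (explanatory, not vendored): MEM_TO_SHADOW is the usual ASan direct
// mapping, Shadow(addr) = (addr >> SHADOW_SCALE) + SHADOW_OFFSET. With the
// common SHADOW_SCALE == 3 (one shadow byte per 8 application bytes) and,
// e.g., the Linux/x86_64 offset 0x7fff8000:
//   0x100000000 >> 3 == 0x20000000; + 0x7fff8000 == 0x9fff8000.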
uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached()); uptr GapShadowEnd = RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1; if (Verbosity()) Printf("protect_shadow_gap=0:" " not protecting shadow gap, allocating gap's shadow\n" "|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg, GapShadowEnd); ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd, "unprotected gap shadow"); return; } void *res = MmapFixedNoAccess(addr, size, "shadow gap"); if (addr == (uptr)res) return; // A few pages at the start of the address space cannot be protected. // But we really want to protect as much as possible, to prevent this memory // being returned as a result of a non-FIXED mmap(). if (addr == kZeroBaseShadowStart) { uptr step = GetMmapGranularity(); while (size > step && addr < kZeroBaseMaxShadowStart) { addr += step; size -= step; void *res = MmapFixedNoAccess(addr, size, "shadow gap"); if (addr == (uptr)res) return; } } Report("ERROR: Failed to protect the shadow gap. " "ASan cannot proceed correctly. ABORTING.\n"); DumpProcessMap(); Die(); } static void PrintAddressSpaceLayout() { Printf("|| `[%p, %p]` || HighMem ||\n", (void*)kHighMemBeg, (void*)kHighMemEnd); Printf("|| `[%p, %p]` || HighShadow ||\n", (void*)kHighShadowBeg, (void*)kHighShadowEnd); if (kMidMemBeg) { Printf("|| `[%p, %p]` || ShadowGap3 ||\n", (void*)kShadowGap3Beg, (void*)kShadowGap3End); Printf("|| `[%p, %p]` || MidMem ||\n", (void*)kMidMemBeg, (void*)kMidMemEnd); Printf("|| `[%p, %p]` || ShadowGap2 ||\n", (void*)kShadowGap2Beg, (void*)kShadowGap2End); Printf("|| `[%p, %p]` || MidShadow ||\n", (void*)kMidShadowBeg, (void*)kMidShadowEnd); } Printf("|| `[%p, %p]` || ShadowGap ||\n", (void*)kShadowGapBeg, (void*)kShadowGapEnd); if (kLowShadowBeg) { Printf("|| `[%p, %p]` || LowShadow ||\n", (void*)kLowShadowBeg, (void*)kLowShadowEnd); Printf("|| `[%p, %p]` || LowMem ||\n", (void*)kLowMemBeg, (void*)kLowMemEnd); } Printf("MemToShadow(shadow): %p %p %p %p", (void*)MEM_TO_SHADOW(kLowShadowBeg), (void*)MEM_TO_SHADOW(kLowShadowEnd), (void*)MEM_TO_SHADOW(kHighShadowBeg), (void*)MEM_TO_SHADOW(kHighShadowEnd)); if (kMidMemBeg) { Printf(" %p %p", (void*)MEM_TO_SHADOW(kMidShadowBeg), (void*)MEM_TO_SHADOW(kMidShadowEnd)); } Printf("\n"); Printf("redzone=%zu\n", (uptr)flags()->redzone); Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone); Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb); Printf("thread_local_quarantine_size_kb=%zuK\n", (uptr)flags()->thread_local_quarantine_size_kb); Printf("malloc_context_size=%zu\n", (uptr)common_flags()->malloc_context_size); Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE); Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY); Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET); CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); if (kMidMemBeg) CHECK(kMidShadowBeg > kLowShadowEnd && kMidMemBeg > kMidShadowEnd && kHighShadowBeg > kMidMemEnd); } static void InitializeShadowMemory() { // Set the shadow memory address to uninitialized. __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel; uptr shadow_start = kLowShadowBeg; // Detect if a dynamic shadow address must be used and find an available // location when necessary. When a dynamic address is used, the macro // |kLowShadowBeg| expands to |__asan_shadow_memory_dynamic_address| which is // |kDefaultShadowSentinel|.
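// Note: this revision replaces the inline FindAvailableMemoryRange() search
// in the hunk below with the per-platform FindDynamicShadowStart() hook (see
// asan_linux.cc and asan_mac.cc above); the Mac version can additionally
// shrink the usable VM range via RestrictMemoryToMaxAddress() when the
// shadow would not otherwise fit.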
if (shadow_start == kDefaultShadowSentinel) { __asan_shadow_memory_dynamic_address = 0; CHECK_EQ(0, kLowShadowBeg); - - uptr granularity = GetMmapGranularity(); - uptr alignment = 8 * granularity; - uptr left_padding = granularity; - uptr space_size = kHighShadowEnd + left_padding; - - shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity); - CHECK_NE((uptr)0, shadow_start); - CHECK(IsAligned(shadow_start, alignment)); + shadow_start = FindDynamicShadowStart(); } // Update the shadow memory address (potentially) used by instrumentation. __asan_shadow_memory_dynamic_address = shadow_start; if (kLowShadowBeg) shadow_start -= GetMmapGranularity(); bool full_shadow_is_available = MemoryRangeIsAvailable(shadow_start, kHighShadowEnd); #if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \ !ASAN_FIXED_MAPPING if (!full_shadow_is_available) { kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0; kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0; } #endif if (Verbosity()) PrintAddressSpaceLayout(); if (full_shadow_is_available) { // mmap the low shadow plus at least one page at the left. if (kLowShadowBeg) ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); // mmap the high shadow. ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); // protect the gap. ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); } else if (kMidMemBeg && MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) && MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) { CHECK(kLowShadowBeg != kLowShadowEnd); // mmap the low shadow plus at least one page at the left. ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); // mmap the mid shadow. ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow"); // mmap the high shadow. ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); // protect the gaps. ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1); } else { Report("Shadow memory range interleaves with an existing memory mapping. " "ASan cannot proceed correctly. ABORTING.\n"); Report("ASan shadow was supposed to be located in the [%p-%p] range.\n", shadow_start, kHighShadowEnd); DumpProcessMap(); Die(); } } static void AsanInitInternal() { if (LIKELY(asan_inited)) return; SanitizerToolName = "AddressSanitizer"; CHECK(!asan_init_is_running && "ASan init calls itself!"); asan_init_is_running = true; CacheBinaryName(); // Initialize flags. This must be done early, because most of the // initialization steps look at flags(). InitializeFlags(); AsanCheckIncompatibleRT(); AsanCheckDynamicRTPrereqs(); AvoidCVE_2016_2143(); SetCanPoisonMemory(flags()->poison_heap); SetMallocContextSize(common_flags()->malloc_context_size); InitializePlatformExceptionHandlers(); InitializeHighMemEnd(); // Make sure we are not statically linked. AsanDoesNotSupportStaticLinkage(); // Install tool-specific callbacks in sanitizer_common. AddDieCallback(AsanDie); SetCheckFailedCallback(AsanCheckFailed); SetPrintfAndReportCallback(AppendToErrorMessageBuffer); __sanitizer_set_report_path(common_flags()->log_path); __asan_option_detect_stack_use_after_return = flags()->detect_stack_use_after_return; // Re-exec ourselves if we need to set additional env or command line args. MaybeReexec(); // Setup internal allocator callback. 
SetLowLevelAllocateCallback(OnLowLevelAllocate); InitializeAsanInterceptors(); // Enable system log ("adb logcat") on Android. // Doing this before interceptors are initialized crashes in: // AsanInitInternal -> android_log_write -> __interceptor_strcmp AndroidLogInit(); ReplaceSystemMalloc(); DisableCoreDumperIfNecessary(); InitializeShadowMemory(); AsanTSDInit(PlatformTSDDtor); InstallDeadlySignalHandlers(AsanOnDeadlySignal); AllocatorOptions allocator_options; allocator_options.SetFrom(flags(), common_flags()); InitializeAllocator(allocator_options); MaybeStartBackgroudThread(); SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited // should be set to 1 prior to initializing the threads. asan_inited = 1; asan_init_is_running = false; if (flags()->atexit) Atexit(asan_atexit); InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); // Now that ASan runtime is (mostly) initialized, deactivate it if // necessary, so that it can be re-activated when requested. if (flags()->start_deactivated) AsanDeactivate(); // interceptors InitTlsSize(); // Create main thread. AsanThread *main_thread = AsanThread::Create( /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0, /* stack */ nullptr, /* detached */ true); CHECK_EQ(0, main_thread->tid()); SetCurrentThread(main_thread); main_thread->ThreadStart(internal_getpid(), /* signal_thread_is_registered */ nullptr); force_interface_symbols(); // no-op. SanitizerInitializeUnwinder(); if (CAN_SANITIZE_LEAKS) { __lsan::InitCommonLsan(); if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { Atexit(__lsan::DoLeakCheck); } } #if CAN_SANITIZE_UB __ubsan::InitAsPlugin(); #endif InitializeSuppressions(); if (CAN_SANITIZE_LEAKS) { // LateInitialize() calls dlsym, which can allocate an error string buffer // in the TLS. Let's ignore the allocation to avoid reporting a leak. __lsan::ScopedInterceptorDisabler disabler; Symbolizer::LateInitialize(); } else { Symbolizer::LateInitialize(); } VReport(1, "AddressSanitizer Init done\n"); } // Initialize as requested from some part of ASan runtime library (interceptors, // allocator, etc). void AsanInitFromRtl() { AsanInitInternal(); } #if ASAN_DYNAMIC // Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable // (and thus normal initializers from .preinit_array or modules haven't run). class AsanInitializer { public: // NOLINT AsanInitializer() { AsanInitFromRtl(); } }; static AsanInitializer asan_initializer; #endif // ASAN_DYNAMIC } // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; // NOLINT void NOINLINE __asan_handle_no_return() { if (asan_init_is_running) return; int local_stack; AsanThread *curr_thread = GetCurrentThread(); uptr PageSize = GetPageSizeCached(); uptr top, bottom; if (curr_thread) { top = curr_thread->stack_top(); bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); } else { // If we haven't seen this thread, try asking the OS for stack bounds. 
uptr tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,
                     &tls_size);
top = bottom + stack_size;
}
static const uptr kMaxExpectedCleanupSize = 64 << 20;  // 64M
if (top - bottom > kMaxExpectedCleanupSize) {
  static bool reported_warning = false;
  if (reported_warning)
    return;
  reported_warning = true;
  Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
         "stack top: %p; bottom %p; size: %p (%zd)\n"
         "False positive error reports may follow\n"
         "For details see "
         "https://github.com/google/sanitizers/issues/189\n",
         top, bottom, top - bottom, top - bottom);
  return;
}
PoisonShadow(bottom, top - bottom, 0);
if (curr_thread && curr_thread->has_fake_stack())
  curr_thread->fake_stack()->HandleNoReturn();
}

void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
  SetUserDieCallback(callback);
}

// Initialize as requested from instrumented application code.
// We use this call as a trigger to wake up ASan from deactivated state.
void __asan_init() {
  AsanActivate();
  AsanInitInternal();
}

void __asan_version_mismatch_check() {
  // Do nothing.
}

Index: vendor/compiler-rt/dist/lib/asan/asan_thread.cc
===================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_thread.cc (revision 320960)
+++ vendor/compiler-rt/dist/lib/asan/asan_thread.cc (revision 320961)
@@ -1,476 +1,475 @@
//===-- asan_thread.cc ----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

struct CreateThreadContextArgs {
  AsanThread *thread;
  StackTrace *stack;
};

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

// MIPS requires aligned address
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread_safety - this should be called when there is
  // a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called.
    // It would be wrong to reuse AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
    initialized = true;
  }
  return *asan_thread_registry;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  CreateThreadContextArgs args = { thread, stack };
  asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread),
                                    detached, parent_tid, &args);
  return thread;
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext*)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  malloc_storage().CommitBack();
  if (common_flags()->use_sigaltstack)
    UnsetAlternateSignalStack();
  asanThreadRegistry().FinishThread(tid);
  FlushToDeadThreadStats(&stats_);
  // We also clear the shadow on thread destruction because
  // some code may still be executing in later TSD destructors
  // and we don't want it to have any poisoned stack.
  ClearShadowForThreadStackAndTLS();
  DeleteFakeStack(tid);
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // if fake_stack_save is null, the fiber will die, delete the fake stack
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: need to check next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/bottom_. But in that
  // case we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known; also, the procedure has to be
// async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
- CHECK_LE(stack_size, 0x10000000);
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1;
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}

void AsanThread::Init() {
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  fake_stack_ = nullptr;  // Will be initialized lazily if needed.
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls();
  CHECK_GT(this->stack_size(), 0U);
  CHECK(AddrIsInMem(stack_bottom_));
  CHECK(AddrIsInMem(stack_top_ - 1));
  ClearShadowForThreadStackAndTLS();
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_,
          stack_top_ - stack_bottom_, &local);
}

thread_return_t AsanThread::ThreadStart(
    tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
                                   nullptr);
  if (signal_thread_is_registered)
    atomic_store(signal_thread_is_registered, 1, memory_order_release);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy()
  // before the TSD destructors have run might cause false positives in LSan.
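  // The lazy initialization in AsyncSignalSafeLazyInitFakeStack above follows
  // a small three-state protocol (0 = uninitialized, 1 = being initialized,
  // pointer = ready) driven by a single compare-and-swap. A minimal sketch of
  // the same pattern in portable C11; lazy_get and make_resource are
  // hypothetical names, not ASan APIs:
  //
  //   #include <stdatomic.h>
  //   #include <stdint.h>
  //   #include <stdlib.h>
  //   static _Atomic uintptr_t g_state;  /* 0, 1, or a valid pointer */
  //   static void *make_resource(void) { return malloc(64); }
  //   static void *lazy_get(void) {
  //     uintptr_t expected = 0;
  //     if (atomic_compare_exchange_strong(&g_state, &expected, 1)) {
  //       void *p = make_resource();  /* we won the race: initialize */
  //       atomic_store(&g_state, (uintptr_t)p);
  //       return p;
  //     }
  //     uintptr_t s = atomic_load(&g_state);
  //     return s > 1 ? (void *)s : NULL;  /* NULL while init is in flight */
  //   }
  //
  // Losing threads get NULL rather than blocking, which mirrors the early
  // "return nullptr" above and keeps the routine async-signal safe.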
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

void AsanThread::SetThreadStackAndTls() {
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
                       const_cast<uptr *>(&stack_size), &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  int local;
  CHECK(AddrIsInStack((uptr)&local));
}

void AsanThread::ClearShadowForThreadStackAndTLS() {
  PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_)
    PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr*)bottom)[2];
    access->frame_descr = (const char *)((uptr*)bottom)[1];
    return true;
  }
  uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char*)ptr[1];
  return true;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t)
    return false;
  if (t->AddrIsInStack((uptr)addr))
    return true;
  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
    return true;
  return false;
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, libc constructor is called _after_ asan_init, and cleans
      // up TSD. Try to figure out if this is still the main thread by the
      // stack address. We are not entirely sure that we have correct main
      // thread limits, so only do this magic on Android, and only if the
      // found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ?
      tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == 0))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
    t->fake_stack()->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack*)fakestack,
                       (uptr*)bottom_old, (uptr*)size_old);
}
}

Index: vendor/compiler-rt/dist/lib/asan/asan_win.cc
===================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_win.cc (revision 320960)
+++ vendor/compiler-rt/dist/lib/asan/asan_win.cc (revision 320961)
@@ -1,341 +1,353 @@
//===-- asan_win.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Windows-specific details.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#include <stdlib.h>

#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_win.h"
#include "sanitizer_common/sanitizer_win_defs.h"

using namespace __asan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_should_detect_stack_use_after_return() {
  __asan_init();
  return __asan_option_detect_stack_use_after_return;
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_shadow_memory_dynamic_address() {
  __asan_init();
  return __asan_shadow_memory_dynamic_address;
}
}  // extern "C"

// ---------------------- Windows-specific interceptors ---------------- {{{
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler;

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
  EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
  CONTEXT *context = info->ContextRecord;

  // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.

  SignalContext sig = SignalContext::Create(exception_record, context);
  ReportDeadlySignal(exception_record->ExceptionCode, sig);
  UNREACHABLE("returned from reporting deadly signal");
}

// Wrapper SEH Handler. If the exception should be handled by asan, we call
// __asan_unhandled_exception_filter, otherwise, we execute the user provided
// exception handler or the default.
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
  DWORD exception_code = info->ExceptionRecord->ExceptionCode;
  if (__sanitizer::IsHandledDeadlyException(exception_code))
    return __asan_unhandled_exception_filter(info);
  if (user_seh_handler)
    return user_seh_handler(info);
  // Bubble out to the default exception filter.
  if (default_seh_handler)
    return default_seh_handler(info);
  return EXCEPTION_CONTINUE_SEARCH;
}

INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter,
                   LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) {
  CHECK(REAL(SetUnhandledExceptionFilter));
  if (ExceptionFilter == &SEHHandler)
    return REAL(SetUnhandledExceptionFilter)(ExceptionFilter);
  // We record the user provided exception handler to be called for all the
  // exceptions unhandled by asan.
  Swap(ExceptionFilter, user_seh_handler);
  return ExceptionFilter;
}

INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
  CHECK(REAL(RtlRaiseException));
  // This is a noreturn function, unless it's one of the exceptions raised to
  // communicate with the debugger, such as the one from OutputDebugString.
if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C) __asan_handle_no_return(); REAL(RtlRaiseException)(ExceptionRecord); } INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { CHECK(REAL(RaiseException)); __asan_handle_no_return(); REAL(RaiseException)(a, b, c, d); } #ifdef _WIN64 INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) { // NOLINT CHECK(REAL(__C_specific_handler)); __asan_handle_no_return(); return REAL(__C_specific_handler)(a, b, c, d); } #else INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) { CHECK(REAL(_except_handler3)); __asan_handle_no_return(); return REAL(_except_handler3)(a, b, c, d); } #if ASAN_DYNAMIC // This handler is named differently in -MT and -MD CRTs. #define _except_handler4 _except_handler4_common #endif INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { CHECK(REAL(_except_handler4)); __asan_handle_no_return(); return REAL(_except_handler4)(a, b, c, d); } #endif static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { AsanThread *t = (AsanThread*)arg; SetCurrentThread(t); return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr); } INTERCEPTOR_WINAPI(DWORD, CreateThread, void* security, uptr stack_size, DWORD (__stdcall *start_routine)(void*), void* arg, DWORD thr_flags, void* tid) { // Strict init-order checking is thread-hostile. if (flags()->strict_init_order) StopInitOrderChecking(); GET_STACK_TRACE_THREAD; // FIXME: The CreateThread interceptor is not the same as a pthread_create // one. This is a bandaid fix for PR22025. bool detached = false; // FIXME: how can we determine it on Windows? u32 current_tid = GetCurrentTidOrInvalid(); AsanThread *t = AsanThread::Create(start_routine, arg, current_tid, &stack, detached); return REAL(CreateThread)(security, stack_size, asan_thread_start, t, thr_flags, tid); } // }}} namespace __asan { void InitializePlatformInterceptors() { ASAN_INTERCEPT_FUNC(CreateThread); ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter); #ifdef _WIN64 ASAN_INTERCEPT_FUNC(__C_specific_handler); #else ASAN_INTERCEPT_FUNC(_except_handler3); ASAN_INTERCEPT_FUNC(_except_handler4); #endif // Try to intercept kernel32!RaiseException, and if that fails, intercept // ntdll!RtlRaiseException instead. if (!::__interception::OverrideFunction("RaiseException", (uptr)WRAP(RaiseException), (uptr *)&REAL(RaiseException))) { CHECK(::__interception::OverrideFunction("RtlRaiseException", (uptr)WRAP(RtlRaiseException), (uptr *)&REAL(RtlRaiseException))); } } void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { UNIMPLEMENTED(); } // ---------------------- TSD ---------------- {{{ static bool tsd_key_inited = false; static __declspec(thread) void *fake_tsd = 0; void AsanTSDInit(void (*destructor)(void *tsd)) { // FIXME: we're ignoring the destructor for now. 
  tsd_key_inited = true;
}

void *AsanTSDGet() {
  CHECK(tsd_key_inited);
  return fake_tsd;
}

void AsanTSDSet(void *tsd) {
  CHECK(tsd_key_inited);
  fake_tsd = tsd;
}

void PlatformTSDDtor(void *tsd) {
  AsanThread::TSDDtor(tsd);
}
// }}}

// ---------------------- Various stuff ---------------- {{{
void *AsanDoesNotSupportStaticLinkage() {
#if defined(_DEBUG)
#error Please build the runtime with a non-debug CRT: /MD or /MT
#endif
  return 0;
}

+uptr FindDynamicShadowStart() {
+  uptr granularity = GetMmapGranularity();
+  uptr alignment = 8 * granularity;
+  uptr left_padding = granularity;
+  uptr space_size = kHighShadowEnd + left_padding;
+  uptr shadow_start =
+      FindAvailableMemoryRange(space_size, alignment, granularity, nullptr);
+  CHECK_NE((uptr)0, shadow_start);
+  CHECK(IsAligned(shadow_start, alignment));
+  return shadow_start;
+}
+
void AsanCheckDynamicRTPrereqs() {}

void AsanCheckIncompatibleRT() {}

void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
  UNIMPLEMENTED();
}

void AsanOnDeadlySignal(int, void *siginfo, void *context) {
  UNIMPLEMENTED();
}

#if SANITIZER_WINDOWS64
// Exception handler for dealing with shadow memory.
static LONG CALLBACK
ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
  uptr page_size = GetPageSizeCached();
  // Only handle access violations.
  if (exception_pointers->ExceptionRecord->ExceptionCode !=
      EXCEPTION_ACCESS_VIOLATION) {
    return EXCEPTION_CONTINUE_SEARCH;
  }

  // Only handle access violations that land within the shadow memory.
  uptr addr =
      (uptr)(exception_pointers->ExceptionRecord->ExceptionInformation[1]);

  // Check valid shadow range.
  if (!AddrIsInShadow(addr))
    return EXCEPTION_CONTINUE_SEARCH;

  // This is an access violation while trying to read from the shadow. Commit
  // the relevant page and let execution continue.

  // Determine the address of the page that is being accessed.
  uptr page = RoundDownTo(addr, page_size);

  // Query the existing page.
  MEMORY_BASIC_INFORMATION mem_info = {};
  if (::VirtualQuery((LPVOID)page, &mem_info, sizeof(mem_info)) == 0)
    return EXCEPTION_CONTINUE_SEARCH;

  // Commit the page.
  uptr result =
      (uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE);
  if (result != page)
    return EXCEPTION_CONTINUE_SEARCH;

  // The page mapping succeeded, so continue execution as usual.
  return EXCEPTION_CONTINUE_EXECUTION;
}
#endif

void InitializePlatformExceptionHandlers() {
#if SANITIZER_WINDOWS64
  // On Win64, we map memory on demand with an access violation handler.
  // Install our exception handler.
  CHECK(AddVectoredExceptionHandler(TRUE, &ShadowExceptionHandler));
#endif
}

bool IsSystemHeapAddress(uptr addr) {
  return ::HeapValidate(GetProcessHeap(), 0, (void*)addr) != FALSE;
}

// We want to install our own exception handler (EH) to print helpful reports
// on access violations and whatnot. Unfortunately, the CRT initializers assume
// they are run before any user code and drop any previously-installed EHs on
// the floor, so we can't install our handler inside __asan_init.
// (See crt0dat.c in the CRT sources for the details)
//
// Things get even more complicated with the dynamic runtime, as it finishes
// its initialization before the .exe module CRT begins to initialize.
//
// For the static runtime (-MT), it's enough to put a callback to
// __asan_set_seh_filter in the last section for C initializers.
//
// For the dynamic runtime (-MD), we want to link the same
// asan_dynamic_runtime_thunk.lib to all the modules, thus __asan_set_seh_filter
// will be called for each instrumented module.
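// The registration below relies on SetUnhandledExceptionFilter returning the
// previously installed filter so handlers can be chained. A minimal sketch of
// that pattern against the real Win32 API; MyFilter and g_prev_filter are
// hypothetical names, not part of this runtime:
//
//   static LPTOP_LEVEL_EXCEPTION_FILTER g_prev_filter;
//   static LONG WINAPI MyFilter(EXCEPTION_POINTERS *info) {
//     /* Handle what we own; otherwise delegate to the saved filter. */
//     return g_prev_filter ? g_prev_filter(info) : EXCEPTION_CONTINUE_SEARCH;
//   }
//   /* At startup: g_prev_filter = SetUnhandledExceptionFilter(&MyFilter); */
//
// Which CRT section the registration callback is placed in (see below)
// decides when this chaining is set up relative to CRT initialization.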
This ensures that at least one // __asan_set_seh_filter call happens after the .exe module CRT is initialized. extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __asan_set_seh_filter() { // We should only store the previous handler if it's not our own handler in // order to avoid loops in the EH chain. auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler); if (prev_seh_handler != &SEHHandler) default_seh_handler = prev_seh_handler; return 0; } #if !ASAN_DYNAMIC // The CRT runs initializers in this order: // - C initializers, from XIA to XIZ // - C++ initializers, from XCA to XCZ // Prior to 2015, the CRT set the unhandled exception filter at priority XIY, // near the end of C initialization. Starting in 2015, it was moved to the // beginning of C++ initialization. We set our priority to XCAB to run // immediately after the CRT runs. This way, our exception filter is called // first and we can delegate to their filter if appropriate. #pragma section(".CRT$XCAB", long, read) // NOLINT __declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() = __asan_set_seh_filter; // Piggyback on the TLS initialization callback directory to initialize asan as // early as possible. Initializers in .CRT$XL* are called directly by ntdll, // which run before the CRT. Users also add code to .CRT$XLC, so it's important // to run our initializers first. static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) { if (reason == DLL_PROCESS_ATTACH) __asan_init(); } #pragma section(".CRT$XLAB", long, read) // NOLINT __declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *, unsigned long, void *) = asan_thread_init; #endif WIN_FORCE_LINK(__asan_dso_reg_hook) // }}} } // namespace __asan #endif // SANITIZER_WINDOWS Index: vendor/compiler-rt/dist/lib/asan/scripts/asan_device_setup =================================================================== --- vendor/compiler-rt/dist/lib/asan/scripts/asan_device_setup (revision 320960) +++ vendor/compiler-rt/dist/lib/asan/scripts/asan_device_setup (revision 320961) @@ -1,443 +1,443 @@ #!/bin/bash #===- lib/asan/scripts/asan_device_setup -----------------------------------===# # # The LLVM Compiler Infrastructure # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # # Prepare Android device to run ASan applications. # #===------------------------------------------------------------------------===# set -e HERE="$(cd "$(dirname "$0")" && pwd)" revert=no extra_options= device= lib= use_su=0 function usage { echo "usage: $0 [--revert] [--device device-id] [--lib path] [--extra-options options]" echo " --revert: Uninstall ASan from the device." echo " --lib: Path to ASan runtime library." echo " --extra-options: Extra ASAN_OPTIONS." echo " --device: Install to the given device. Use 'adb devices' to find" echo " device-id." echo " --use-su: Use 'su -c' prefix for every adb command instead of using" echo " 'adb root' once." 
echo exit 1 } function adb_push { if [ $use_su -eq 0 ]; then $ADB push "$1" "$2" else local FILENAME=$(basename $1) $ADB push "$1" "/data/local/tmp/$FILENAME" $ADB shell su -c "rm \\\"$2/$FILENAME\\\"" >&/dev/null $ADB shell su -c "cat \\\"/data/local/tmp/$FILENAME\\\" > \\\"$2/$FILENAME\\\"" $ADB shell su -c "rm \\\"/data/local/tmp/$FILENAME\\\"" fi } function adb_remount { if [ $use_su -eq 0 ]; then $ADB remount else local STORAGE=`$ADB shell mount | grep /system | cut -d ' ' -f1` if [ "$STORAGE" != "" ]; then echo Remounting $STORAGE at /system - $ADB shell su -c "mount -o remount,rw $STORAGE /system" + $ADB shell su -c "mount -o rw,remount $STORAGE /system" else echo Failed to get storage device name for "/system" mount point fi fi } function adb_shell { if [ $use_su -eq 0 ]; then $ADB shell $@ else $ADB shell su -c "$*" fi } function adb_root { if [ $use_su -eq 0 ]; then $ADB root fi } function adb_wait_for_device { $ADB wait-for-device } function adb_pull { if [ $use_su -eq 0 ]; then $ADB pull "$1" "$2" else local FILENAME=$(basename $1) $ADB shell rm "/data/local/tmp/$FILENAME" >&/dev/null $ADB shell su -c "[ -f \\\"$1\\\" ] && cat \\\"$1\\\" > \\\"/data/local/tmp/$FILENAME\\\" && chown root.shell \\\"/data/local/tmp/$FILENAME\\\" && chmod 755 \\\"/data/local/tmp/$FILENAME\\\"" && $ADB pull "/data/local/tmp/$FILENAME" "$2" >&/dev/null && $ADB shell "rm \"/data/local/tmp/$FILENAME\"" fi } function get_device_arch { # OUT OUT64 local _outvar=$1 local _outvar64=$2 local _ABI=$(adb_shell getprop ro.product.cpu.abi) local _ARCH= local _ARCH64= if [[ $_ABI == x86* ]]; then _ARCH=i686 elif [[ $_ABI == armeabi* ]]; then _ARCH=arm elif [[ $_ABI == arm64-v8a* ]]; then _ARCH=arm _ARCH64=aarch64 else echo "Unrecognized device ABI: $_ABI" exit 1 fi eval $_outvar=\$_ARCH eval $_outvar64=\$_ARCH64 } while [[ $# > 0 ]]; do case $1 in --revert) revert=yes ;; --extra-options) shift if [[ $# == 0 ]]; then echo "--extra-options requires an argument." exit 1 fi extra_options="$1" ;; --lib) shift if [[ $# == 0 ]]; then echo "--lib requires an argument." exit 1 fi lib="$1" ;; --device) shift if [[ $# == 0 ]]; then echo "--device requires an argument." exit 1 fi device="$1" ;; --use-su) use_su=1 ;; *) usage ;; esac shift done ADB=${ADB:-adb} if [[ x$device != x ]]; then ADB="$ADB -s $device" fi if [ $use_su -eq 1 ]; then # Test if 'su' is present on the device SU_TEST_OUT=`$ADB shell su -c "echo foo" 2>&1 | sed 's/\r$//'` if [ $? != 0 -o "$SU_TEST_OUT" != "foo" ]; then echo "ERROR: Cannot use 'su -c':" echo "$ adb shell su -c \"echo foo\"" echo $SU_TEST_OUT echo "Check that 'su' binary is correctly installed on the device or omit" echo " --use-su flag" exit 1 fi fi echo '>> Remounting /system rw' adb_wait_for_device adb_root adb_wait_for_device adb_remount adb_wait_for_device get_device_arch ARCH ARCH64 echo "Target architecture: $ARCH" ASAN_RT="libclang_rt.asan-$ARCH-android.so" if [[ -n $ARCH64 ]]; then echo "Target architecture: $ARCH64" ASAN_RT64="libclang_rt.asan-$ARCH64-android.so" fi if [[ x$revert == xyes ]]; then echo '>> Uninstalling ASan' if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then echo '>> Pre-L device detected.' adb_shell mv /system/bin/app_process.real /system/bin/app_process adb_shell rm /system/bin/asanwrapper elif ! adb_shell ls -l /system/bin/app_process64.real | grep -o 'No such file or directory' >&/dev/null; then # 64-bit installation. 
adb_shell mv /system/bin/app_process32.real /system/bin/app_process32 adb_shell mv /system/bin/app_process64.real /system/bin/app_process64 adb_shell rm /system/bin/asanwrapper adb_shell rm /system/bin/asanwrapper64 else # 32-bit installation. adb_shell rm /system/bin/app_process.wrap adb_shell rm /system/bin/asanwrapper adb_shell rm /system/bin/app_process adb_shell ln -s /system/bin/app_process32 /system/bin/app_process fi echo '>> Restarting shell' adb_shell stop adb_shell start # Remove the library on the last step to give a chance to the 'su' binary to # be executed without problem. adb_shell rm /system/lib/$ASAN_RT echo '>> Done' exit 0 fi if [[ -d "$lib" ]]; then ASAN_RT_PATH="$lib" elif [[ -f "$lib" && "$lib" == *"$ASAN_RT" ]]; then ASAN_RT_PATH=$(dirname "$lib") elif [[ -f "$HERE/$ASAN_RT" ]]; then ASAN_RT_PATH="$HERE" elif [[ $(basename "$HERE") == "bin" ]]; then # We could be in the toolchain's base directory. # Consider ../lib, ../lib/asan, ../lib/linux, # ../lib/clang/$VERSION/lib/linux, and ../lib64/clang/$VERSION/lib/linux. P=$(ls "$HERE"/../lib/"$ASAN_RT" \ "$HERE"/../lib/asan/"$ASAN_RT" \ "$HERE"/../lib/linux/"$ASAN_RT" \ "$HERE"/../lib/clang/*/lib/linux/"$ASAN_RT" \ "$HERE"/../lib64/clang/*/lib/linux/"$ASAN_RT" 2>/dev/null | sort | tail -1) if [[ -n "$P" ]]; then ASAN_RT_PATH="$(dirname "$P")" fi fi if [[ -z "$ASAN_RT_PATH" || ! -f "$ASAN_RT_PATH/$ASAN_RT" ]]; then echo ">> ASan runtime library not found" exit 1 fi if [[ -n "$ASAN_RT64" ]]; then if [[ -z "$ASAN_RT_PATH" || ! -f "$ASAN_RT_PATH/$ASAN_RT64" ]]; then echo ">> ASan runtime library not found" exit 1 fi fi TMPDIRBASE=$(mktemp -d) TMPDIROLD="$TMPDIRBASE/old" TMPDIR="$TMPDIRBASE/new" mkdir "$TMPDIROLD" RELEASE=$(adb_shell getprop ro.build.version.release) PRE_L=0 if echo "$RELEASE" | grep '^4\.' >&/dev/null; then PRE_L=1 fi if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then if adb_pull /system/bin/app_process.real /dev/null >&/dev/null; then echo '>> Old-style ASan installation detected. Reverting.' adb_shell mv /system/bin/app_process.real /system/bin/app_process fi echo '>> Pre-L device detected. Setting up app_process symlink.' adb_shell mv /system/bin/app_process /system/bin/app_process32 adb_shell ln -s /system/bin/app_process32 /system/bin/app_process fi echo '>> Copying files from the device' if [[ -n "$ASAN_RT64" ]]; then adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true adb_pull /system/lib64/"$ASAN_RT64" "$TMPDIROLD" || true adb_pull /system/bin/app_process32 "$TMPDIROLD" || true adb_pull /system/bin/app_process32.real "$TMPDIROLD" || true adb_pull /system/bin/app_process64 "$TMPDIROLD" || true adb_pull /system/bin/app_process64.real "$TMPDIROLD" || true adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true adb_pull /system/bin/asanwrapper64 "$TMPDIROLD" || true else adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true adb_pull /system/bin/app_process32 "$TMPDIROLD" || true adb_pull /system/bin/app_process.wrap "$TMPDIROLD" || true adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true fi cp -r "$TMPDIROLD" "$TMPDIR" if [[ -f "$TMPDIR/app_process.wrap" || -f "$TMPDIR/app_process64.real" ]]; then echo ">> Previous installation detected" else echo ">> New installation" fi echo '>> Generating wrappers' cp "$ASAN_RT_PATH/$ASAN_RT" "$TMPDIR/" if [[ -n "$ASAN_RT64" ]]; then cp "$ASAN_RT_PATH/$ASAN_RT64" "$TMPDIR/" fi ASAN_OPTIONS=start_deactivated=1,malloc_context_size=0 # The name of a symlink to libclang_rt.asan-$ARCH-android.so used in LD_PRELOAD. 
# The idea is to have the same name in lib and lib64 to keep it from falling
# apart when a 64-bit process spawns a 32-bit one, inheriting the environment.
ASAN_RT_SYMLINK=symlink-to-libclang_rt.asan

function generate_zygote_wrapper { # from, to
  local _from=$1
  local _to=$2
  if [[ PRE_L -eq 0 ]]; then
    # LD_PRELOAD parsing is broken in N if it starts with ":". Luckily, it is
    # unset in the system environment since L.
    local _ld_preload=$ASAN_RT_SYMLINK
  else
    local _ld_preload=\$LD_PRELOAD:$ASAN_RT_SYMLINK
  fi
  cat <<EOF >"$TMPDIR/$_from"
#!/system/bin/sh-from-zygote
ASAN_OPTIONS=$ASAN_OPTIONS \\
ASAN_ACTIVATION_OPTIONS=include_if_exists=/data/local/tmp/asan.options.%b \\
LD_PRELOAD=$_ld_preload \\
exec $_to \$@
EOF
}

if [[ x$extra_options != x ]] ; then
  ASAN_OPTIONS="$ASAN_OPTIONS,$extra_options"
fi

# Zygote wrapper.
if [[ -f "$TMPDIR/app_process64" ]]; then
  # A 64-bit device.
  if [[ ! -f "$TMPDIR/app_process64.real" ]]; then
    # New installation.
    mv "$TMPDIR/app_process32" "$TMPDIR/app_process32.real"
    mv "$TMPDIR/app_process64" "$TMPDIR/app_process64.real"
  fi
  generate_zygote_wrapper "app_process32" "/system/bin/app_process32.real"
  generate_zygote_wrapper "app_process64" "/system/bin/app_process64.real"
else
  # A 32-bit device.
  generate_zygote_wrapper "app_process.wrap" "/system/bin/app_process32"
fi

# General command-line tool wrapper (use for anything that's not started as
# zygote).
cat <<EOF >"$TMPDIR/asanwrapper"
#!/system/bin/sh
LD_PRELOAD=$ASAN_RT_SYMLINK \\
exec \$@
EOF

if [[ -n "$ASAN_RT64" ]]; then
  cat <<EOF >"$TMPDIR/asanwrapper64"
#!/system/bin/sh
LD_PRELOAD=$ASAN_RT_SYMLINK \\
exec \$@
EOF
fi

function install { # from, to, chmod, chcon
  local _from=$1
  local _to=$2
  local _mode=$3
  local _context=$4
  local _basename="$(basename "$_from")"
  echo "Installing $_to/$_basename $_mode $_context"
  adb_push "$_from" "$_to/$_basename"
  adb_shell chown root.shell "$_to/$_basename"
  if [[ -n "$_mode" ]]; then
    adb_shell chmod "$_mode" "$_to/$_basename"
  fi
  if [[ -n "$_context" ]]; then
    adb_shell chcon "$_context" "$_to/$_basename"
  fi
}

if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
  # Make SELinux happy by keeping app_process wrapper and the shell
  # it runs on in zygote domain.
  ENFORCING=0
  if adb_shell getenforce | grep Enforcing >/dev/null; then
    # Sometimes shell is not allowed to change file contexts.
    # Temporarily switch to permissive.
ENFORCING=1 adb_shell setenforce 0 fi if [[ PRE_L -eq 1 ]]; then CTX=u:object_r:system_file:s0 else CTX=u:object_r:zygote_exec:s0 fi echo '>> Pushing files to the device' if [[ -n "$ASAN_RT64" ]]; then install "$TMPDIR/$ASAN_RT" /system/lib 644 install "$TMPDIR/$ASAN_RT64" /system/lib64 644 install "$TMPDIR/app_process32" /system/bin 755 $CTX install "$TMPDIR/app_process32.real" /system/bin 755 $CTX install "$TMPDIR/app_process64" /system/bin 755 $CTX install "$TMPDIR/app_process64.real" /system/bin 755 $CTX install "$TMPDIR/asanwrapper" /system/bin 755 install "$TMPDIR/asanwrapper64" /system/bin 755 adb_shell ln -sf $ASAN_RT /system/lib/$ASAN_RT_SYMLINK adb_shell ln -sf $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK else install "$TMPDIR/$ASAN_RT" /system/lib 644 install "$TMPDIR/app_process32" /system/bin 755 $CTX install "$TMPDIR/app_process.wrap" /system/bin 755 $CTX install "$TMPDIR/asanwrapper" /system/bin 755 $CTX adb_shell ln -sf $ASAN_RT /system/lib/$ASAN_RT_SYMLINK adb_shell rm /system/bin/app_process adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process fi adb_shell cp /system/bin/sh /system/bin/sh-from-zygote adb_shell chcon $CTX /system/bin/sh-from-zygote if [ $ENFORCING == 1 ]; then adb_shell setenforce 1 fi echo '>> Restarting shell (asynchronous)' adb_shell stop adb_shell start echo '>> Please wait until the device restarts' else echo '>> Device is up to date' fi rm -r "$TMPDIRBASE" Index: vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt (revision 320960) +++ vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt (revision 320961) @@ -1,523 +1,528 @@ # This directory contains a large amount of C code which provides # generic implementations of the core runtime library along with optimized # architecture-specific code in various subdirectories. if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) cmake_minimum_required(VERSION 3.4.3) project(CompilerRTBuiltins C ASM) set(COMPILER_RT_STANDALONE_BUILD TRUE) set(COMPILER_RT_BUILTINS_STANDALONE_BUILD TRUE) list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_SOURCE_DIR}/../../cmake" "${CMAKE_SOURCE_DIR}/../../cmake/Modules") include(base-config-ix) include(CompilerRTUtils) load_llvm_config() construct_compiler_rt_default_triple() if(APPLE) include(CompilerRTDarwinUtils) endif() include(AddCompilerRT) endif() include(builtin-config-ix) # TODO: Need to add a mechanism for logging errors when builtin source files are # added to a sub-directory and not this CMakeLists file. 
set(GENERIC_SOURCES absvdi2.c absvsi2.c absvti2.c adddf3.c addsf3.c addtf3.c addvdi3.c addvsi3.c addvti3.c apple_versioning.c ashldi3.c ashlti3.c ashrdi3.c ashrti3.c bswapdi2.c bswapsi2.c - clear_cache.c clzdi2.c clzsi2.c clzti2.c cmpdi2.c cmpti2.c comparedf2.c comparesf2.c cpu_model.c ctzdi2.c ctzsi2.c ctzti2.c divdc3.c divdf3.c divdi3.c divmoddi4.c divmodsi4.c divsc3.c divsf3.c divsi3.c divtc3.c divti3.c divtf3.c divxc3.c - eprintf.c extendsfdf2.c extendhfsf2.c ffsdi2.c ffssi2.c ffsti2.c fixdfdi.c fixdfsi.c fixdfti.c fixsfdi.c fixsfsi.c fixsfti.c fixunsdfdi.c fixunsdfsi.c fixunsdfti.c fixunssfdi.c fixunssfsi.c fixunssfti.c fixunsxfdi.c fixunsxfsi.c fixunsxfti.c fixxfdi.c fixxfti.c floatdidf.c floatdisf.c floatdixf.c floatsidf.c floatsisf.c floattidf.c floattisf.c floattixf.c floatundidf.c floatundisf.c floatundixf.c floatunsidf.c floatunsisf.c floatuntidf.c floatuntisf.c floatuntixf.c int_util.c lshrdi3.c lshrti3.c moddi3.c modsi3.c modti3.c muldc3.c muldf3.c muldi3.c mulodi4.c mulosi4.c muloti4.c mulsc3.c mulsf3.c multi3.c multf3.c mulvdi3.c mulvsi3.c mulvti3.c mulxc3.c negdf2.c negdi2.c negsf2.c negti2.c negvdi2.c negvsi2.c negvti2.c os_version_check.c paritydi2.c paritysi2.c parityti2.c popcountdi2.c popcountsi2.c popcountti2.c powidf2.c powisf2.c powitf2.c powixf2.c subdf3.c subsf3.c subvdi3.c subvsi3.c subvti3.c subtf3.c trampoline_setup.c truncdfhf2.c truncdfsf2.c truncsfhf2.c ucmpdi2.c ucmpti2.c udivdi3.c udivmoddi4.c udivmodsi4.c udivmodti4.c udivsi3.c udivti3.c umoddi3.c umodsi3.c umodti3.c) set(GENERIC_TF_SOURCES comparetf2.c extenddftf2.c extendsftf2.c fixtfdi.c fixtfsi.c fixtfti.c fixunstfdi.c fixunstfsi.c fixunstfti.c floatditf.c floatsitf.c floattitf.c floatunditf.c floatunsitf.c floatuntitf.c multc3.c trunctfdf2.c trunctfsf2.c) option(COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN "Skip the atomic builtin (this may be needed if system headers are unavailable)" Off) -if(NOT COMPILER_RT_BAREMETAL_BUILD) +if(NOT FUCHSIA AND NOT COMPILER_RT_BAREMETAL_BUILD) set(GENERIC_SOURCES ${GENERIC_SOURCES} emutls.c - enable_execute_stack.c) + enable_execute_stack.c + eprintf.c) endif() if(COMPILER_RT_HAS_ATOMIC_KEYWORD AND NOT COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN) set(GENERIC_SOURCES ${GENERIC_SOURCES} atomic.c) endif() if(APPLE) set(GENERIC_SOURCES ${GENERIC_SOURCES} atomic_flag_clear.c atomic_flag_clear_explicit.c atomic_flag_test_and_set.c atomic_flag_test_and_set_explicit.c atomic_signal_fence.c atomic_thread_fence.c) endif() if (HAVE_UNWIND_H) set(GENERIC_SOURCES ${GENERIC_SOURCES} gcc_personality_v0.c) endif () + +if (NOT FUCHSIA) + set(GENERIC_SOURCES + ${GENERIC_SOURCES} + clear_cache.c) +endif() if (NOT MSVC) set(x86_64_SOURCES x86_64/chkstk.S x86_64/chkstk2.S x86_64/floatdidf.c x86_64/floatdisf.c x86_64/floatdixf.c x86_64/floatundidf.S x86_64/floatundisf.S x86_64/floatundixf.S ${GENERIC_SOURCES}) set(x86_64h_SOURCES ${x86_64_SOURCES}) if (WIN32) set(x86_64_SOURCES ${x86_64_SOURCES} x86_64/chkstk.S x86_64/chkstk2.S) endif() set(i386_SOURCES i386/ashldi3.S i386/ashrdi3.S i386/chkstk.S i386/chkstk2.S i386/divdi3.S i386/floatdidf.S i386/floatdisf.S i386/floatdixf.S i386/floatundidf.S i386/floatundisf.S i386/floatundixf.S i386/lshrdi3.S i386/moddi3.S i386/muldi3.S i386/udivdi3.S i386/umoddi3.S ${GENERIC_SOURCES}) if (WIN32) set(i386_SOURCES ${i386_SOURCES} i386/chkstk.S i386/chkstk2.S) endif() set(i686_SOURCES ${i386_SOURCES}) else () # MSVC # Use C versions of functions when building on MSVC # MSVC's assembler takes Intel syntax, not AT&T syntax. 
# Also use only MSVC compilable builtin implementations. set(x86_64_SOURCES x86_64/floatdidf.c x86_64/floatdisf.c x86_64/floatdixf.c ${GENERIC_SOURCES}) set(x86_64h_SOURCES ${x86_64_SOURCES}) set(i386_SOURCES ${GENERIC_SOURCES}) set(i686_SOURCES ${i386_SOURCES}) endif () # if (NOT MSVC) set(arm_SOURCES arm/bswapdi2.S arm/bswapsi2.S arm/clzdi2.S arm/clzsi2.S arm/comparesf2.S arm/divmodsi4.S arm/divsi3.S arm/modsi3.S arm/sync_fetch_and_add_4.S arm/sync_fetch_and_add_8.S arm/sync_fetch_and_and_4.S arm/sync_fetch_and_and_8.S arm/sync_fetch_and_max_4.S arm/sync_fetch_and_max_8.S arm/sync_fetch_and_min_4.S arm/sync_fetch_and_min_8.S arm/sync_fetch_and_nand_4.S arm/sync_fetch_and_nand_8.S arm/sync_fetch_and_or_4.S arm/sync_fetch_and_or_8.S arm/sync_fetch_and_sub_4.S arm/sync_fetch_and_sub_8.S arm/sync_fetch_and_umax_4.S arm/sync_fetch_and_umax_8.S arm/sync_fetch_and_umin_4.S arm/sync_fetch_and_umin_8.S arm/sync_fetch_and_xor_4.S arm/sync_fetch_and_xor_8.S arm/udivmodsi4.S arm/udivsi3.S arm/umodsi3.S ${GENERIC_SOURCES}) set(thumb1_SOURCES arm/divsi3.S arm/udivsi3.S arm/comparesf2.S arm/addsf3.S ${GENERIC_SOURCES}) set(arm_EABI_SOURCES arm/aeabi_cdcmp.S arm/aeabi_cdcmpeq_check_nan.c arm/aeabi_cfcmp.S arm/aeabi_cfcmpeq_check_nan.c arm/aeabi_dcmp.S arm/aeabi_div0.c arm/aeabi_drsub.c arm/aeabi_fcmp.S arm/aeabi_frsub.c arm/aeabi_idivmod.S arm/aeabi_ldivmod.S arm/aeabi_memcmp.S arm/aeabi_memcpy.S arm/aeabi_memmove.S arm/aeabi_memset.S arm/aeabi_uidivmod.S arm/aeabi_uldivmod.S) set(arm_Thumb1_JT_SOURCES arm/switch16.S arm/switch32.S arm/switch8.S arm/switchu8.S) set(arm_Thumb1_SjLj_EH_SOURCES arm/restore_vfp_d8_d15_regs.S arm/save_vfp_d8_d15_regs.S) set(arm_Thumb1_VFPv2_SOURCES arm/adddf3vfp.S arm/addsf3vfp.S arm/divdf3vfp.S arm/divsf3vfp.S arm/eqdf2vfp.S arm/eqsf2vfp.S arm/extendsfdf2vfp.S arm/fixdfsivfp.S arm/fixsfsivfp.S arm/fixunsdfsivfp.S arm/fixunssfsivfp.S arm/floatsidfvfp.S arm/floatsisfvfp.S arm/floatunssidfvfp.S arm/floatunssisfvfp.S arm/gedf2vfp.S arm/gesf2vfp.S arm/gtdf2vfp.S arm/gtsf2vfp.S arm/ledf2vfp.S arm/lesf2vfp.S arm/ltdf2vfp.S arm/ltsf2vfp.S arm/muldf3vfp.S arm/mulsf3vfp.S arm/nedf2vfp.S arm/negdf2vfp.S arm/negsf2vfp.S arm/nesf2vfp.S arm/subdf3vfp.S arm/subsf3vfp.S arm/truncdfsf2vfp.S arm/unorddf2vfp.S arm/unordsf2vfp.S) set(arm_Thumb1_icache_SOURCES arm/sync_synchronize.S) set(arm_Thumb1_SOURCES ${arm_Thumb1_JT_SOURCES} ${arm_Thumb1_SjLj_EH_SOURCES} ${arm_Thumb1_VFPv2_SOURCES} ${arm_Thumb1_icache_SOURCES}) if(MINGW) set(arm_SOURCES arm/aeabi_idivmod.S arm/aeabi_ldivmod.S arm/aeabi_uidivmod.S arm/aeabi_uldivmod.S divmoddi4.c divmodsi4.c divdi3.c divsi3.c fixdfdi.c fixsfdi.c fixunsdfdi.c fixunssfdi.c floatdidf.c floatdisf.c floatundidf.c floatundisf.c mingw_fixfloat.c moddi3.c udivmoddi4.c udivmodsi4.c udivsi3.c umoddi3.c emutls.c) elseif(NOT WIN32) # TODO the EABI sources should only be added to EABI targets set(arm_SOURCES ${arm_SOURCES} ${arm_EABI_SOURCES} ${arm_Thumb1_SOURCES}) set(thumb1_SOURCES ${thumb1_SOURCES} ${arm_EABI_SOURCES}) endif() set(aarch64_SOURCES ${GENERIC_TF_SOURCES} ${GENERIC_SOURCES}) set(armhf_SOURCES ${arm_SOURCES}) set(armv7_SOURCES ${arm_SOURCES}) set(armv7s_SOURCES ${arm_SOURCES}) set(armv7k_SOURCES ${arm_SOURCES}) set(arm64_SOURCES ${aarch64_SOURCES}) # macho_embedded archs set(armv6m_SOURCES ${thumb1_SOURCES}) set(armv7m_SOURCES ${arm_SOURCES}) set(armv7em_SOURCES ${arm_SOURCES}) set(mips_SOURCES ${GENERIC_SOURCES}) set(mipsel_SOURCES ${mips_SOURCES}) set(mips64_SOURCES ${GENERIC_TF_SOURCES} ${mips_SOURCES}) set(mips64el_SOURCES ${GENERIC_TF_SOURCES} 
${mips_SOURCES}) set(wasm32_SOURCES ${GENERIC_SOURCES}) set(wasm64_SOURCES ${GENERIC_SOURCES}) add_custom_target(builtins) set_target_properties(builtins PROPERTIES FOLDER "Compiler-RT Misc") if (APPLE) add_subdirectory(Darwin-excludes) add_subdirectory(macho_embedded) darwin_add_builtin_libraries(${BUILTIN_SUPPORTED_OS}) else () set(BUILTIN_CFLAGS "") append_list_if(COMPILER_RT_HAS_STD_C11_FLAG -std=c11 BUILTIN_CFLAGS) # These flags would normally be added to CMAKE_C_FLAGS by the llvm # cmake step. Add them manually if this is a standalone build. if(COMPILER_RT_STANDALONE_BUILD) append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC BUILTIN_CFLAGS) append_list_if(COMPILER_RT_HAS_FNO_BUILTIN_FLAG -fno-builtin BUILTIN_CFLAGS) append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG -fvisibility=hidden BUILTIN_CFLAGS) if(NOT COMPILER_RT_DEBUG) append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fomit-frame-pointer BUILTIN_CFLAGS) endif() endif() set(BUILTIN_DEFS "") append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG VISIBILITY_HIDDEN BUILTIN_DEFS) foreach (arch ${BUILTIN_SUPPORTED_ARCH}) if (CAN_TARGET_${arch}) # NOTE: some architectures (e.g. i386) have multiple names. Ensure that # we catch them all. set(_arch ${arch}) if("${arch}" STREQUAL "i686") set(_arch "i386|i686") endif() # Filter out generic versions of routines that are re-implemented in # architecture specific manner. This prevents multiple definitions of the # same symbols, making the symbol selection non-deterministic. foreach (_file ${${arch}_SOURCES}) if (${_file} MATCHES ${_arch}/*) get_filename_component(_name ${_file} NAME) string(REPLACE ".S" ".c" _cname "${_name}") list(REMOVE_ITEM ${arch}_SOURCES ${_cname}) endif () endforeach () # Needed for clear_cache on debug mode, due to r7's usage in inline asm. # Release mode already sets it via -O2/3, Debug mode doesn't. if (${arch} STREQUAL "armhf") list(APPEND BUILTIN_CFLAGS -fomit-frame-pointer -DCOMPILER_RT_ARMHF_TARGET) endif() add_compiler_rt_runtime(clang_rt.builtins STATIC ARCHS ${arch} SOURCES ${${arch}_SOURCES} DEFS ${BUILTIN_DEFS} CFLAGS ${BUILTIN_CFLAGS} PARENT_TARGET builtins) endif () endforeach () endif () add_dependencies(compiler-rt builtins) Index: vendor/compiler-rt/dist/lib/builtins/cpu_model.c =================================================================== --- vendor/compiler-rt/dist/lib/builtins/cpu_model.c (revision 320960) +++ vendor/compiler-rt/dist/lib/builtins/cpu_model.c (revision 320961) @@ -1,804 +1,615 @@ //===-- cpu_model.c - Support for __cpu_model builtin ------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is based on LLVM's lib/Support/Host.cpp. // It implements the operating system Host concept and builtin // __cpu_model for the compiler_rt library, for x86 only. 
//
//===----------------------------------------------------------------------===//

#if (defined(__i386__) || defined(_M_IX86) || \
     defined(__x86_64__) || defined(_M_X64)) && \
    (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))

#include <assert.h>

#define bool int
#define true 1
#define false 0

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifndef __has_attribute
#define __has_attribute(attr) 0
#endif

enum VendorSignatures {
  SIG_INTEL = 0x756e6547 /* Genu */,
  SIG_AMD = 0x68747541 /* Auth */
};

enum ProcessorVendors {
  VENDOR_INTEL = 1,
  VENDOR_AMD,
  VENDOR_OTHER,
  VENDOR_MAX
};

enum ProcessorTypes {
- INTEL_ATOM = 1,
+ INTEL_BONNELL = 1,
  INTEL_CORE2,
  INTEL_COREI7,
  AMDFAM10H,
  AMDFAM15H,
- INTEL_i386,
- INTEL_i486,
- INTEL_PENTIUM,
- INTEL_PENTIUM_PRO,
- INTEL_PENTIUM_II,
- INTEL_PENTIUM_III,
- INTEL_PENTIUM_IV,
- INTEL_PENTIUM_M,
- INTEL_CORE_DUO,
- INTEL_XEONPHI,
- INTEL_X86_64,
- INTEL_NOCONA,
- INTEL_PRESCOTT,
- AMD_i486,
- AMDPENTIUM,
- AMDATHLON,
- AMDFAM14H,
- AMDFAM16H,
+ INTEL_SILVERMONT,
+ INTEL_KNL,
+ AMD_BTVER1,
+ AMD_BTVER2,
+ AMDFAM17H,
  CPU_TYPE_MAX
};

enum ProcessorSubtypes {
  INTEL_COREI7_NEHALEM = 1,
  INTEL_COREI7_WESTMERE,
  INTEL_COREI7_SANDYBRIDGE,
  AMDFAM10H_BARCELONA,
  AMDFAM10H_SHANGHAI,
  AMDFAM10H_ISTANBUL,
  AMDFAM15H_BDVER1,
  AMDFAM15H_BDVER2,
- INTEL_PENTIUM_MMX,
- INTEL_CORE2_65,
- INTEL_CORE2_45,
+ AMDFAM15H_BDVER3,
+ AMDFAM15H_BDVER4,
+ AMDFAM17H_ZNVER1,
  INTEL_COREI7_IVYBRIDGE,
  INTEL_COREI7_HASWELL,
  INTEL_COREI7_BROADWELL,
  INTEL_COREI7_SKYLAKE,
  INTEL_COREI7_SKYLAKE_AVX512,
- INTEL_ATOM_BONNELL,
- INTEL_ATOM_SILVERMONT,
- INTEL_KNIGHTS_LANDING,
- AMDPENTIUM_K6,
- AMDPENTIUM_K62,
- AMDPENTIUM_K63,
- AMDPENTIUM_GEODE,
- AMDATHLON_TBIRD,
- AMDATHLON_MP,
- AMDATHLON_XP,
- AMDATHLON_K8SSE3,
- AMDATHLON_OPTERON,
- AMDATHLON_FX,
- AMDATHLON_64,
- AMD_BTVER1,
- AMD_BTVER2,
- AMDFAM15H_BDVER3,
- AMDFAM15H_BDVER4,
  CPU_SUBTYPE_MAX
};

enum ProcessorFeatures {
  FEATURE_CMOV = 0,
  FEATURE_MMX,
  FEATURE_POPCNT,
  FEATURE_SSE,
  FEATURE_SSE2,
  FEATURE_SSE3,
  FEATURE_SSSE3,
  FEATURE_SSE4_1,
  FEATURE_SSE4_2,
  FEATURE_AVX,
  FEATURE_AVX2,
- FEATURE_AVX512,
- FEATURE_AVX512SAVE,
- FEATURE_MOVBE,
- FEATURE_ADX,
- FEATURE_EM64T
+ FEATURE_SSE4_A,
+ FEATURE_FMA4,
+ FEATURE_XOP,
+ FEATURE_FMA,
+ FEATURE_AVX512F,
+ FEATURE_BMI,
+ FEATURE_BMI2,
+ FEATURE_AES,
+ FEATURE_PCLMUL,
+ FEATURE_AVX512VL,
+ FEATURE_AVX512BW,
+ FEATURE_AVX512DQ,
+ FEATURE_AVX512CD,
+ FEATURE_AVX512ER,
+ FEATURE_AVX512PF,
+ FEATURE_AVX512VBMI,
+ FEATURE_AVX512IFMA,
+ FEATURE_AVX5124VNNIW,
+ FEATURE_AVX5124FMAPS,
+ FEATURE_AVX512VPOPCNTDQ
};

// The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max).
// Check motivated by bug reports for OpenSSL crashing on CPUs without CPUID
// support. Consequently, for i386, the presence of CPUID is checked first
// via the corresponding eflags bit.
static bool isCpuIdSupported() {
#if defined(__GNUC__) || defined(__clang__)
#if defined(__i386__)
  int __cpuid_supported;
  __asm__(" pushfl\n"
          " popl %%eax\n"
          " movl %%eax,%%ecx\n"
          " xorl $0x00200000,%%eax\n"
          " pushl %%eax\n"
          " popfl\n"
          " pushfl\n"
          " popl %%eax\n"
          " movl $0,%0\n"
          " cmpl %%eax,%%ecx\n"
          " je 1f\n"
          " movl $1,%0\n"
          "1:"
          : "=r"(__cpuid_supported) : : "eax", "ecx");
  if (!__cpuid_supported)
    return false;
#endif
  return true;
#endif
  return true;
}

// This code is copied from lib/Support/Host.cpp.
// Changes to either file should be mirrored in the other.

/// getX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in
/// the specified arguments. If we can't run cpuid on the host, return true.
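// The signature change below makes the cpuid helpers report failure by
// returning true instead of asserting, so callers can bail out gracefully.
// A minimal sketch of the same calling convention using the __get_cpuid
// helper from GCC/Clang's <cpuid.h>; get_vendor_signature is a hypothetical
// example, not part of this file:
//
//   #include <cpuid.h>
//   static int get_vendor_signature(unsigned *sig) {
//     unsigned eax, ebx, ecx, edx;
//     if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
//       return 1;  /* cpuid unavailable: report failure ("return true") */
//     *sig = ebx;  /* e.g. 0x756e6547 ("Genu") for Intel, per the enum above */
//     return 0;
//   }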
-static void getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX, +static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX, unsigned *rECX, unsigned *rEDX) { #if defined(__GNUC__) || defined(__clang__) #if defined(__x86_64__) - // gcc doesn't know cpuid would clobber ebx/rbx. Preseve it manually. + // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually. + // FIXME: should we save this for Clang? __asm__("movq\t%%rbx, %%rsi\n\t" "cpuid\n\t" "xchgq\t%%rbx, %%rsi\n\t" : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX) : "a"(value)); + return false; #elif defined(__i386__) __asm__("movl\t%%ebx, %%esi\n\t" "cpuid\n\t" "xchgl\t%%ebx, %%esi\n\t" : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX) : "a"(value)); -// pedantic #else returns to appease -Wunreachable-code (so we don't generate -// postprocessed code that looks like "return true; return false;") + return false; #else - assert(0 && "This method is defined only for x86."); + return true; #endif #elif defined(_MSC_VER) // The MSVC intrinsic is portable across x86 and x64. int registers[4]; __cpuid(registers, value); *rEAX = registers[0]; *rEBX = registers[1]; *rECX = registers[2]; *rEDX = registers[3]; + return false; #else - assert(0 && "This method is defined only for GNUC, Clang or MSVC."); + return true; #endif } /// getX86CpuIDAndInfoEx - Execute the specified cpuid with subleaf and return /// the 4 values in the specified arguments. If we can't run cpuid on the host, /// return true. -static void getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf, +static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf, unsigned *rEAX, unsigned *rEBX, unsigned *rECX, unsigned *rEDX) { #if defined(__x86_64__) || defined(_M_X64) #if defined(__GNUC__) || defined(__clang__) // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually. // FIXME: should we save this for Clang? __asm__("movq\t%%rbx, %%rsi\n\t" "cpuid\n\t" "xchgq\t%%rbx, %%rsi\n\t" : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX) : "a"(value), "c"(subleaf)); + return false; #elif defined(_MSC_VER) int registers[4]; __cpuidex(registers, value, subleaf); *rEAX = registers[0]; *rEBX = registers[1]; *rECX = registers[2]; *rEDX = registers[3]; + return false; #else - assert(0 && "This method is defined only for GNUC, Clang or MSVC."); + return true; #endif #elif defined(__i386__) || defined(_M_IX86) #if defined(__GNUC__) || defined(__clang__) __asm__("movl\t%%ebx, %%esi\n\t" "cpuid\n\t" "xchgl\t%%ebx, %%esi\n\t" : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX) : "a"(value), "c"(subleaf)); + return false; #elif defined(_MSC_VER) __asm { mov eax,value mov ecx,subleaf cpuid mov esi,rEAX mov dword ptr [esi],eax mov esi,rEBX mov dword ptr [esi],ebx mov esi,rECX mov dword ptr [esi],ecx mov esi,rEDX mov dword ptr [esi],edx } + return false; #else - assert(0 && "This method is defined only for GNUC, Clang or MSVC."); + return true; #endif #else - assert(0 && "This method is defined only for x86."); + return true; #endif } // Read control register 0 (XCR0). Used to detect features such as AVX. static bool getX86XCR0(unsigned *rEAX, unsigned *rEDX) { #if defined(__GNUC__) || defined(__clang__) // Check xgetbv; this uses a .byte sequence instead of the instruction // directly because older assemblers do not include support for xgetbv and // there is no easy way to conditionally compile based on the assembler used. 
__asm__(".byte 0x0f, 0x01, 0xd0" : "=a"(*rEAX), "=d"(*rEDX) : "c"(0)); return false; #elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK) unsigned long long Result = _xgetbv(_XCR_XFEATURE_ENABLED_MASK); *rEAX = Result; *rEDX = Result >> 32; return false; #else return true; #endif } static void detectX86FamilyModel(unsigned EAX, unsigned *Family, unsigned *Model) { *Family = (EAX >> 8) & 0xf; // Bits 8 - 11 *Model = (EAX >> 4) & 0xf; // Bits 4 - 7 if (*Family == 6 || *Family == 0xf) { if (*Family == 0xf) // Examine extended family ID if family ID is F. *Family += (EAX >> 20) & 0xff; // Bits 20 - 27 // Examine extended model ID if family ID is 6 or F. *Model += ((EAX >> 16) & 0xf) << 4; // Bits 16 - 19 } } -static void getIntelProcessorTypeAndSubtype(unsigned int Family, - unsigned int Model, - unsigned int Brand_id, - unsigned int Features, - unsigned *Type, unsigned *Subtype) { +static void +getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, + unsigned Brand_id, unsigned Features, + unsigned *Type, unsigned *Subtype) { if (Brand_id != 0) return; switch (Family) { - case 3: - *Type = INTEL_i386; - break; - case 4: - switch (Model) { - case 0: // Intel486 DX processors - case 1: // Intel486 DX processors - case 2: // Intel486 SX processors - case 3: // Intel487 processors, IntelDX2 OverDrive processors, - // IntelDX2 processors - case 4: // Intel486 SL processor - case 5: // IntelSX2 processors - case 7: // Write-Back Enhanced IntelDX2 processors - case 8: // IntelDX4 OverDrive processors, IntelDX4 processors - default: - *Type = INTEL_i486; - break; - } - case 5: - switch (Model) { - case 1: // Pentium OverDrive processor for Pentium processor (60, 66), - // Pentium processors (60, 66) - case 2: // Pentium OverDrive processor for Pentium processor (75, 90, - // 100, 120, 133), Pentium processors (75, 90, 100, 120, 133, - // 150, 166, 200) - case 3: // Pentium OverDrive processors for Intel486 processor-based - // systems - *Type = INTEL_PENTIUM; - break; - case 4: // Pentium OverDrive processor with MMX technology for Pentium - // processor (75, 90, 100, 120, 133), Pentium processor with - // MMX technology (166, 200) - *Type = INTEL_PENTIUM; - *Subtype = INTEL_PENTIUM_MMX; - break; - default: - *Type = INTEL_PENTIUM; - break; - } case 6: switch (Model) { - case 0x01: // Pentium Pro processor - *Type = INTEL_PENTIUM_PRO; - break; - case 0x03: // Intel Pentium II OverDrive processor, Pentium II processor, - // model 03 - case 0x05: // Pentium II processor, model 05, Pentium II Xeon processor, - // model 05, and Intel Celeron processor, model 05 - case 0x06: // Celeron processor, model 06 - *Type = INTEL_PENTIUM_II; - break; - case 0x07: // Pentium III processor, model 07, and Pentium III Xeon - // processor, model 07 - case 0x08: // Pentium III processor, model 08, Pentium III Xeon processor, - // model 08, and Celeron processor, model 08 - case 0x0a: // Pentium III Xeon processor, model 0Ah - case 0x0b: // Pentium III processor, model 0Bh - *Type = INTEL_PENTIUM_III; - break; - case 0x09: // Intel Pentium M processor, Intel Celeron M processor model 09. - case 0x0d: // Intel Pentium M processor, Intel Celeron M processor, model - // 0Dh. All processors are manufactured using the 90 nm process. - case 0x15: // Intel EP80579 Integrated Processor and Intel EP80579 - // Integrated Processor with Intel QuickAssist Technology - *Type = INTEL_PENTIUM_M; - break; - case 0x0e: // Intel Core Duo processor, Intel Core Solo processor, model - // 0Eh. 
All processors are manufactured using the 65 nm process. - *Type = INTEL_CORE_DUO; - break; // yonah case 0x0f: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile // processor, Intel Core 2 Quad processor, Intel Core 2 Quad // mobile processor, Intel Core 2 Extreme processor, Intel // Pentium Dual-Core processor, Intel Xeon processor, model // 0Fh. All processors are manufactured using the 65 nm process. case 0x16: // Intel Celeron processor model 16h. All processors are // manufactured using the 65 nm process - *Type = INTEL_CORE2; // "core2" - *Subtype = INTEL_CORE2_65; - break; case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model // 17h. All processors are manufactured using the 45 nm process. // // 45nm: Penryn , Wolfdale, Yorkfield (XE) case 0x1d: // Intel Xeon processor MP. All processors are manufactured using // the 45 nm process. *Type = INTEL_CORE2; // "penryn" - *Subtype = INTEL_CORE2_45; break; case 0x1a: // Intel Core i7 processor and Intel Xeon processor. All // processors are manufactured using the 45 nm process. case 0x1e: // Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz. // As found in a Summer 2010 model iMac. case 0x1f: - case 0x2e: // Nehalem EX + case 0x2e: // Nehalem EX *Type = INTEL_COREI7; // "nehalem" *Subtype = INTEL_COREI7_NEHALEM; break; case 0x25: // Intel Core i7, laptop version. case 0x2c: // Intel Core i7 processor and Intel Xeon processor. All // processors are manufactured using the 32 nm process. case 0x2f: // Westmere EX *Type = INTEL_COREI7; // "westmere" *Subtype = INTEL_COREI7_WESTMERE; break; case 0x2a: // Intel Core i7 processor. All processors are manufactured // using the 32 nm process. case 0x2d: *Type = INTEL_COREI7; //"sandybridge" *Subtype = INTEL_COREI7_SANDYBRIDGE; break; case 0x3a: - case 0x3e: // Ivy Bridge EP + case 0x3e: // Ivy Bridge EP *Type = INTEL_COREI7; // "ivybridge" *Subtype = INTEL_COREI7_IVYBRIDGE; break; // Haswell: case 0x3c: case 0x3f: case 0x45: case 0x46: *Type = INTEL_COREI7; // "haswell" *Subtype = INTEL_COREI7_HASWELL; break; // Broadwell: case 0x3d: case 0x47: case 0x4f: case 0x56: *Type = INTEL_COREI7; // "broadwell" *Subtype = INTEL_COREI7_BROADWELL; break; // Skylake: - case 0x4e: - *Type = INTEL_COREI7; // "skylake-avx512" - *Subtype = INTEL_COREI7_SKYLAKE_AVX512; - break; - case 0x5e: + case 0x4e: // Skylake mobile + case 0x5e: // Skylake desktop + case 0x8e: // Kaby Lake mobile + case 0x9e: // Kaby Lake desktop *Type = INTEL_COREI7; // "skylake" *Subtype = INTEL_COREI7_SKYLAKE; break; + // Skylake Xeon: + case 0x55: + *Type = INTEL_COREI7; + *Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512" + break; + case 0x1c: // Most 45 nm Intel Atom processors case 0x26: // 45 nm Atom Lincroft case 0x27: // 32 nm Atom Medfield case 0x35: // 32 nm Atom Midview case 0x36: // 32 nm Atom Midview - *Type = INTEL_ATOM; - *Subtype = INTEL_ATOM_BONNELL; + *Type = INTEL_BONNELL; break; // "bonnell" // Atom Silvermont codes from the Intel software optimization guide. case 0x37: case 0x4a: case 0x4d: case 0x5a: case 0x5d: case 0x4c: // really airmont - *Type = INTEL_ATOM; - *Subtype = INTEL_ATOM_SILVERMONT; + *Type = INTEL_SILVERMONT; break; // "silvermont" case 0x57: - *Type = INTEL_XEONPHI; // knl - *Subtype = INTEL_KNIGHTS_LANDING; + *Type = INTEL_KNL; // knl break; - default: // Unknown family 6 CPU, try to guess. 
- if (Features & (1 << FEATURE_AVX512)) { - *Type = INTEL_XEONPHI; // knl - *Subtype = INTEL_KNIGHTS_LANDING; - break; - } - if (Features & (1 << FEATURE_ADX)) { - *Type = INTEL_COREI7; - *Subtype = INTEL_COREI7_BROADWELL; - break; - } - if (Features & (1 << FEATURE_AVX2)) { - *Type = INTEL_COREI7; - *Subtype = INTEL_COREI7_HASWELL; - break; - } - if (Features & (1 << FEATURE_AVX)) { - *Type = INTEL_COREI7; - *Subtype = INTEL_COREI7_SANDYBRIDGE; - break; - } - if (Features & (1 << FEATURE_SSE4_2)) { - if (Features & (1 << FEATURE_MOVBE)) { - *Type = INTEL_ATOM; - *Subtype = INTEL_ATOM_SILVERMONT; - } else { - *Type = INTEL_COREI7; - *Subtype = INTEL_COREI7_NEHALEM; - } - break; - } - if (Features & (1 << FEATURE_SSE4_1)) { - *Type = INTEL_CORE2; // "penryn" - *Subtype = INTEL_CORE2_45; - break; - } - if (Features & (1 << FEATURE_SSSE3)) { - if (Features & (1 << FEATURE_MOVBE)) { - *Type = INTEL_ATOM; - *Subtype = INTEL_ATOM_BONNELL; // "bonnell" - } else { - *Type = INTEL_CORE2; // "core2" - *Subtype = INTEL_CORE2_65; - } - break; - } - if (Features & (1 << FEATURE_EM64T)) { - *Type = INTEL_X86_64; - break; // x86-64 - } - if (Features & (1 << FEATURE_SSE2)) { - *Type = INTEL_PENTIUM_M; - break; - } - if (Features & (1 << FEATURE_SSE)) { - *Type = INTEL_PENTIUM_III; - break; - } - if (Features & (1 << FEATURE_MMX)) { - *Type = INTEL_PENTIUM_II; - break; - } - *Type = INTEL_PENTIUM_PRO; + default: // Unknown family 6 CPU. break; + break; } - case 15: { - switch (Model) { - case 0: // Pentium 4 processor, Intel Xeon processor. All processors are - // model 00h and manufactured using the 0.18 micron process. - case 1: // Pentium 4 processor, Intel Xeon processor, Intel Xeon - // processor MP, and Intel Celeron processor. All processors are - // model 01h and manufactured using the 0.18 micron process. - case 2: // Pentium 4 processor, Mobile Intel Pentium 4 processor - M, - // Intel Xeon processor, Intel Xeon processor MP, Intel Celeron - // processor, and Mobile Intel Celeron processor. All processors - // are model 02h and manufactured using the 0.13 micron process. - *Type = - ((Features & (1 << FEATURE_EM64T)) ? INTEL_X86_64 : INTEL_PENTIUM_IV); - break; - - case 3: // Pentium 4 processor, Intel Xeon processor, Intel Celeron D - // processor. All processors are model 03h and manufactured using - // the 90 nm process. - case 4: // Pentium 4 processor, Pentium 4 processor Extreme Edition, - // Pentium D processor, Intel Xeon processor, Intel Xeon - // processor MP, Intel Celeron D processor. All processors are - // model 04h and manufactured using the 90 nm process. - case 6: // Pentium 4 processor, Pentium D processor, Pentium processor - // Extreme Edition, Intel Xeon processor, Intel Xeon processor - // MP, Intel Celeron D processor. All processors are model 06h - // and manufactured using the 65 nm process. - *Type = - ((Features & (1 << FEATURE_EM64T)) ? INTEL_NOCONA : INTEL_PRESCOTT); - break; - - default: - *Type = - ((Features & (1 << FEATURE_EM64T)) ? INTEL_X86_64 : INTEL_PENTIUM_IV); - break; - } - } default: - break; /*"generic"*/ + break; // Unknown. } } -static void getAMDProcessorTypeAndSubtype(unsigned int Family, - unsigned int Model, - unsigned int Features, unsigned *Type, +static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model, + unsigned Features, unsigned *Type, unsigned *Subtype) { // FIXME: this poorly matches the generated SubtargetFeatureKV table. 
There // appears to be no way to generate the wide variety of AMD-specific targets // from the information returned from CPUID. switch (Family) { - case 4: - *Type = AMD_i486; - case 5: - *Type = AMDPENTIUM; - switch (Model) { - case 6: - case 7: - *Subtype = AMDPENTIUM_K6; - break; // "k6" - case 8: - *Subtype = AMDPENTIUM_K62; - break; // "k6-2" - case 9: - case 13: - *Subtype = AMDPENTIUM_K63; - break; // "k6-3" - case 10: - *Subtype = AMDPENTIUM_GEODE; - break; // "geode" - default: - break; - } - case 6: - *Type = AMDATHLON; - switch (Model) { - case 4: - *Subtype = AMDATHLON_TBIRD; - break; // "athlon-tbird" - case 6: - case 7: - case 8: - *Subtype = AMDATHLON_MP; - break; // "athlon-mp" - case 10: - *Subtype = AMDATHLON_XP; - break; // "athlon-xp" - default: - break; - } - case 15: - *Type = AMDATHLON; - if (Features & (1 << FEATURE_SSE3)) { - *Subtype = AMDATHLON_K8SSE3; - break; // "k8-sse3" - } - switch (Model) { - case 1: - *Subtype = AMDATHLON_OPTERON; - break; // "opteron" - case 5: - *Subtype = AMDATHLON_FX; - break; // "athlon-fx"; also opteron - default: - *Subtype = AMDATHLON_64; - break; // "athlon64" - } case 16: *Type = AMDFAM10H; // "amdfam10" switch (Model) { case 2: *Subtype = AMDFAM10H_BARCELONA; break; case 4: *Subtype = AMDFAM10H_SHANGHAI; break; case 8: *Subtype = AMDFAM10H_ISTANBUL; break; - default: - break; } + break; case 20: - *Type = AMDFAM14H; - *Subtype = AMD_BTVER1; + *Type = AMD_BTVER1; break; // "btver1"; case 21: *Type = AMDFAM15H; - if (!(Features & - (1 << FEATURE_AVX))) { // If no AVX support, provide a sane fallback. - *Subtype = AMD_BTVER1; - break; // "btver1" - } - if (Model >= 0x50 && Model <= 0x6f) { + if (Model >= 0x60 && Model <= 0x7f) { *Subtype = AMDFAM15H_BDVER4; - break; // "bdver4"; 50h-6Fh: Excavator + break; // "bdver4"; 60h-7Fh: Excavator } if (Model >= 0x30 && Model <= 0x3f) { *Subtype = AMDFAM15H_BDVER3; break; // "bdver3"; 30h-3Fh: Steamroller } if (Model >= 0x10 && Model <= 0x1f) { *Subtype = AMDFAM15H_BDVER2; break; // "bdver2"; 10h-1Fh: Piledriver } if (Model <= 0x0f) { *Subtype = AMDFAM15H_BDVER1; break; // "bdver1"; 00h-0Fh: Bulldozer } break; case 22: - *Type = AMDFAM16H; - if (!(Features & - (1 << FEATURE_AVX))) { // If no AVX support provide a sane fallback. 
- *Subtype = AMD_BTVER1; - break; // "btver1"; - } - *Subtype = AMD_BTVER2; + *Type = AMD_BTVER2; break; // "btver2" + case 23: + *Type = AMDFAM17H; + *Subtype = AMDFAM17H_ZNVER1; + break; default: break; // "generic" } } -static unsigned getAvailableFeatures(unsigned int ECX, unsigned int EDX, - unsigned MaxLeaf) { +static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, + unsigned *FeaturesOut) { unsigned Features = 0; - unsigned int EAX, EBX; - Features |= (((EDX >> 23) & 1) << FEATURE_MMX); - Features |= (((EDX >> 25) & 1) << FEATURE_SSE); - Features |= (((EDX >> 26) & 1) << FEATURE_SSE2); - Features |= (((ECX >> 0) & 1) << FEATURE_SSE3); - Features |= (((ECX >> 9) & 1) << FEATURE_SSSE3); - Features |= (((ECX >> 19) & 1) << FEATURE_SSE4_1); - Features |= (((ECX >> 20) & 1) << FEATURE_SSE4_2); - Features |= (((ECX >> 22) & 1) << FEATURE_MOVBE); + unsigned EAX, EBX; + if ((EDX >> 15) & 1) + Features |= 1 << FEATURE_CMOV; + if ((EDX >> 23) & 1) + Features |= 1 << FEATURE_MMX; + if ((EDX >> 25) & 1) + Features |= 1 << FEATURE_SSE; + if ((EDX >> 26) & 1) + Features |= 1 << FEATURE_SSE2; + + if ((ECX >> 0) & 1) + Features |= 1 << FEATURE_SSE3; + if ((ECX >> 1) & 1) + Features |= 1 << FEATURE_PCLMUL; + if ((ECX >> 9) & 1) + Features |= 1 << FEATURE_SSSE3; + if ((ECX >> 12) & 1) + Features |= 1 << FEATURE_FMA; + if ((ECX >> 19) & 1) + Features |= 1 << FEATURE_SSE4_1; + if ((ECX >> 20) & 1) + Features |= 1 << FEATURE_SSE4_2; + if ((ECX >> 23) & 1) + Features |= 1 << FEATURE_POPCNT; + if ((ECX >> 25) & 1) + Features |= 1 << FEATURE_AES; + // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV // indicates that the AVX registers will be saved and restored on context // switch, then we have full AVX support. const unsigned AVXBits = (1 << 27) | (1 << 28); bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) && ((EAX & 0x6) == 0x6); bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0); - bool HasLeaf7 = MaxLeaf >= 0x7; - getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX); - bool HasADX = HasLeaf7 && ((EBX >> 19) & 1); - bool HasAVX2 = HasAVX && HasLeaf7 && (EBX & 0x20); - bool HasAVX512 = HasLeaf7 && HasAVX512Save && ((EBX >> 16) & 1); - Features |= (HasAVX << FEATURE_AVX); - Features |= (HasAVX2 << FEATURE_AVX2); - Features |= (HasAVX512 << FEATURE_AVX512); - Features |= (HasAVX512Save << FEATURE_AVX512SAVE); - Features |= (HasADX << FEATURE_ADX); - getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX); - Features |= (((EDX >> 29) & 0x1) << FEATURE_EM64T); - return Features; + if (HasAVX) + Features |= 1 << FEATURE_AVX; + + bool HasLeaf7 = + MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX); + + if (HasLeaf7 && ((EBX >> 3) & 1)) + Features |= 1 << FEATURE_BMI; + if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX) + Features |= 1 << FEATURE_AVX2; + if (HasLeaf7 && ((EBX >> 9) & 1)) + Features |= 1 << FEATURE_BMI2; + if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512F; + if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512DQ; + if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512IFMA; + if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512PF; + if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512ER; + if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512CD; + if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save) + Features |= 1 << 
FEATURE_AVX512BW; + if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512VL; + + if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512VBMI; + if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX512VPOPCNTDQ; + + if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX5124VNNIW; + if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save) + Features |= 1 << FEATURE_AVX5124FMAPS; + + unsigned MaxExtLevel; + getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX); + + bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 && + !getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX); + if (HasExtLeaf1 && ((ECX >> 6) & 1)) + Features |= 1 << FEATURE_SSE4_A; + if (HasExtLeaf1 && ((ECX >> 11) & 1)) + Features |= 1 << FEATURE_XOP; + if (HasExtLeaf1 && ((ECX >> 16) & 1)) + Features |= 1 << FEATURE_FMA4; + + *FeaturesOut = Features; } #if defined(HAVE_INIT_PRIORITY) #define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__ 101)) #elif __has_attribute(__constructor__) #define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__)) #else // FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that // this runs during initialization. #define CONSTRUCTOR_ATTRIBUTE #endif int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE; struct __processor_model { unsigned int __cpu_vendor; unsigned int __cpu_type; unsigned int __cpu_subtype; unsigned int __cpu_features[1]; } __cpu_model = {0, 0, 0, {0}}; /* A constructor function that sets __cpu_model and __cpu_features with the right values. This needs to run only once. This constructor is given the highest priority and it should run before constructors without the priority set. However, it still runs after ifunc initializers and needs to be called explicitly there. */ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) { - unsigned int EAX, EBX, ECX, EDX; - unsigned int MaxLeaf = 5; - unsigned int Vendor; - unsigned int Model, Family, Brand_id; - unsigned int Features = 0; + unsigned EAX, EBX, ECX, EDX; + unsigned MaxLeaf = 5; + unsigned Vendor; + unsigned Model, Family, Brand_id; + unsigned Features = 0; /* This function needs to run just once. */ if (__cpu_model.__cpu_vendor) return 0; if (!isCpuIdSupported()) return -1; /* Assume cpuid insn present. Run in level 0 to get vendor id. */ - getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX); - - if (MaxLeaf < 1) { + if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) { __cpu_model.__cpu_vendor = VENDOR_OTHER; return -1; } getX86CpuIDAndInfo(1, &EAX, &EBX, &ECX, &EDX); detectX86FamilyModel(EAX, &Family, &Model); Brand_id = EBX & 0xff; /* Find available features. */ - Features = getAvailableFeatures(ECX, EDX, MaxLeaf); + getAvailableFeatures(ECX, EDX, MaxLeaf, &Features); __cpu_model.__cpu_features[0] = Features; if (Vendor == SIG_INTEL) { /* Get CPU type. */ getIntelProcessorTypeAndSubtype(Family, Model, Brand_id, Features, &(__cpu_model.__cpu_type), &(__cpu_model.__cpu_subtype)); __cpu_model.__cpu_vendor = VENDOR_INTEL; } else if (Vendor == SIG_AMD) { /* Get CPU type.
*/ getAMDProcessorTypeAndSubtype(Family, Model, Features, &(__cpu_model.__cpu_type), &(__cpu_model.__cpu_subtype)); __cpu_model.__cpu_vendor = VENDOR_AMD; } else __cpu_model.__cpu_vendor = VENDOR_OTHER; assert(__cpu_model.__cpu_vendor < VENDOR_MAX); assert(__cpu_model.__cpu_type < CPU_TYPE_MAX); assert(__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX); return 0; } #endif Index: vendor/compiler-rt/dist/lib/builtins/int_util.c =================================================================== --- vendor/compiler-rt/dist/lib/builtins/int_util.c (revision 320960) +++ vendor/compiler-rt/dist/lib/builtins/int_util.c (revision 320961) @@ -1,61 +1,71 @@ /* ===-- int_util.c - Implement internal utilities --------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== */ #include "int_lib.h" #include "int_util.h" /* NOTE: The definitions in this file are declared weak because we want clients * to be able to arbitrarily package individual functions into separate .a files. If * we did not declare these weak, some link situations might end up seeing * duplicate strong definitions of the same symbol. * * We can't use this solution for kernel use (which may not support weak), but * currently expect that when built for kernel use all the functionality is * packaged into a single library. */ #ifdef KERNEL_USE NORETURN extern void panic(const char *, ...); #ifndef _WIN32 __attribute__((visibility("hidden"))) #endif void compilerrt_abort_impl(const char *file, int line, const char *function) { panic("%s:%d: abort in %s", file, line, function); } #elif __APPLE__ /* from libSystem.dylib */ NORETURN extern void __assert_rtn(const char *func, const char *file, int line, const char *message); #ifndef _WIN32 __attribute__((weak)) __attribute__((visibility("hidden"))) #endif void compilerrt_abort_impl(const char *file, int line, const char *function) { __assert_rtn(function, file, line, "libcompiler_rt abort"); } +#elif __Fuchsia__ + +#ifndef _WIN32 +__attribute__((weak)) +__attribute__((visibility("hidden"))) +#endif +void compilerrt_abort_impl(const char *file, int line, const char *function) { + __builtin_trap(); +} + #else /* Get the system definition of abort() */ #include <stdlib.h> #ifndef _WIN32 __attribute__((weak)) __attribute__((visibility("hidden"))) #endif void compilerrt_abort_impl(const char *file, int line, const char *function) { abort(); } #endif Index: vendor/compiler-rt/dist/lib/esan/working_set.cpp =================================================================== --- vendor/compiler-rt/dist/lib/esan/working_set.cpp (revision 320960) +++ vendor/compiler-rt/dist/lib/esan/working_set.cpp (revision 320961) @@ -1,279 +1,280 @@ //===-- working_set.cpp ---------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of EfficiencySanitizer, a family of performance tuners. // // This file contains working-set-specific code.
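/* The int_util.c hunk above relies on weak linkage so that clients can ship
   their own compilerrt_abort_impl without duplicate-symbol errors. A minimal
   sketch of that linkage pattern, with hypothetical names: */
#include <stdio.h>

__attribute__((weak)) void rt_abort_handler(const char *msg) {
  /* Used only if no strong definition appears anywhere in the link. */
  fprintf(stderr, "default handler: %s\n", msg);
}

/* Any client translation unit may override it with a strong definition:
     void rt_abort_handler(const char *msg) { my_panic(msg); }
   The strong symbol silently replaces the weak one at link time. */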
//===----------------------------------------------------------------------===// #include "working_set.h" #include "esan.h" #include "esan_circular_buffer.h" #include "esan_flags.h" #include "esan_shadow.h" #include "esan_sideline.h" #include "sanitizer_common/sanitizer_procmaps.h" // We shadow every cache line of app memory with one shadow byte. // - The highest bit of each shadow byte indicates whether the corresponding // cache line has ever been accessed. // - The lowest bit of each shadow byte indicates whether the corresponding // cache line was accessed since the last sample. // - The other bits are used for working set snapshots at successively // lower frequencies, each bit to the left from the lowest bit stepping // down the frequency by 2 to the power of getFlags()->snapshot_step. // Thus we have something like this: // Bit 0: Since last sample // Bit 1: Since last 2^2 samples // Bit 2: Since last 2^4 samples // Bit 3: ... // Bit 7: Ever accessed. // We live with races in accessing each shadow byte. typedef unsigned char byte; namespace __esan { // Our shadow memory assumes that the line size is 64. static const u32 CacheLineSize = 64; // See the shadow byte layout description above. static const u32 TotalWorkingSetBitIdx = 7; // We accumulate to the left until we hit this bit. // We don't need to accumulate to the final bit as it's set on each ref // by the compiler instrumentation. static const u32 MaxAccumBitIdx = 6; static const u32 CurWorkingSetBitIdx = 0; static const byte ShadowAccessedVal = (1 << TotalWorkingSetBitIdx) | (1 << CurWorkingSetBitIdx); static SidelineThread Thread; // If we use real-time-based timer samples this won't overflow in any realistic // scenario, but if we switch to some other unit (such as memory accesses) we // may want to consider a 64-bit int. static u32 SnapshotNum; // We store the wset size for each of 8 different sampling frequencies. static const u32 NumFreq = 8; // One for each bit of our shadow bytes. // We cannot use static objects as the global destructor is called // prior to our finalize routine. // These are each circular buffers, sized up front. CircularBuffer<u32> SizePerFreq[NumFreq]; // We cannot rely on static initializers (they may run too late) but // we record the size here for clarity: u32 CircularBufferSizes[NumFreq] = { // These are each mmap-ed so our minimum is one page. 32*1024, 16*1024, 8*1024, 4*1024, 4*1024, 4*1024, 4*1024, 4*1024, }; void processRangeAccessWorkingSet(uptr PC, uptr Addr, SIZE_T Size, bool IsWrite) { if (Size == 0) return; SIZE_T I = 0; uptr LineSize = getFlags()->cache_line_size; // As Addr+Size could overflow at the top of a 32-bit address space, // we avoid the simpler formula that rounds the start and end. SIZE_T NumLines = Size / LineSize + // Add any extra at the start or end adding on an extra line: (LineSize - 1 + Addr % LineSize + Size % LineSize) / LineSize; byte *Shadow = (byte *)appToShadow(Addr); // Write shadow bytes until we're word-aligned. while (I < NumLines && (uptr)Shadow % 4 != 0) { if ((*Shadow & ShadowAccessedVal) != ShadowAccessedVal) *Shadow |= ShadowAccessedVal; ++Shadow; ++I; } // Write whole shadow words at a time. // Using a word-stride loop improves the runtime of a microbenchmark of // memset calls by 10%. u32 WordValue = ShadowAccessedVal | ShadowAccessedVal << 8 | ShadowAccessedVal << 16 | ShadowAccessedVal << 24; while (I + 4 <= NumLines) { if ((*(u32*)Shadow & WordValue) != WordValue) *(u32*)Shadow |= WordValue; Shadow += 4; I += 4; } // Write any trailing shadow bytes.
while (I < NumLines) { if ((*Shadow & ShadowAccessedVal) != ShadowAccessedVal) *Shadow |= ShadowAccessedVal; ++Shadow; ++I; } } // This routine will word-align ShadowStart and ShadowEnd prior to scanning. // It does *not* clear for BitIdx==TotalWorkingSetBitIdx, as that top bit // measures the access during the entire execution and should never be cleared. static u32 countAndClearShadowValues(u32 BitIdx, uptr ShadowStart, uptr ShadowEnd) { u32 WorkingSetSize = 0; u32 ByteValue = 0x1 << BitIdx; u32 WordValue = ByteValue | ByteValue << 8 | ByteValue << 16 | ByteValue << 24; // Get word aligned start. ShadowStart = RoundDownTo(ShadowStart, sizeof(u32)); bool Accum = getFlags()->record_snapshots && BitIdx < MaxAccumBitIdx; // Do not clear the bit that measures access during the entire execution. bool Clear = BitIdx < TotalWorkingSetBitIdx; for (u32 *Ptr = (u32 *)ShadowStart; Ptr < (u32 *)ShadowEnd; ++Ptr) { if ((*Ptr & WordValue) != 0) { byte *BytePtr = (byte *)Ptr; for (u32 j = 0; j < sizeof(u32); ++j) { if (BytePtr[j] & ByteValue) { ++WorkingSetSize; if (Accum) { // Accumulate to the lower-frequency bit to the left. BytePtr[j] |= (ByteValue << 1); } } } if (Clear) { // Clear this bit from every shadow byte. *Ptr &= ~WordValue; } } } return WorkingSetSize; } // Scan shadow memory to calculate the number of cache lines being accessed, // i.e., the number of non-zero bits indexed by BitIdx in each shadow byte. // We also clear the lowest bits (most recent working set snapshot). // We do *not* clear for BitIdx==TotalWorkingSetBitIdx, as that top bit // measures the access during the entire execution and should never be cleared. static u32 computeWorkingSizeAndReset(u32 BitIdx) { u32 WorkingSetSize = 0; MemoryMappingLayout MemIter(true/*cache*/); - uptr Start, End, Prot; - while (MemIter.Next(&Start, &End, nullptr/*offs*/, nullptr/*file*/, - 0/*file size*/, &Prot)) { - VPrintf(4, "%s: considering %p-%p app=%d shadow=%d prot=%u\n", - __FUNCTION__, Start, End, Prot, isAppMem(Start), - isShadowMem(Start)); - if (isShadowMem(Start) && (Prot & MemoryMappingLayout::kProtectionWrite)) { - VPrintf(3, "%s: walking %p-%p\n", __FUNCTION__, Start, End); - WorkingSetSize += countAndClearShadowValues(BitIdx, Start, End); + MemoryMappedSegment Segment; + while (MemIter.Next(&Segment)) { + VPrintf(4, "%s: considering %p-%p app=%d shadow=%d prot=%u\n", __FUNCTION__, + Segment.start, Segment.end, Segment.protection, + isAppMem(Segment.start), isShadowMem(Segment.start)); + if (isShadowMem(Segment.start) && Segment.IsWritable()) { + VPrintf(3, "%s: walking %p-%p\n", __FUNCTION__, Segment.start, + Segment.end); + WorkingSetSize += + countAndClearShadowValues(BitIdx, Segment.start, Segment.end); } } return WorkingSetSize; } // This is invoked from a signal handler but in a sideline thread doing nothing // else so it is a little less fragile than a typical signal handler. static void takeSample(void *Arg) { u32 BitIdx = CurWorkingSetBitIdx; u32 Freq = 1; ++SnapshotNum; // Simpler to skip 0 whose mod matches everything. while (BitIdx <= MaxAccumBitIdx && (SnapshotNum % Freq) == 0) { u32 NumLines = computeWorkingSizeAndReset(BitIdx); VReport(1, "%s: snapshot #%5d bit %d freq %4d: %8u\n", SanitizerToolName, SnapshotNum, BitIdx, Freq, NumLines); SizePerFreq[BitIdx].push_back(NumLines); Freq = Freq << getFlags()->snapshot_step; BitIdx++; } } unsigned int getSampleCountWorkingSet() { return SnapshotNum; } // Initialization that must be done before any instrumented code is executed. 
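/* A worked sketch of takeSample()'s cadence above (hypothetical standalone
   helper): bit 0 is scanned on every sample and each accumulation bit to the
   left 2^snapshot_step times less often, so with step 1, snapshot 4 scans
   bits 0, 1 and 2. */
#include <stdio.h>

static void print_bits_scanned(unsigned snapshot_num, unsigned step) {
  unsigned bit = 0, freq = 1;
  printf("snapshot %u scans bits:", snapshot_num);
  /* Mirrors takeSample: keep walking left while the count divides evenly. */
  while (bit <= 6 /* MaxAccumBitIdx */ && snapshot_num % freq == 0) {
    printf(" %u", bit);
    freq <<= step;
    ++bit;
  }
  printf("\n");
}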
void initializeShadowWorkingSet() { CHECK(getFlags()->cache_line_size == CacheLineSize); registerMemoryFaultHandler(); } void initializeWorkingSet() { if (getFlags()->record_snapshots) { for (u32 i = 0; i < NumFreq; ++i) SizePerFreq[i].initialize(CircularBufferSizes[i]); Thread.launchThread(takeSample, nullptr, getFlags()->sample_freq); } } static u32 getPeriodForPrinting(u32 MilliSec, const char *&Unit) { if (MilliSec > 600000) { Unit = "min"; return MilliSec / 60000; } else if (MilliSec > 10000) { Unit = "sec"; return MilliSec / 1000; } else { Unit = "ms"; return MilliSec; } } static u32 getSizeForPrinting(u32 NumOfCachelines, const char *&Unit) { // We need a constant to avoid software divide support: static const u32 KilobyteCachelines = (0x1 << 10) / CacheLineSize; static const u32 MegabyteCachelines = KilobyteCachelines << 10; if (NumOfCachelines > 10 * MegabyteCachelines) { Unit = "MB"; return NumOfCachelines / MegabyteCachelines; } else if (NumOfCachelines > 10 * KilobyteCachelines) { Unit = "KB"; return NumOfCachelines / KilobyteCachelines; } else { Unit = "Bytes"; return NumOfCachelines * CacheLineSize; } } void reportWorkingSet() { const char *Unit; if (getFlags()->record_snapshots) { u32 Freq = 1; Report(" Total number of samples: %u\n", SnapshotNum); for (u32 i = 0; i < NumFreq; ++i) { u32 Time = getPeriodForPrinting(getFlags()->sample_freq*Freq, Unit); Report(" Samples array #%d at period %u %s\n", i, Time, Unit); // FIXME: report whether we wrapped around and thus whether we // have data on the whole run or just the last N samples. for (u32 j = 0; j < SizePerFreq[i].size(); ++j) { u32 Size = getSizeForPrinting(SizePerFreq[i][j], Unit); Report("#%4d: %8u %s (%9u cache lines)\n", j, Size, Unit, SizePerFreq[i][j]); } Freq = Freq << getFlags()->snapshot_step; } } // Get the working set size for the entire execution. u32 NumOfCachelines = computeWorkingSizeAndReset(TotalWorkingSetBitIdx); u32 Size = getSizeForPrinting(NumOfCachelines, Unit); Report(" %s: the total working set size: %u %s (%u cache lines)\n", SanitizerToolName, Size, Unit, NumOfCachelines); } int finalizeWorkingSet() { if (getFlags()->record_snapshots) Thread.joinThread(); reportWorkingSet(); if (getFlags()->record_snapshots) { for (u32 i = 0; i < NumFreq; ++i) SizePerFreq[i].free(); } return 0; } } // namespace __esan Index: vendor/compiler-rt/dist/lib/lsan/lsan_common.cc =================================================================== --- vendor/compiler-rt/dist/lib/lsan/lsan_common.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/lsan/lsan_common.cc (revision 320961) @@ -1,866 +1,868 @@ //=-- lsan_common.cc ------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // Implementation of common leak checking functionality. 
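/* The hunk below extends LSan's built-in suppression list. User-supplied
   suppressions use the same one-pattern-per-line "leak:<pattern>" format and
   are loaded through LSAN_OPTIONS; a sketch with a hypothetical supp.txt:
       leak:libfoo.so
       leak:MyNoisyFactory
   invoked as: LSAN_OPTIONS=suppressions=supp.txt ./app */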
// //===----------------------------------------------------------------------===// #include "lsan_common.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flag_parser.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" #if CAN_SANITIZE_LEAKS namespace __lsan { // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and // also to protect the global list of root regions. BlockingMutex global_mutex(LINKER_INITIALIZED); Flags lsan_flags; void DisableCounterUnderflow() { if (common_flags()->detect_leaks) { Report("Unmatched call to __lsan_enable().\n"); Die(); } } void Flags::SetDefaults() { #define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; #include "lsan_flags.inc" #undef LSAN_FLAG } void RegisterLsanFlags(FlagParser *parser, Flags *f) { #define LSAN_FLAG(Type, Name, DefaultValue, Description) \ RegisterFlag(parser, #Name, Description, &f->Name); #include "lsan_flags.inc" #undef LSAN_FLAG } #define LOG_POINTERS(...) \ do { \ if (flags()->log_pointers) Report(__VA_ARGS__); \ } while (0); #define LOG_THREADS(...) \ do { \ if (flags()->log_threads) Report(__VA_ARGS__); \ } while (0); ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; static SuppressionContext *suppression_ctx = nullptr; static const char kSuppressionLeak[] = "leak"; static const char *kSuppressionTypes[] = { kSuppressionLeak }; static const char kStdSuppressions[] = #if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT // definition. "leak:*pthread_exit*\n" #endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT +#if SANITIZER_MAC + // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173 + "leak:*_os_trace*\n" +#endif // TLS leak in some glibc versions, described in // https://sourceware.org/bugzilla/show_bug.cgi?id=12650. "leak:*tls_get_addr*\n"; void InitializeSuppressions() { CHECK_EQ(nullptr, suppression_ctx); suppression_ctx = new (suppression_placeholder) // NOLINT SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); suppression_ctx->ParseFromFile(flags()->suppressions); if (&__lsan_default_suppressions) suppression_ctx->Parse(__lsan_default_suppressions()); suppression_ctx->Parse(kStdSuppressions); } static SuppressionContext *GetSuppressionContext() { CHECK(suppression_ctx); return suppression_ctx; } static InternalMmapVector<RootRegion> *root_regions; InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; } void InitializeRootRegions() { CHECK(!root_regions); ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)]; root_regions = new(placeholder) InternalMmapVector<RootRegion>(1); } void InitCommonLsan() { InitializeRootRegions(); if (common_flags()->detect_leaks) { // Initialization which can fail or print warnings should only be done if // LSan is actually enabled.
InitializeSuppressions(); InitializePlatformSpecificModules(); } } class Decorator: public __sanitizer::SanitizerCommonDecorator { public: Decorator() : SanitizerCommonDecorator() { } const char *Error() { return Red(); } const char *Leak() { return Blue(); } const char *End() { return Default(); } }; static inline bool CanBeAHeapPointer(uptr p) { // Since our heap is located in mmap-ed memory, we can assume a sensible lower // bound on heap addresses. const uptr kMinAddress = 4 * 4096; if (p < kMinAddress) return false; #if defined(__x86_64__) // Accept only canonical form user-space addresses. return ((p >> 47) == 0); #elif defined(__mips64) return ((p >> 40) == 0); #elif defined(__aarch64__) unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); return ((p >> runtimeVMA) == 0); #else return true; #endif } // Scans the memory range, looking for byte patterns that point into allocator // chunks. Marks those chunks with |tag| and adds them to |frontier|. // There are two usage modes for this function: finding reachable chunks // (|tag| = kReachable) and finding indirectly leaked chunks // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, // so |frontier| = 0. void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, const char *region_type, ChunkTag tag) { CHECK(tag == kReachable || tag == kIndirectlyLeaked); const uptr alignment = flags()->pointer_alignment(); LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end); uptr pp = begin; if (pp % alignment) pp = pp + alignment - pp % alignment; for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT void *p = *reinterpret_cast<void **>(pp); if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue; uptr chunk = PointsIntoChunk(p); if (!chunk) continue; // Pointers to self don't count. This matters when tag == kIndirectlyLeaked. if (chunk == begin) continue; LsanMetadata m(chunk); if (m.tag() == kReachable || m.tag() == kIgnored) continue; // Do this check relatively late so we can log only the interesting cases. if (!flags()->use_poisoned && WordIsPoisoned(pp)) { LOG_POINTERS( "%p is poisoned: ignoring %p pointing into chunk %p-%p of size " "%zu.\n", pp, p, chunk, chunk + m.requested_size(), m.requested_size()); continue; } m.set_tag(tag); LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p, chunk, chunk + m.requested_size(), m.requested_size()); if (frontier) frontier->push_back(chunk); } } // Scans a global range for pointers void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) { uptr allocator_begin = 0, allocator_end = 0; GetAllocatorGlobalRange(&allocator_begin, &allocator_end); if (begin <= allocator_begin && allocator_begin < end) { CHECK_LE(allocator_begin, allocator_end); CHECK_LE(allocator_end, end); if (begin < allocator_begin) ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL", kReachable); if (allocator_end < end) ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable); } else { ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable); } } void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { Frontier *frontier = reinterpret_cast<Frontier *>(arg); ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable); } // Scans thread data (stacks and TLS) for heap pointers.
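/* ScanRangeForPointers() above is a conservative scan: every suitably
   aligned word in the range is treated as a candidate pointer and tested
   against the allocator. A minimal sketch of the pattern; is_heap_chunk is a
   hypothetical stand-in for the real PointsIntoChunk() check. */
#include <stddef.h>
#include <stdint.h>

typedef int (*chunk_predicate)(uintptr_t candidate);

static size_t scan_range(uintptr_t begin, uintptr_t end, size_t alignment,
                         chunk_predicate is_heap_chunk) {
  size_t hits = 0;
  uintptr_t p = begin;
  if (p % alignment) /* round the cursor up to the scan alignment */
    p += alignment - p % alignment;
  for (; p + sizeof(void *) <= end; p += alignment) {
    uintptr_t candidate = *(uintptr_t *)p; /* reinterpret the word */
    if (is_heap_chunk(candidate))
      ++hits;
  }
  return hits;
}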
static void ProcessThreads(SuspendedThreadsList const &suspended_threads, Frontier *frontier) { InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount()); uptr registers_begin = reinterpret_cast<uptr>(registers.data()); uptr registers_end = registers_begin + registers.size(); for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i)); LOG_THREADS("Processing thread %d.\n", os_id); uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; DTLS *dtls; bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin, &tls_end, &cache_begin, &cache_end, &dtls); if (!thread_found) { // If a thread can't be found in the thread registry, it's probably in the // process of destruction. Log this event and move on. LOG_THREADS("Thread %d not found in registry.\n", os_id); continue; } uptr sp; PtraceRegistersStatus have_registers = suspended_threads.GetRegistersAndSP(i, registers.data(), &sp); if (have_registers != REGISTERS_AVAILABLE) { Report("Unable to get registers from thread %d.\n", os_id); // If unable to get SP, consider the entire stack to be reachable unless // GetRegistersAndSP failed with ESRCH. if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue; sp = stack_begin; } if (flags()->use_registers && have_registers) ScanRangeForPointers(registers_begin, registers_end, frontier, "REGISTERS", kReachable); if (flags()->use_stacks) { LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp); if (sp < stack_begin || sp >= stack_end) { // SP is outside the recorded stack range (e.g. the thread is running a // signal handler on alternate stack, or swapcontext was used). // Again, consider the entire stack range to be reachable. LOG_THREADS("WARNING: stack pointer not in stack range.\n"); uptr page_size = GetPageSizeCached(); int skipped = 0; while (stack_begin < stack_end && !IsAccessibleMemoryRange(stack_begin, 1)) { skipped++; stack_begin += page_size; } LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n", skipped, stack_begin, stack_end); } else { // Shrink the stack range to ignore out-of-scope values. stack_begin = sp; } ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", kReachable); ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier); } if (flags()->use_tls) { if (tls_begin) { LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end); // If the tls and cache ranges don't overlap, scan full tls range, // otherwise, only scan the non-overlapping portions if (cache_begin == cache_end || tls_end < cache_begin || tls_begin > cache_end) { ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable); } else { if (tls_begin < cache_begin) ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", kReachable); if (tls_end > cache_end) ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable); } } if (dtls && !DTLSInDestruction(dtls)) { for (uptr j = 0; j < dtls->dtv_size; ++j) { uptr dtls_beg = dtls->dtv[j].beg; uptr dtls_end = dtls_beg + dtls->dtv[j].size; if (dtls_beg < dtls_end) { LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end); ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS", kReachable); } } } else { // We are handling a thread with DTLS under destruction. Log about // this and continue.
LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id); } } } } void ScanRootRegion(Frontier *frontier, const RootRegion &root_region, - uptr region_begin, uptr region_end, uptr prot) { + uptr region_begin, uptr region_end, bool is_readable) { uptr intersection_begin = Max(root_region.begin, region_begin); uptr intersection_end = Min(region_end, root_region.begin + root_region.size); if (intersection_begin >= intersection_end) return; - bool is_readable = prot & MemoryMappingLayout::kProtectionRead; LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n", root_region.begin, root_region.begin + root_region.size, region_begin, region_end, is_readable ? "readable" : "unreadable"); if (is_readable) ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT", kReachable); } static void ProcessRootRegion(Frontier *frontier, const RootRegion &root_region) { MemoryMappingLayout proc_maps(/*cache_enabled*/ true); - uptr begin, end, prot; - while (proc_maps.Next(&begin, &end, - /*offset*/ nullptr, /*filename*/ nullptr, - /*filename_size*/ 0, &prot)) { - ScanRootRegion(frontier, root_region, begin, end, prot); + MemoryMappedSegment segment; + while (proc_maps.Next(&segment)) { + ScanRootRegion(frontier, root_region, segment.start, segment.end, + segment.IsReadable()); } } // Scans root regions for heap pointers. static void ProcessRootRegions(Frontier *frontier) { if (!flags()->use_root_regions) return; CHECK(root_regions); for (uptr i = 0; i < root_regions->size(); i++) { ProcessRootRegion(frontier, (*root_regions)[i]); } } static void FloodFillTag(Frontier *frontier, ChunkTag tag) { while (frontier->size()) { uptr next_chunk = frontier->back(); frontier->pop_back(); LsanMetadata m(next_chunk); ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, "HEAP", tag); } } // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks // which are reachable from it as indirectly leaked. static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() != kReachable) { ScanRangeForPointers(chunk, chunk + m.requested_size(), /* frontier */ nullptr, "HEAP", kIndirectlyLeaked); } } // ForEachChunk callback. If chunk is marked as ignored, adds its address to // frontier. static void CollectIgnoredCb(uptr chunk, void *arg) { CHECK(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() == kIgnored) { LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", chunk, chunk + m.requested_size(), m.requested_size()); reinterpret_cast<Frontier *>(arg)->push_back(chunk); } } static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) { CHECK(stack_id); StackTrace stack = map->Get(stack_id); // The top frame is our malloc/calloc/etc. The next frame is the caller. if (stack.size >= 2) return stack.trace[1]; return 0; } struct InvalidPCParam { Frontier *frontier; StackDepotReverseMap *stack_depot_reverse_map; bool skip_linker_allocations; }; // ForEachChunk callback. If the caller pc is invalid or is within the linker, // mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) { CHECK(arg); InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) { u32 stack_id = m.stack_trace_id(); uptr caller_pc = 0; if (stack_id > 0) caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map); // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark // it as reachable, as we can't properly report its allocation stack anyway. if (caller_pc == 0 || (param->skip_linker_allocations && GetLinker()->containsAddress(caller_pc))) { m.set_tag(kReachable); param->frontier->push_back(chunk); } } } // On Linux, handles dynamically allocated TLS blocks by treating all chunks // allocated from ld-linux.so as reachable. // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules. // They are allocated with a __libc_memalign() call in allocate_and_init() // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those // blocks, but we can make sure they come from our own allocator by intercepting // __libc_memalign(). On top of that, there is no easy way to reach them. Their // addresses are stored in a dynamically allocated array (the DTV) which is // referenced from the static TLS. Unfortunately, we can't just rely on the DTV // being reachable from the static TLS, and the dynamic TLS being reachable from // the DTV. This is because the initial DTV is allocated before our interception // mechanism kicks in, and thus we don't recognize it as allocated memory. We // can't special-case it either, since we don't know its size. // Our solution is to include in the root set all allocations made from // ld-linux.so (which is where allocate_and_init() is implemented). This is // guaranteed to include all dynamic TLS blocks (and possibly other allocations // which we don't care about). // On all other platforms, this simply checks to ensure that the caller pc is // valid before reporting chunks as leaked. void ProcessPC(Frontier *frontier) { StackDepotReverseMap stack_depot_reverse_map; InvalidPCParam arg; arg.frontier = frontier; arg.stack_depot_reverse_map = &stack_depot_reverse_map; arg.skip_linker_allocations = flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr; ForEachChunk(MarkInvalidPCCb, &arg); } // Sets the appropriate tag on each chunk. static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { // Holds the flood fill frontier. Frontier frontier(1); ForEachChunk(CollectIgnoredCb, &frontier); ProcessGlobalRegions(&frontier); ProcessThreads(suspended_threads, &frontier); ProcessRootRegions(&frontier); FloodFillTag(&frontier, kReachable); CHECK_EQ(0, frontier.size()); ProcessPC(&frontier); // The check here is relatively expensive, so we do this in a separate flood // fill. That way we can skip the check for chunks that are reachable // otherwise. LOG_POINTERS("Processing platform-specific allocations.\n"); ProcessPlatformSpecificAllocations(&frontier); FloodFillTag(&frontier, kReachable); // Iterate over leaked chunks and mark those that are reachable from other // leaked chunks. LOG_POINTERS("Scanning leaked chunks.\n"); ForEachChunk(MarkIndirectlyLeakedCb, nullptr); } // ForEachChunk callback. Resets the tags to pre-leak-check state.
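/* ClassifyAllChunks() above is a worklist flood fill: roots seed the
   frontier, then chunks are popped and their bodies scanned for pointers to
   further chunks until the frontier drains. A toy sketch under the
   simplifying assumption that each chunk holds at most one pointer: */
enum toy_tag { TOY_LEAKED = 0, TOY_REACHABLE = 1 };

struct toy_chunk {
  enum toy_tag tag;
  struct toy_chunk *points_to;
};

/* frontier[0..n) holds chunks already tagged TOY_REACHABLE. */
static void toy_flood_fill(struct toy_chunk **frontier, unsigned n) {
  while (n) {
    struct toy_chunk *c = frontier[--n];   /* pop */
    struct toy_chunk *succ = c->points_to; /* "scan" the chunk body */
    if (succ && succ->tag != TOY_REACHABLE) {
      succ->tag = TOY_REACHABLE;
      frontier[n++] = succ;                /* push the newly reached chunk */
    }
  }
}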
static void ResetTagsCb(uptr chunk, void *arg) { (void)arg; chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() != kIgnored) m.set_tag(kDirectlyLeaked); } static void PrintStackTraceById(u32 stack_trace_id) { CHECK(stack_trace_id); StackDepotGet(stack_trace_id).Print(); } // ForEachChunk callback. Aggregates information about unreachable chunks into // a LeakReport. static void CollectLeaksCb(uptr chunk, void *arg) { CHECK(arg); LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (!m.allocated()) return; if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { u32 resolution = flags()->resolution; u32 stack_trace_id = 0; if (resolution > 0) { StackTrace stack = StackDepotGet(m.stack_trace_id()); stack.size = Min(stack.size, resolution); stack_trace_id = StackDepotPut(stack); } else { stack_trace_id = m.stack_trace_id(); } leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(), m.tag()); } } static void PrintMatchedSuppressions() { InternalMmapVector<Suppression *> matched(1); GetSuppressionContext()->GetMatched(&matched); if (!matched.size()) return; const char *line = "-----------------------------------------------------"; Printf("%s\n", line); Printf("Suppressions used:\n"); Printf(" count bytes template\n"); for (uptr i = 0; i < matched.size(); i++) Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed( &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ); Printf("%s\n\n", line); } struct CheckForLeaksParam { bool success; LeakReport leak_report; }; static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, void *arg) { CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg); CHECK(param); CHECK(!param->success); ClassifyAllChunks(suspended_threads); ForEachChunk(CollectLeaksCb, ¶m->leak_report); // Clean up for subsequent leak checks. This assumes we did not overwrite any // kIgnored tags. ForEachChunk(ResetTagsCb, nullptr); param->success = true; } static bool CheckForLeaks() { if (&__lsan_is_turned_off && __lsan_is_turned_off()) return false; EnsureMainThreadIDIsCorrect(); CheckForLeaksParam param; param.success = false; LockThreadRegistry(); LockAllocator(); DoStopTheWorld(CheckForLeaksCallback, ¶m); UnlockAllocator(); UnlockThreadRegistry(); if (!param.success) { Report("LeakSanitizer has encountered a fatal error.\n"); Report( "HINT: For debugging, try setting environment variable " "LSAN_OPTIONS=verbosity=1:log_threads=1\n"); Report( "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n"); Die(); } param.leak_report.ApplySuppressions(); uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount(); if (unsuppressed_count > 0) { Decorator d; Printf("\n" "=================================================================" "\n"); Printf("%s", d.Error()); Report("ERROR: LeakSanitizer: detected memory leaks\n"); Printf("%s", d.End()); param.leak_report.ReportTopLeaks(flags()->max_leaks); } if (common_flags()->print_suppressions) PrintMatchedSuppressions(); if (unsuppressed_count > 0) { param.leak_report.PrintSummary(); return true; } return false; } void DoLeakCheck() { BlockingMutexLock l(&global_mutex); static bool already_done; if (already_done) return; already_done = true; bool have_leaks = CheckForLeaks(); if (!have_leaks) { return; } if (common_flags()->exitcode) { Die(); } } static int DoRecoverableLeakCheck() { BlockingMutexLock l(&global_mutex); bool have_leaks = CheckForLeaks(); return have_leaks ?
1 : 0; } static Suppression *GetSuppressionForAddr(uptr addr) { Suppression *s = nullptr; // Suppress by module name. SuppressionContext *suppressions = GetSuppressionContext(); if (const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr)) if (suppressions->Match(module_name, kSuppressionLeak, &s)) return s; // Suppress by file or function name. SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); for (SymbolizedStack *cur = frames; cur; cur = cur->next) { if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) || suppressions->Match(cur->info.file, kSuppressionLeak, &s)) { break; } } frames->ClearAll(); return s; } static Suppression *GetSuppressionForStack(u32 stack_trace_id) { StackTrace stack = StackDepotGet(stack_trace_id); for (uptr i = 0; i < stack.size; i++) { Suppression *s = GetSuppressionForAddr( StackTrace::GetPreviousInstructionPc(stack.trace[i])); if (s) return s; } return nullptr; } ///// LeakReport implementation. ///// // A hard limit on the number of distinct leaks, to avoid quadratic complexity // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks // in real-world applications. // FIXME: Get rid of this limit by changing the implementation of LeakReport to // use a hash table. const uptr kMaxLeaksConsidered = 5000; void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size, ChunkTag tag) { CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); bool is_directly_leaked = (tag == kDirectlyLeaked); uptr i; for (i = 0; i < leaks_.size(); i++) { if (leaks_[i].stack_trace_id == stack_trace_id && leaks_[i].is_directly_leaked == is_directly_leaked) { leaks_[i].hit_count++; leaks_[i].total_size += leaked_size; break; } } if (i == leaks_.size()) { if (leaks_.size() == kMaxLeaksConsidered) return; Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id, is_directly_leaked, /* is_suppressed */ false }; leaks_.push_back(leak); } if (flags()->report_objects) { LeakedObject obj = {leaks_[i].id, chunk, leaked_size}; leaked_objects_.push_back(obj); } } static bool LeakComparator(const Leak &leak1, const Leak &leak2) { if (leak1.is_directly_leaked == leak2.is_directly_leaked) return leak1.total_size > leak2.total_size; else return leak1.is_directly_leaked; } void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { CHECK(leaks_.size() <= kMaxLeaksConsidered); Printf("\n"); if (leaks_.size() == kMaxLeaksConsidered) Printf("Too many leaks! Only the first %zu leaks encountered will be " "reported.\n", kMaxLeaksConsidered); uptr unsuppressed_count = UnsuppressedLeakCount(); if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) Printf("The %zu top leak(s):\n", num_leaks_to_report); InternalSort(&leaks_, leaks_.size(), LeakComparator); uptr leaks_reported = 0; for (uptr i = 0; i < leaks_.size(); i++) { if (leaks_[i].is_suppressed) continue; PrintReportForLeak(i); leaks_reported++; if (leaks_reported == num_leaks_to_report) break; } if (leaks_reported < unsuppressed_count) { uptr remaining = unsuppressed_count - leaks_reported; Printf("Omitting %zu more leak(s).\n", remaining); } } void LeakReport::PrintReportForLeak(uptr index) { Decorator d; Printf("%s", d.Leak()); Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n", leaks_[index].is_directly_leaked ? 
"Direct" : "Indirect", leaks_[index].total_size, leaks_[index].hit_count); Printf("%s", d.End()); PrintStackTraceById(leaks_[index].stack_trace_id); if (flags()->report_objects) { Printf("Objects leaked above:\n"); PrintLeakedObjectsForLeak(index); Printf("\n"); } } void LeakReport::PrintLeakedObjectsForLeak(uptr index) { u32 leak_id = leaks_[index].id; for (uptr j = 0; j < leaked_objects_.size(); j++) { if (leaked_objects_[j].leak_id == leak_id) Printf("%p (%zu bytes)\n", leaked_objects_[j].addr, leaked_objects_[j].size); } } void LeakReport::PrintSummary() { CHECK(leaks_.size() <= kMaxLeaksConsidered); uptr bytes = 0, allocations = 0; for (uptr i = 0; i < leaks_.size(); i++) { if (leaks_[i].is_suppressed) continue; bytes += leaks_[i].total_size; allocations += leaks_[i].hit_count; } InternalScopedString summary(kMaxSummaryLength); summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, allocations); ReportErrorSummary(summary.data()); } void LeakReport::ApplySuppressions() { for (uptr i = 0; i < leaks_.size(); i++) { Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); if (s) { s->weight += leaks_[i].total_size; atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) + leaks_[i].hit_count); leaks_[i].is_suppressed = true; } } } uptr LeakReport::UnsuppressedLeakCount() { uptr result = 0; for (uptr i = 0; i < leaks_.size(); i++) if (!leaks_[i].is_suppressed) result++; return result; } } // namespace __lsan #else // CAN_SANITIZE_LEAKS namespace __lsan { void InitCommonLsan() { } void DoLeakCheck() { } void DisableInThisThread() { } void EnableInThisThread() { } } #endif // CAN_SANITIZE_LEAKS using namespace __lsan; // NOLINT extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __lsan_ignore_object(const void *p) { #if CAN_SANITIZE_LEAKS if (!common_flags()->detect_leaks) return; // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not // locked. 
BlockingMutexLock l(&global_mutex); IgnoreObjectResult res = IgnoreObjectLocked(p); if (res == kIgnoreObjectInvalid) VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p); if (res == kIgnoreObjectAlreadyIgnored) VReport(1, "__lsan_ignore_object(): " "heap object at %p is already being ignored\n", p); if (res == kIgnoreObjectSuccess) VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p); #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_register_root_region(const void *begin, uptr size) { #if CAN_SANITIZE_LEAKS BlockingMutexLock l(&global_mutex); CHECK(root_regions); RootRegion region = {reinterpret_cast<uptr>(begin), size}; root_regions->push_back(region); VReport(1, "Registered root region at %p of size %llu\n", begin, size); #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_unregister_root_region(const void *begin, uptr size) { #if CAN_SANITIZE_LEAKS BlockingMutexLock l(&global_mutex); CHECK(root_regions); bool removed = false; for (uptr i = 0; i < root_regions->size(); i++) { RootRegion region = (*root_regions)[i]; if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) { removed = true; uptr last_index = root_regions->size() - 1; (*root_regions)[i] = (*root_regions)[last_index]; root_regions->pop_back(); VReport(1, "Unregistered root region at %p of size %llu\n", begin, size); break; } } if (!removed) { Report( "__lsan_unregister_root_region(): region at %p of size %llu has not " "been registered.\n", begin, size); Die(); } #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_disable() { #if CAN_SANITIZE_LEAKS __lsan::DisableInThisThread(); #endif } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_enable() { #if CAN_SANITIZE_LEAKS __lsan::EnableInThisThread(); #endif } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_do_leak_check() { #if CAN_SANITIZE_LEAKS if (common_flags()->detect_leaks) __lsan::DoLeakCheck(); #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE int __lsan_do_recoverable_leak_check() { #if CAN_SANITIZE_LEAKS if (common_flags()->detect_leaks) return __lsan::DoRecoverableLeakCheck(); #endif // CAN_SANITIZE_LEAKS return 0; } #if !SANITIZER_SUPPORTS_WEAK_HOOKS SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int __lsan_is_turned_off() { return 0; } SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *__lsan_default_suppressions() { return ""; } #endif } // extern "C" Index: vendor/compiler-rt/dist/lib/lsan/lsan_common.h =================================================================== --- vendor/compiler-rt/dist/lib/lsan/lsan_common.h (revision 320960) +++ vendor/compiler-rt/dist/lib/lsan/lsan_common.h (revision 320961) @@ -1,253 +1,253 @@ //=-- lsan_common.h -------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // Private LSan header.
// //===----------------------------------------------------------------------===// #ifndef LSAN_COMMON_H #define LSAN_COMMON_H #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_platform.h" #include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_symbolizer.h" // LeakSanitizer relies on some of Glibc's internals (e.g. TLS machinery) and is // thus supported for Linux only. Also, LSan doesn't like 32 bit architectures // because of the "small" (4 byte) pointer size, which leads to a high false negative // ratio on large leaks. But we still want to have it for some 32 bit arches // (e.g. x86), see https://github.com/google/sanitizers/issues/403. // To enable LeakSanitizer on a new architecture, one needs to implement the // internal_clone function as well as (probably) adjust the TLS machinery for // the new architecture inside the sanitizer library. #if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \ (SANITIZER_WORDSIZE == 64) && \ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \ defined(__powerpc64__)) #define CAN_SANITIZE_LEAKS 1 #elif defined(__i386__) && \ (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) #define CAN_SANITIZE_LEAKS 1 #elif defined(__arm__) && \ SANITIZER_LINUX && !SANITIZER_ANDROID #define CAN_SANITIZE_LEAKS 1 #else #define CAN_SANITIZE_LEAKS 0 #endif namespace __sanitizer { class FlagParser; struct DTLS; } namespace __lsan { // Chunk tags. enum ChunkTag { kDirectlyLeaked = 0, // default kIndirectlyLeaked = 1, kReachable = 2, kIgnored = 3 }; const u32 kInvalidTid = (u32) -1; struct Flags { #define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name; #include "lsan_flags.inc" #undef LSAN_FLAG void SetDefaults(); uptr pointer_alignment() const { return use_unaligned ? 1 : sizeof(uptr); } }; extern Flags lsan_flags; inline Flags *flags() { return &lsan_flags; } void RegisterLsanFlags(FlagParser *parser, Flags *f); struct Leak { u32 id; uptr hit_count; uptr total_size; u32 stack_trace_id; bool is_directly_leaked; bool is_suppressed; }; struct LeakedObject { u32 leak_id; uptr addr; uptr size; }; // Aggregates leaks by stack trace prefix. class LeakReport { public: LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {} void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size, ChunkTag tag); void ReportTopLeaks(uptr max_leaks); void PrintSummary(); void ApplySuppressions(); uptr UnsuppressedLeakCount(); private: void PrintReportForLeak(uptr index); void PrintLeakedObjectsForLeak(uptr index); u32 next_id_; InternalMmapVector<Leak> leaks_; InternalMmapVector<LeakedObject> leaked_objects_; }; typedef InternalMmapVector<uptr> Frontier; // Platform-specific functions. void InitializePlatformSpecificModules(); void ProcessGlobalRegions(Frontier *frontier); void ProcessPlatformSpecificAllocations(Frontier *frontier); struct RootRegion { uptr begin; uptr size; }; InternalMmapVector<RootRegion> const *GetRootRegions(); void ScanRootRegion(Frontier *frontier, RootRegion const &region, - uptr region_begin, uptr region_end, uptr prot); + uptr region_begin, uptr region_end, bool is_readable); // Run stoptheworld while holding any platform-specific locks.
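// The callback runs while every other thread is suspended and receives the
// list of suspended threads, so their registers and stacks can be scanned
// without racing against mutators.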
void DoStopTheWorld(StopTheWorldCallback callback, void* argument); void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, const char *region_type, ChunkTag tag); void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier); enum IgnoreObjectResult { kIgnoreObjectSuccess, kIgnoreObjectAlreadyIgnored, kIgnoreObjectInvalid }; // Functions called from the parent tool. void InitCommonLsan(); void DoLeakCheck(); void DisableCounterUnderflow(); bool DisabledInThisThread(); // Used to implement __lsan::ScopedDisabler. void DisableInThisThread(); void EnableInThisThread(); // Can be used to ignore memory allocated by an intercepted // function. struct ScopedInterceptorDisabler { ScopedInterceptorDisabler() { DisableInThisThread(); } ~ScopedInterceptorDisabler() { EnableInThisThread(); } }; // According to the Itanium C++ ABI, an array cookie is one word containing // the size of the allocated array. static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size, uptr addr) { return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr && *reinterpret_cast<uptr *>(chunk_beg) == 0; } // According to the ARM C++ ABI, an array cookie consists of two words: // struct array_cookie { // std::size_t element_size; // element_size != 0 // std::size_t element_count; // }; static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size, uptr addr) { return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr && *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0; } // Special case for "new T[0]" where T is a type with DTOR. // new T[0] will allocate a cookie (one or two words) for the array size (0) // and store a pointer to the end of allocated chunk. The actual cookie layout // varies between platforms according to their C++ ABI implementation. inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size, uptr addr) { #if defined(__arm__) return IsARMABIArrayCookie(chunk_beg, chunk_size, addr); #else return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr); #endif } // The following must be implemented in the parent tool. void ForEachChunk(ForEachChunkCallback callback, void *arg); // Returns the address range occupied by the global allocator object. void GetAllocatorGlobalRange(uptr *begin, uptr *end); // Wrappers for allocator's ForceLock()/ForceUnlock(). void LockAllocator(); void UnlockAllocator(); // Returns true if [addr, addr + sizeof(void *)) is poisoned. bool WordIsPoisoned(uptr addr); // Wrappers for ThreadRegistry access. void LockThreadRegistry(); void UnlockThreadRegistry(); bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls); void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, void *arg); // If called from the main thread, updates the main thread's TID in the thread // registry. We need this to handle processes that fork() without a subsequent // exec(), which invalidates the recorded TID. To update it, we must call // gettid() from the main thread. Our solution is to call this function before // leak checking and also before every call to pthread_create() (to handle cases // where leak checking is initiated from a non-main thread). void EnsureMainThreadIDIsCorrect(); // If p points into a chunk that has been allocated to the user, returns its // user-visible address. Otherwise, returns 0. uptr PointsIntoChunk(void *p); // Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk); // Helper for __lsan_ignore_object(). IgnoreObjectResult IgnoreObjectLocked(const void *p); // Return the linker module, if valid for the platform. LoadedModule *GetLinker(); // Wrapper for chunk metadata operations. class LsanMetadata { public: // Constructor accepts address of user-visible chunk. explicit LsanMetadata(uptr chunk); bool allocated() const; ChunkTag tag() const; void set_tag(ChunkTag value); uptr requested_size() const; u32 stack_trace_id() const; private: void *metadata_; }; } // namespace __lsan extern "C" { SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int __lsan_is_turned_off(); SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *__lsan_default_suppressions(); } // extern "C" #endif // LSAN_COMMON_H Index: vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc =================================================================== --- vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc (revision 320961) @@ -1,173 +1,173 @@ //=-- lsan_common_mac.cc --------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of LeakSanitizer. // Implementation of common leak checking functionality. Darwin-specific code. // //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #include "lsan_common.h" #if CAN_SANITIZE_LEAKS && SANITIZER_MAC #include "sanitizer_common/sanitizer_allocator_internal.h" #include "lsan_allocator.h" #include <pthread.h> #include <mach/mach.h> namespace __lsan { typedef struct { int disable_counter; u32 current_thread_id; AllocatorCache cache; } thread_local_data_t; static pthread_key_t key; static pthread_once_t key_once = PTHREAD_ONCE_INIT; // The main thread destructor requires the current thread id, // so we can't destroy it until it's been used and reset to invalid tid void restore_tid_data(void *ptr) { thread_local_data_t *data = (thread_local_data_t *)ptr; if (data->current_thread_id != kInvalidTid) pthread_setspecific(key, data); } static void make_tls_key() { CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0); } static thread_local_data_t *get_tls_val(bool alloc) { pthread_once(&key_once, make_tls_key); thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key); if (ptr == NULL && alloc) { ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr)); ptr->disable_counter = 0; ptr->current_thread_id = kInvalidTid; ptr->cache = AllocatorCache(); pthread_setspecific(key, ptr); } return ptr; } bool DisabledInThisThread() { thread_local_data_t *data = get_tls_val(false); return data ? data->disable_counter > 0 : false; } void DisableInThisThread() { ++get_tls_val(true)->disable_counter; } void EnableInThisThread() { int *disable_counter = &get_tls_val(true)->disable_counter; if (*disable_counter == 0) { DisableCounterUnderflow(); } --*disable_counter; } u32 GetCurrentThread() { thread_local_data_t *data = get_tls_val(false); return data ?
data->current_thread_id : kInvalidTid; } void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; } AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; } LoadedModule *GetLinker() { return nullptr; } // Required on Linux for initialization of TLS behavior, but should not be // required on Darwin. void InitializePlatformSpecificModules() {} // Scans global variables for heap pointers. void ProcessGlobalRegions(Frontier *frontier) { MemoryMappingLayout memory_mapping(false); InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128); memory_mapping.DumpListOfModules(&modules); for (uptr i = 0; i < modules.size(); ++i) { // Even when global scanning is disabled, we still need to scan // system libraries for stashed pointers if (!flags()->use_globals && modules[i].instrumented()) continue; for (const __sanitizer::LoadedModule::AddressRange &range : modules[i].ranges()) { // Sections storing global variables are writable and non-executable if (range.executable || !range.writable) continue; ScanGlobalRange(range.beg, range.end, frontier); } } } void ProcessPlatformSpecificAllocations(Frontier *frontier) { mach_port_name_t port; if (task_for_pid(mach_task_self(), internal_getpid(), &port) != KERN_SUCCESS) { return; } unsigned depth = 1; vm_size_t size = 0; vm_address_t address = 0; kern_return_t err = KERN_SUCCESS; mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; InternalMmapVector<RootRegion> const *root_regions = GetRootRegions(); while (err == KERN_SUCCESS) { struct vm_region_submap_info_64 info; err = vm_region_recurse_64(port, &address, &size, &depth, (vm_region_info_t)&info, &count); uptr end_address = address + size; // libxpc stashes some pointers in the Kernel Alloc Once page, // make sure not to report those as leaks. if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) { ScanRangeForPointers(address, end_address, frontier, "GLOBAL", kReachable); // Recursing over the full memory map is very slow, break out // early if we don't need the full iteration. if (!flags()->use_root_regions || !root_regions->size()) break; } // This additional root region scan is required on Darwin in order to // detect root regions contained within mmap'd memory regions, because // the Darwin implementation of sanitizer_procmaps traverses images // as loaded by dyld, and not the complete set of all memory regions. // // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same // behavior as sanitizer_procmaps_linux and traverses all memory regions if (flags()->use_root_regions) { for (uptr i = 0; i < root_regions->size(); i++) { ScanRootRegion(frontier, (*root_regions)[i], address, end_address, - info.protection); + info.protection & kProtectionRead); } } address = end_address; } } void DoStopTheWorld(StopTheWorldCallback callback, void *argument) { StopTheWorld(callback, argument); } } // namespace __lsan #endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC Index: vendor/compiler-rt/dist/lib/msan/msan_interceptors.cc =================================================================== --- vendor/compiler-rt/dist/lib/msan/msan_interceptors.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/msan/msan_interceptors.cc (revision 320961) @@ -1,1592 +1,1587 @@ //===-- msan_interceptors.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details.
// //===----------------------------------------------------------------------===// // // This file is a part of MemorySanitizer. // // Interceptors for standard library functions. // // FIXME: move as many interceptors as possible into // sanitizer_common/sanitizer_common_interceptors.h //===----------------------------------------------------------------------===// #include "interception/interception.h" #include "msan.h" #include "msan_chained_origin_depot.h" #include "msan_origin.h" #include "msan_thread.h" #include "msan_poisoning.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" #include <stdarg.h> // ACHTUNG! No other system header includes in this file. // Ideally, we should get rid of stdarg.h as well. using namespace __msan; using __sanitizer::memory_order; using __sanitizer::atomic_load; using __sanitizer::atomic_store; using __sanitizer::atomic_uintptr_t; DECLARE_REAL(SIZE_T, strlen, const char *s) DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen) DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n) DECLARE_REAL(void *, memset, void *dest, int c, uptr n) -#if SANITIZER_FREEBSD -#define __errno_location __error -#endif - // True if this is a nested interceptor. static THREADLOCAL int in_interceptor_scope; -extern "C" int *__errno_location(void); - struct InterceptorScope { InterceptorScope() { ++in_interceptor_scope; } ~InterceptorScope() { --in_interceptor_scope; } }; bool IsInInterceptorScope() { return in_interceptor_scope; } static uptr allocated_for_dlsym; static const uptr kDlsymAllocPoolSize = 1024; static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; static bool IsInDlsymAllocPool(const void *ptr) { uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; return off < sizeof(alloc_memory_for_dlsym); } static void *AllocateFromLocalPool(uptr size_in_bytes) { uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym]; allocated_for_dlsym += size_in_words; CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); return mem; } #define ENSURE_MSAN_INITED() do { \ CHECK(!msan_init_is_running); \ if (!msan_inited) { \ __msan_init(); \ } \ } while (0) // Check that [x, x+n) range is unpoisoned. #define CHECK_UNPOISONED_0(x, n) \ do { \ sptr offset = __msan_test_shadow(x, n); \ if (__msan::IsInSymbolizer()) \ break; \ if (offset >= 0 && __msan::flags()->report_umrs) { \ GET_CALLER_PC_BP_SP; \ (void) sp; \ ReportUMRInsideAddressRange(__func__, x, n, offset); \ __msan::PrintWarningWithOrigin( \ pc, bp, __msan_get_origin((const char *)x + offset)); \ if (__msan::flags()->halt_on_error) { \ Printf("Exiting\n"); \ Die(); \ } \ } \ } while (0) // Check that [x, x+n) range is unpoisoned unless we are in a nested // interceptor. #define CHECK_UNPOISONED(x, n) \ do { \ if (!IsInInterceptorScope()) CHECK_UNPOISONED_0(x, n); \ } while (0); #define CHECK_UNPOISONED_STRING_OF_LEN(x, len, n) \ CHECK_UNPOISONED((x), \ common_flags()->strict_string_checks ?
(len) + 1 : (n) ) #define CHECK_UNPOISONED_STRING(x, n) \ CHECK_UNPOISONED_STRING_OF_LEN((x), internal_strlen(x), (n)) #if !SANITIZER_FREEBSD INTERCEPTOR(SIZE_T, fread_unlocked, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) { ENSURE_MSAN_INITED(); SIZE_T res = REAL(fread_unlocked)(ptr, size, nmemb, file); if (res > 0) __msan_unpoison(ptr, res *size); return res; } #define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED INTERCEPT_FUNCTION(fread_unlocked) #else #define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED #endif INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) { ENSURE_MSAN_INITED(); CHECK_UNPOISONED_STRING(path, 0) SSIZE_T res = REAL(readlink)(path, buf, bufsiz); if (res > 0) __msan_unpoison(buf, res); return res; } INTERCEPTOR(void *, mempcpy, void *dest, const void *src, SIZE_T n) { return (char *)__msan_memcpy(dest, src, n) + n; } INTERCEPTOR(void *, memccpy, void *dest, const void *src, int c, SIZE_T n) { ENSURE_MSAN_INITED(); void *res = REAL(memccpy)(dest, src, c, n); CHECK(!res || (res >= dest && res <= (char *)dest + n)); SIZE_T sz = res ? (char *)res - (char *)dest : n; CHECK_UNPOISONED(src, sz); __msan_unpoison(dest, sz); return res; } INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) { return __msan_memmove(dest, src, n); } INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) { GET_MALLOC_STACK_TRACE; CHECK_EQ(alignment & (alignment - 1), 0); CHECK_NE(memptr, 0); *memptr = MsanReallocate(&stack, nullptr, size, alignment, false); CHECK_NE(*memptr, 0); __msan_unpoison(memptr, sizeof(*memptr)); return 0; } #if !SANITIZER_FREEBSD INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) { GET_MALLOC_STACK_TRACE; CHECK_EQ(boundary & (boundary - 1), 0); void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false); return ptr; } #define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign) #else #define MSAN_MAYBE_INTERCEPT_MEMALIGN #endif INTERCEPTOR(void *, aligned_alloc, SIZE_T boundary, SIZE_T size) { GET_MALLOC_STACK_TRACE; CHECK_EQ(boundary & (boundary - 1), 0); void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false); return ptr; } INTERCEPTOR(void *, __libc_memalign, SIZE_T boundary, SIZE_T size) { GET_MALLOC_STACK_TRACE; CHECK_EQ(boundary & (boundary - 1), 0); void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false); DTLS_on_libc_memalign(ptr, size); return ptr; } INTERCEPTOR(void *, valloc, SIZE_T size) { GET_MALLOC_STACK_TRACE; void *ptr = MsanReallocate(&stack, nullptr, size, GetPageSizeCached(), false); return ptr; } #if !SANITIZER_FREEBSD INTERCEPTOR(void *, pvalloc, SIZE_T size) { GET_MALLOC_STACK_TRACE; uptr PageSize = GetPageSizeCached(); size = RoundUpTo(size, PageSize); if (size == 0) { // pvalloc(0) should allocate one page. 
size = PageSize; } void *ptr = MsanReallocate(&stack, nullptr, size, PageSize, false); return ptr; } #define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc) #else #define MSAN_MAYBE_INTERCEPT_PVALLOC #endif INTERCEPTOR(void, free, void *ptr) { GET_MALLOC_STACK_TRACE; if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; MsanDeallocate(&stack, ptr); } #if !SANITIZER_FREEBSD INTERCEPTOR(void, cfree, void *ptr) { GET_MALLOC_STACK_TRACE; if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; MsanDeallocate(&stack, ptr); } #define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree) #else #define MSAN_MAYBE_INTERCEPT_CFREE #endif INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { return __sanitizer_get_allocated_size(ptr); } #if !SANITIZER_FREEBSD // This function actually returns a struct by value, but we can't unpoison a // temporary! The following is equivalent on all supported platforms but // aarch64 (which uses a different register for sret value). We have a test // to confirm that. INTERCEPTOR(void, mallinfo, __sanitizer_mallinfo *sret) { #ifdef __aarch64__ uptr r8; asm volatile("mov %0,x8" : "=r" (r8)); sret = reinterpret_cast<__sanitizer_mallinfo*>(r8); #endif REAL(memset)(sret, 0, sizeof(*sret)); __msan_unpoison(sret, sizeof(*sret)); } #define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo) #else #define MSAN_MAYBE_INTERCEPT_MALLINFO #endif #if !SANITIZER_FREEBSD INTERCEPTOR(int, mallopt, int cmd, int value) { return -1; } #define MSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt) #else #define MSAN_MAYBE_INTERCEPT_MALLOPT #endif #if !SANITIZER_FREEBSD INTERCEPTOR(void, malloc_stats, void) { // FIXME: implement, but don't call REAL(malloc_stats)! } #define MSAN_MAYBE_INTERCEPT_MALLOC_STATS INTERCEPT_FUNCTION(malloc_stats) #else #define MSAN_MAYBE_INTERCEPT_MALLOC_STATS #endif INTERCEPTOR(char *, strcpy, char *dest, const char *src) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T n = REAL(strlen)(src); CHECK_UNPOISONED_STRING(src + n, 0); char *res = REAL(strcpy)(dest, src); // NOLINT CopyShadowAndOrigin(dest, src, n + 1, &stack); return res; } INTERCEPTOR(char *, strncpy, char *dest, const char *src, SIZE_T n) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T copy_size = REAL(strnlen)(src, n); if (copy_size < n) copy_size++; // trailing \0 char *res = REAL(strncpy)(dest, src, n); // NOLINT CopyShadowAndOrigin(dest, src, copy_size, &stack); __msan_unpoison(dest + copy_size, n - copy_size); return res; } INTERCEPTOR(char *, stpcpy, char *dest, const char *src) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T n = REAL(strlen)(src); CHECK_UNPOISONED_STRING(src + n, 0); char *res = REAL(stpcpy)(dest, src); // NOLINT CopyShadowAndOrigin(dest, src, n + 1, &stack); return res; } INTERCEPTOR(char *, strdup, char *src) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; // On FreeBSD strdup() leverages strlen(). 
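// Entering the interceptor scope below keeps that nested strlen() call from
// re-checking its argument (see CHECK_UNPOISONED).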
InterceptorScope interceptor_scope; SIZE_T n = REAL(strlen)(src); CHECK_UNPOISONED_STRING(src + n, 0); char *res = REAL(strdup)(src); CopyShadowAndOrigin(res, src, n + 1, &stack); return res; } #if !SANITIZER_FREEBSD INTERCEPTOR(char *, __strdup, char *src) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T n = REAL(strlen)(src); CHECK_UNPOISONED_STRING(src + n, 0); char *res = REAL(__strdup)(src); CopyShadowAndOrigin(res, src, n + 1, &stack); return res; } #define MSAN_MAYBE_INTERCEPT___STRDUP INTERCEPT_FUNCTION(__strdup) #else #define MSAN_MAYBE_INTERCEPT___STRDUP #endif INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) { ENSURE_MSAN_INITED(); char *res = REAL(gcvt)(number, ndigit, buf); SIZE_T n = REAL(strlen)(buf); __msan_unpoison(buf, n + 1); return res; } INTERCEPTOR(char *, strcat, char *dest, const char *src) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T src_size = REAL(strlen)(src); SIZE_T dest_size = REAL(strlen)(dest); CHECK_UNPOISONED_STRING(src + src_size, 0); CHECK_UNPOISONED_STRING(dest + dest_size, 0); char *res = REAL(strcat)(dest, src); // NOLINT CopyShadowAndOrigin(dest + dest_size, src, src_size + 1, &stack); return res; } INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T dest_size = REAL(strlen)(dest); SIZE_T copy_size = REAL(strnlen)(src, n); CHECK_UNPOISONED_STRING(dest + dest_size, 0); char *res = REAL(strncat)(dest, src, n); // NOLINT CopyShadowAndOrigin(dest + dest_size, src, copy_size, &stack); __msan_unpoison(dest + dest_size + copy_size, 1); // \0 return res; } // Hack: always pass nptr and endptr as part of __VA_ARGS_ to avoid having to // deal with empty __VA_ARGS__ in the case of INTERCEPTOR_STRTO. #define INTERCEPTOR_STRTO_BODY(ret_type, func, ...) 
\ ENSURE_MSAN_INITED(); \ ret_type res = REAL(func)(__VA_ARGS__); \ __msan_unpoison(endptr, sizeof(*endptr)); \ return res; #define INTERCEPTOR_STRTO(ret_type, func, char_type) \ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr) { \ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr); \ } #define INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \ int base) { \ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base); \ } #define INTERCEPTOR_STRTO_LOC(ret_type, func, char_type) \ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \ void *loc) { \ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc); \ } #define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func, char_type) \ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \ int base, void *loc) { \ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base, loc); \ } #define INTERCEPTORS_STRTO(ret_type, func, char_type) \ INTERCEPTOR_STRTO(ret_type, func, char_type) \ INTERCEPTOR_STRTO_LOC(ret_type, func##_l, char_type) \ INTERCEPTOR_STRTO_LOC(ret_type, __##func##_l, char_type) \ INTERCEPTOR_STRTO_LOC(ret_type, __##func##_internal, char_type) #define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \ INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \ INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type) \ INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_l, char_type) \ INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_internal, char_type) INTERCEPTORS_STRTO(double, strtod, char) // NOLINT INTERCEPTORS_STRTO(float, strtof, char) // NOLINT INTERCEPTORS_STRTO(long double, strtold, char) // NOLINT INTERCEPTORS_STRTO_BASE(long, strtol, char) // NOLINT INTERCEPTORS_STRTO_BASE(long long, strtoll, char) // NOLINT INTERCEPTORS_STRTO_BASE(unsigned long, strtoul, char) // NOLINT INTERCEPTORS_STRTO_BASE(unsigned long long, strtoull, char) // NOLINT INTERCEPTORS_STRTO(double, wcstod, wchar_t) // NOLINT INTERCEPTORS_STRTO(float, wcstof, wchar_t) // NOLINT INTERCEPTORS_STRTO(long double, wcstold, wchar_t) // NOLINT INTERCEPTORS_STRTO_BASE(long, wcstol, wchar_t) // NOLINT INTERCEPTORS_STRTO_BASE(long long, wcstoll, wchar_t) // NOLINT INTERCEPTORS_STRTO_BASE(unsigned long, wcstoul, wchar_t) // NOLINT INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t) // NOLINT #define INTERCEPT_STRTO(func) \ INTERCEPT_FUNCTION(func); \ INTERCEPT_FUNCTION(func##_l); \ INTERCEPT_FUNCTION(__##func##_l); \ INTERCEPT_FUNCTION(__##func##_internal); // FIXME: support *wprintf in common format interceptors. INTERCEPTOR(int, vswprintf, void *str, uptr size, void *format, va_list ap) { ENSURE_MSAN_INITED(); int res = REAL(vswprintf)(str, size, format, ap); if (res >= 0) { __msan_unpoison(str, 4 * (res + 1)); } return res; } INTERCEPTOR(int, swprintf, void *str, uptr size, void *format, ...) 
{ ENSURE_MSAN_INITED(); va_list ap; va_start(ap, format); int res = vswprintf(str, size, format, ap); va_end(ap); return res; } INTERCEPTOR(SIZE_T, strxfrm, char *dest, const char *src, SIZE_T n) { ENSURE_MSAN_INITED(); CHECK_UNPOISONED(src, REAL(strlen)(src) + 1); SIZE_T res = REAL(strxfrm)(dest, src, n); if (res < n) __msan_unpoison(dest, res + 1); return res; } INTERCEPTOR(SIZE_T, strxfrm_l, char *dest, const char *src, SIZE_T n, void *loc) { ENSURE_MSAN_INITED(); CHECK_UNPOISONED(src, REAL(strlen)(src) + 1); SIZE_T res = REAL(strxfrm_l)(dest, src, n, loc); if (res < n) __msan_unpoison(dest, res + 1); return res; } #define INTERCEPTOR_STRFTIME_BODY(char_type, ret_type, func, s, ...) \ ENSURE_MSAN_INITED(); \ ret_type res = REAL(func)(s, __VA_ARGS__); \ if (s) __msan_unpoison(s, sizeof(char_type) * (res + 1)); \ return res; INTERCEPTOR(SIZE_T, strftime, char *s, SIZE_T max, const char *format, __sanitizer_tm *tm) { INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, strftime, s, max, format, tm); } INTERCEPTOR(SIZE_T, strftime_l, char *s, SIZE_T max, const char *format, __sanitizer_tm *tm, void *loc) { INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, strftime_l, s, max, format, tm, loc); } #if !SANITIZER_FREEBSD INTERCEPTOR(SIZE_T, __strftime_l, char *s, SIZE_T max, const char *format, __sanitizer_tm *tm, void *loc) { INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, __strftime_l, s, max, format, tm, loc); } #define MSAN_MAYBE_INTERCEPT___STRFTIME_L INTERCEPT_FUNCTION(__strftime_l) #else #define MSAN_MAYBE_INTERCEPT___STRFTIME_L #endif INTERCEPTOR(SIZE_T, wcsftime, wchar_t *s, SIZE_T max, const wchar_t *format, __sanitizer_tm *tm) { INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, wcsftime, s, max, format, tm); } INTERCEPTOR(SIZE_T, wcsftime_l, wchar_t *s, SIZE_T max, const wchar_t *format, __sanitizer_tm *tm, void *loc) { INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, wcsftime_l, s, max, format, tm, loc); } #if !SANITIZER_FREEBSD INTERCEPTOR(SIZE_T, __wcsftime_l, wchar_t *s, SIZE_T max, const wchar_t *format, __sanitizer_tm *tm, void *loc) { INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, __wcsftime_l, s, max, format, tm, loc); } #define MSAN_MAYBE_INTERCEPT___WCSFTIME_L INTERCEPT_FUNCTION(__wcsftime_l) #else #define MSAN_MAYBE_INTERCEPT___WCSFTIME_L #endif INTERCEPTOR(int, mbtowc, wchar_t *dest, const char *src, SIZE_T n) { ENSURE_MSAN_INITED(); int res = REAL(mbtowc)(dest, src, n); if (res != -1 && dest) __msan_unpoison(dest, sizeof(wchar_t)); return res; } INTERCEPTOR(int, mbrtowc, wchar_t *dest, const char *src, SIZE_T n, void *ps) { ENSURE_MSAN_INITED(); SIZE_T res = REAL(mbrtowc)(dest, src, n, ps); if (res != (SIZE_T)-1 && dest) __msan_unpoison(dest, sizeof(wchar_t)); return res; } // wchar_t *wmemcpy(wchar_t *dest, const wchar_t *src, SIZE_T n); INTERCEPTOR(wchar_t *, wmemcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; wchar_t *res = REAL(wmemcpy)(dest, src, n); CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack); return res; } INTERCEPTOR(wchar_t *, wmempcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; wchar_t *res = REAL(wmempcpy)(dest, src, n); CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack); return res; } INTERCEPTOR(wchar_t *, wmemset, wchar_t *s, wchar_t c, SIZE_T n) { CHECK(MEM_IS_APP(s)); ENSURE_MSAN_INITED(); wchar_t *res = REAL(wmemset)(s, c, n); __msan_unpoison(s, n * sizeof(wchar_t)); return res; } INTERCEPTOR(wchar_t *, wmemmove, wchar_t *dest, const wchar_t *src, SIZE_T n) { ENSURE_MSAN_INITED(); 
GET_STORE_STACK_TRACE; wchar_t *res = REAL(wmemmove)(dest, src, n); MoveShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack); return res; } INTERCEPTOR(int, wcscmp, const wchar_t *s1, const wchar_t *s2) { ENSURE_MSAN_INITED(); int res = REAL(wcscmp)(s1, s2); return res; } INTERCEPTOR(int, gettimeofday, void *tv, void *tz) { ENSURE_MSAN_INITED(); int res = REAL(gettimeofday)(tv, tz); if (tv) __msan_unpoison(tv, 16); if (tz) __msan_unpoison(tz, 8); return res; } INTERCEPTOR(char *, fcvt, double x, int a, int *b, int *c) { ENSURE_MSAN_INITED(); char *res = REAL(fcvt)(x, a, b, c); __msan_unpoison(b, sizeof(*b)); __msan_unpoison(c, sizeof(*c)); if (res) __msan_unpoison(res, REAL(strlen)(res) + 1); return res; } INTERCEPTOR(char *, getenv, char *name) { if (msan_init_is_running) return REAL(getenv)(name); ENSURE_MSAN_INITED(); char *res = REAL(getenv)(name); if (res) __msan_unpoison(res, REAL(strlen)(res) + 1); return res; } extern char **environ; static void UnpoisonEnviron() { char **envp = environ; for (; *envp; ++envp) { __msan_unpoison(envp, sizeof(*envp)); __msan_unpoison(*envp, REAL(strlen)(*envp) + 1); } // Trailing NULL pointer. __msan_unpoison(envp, sizeof(*envp)); } INTERCEPTOR(int, setenv, const char *name, const char *value, int overwrite) { ENSURE_MSAN_INITED(); CHECK_UNPOISONED_STRING(name, 0) int res = REAL(setenv)(name, value, overwrite); if (!res) UnpoisonEnviron(); return res; } INTERCEPTOR(int, putenv, char *string) { ENSURE_MSAN_INITED(); int res = REAL(putenv)(string); if (!res) UnpoisonEnviron(); return res; } #if !SANITIZER_FREEBSD INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__fxstat)(magic, fd, buf); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } #define MSAN_MAYBE_INTERCEPT___FXSTAT INTERCEPT_FUNCTION(__fxstat) #else #define MSAN_MAYBE_INTERCEPT___FXSTAT #endif #if !SANITIZER_FREEBSD INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__fxstat64)(magic, fd, buf); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat64_sz); return res; } #define MSAN_MAYBE_INTERCEPT___FXSTAT64 INTERCEPT_FUNCTION(__fxstat64) #else #define MSAN_MAYBE_INTERCEPT___FXSTAT64 #endif #if SANITIZER_FREEBSD INTERCEPTOR(int, fstatat, int fd, char *pathname, void *buf, int flags) { ENSURE_MSAN_INITED(); int res = REAL(fstatat)(fd, pathname, buf, flags); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } # define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(fstatat) #else INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf, int flags) { ENSURE_MSAN_INITED(); int res = REAL(__fxstatat)(magic, fd, pathname, buf, flags); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } # define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(__fxstatat) #endif #if !SANITIZER_FREEBSD INTERCEPTOR(int, __fxstatat64, int magic, int fd, char *pathname, void *buf, int flags) { ENSURE_MSAN_INITED(); int res = REAL(__fxstatat64)(magic, fd, pathname, buf, flags); if (!res) __msan_unpoison(buf, __sanitizer::struct_stat64_sz); return res; } #define MSAN_MAYBE_INTERCEPT___FXSTATAT64 INTERCEPT_FUNCTION(__fxstatat64) #else #define MSAN_MAYBE_INTERCEPT___FXSTATAT64 #endif INTERCEPTOR(int, pipe, int pipefd[2]) { if (msan_init_is_running) return REAL(pipe)(pipefd); ENSURE_MSAN_INITED(); int res = REAL(pipe)(pipefd); if (!res) __msan_unpoison(pipefd, sizeof(int[2])); return res; } INTERCEPTOR(int, pipe2, int pipefd[2], int flags) { 
ENSURE_MSAN_INITED(); int res = REAL(pipe2)(pipefd, flags); if (!res) __msan_unpoison(pipefd, sizeof(int[2])); return res; } INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int sv[2]) { ENSURE_MSAN_INITED(); int res = REAL(socketpair)(domain, type, protocol, sv); if (!res) __msan_unpoison(sv, sizeof(int[2])); return res; } INTERCEPTOR(char *, fgets, char *s, int size, void *stream) { ENSURE_MSAN_INITED(); char *res = REAL(fgets)(s, size, stream); if (res) __msan_unpoison(s, REAL(strlen)(s) + 1); return res; } #if !SANITIZER_FREEBSD INTERCEPTOR(char *, fgets_unlocked, char *s, int size, void *stream) { ENSURE_MSAN_INITED(); char *res = REAL(fgets_unlocked)(s, size, stream); if (res) __msan_unpoison(s, REAL(strlen)(s) + 1); return res; } #define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED INTERCEPT_FUNCTION(fgets_unlocked) #else #define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED #endif INTERCEPTOR(int, getrlimit, int resource, void *rlim) { if (msan_init_is_running) return REAL(getrlimit)(resource, rlim); ENSURE_MSAN_INITED(); int res = REAL(getrlimit)(resource, rlim); if (!res) __msan_unpoison(rlim, __sanitizer::struct_rlimit_sz); return res; } #if !SANITIZER_FREEBSD INTERCEPTOR(int, getrlimit64, int resource, void *rlim) { if (msan_init_is_running) return REAL(getrlimit64)(resource, rlim); ENSURE_MSAN_INITED(); int res = REAL(getrlimit64)(resource, rlim); if (!res) __msan_unpoison(rlim, __sanitizer::struct_rlimit64_sz); return res; } INTERCEPTOR(int, prlimit, int pid, int resource, void *new_rlimit, void *old_rlimit) { if (msan_init_is_running) return REAL(prlimit)(pid, resource, new_rlimit, old_rlimit); ENSURE_MSAN_INITED(); CHECK_UNPOISONED(new_rlimit, __sanitizer::struct_rlimit_sz); int res = REAL(prlimit)(pid, resource, new_rlimit, old_rlimit); if (!res) __msan_unpoison(old_rlimit, __sanitizer::struct_rlimit_sz); return res; } INTERCEPTOR(int, prlimit64, int pid, int resource, void *new_rlimit, void *old_rlimit) { if (msan_init_is_running) return REAL(prlimit64)(pid, resource, new_rlimit, old_rlimit); ENSURE_MSAN_INITED(); CHECK_UNPOISONED(new_rlimit, __sanitizer::struct_rlimit64_sz); int res = REAL(prlimit64)(pid, resource, new_rlimit, old_rlimit); if (!res) __msan_unpoison(old_rlimit, __sanitizer::struct_rlimit64_sz); return res; } #define MSAN_MAYBE_INTERCEPT_GETRLIMIT64 INTERCEPT_FUNCTION(getrlimit64) #define MSAN_MAYBE_INTERCEPT_PRLIMIT INTERCEPT_FUNCTION(prlimit) #define MSAN_MAYBE_INTERCEPT_PRLIMIT64 INTERCEPT_FUNCTION(prlimit64) #else #define MSAN_MAYBE_INTERCEPT_GETRLIMIT64 #define MSAN_MAYBE_INTERCEPT_PRLIMIT #define MSAN_MAYBE_INTERCEPT_PRLIMIT64 #endif #if SANITIZER_FREEBSD // FreeBSD's <sys/utsname.h> defines uname() as // static __inline int uname(struct utsname *name) { // return __xuname(SYS_NMLN, (void*)name); // } INTERCEPTOR(int, __xuname, int size, void *utsname) { ENSURE_MSAN_INITED(); int res = REAL(__xuname)(size, utsname); if (!res) __msan_unpoison(utsname, __sanitizer::struct_utsname_sz); return res; } #define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(__xuname) #else INTERCEPTOR(int, uname, struct utsname *utsname) { ENSURE_MSAN_INITED(); int res = REAL(uname)(utsname); if (!res) __msan_unpoison(utsname, __sanitizer::struct_utsname_sz); return res; } #define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(uname) #endif INTERCEPTOR(int, gethostname, char *name, SIZE_T len) { ENSURE_MSAN_INITED(); int res = REAL(gethostname)(name, len); if (!res) { SIZE_T real_len = REAL(strnlen)(name, len); if (real_len < len) ++real_len; __msan_unpoison(name, real_len); } return res; } #if
!SANITIZER_FREEBSD INTERCEPTOR(int, epoll_wait, int epfd, void *events, int maxevents, int timeout) { ENSURE_MSAN_INITED(); int res = REAL(epoll_wait)(epfd, events, maxevents, timeout); if (res > 0) { __msan_unpoison(events, __sanitizer::struct_epoll_event_sz * res); } return res; } #define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT INTERCEPT_FUNCTION(epoll_wait) #else #define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT #endif #if !SANITIZER_FREEBSD INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents, int timeout, void *sigmask) { ENSURE_MSAN_INITED(); int res = REAL(epoll_pwait)(epfd, events, maxevents, timeout, sigmask); if (res > 0) { __msan_unpoison(events, __sanitizer::struct_epoll_event_sz * res); } return res; } #define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT INTERCEPT_FUNCTION(epoll_pwait) #else #define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT #endif INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) { GET_MALLOC_STACK_TRACE; if (UNLIKELY(!msan_inited)) // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. return AllocateFromLocalPool(nmemb * size); return MsanCalloc(&stack, nmemb, size); } INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) { GET_MALLOC_STACK_TRACE; if (UNLIKELY(IsInDlsymAllocPool(ptr))) { uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym; uptr copy_size = Min(size, kDlsymAllocPoolSize - offset); void *new_ptr; if (UNLIKELY(!msan_inited)) { new_ptr = AllocateFromLocalPool(copy_size); } else { copy_size = size; new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false); } internal_memcpy(new_ptr, ptr, copy_size); return new_ptr; } return MsanReallocate(&stack, ptr, size, sizeof(u64), false); } INTERCEPTOR(void *, malloc, SIZE_T size) { GET_MALLOC_STACK_TRACE; if (UNLIKELY(!msan_inited)) // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. 
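// Memory handed out from the static dlsym pool is never recycled: free()
// simply ignores such pointers and realloc() copies the contents out of the
// pool (see IsInDlsymAllocPool()).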
return AllocateFromLocalPool(size); return MsanReallocate(&stack, nullptr, size, sizeof(u64), false); } void __msan_allocated_memory(const void *data, uptr size) { GET_MALLOC_STACK_TRACE; if (flags()->poison_in_malloc) { stack.tag = STACK_TRACE_TAG_POISON; PoisonMemory(data, size, &stack); } } void __msan_copy_shadow(void *dest, const void *src, uptr n) { GET_STORE_STACK_TRACE; MoveShadowAndOrigin(dest, src, n, &stack); } void __sanitizer_dtor_callback(const void *data, uptr size) { GET_MALLOC_STACK_TRACE; if (flags()->poison_in_dtor) { stack.tag = STACK_TRACE_TAG_POISON; PoisonMemory(data, size, &stack); } } INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags, int fd, OFF_T offset) { if (msan_init_is_running) return REAL(mmap)(addr, length, prot, flags, fd, offset); ENSURE_MSAN_INITED(); if (addr && !MEM_IS_APP(addr)) { if (flags & map_fixed) { - *__errno_location() = errno_EINVAL; + errno = errno_EINVAL; return (void *)-1; } else { addr = nullptr; } } void *res = REAL(mmap)(addr, length, prot, flags, fd, offset); if (res != (void*)-1) __msan_unpoison(res, RoundUpTo(length, GetPageSize())); return res; } #if !SANITIZER_FREEBSD INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags, int fd, OFF64_T offset) { ENSURE_MSAN_INITED(); if (addr && !MEM_IS_APP(addr)) { if (flags & map_fixed) { - *__errno_location() = errno_EINVAL; + errno = errno_EINVAL; return (void *)-1; } else { addr = nullptr; } } void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset); if (res != (void*)-1) __msan_unpoison(res, RoundUpTo(length, GetPageSize())); return res; } #define MSAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64) #else #define MSAN_MAYBE_INTERCEPT_MMAP64 #endif INTERCEPTOR(int, getrusage, int who, void *usage) { ENSURE_MSAN_INITED(); int res = REAL(getrusage)(who, usage); if (res == 0) { __msan_unpoison(usage, __sanitizer::struct_rusage_sz); } return res; } class SignalHandlerScope { public: SignalHandlerScope() { if (MsanThread *t = GetCurrentThread()) t->EnterSignalHandler(); } ~SignalHandlerScope() { if (MsanThread *t = GetCurrentThread()) t->LeaveSignalHandler(); } }; // sigactions_mu guarantees atomicity of sigaction() and signal() calls. // Access to sigactions[] is done with relaxed atomics to avoid data race with // the signal handler. const int kMaxSignals = 1024; static atomic_uintptr_t sigactions[kMaxSignals]; static StaticSpinMutex sigactions_mu; static void SignalHandler(int signo) { SignalHandlerScope signal_handler_scope; ScopedThreadLocalStateBackup stlsb; UnpoisonParam(1); typedef void (*signal_cb)(int x); signal_cb cb = (signal_cb)atomic_load(&sigactions[signo], memory_order_relaxed); cb(signo); } static void SignalAction(int signo, void *si, void *uc) { SignalHandlerScope signal_handler_scope; ScopedThreadLocalStateBackup stlsb; UnpoisonParam(3); __msan_unpoison(si, sizeof(__sanitizer_sigaction)); __msan_unpoison(uc, __sanitizer::ucontext_t_sz); typedef void (*sigaction_cb)(int, void *, void *); sigaction_cb cb = (sigaction_cb)atomic_load(&sigactions[signo], memory_order_relaxed); cb(signo, si, uc); } INTERCEPTOR(int, sigaction, int signo, const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) { ENSURE_MSAN_INITED(); // FIXME: check that *act is unpoisoned. // That requires intercepting all of sigemptyset, sigfillset, etc.
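// When wrap_signals is set, the user handler is stashed in sigactions[] and
// replaced with SignalHandler/SignalAction, which unpoison the handler's
// arguments before chaining to it.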
int res; if (flags()->wrap_signals) { SpinMutexLock lock(&sigactions_mu); CHECK_LT(signo, kMaxSignals); uptr old_cb = atomic_load(&sigactions[signo], memory_order_relaxed); __sanitizer_sigaction new_act; __sanitizer_sigaction *pnew_act = act ? &new_act : nullptr; if (act) { REAL(memcpy)(pnew_act, act, sizeof(__sanitizer_sigaction)); uptr cb = (uptr)pnew_act->sigaction; uptr new_cb = (pnew_act->sa_flags & __sanitizer::sa_siginfo) ? (uptr)SignalAction : (uptr)SignalHandler; if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { atomic_store(&sigactions[signo], cb, memory_order_relaxed); pnew_act->sigaction = (void (*)(int, void *, void *))new_cb; } } res = REAL(sigaction)(signo, pnew_act, oldact); if (res == 0 && oldact) { uptr cb = (uptr)oldact->sigaction; if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { oldact->sigaction = (void (*)(int, void *, void *))old_cb; } } } else { res = REAL(sigaction)(signo, act, oldact); } if (res == 0 && oldact) { __msan_unpoison(oldact, sizeof(__sanitizer_sigaction)); } return res; } INTERCEPTOR(int, signal, int signo, uptr cb) { ENSURE_MSAN_INITED(); if (flags()->wrap_signals) { CHECK_LT(signo, kMaxSignals); SpinMutexLock lock(&sigactions_mu); if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { atomic_store(&sigactions[signo], cb, memory_order_relaxed); cb = (uptr) SignalHandler; } return REAL(signal)(signo, cb); } else { return REAL(signal)(signo, cb); } } extern "C" int pthread_attr_init(void *attr); extern "C" int pthread_attr_destroy(void *attr); static void *MsanThreadStartFunc(void *arg) { MsanThread *t = (MsanThread *)arg; SetCurrentThread(t); return t->ThreadStart(); } INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*), void * param) { ENSURE_MSAN_INITED(); // for GetTlsSize() __sanitizer_pthread_attr_t myattr; if (!attr) { pthread_attr_init(&myattr); attr = &myattr; } AdjustStackSize(attr); MsanThread *t = MsanThread::Create(callback, param); int res = REAL(pthread_create)(th, attr, MsanThreadStartFunc, t); if (attr == &myattr) pthread_attr_destroy(&myattr); if (!res) { __msan_unpoison(th, __sanitizer::pthread_t_sz); } return res; } INTERCEPTOR(int, pthread_key_create, __sanitizer_pthread_key_t *key, void (*dtor)(void *value)) { if (msan_init_is_running) return REAL(pthread_key_create)(key, dtor); ENSURE_MSAN_INITED(); int res = REAL(pthread_key_create)(key, dtor); if (!res && key) __msan_unpoison(key, sizeof(*key)); return res; } INTERCEPTOR(int, pthread_join, void *th, void **retval) { ENSURE_MSAN_INITED(); int res = REAL(pthread_join)(th, retval); if (!res && retval) __msan_unpoison(retval, sizeof(*retval)); return res; } extern char *tzname[2]; INTERCEPTOR(void, tzset, int fake) { ENSURE_MSAN_INITED(); REAL(tzset)(fake); if (tzname[0]) __msan_unpoison(tzname[0], REAL(strlen)(tzname[0]) + 1); if (tzname[1]) __msan_unpoison(tzname[1], REAL(strlen)(tzname[1]) + 1); return; } struct MSanAtExitRecord { void (*func)(void *arg); void *arg; }; void MSanAtExitWrapper(void *arg) { UnpoisonParam(1); MSanAtExitRecord *r = (MSanAtExitRecord *)arg; r->func(r->arg); InternalFree(r); } // Unpoison argument shadow for C++ module destructors. 
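// The record is heap-allocated because __cxa_atexit forwards only a single
// pointer argument to the callback; MSanAtExitWrapper frees it after invoking
// the user function.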
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, void *dso_handle) { if (msan_init_is_running) return REAL(__cxa_atexit)(func, arg, dso_handle); ENSURE_MSAN_INITED(); MSanAtExitRecord *r = (MSanAtExitRecord *)InternalAlloc(sizeof(MSanAtExitRecord)); r->func = func; r->arg = arg; return REAL(__cxa_atexit)(MSanAtExitWrapper, r, dso_handle); } DECLARE_REAL(int, shmctl, int shmid, int cmd, void *buf) INTERCEPTOR(void *, shmat, int shmid, const void *shmaddr, int shmflg) { ENSURE_MSAN_INITED(); void *p = REAL(shmat)(shmid, shmaddr, shmflg); if (p != (void *)-1) { __sanitizer_shmid_ds ds; int res = REAL(shmctl)(shmid, shmctl_ipc_stat, &ds); if (!res) { __msan_unpoison(p, ds.shm_segsz); } } return p; } static void BeforeFork() { StackDepotLockAll(); ChainedOriginDepotLockAll(); } static void AfterFork() { ChainedOriginDepotUnlockAll(); StackDepotUnlockAll(); } INTERCEPTOR(int, fork, void) { ENSURE_MSAN_INITED(); BeforeFork(); int pid = REAL(fork)(); AfterFork(); return pid; } INTERCEPTOR(int, openpty, int *amaster, int *aslave, char *name, const void *termp, const void *winp) { ENSURE_MSAN_INITED(); InterceptorScope interceptor_scope; int res = REAL(openpty)(amaster, aslave, name, termp, winp); if (!res) { __msan_unpoison(amaster, sizeof(*amaster)); __msan_unpoison(aslave, sizeof(*aslave)); } return res; } INTERCEPTOR(int, forkpty, int *amaster, char *name, const void *termp, const void *winp) { ENSURE_MSAN_INITED(); InterceptorScope interceptor_scope; int res = REAL(forkpty)(amaster, name, termp, winp); if (res != -1) __msan_unpoison(amaster, sizeof(*amaster)); return res; } struct MSanInterceptorContext { bool in_interceptor_scope; }; namespace __msan { int OnExit() { // FIXME: ask frontend whether we need to return failure. return 0; } } // namespace __msan // A version of CHECK_UNPOISONED using a saved scope value. Used in common // interceptors. #define CHECK_UNPOISONED_CTX(ctx, x, n) \ do { \ if (!((MSanInterceptorContext *)ctx)->in_interceptor_scope) \ CHECK_UNPOISONED_0(x, n); \ } while (0) #define MSAN_INTERCEPT_FUNC(name) \ do { \ if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \ VReport(1, "MemorySanitizer: failed to intercept '" #name "'\n"); \ } while (0) #define MSAN_INTERCEPT_FUNC_VER(name, ver) \ do { \ if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \ VReport( \ 1, "MemorySanitizer: failed to intercept '" #name "@@" #ver "'\n"); \ } while (0) #define COMMON_INTERCEPT_FUNCTION(name) MSAN_INTERCEPT_FUNC(name) #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \ MSAN_INTERCEPT_FUNC_VER(name, ver) #define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) \ UnpoisonParam(count) #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ __msan_unpoison(ptr, size) #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ CHECK_UNPOISONED_CTX(ctx, ptr, size) #define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ptr, size) \ __msan_unpoison(ptr, size) #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) 
\ if (msan_init_is_running) return REAL(func)(__VA_ARGS__); \ ENSURE_MSAN_INITED(); \ MSanInterceptorContext msan_ctx = {IsInInterceptorScope()}; \ ctx = (void *)&msan_ctx; \ (void)ctx; \ InterceptorScope interceptor_scope; \ __msan_unpoison(__errno_location(), sizeof(int)); /* NOLINT */ #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ do { \ } while (false) // FIXME #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ do { \ } while (false) // FIXME #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ do { \ link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE((handle)); \ if (filename && map) \ ForEachMappedRegion(map, __msan_unpoison); \ } while (false) #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ if (MsanThread *t = GetCurrentThread()) { \ *begin = t->tls_begin(); \ *end = t->tls_end(); \ } else { \ *begin = *end = 0; \ } #define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \ { \ (void)ctx; \ return __msan_memset(block, c, size); \ } #define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \ { \ (void)ctx; \ return __msan_memmove(to, from, size); \ } #define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \ { \ (void)ctx; \ return __msan_memcpy(to, from, size); \ } #define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) \ do { \ GET_STORE_STACK_TRACE; \ CopyShadowAndOrigin(to, from, size, &stack); \ __msan_unpoison(to + size, 1); \ } while (false) #include "sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_common_interceptors.inc" #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s) #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \ do { \ } while (false) #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ do { \ } while (false) #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) __msan_unpoison(p, s) #include "sanitizer_common/sanitizer_common_syscalls.inc" struct dlinfo { char *dli_fname; void *dli_fbase; char *dli_sname; void *dli_saddr; }; INTERCEPTOR(int, dladdr, void *addr, dlinfo *info) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, dladdr, addr, info); int res = REAL(dladdr)(addr, info); if (res != 0) { __msan_unpoison(info, sizeof(*info)); if (info->dli_fname) __msan_unpoison(info->dli_fname, REAL(strlen)(info->dli_fname) + 1); if (info->dli_sname) __msan_unpoison(info->dli_sname, REAL(strlen)(info->dli_sname) + 1); } return res; } INTERCEPTOR(char *, dlerror, int fake) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, dlerror, fake); char *res = REAL(dlerror)(fake); if (res) __msan_unpoison(res, REAL(strlen)(res) + 1); return res; } typedef int (*dl_iterate_phdr_cb)(__sanitizer_dl_phdr_info *info, SIZE_T size, void *data); struct dl_iterate_phdr_data { dl_iterate_phdr_cb callback; void *data; }; static int msan_dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size, void *data) { if (info) { __msan_unpoison(info, size); if (info->dlpi_phdr && info->dlpi_phnum) __msan_unpoison(info->dlpi_phdr, struct_ElfW_Phdr_sz * info->dlpi_phnum); if (info->dlpi_name) __msan_unpoison(info->dlpi_name, REAL(strlen)(info->dlpi_name) + 1); } dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data; 
UnpoisonParam(3); return cbdata->callback(info, size, cbdata->data); } INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb callback, void *data) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, dl_iterate_phdr, callback, data); dl_iterate_phdr_data cbdata; cbdata.callback = callback; cbdata.data = data; int res = REAL(dl_iterate_phdr)(msan_dl_iterate_phdr_cb, (void *)&cbdata); return res; } // wchar_t *wcschr(const wchar_t *wcs, wchar_t wc); INTERCEPTOR(wchar_t *, wcschr, void *s, wchar_t wc, void *ps) { ENSURE_MSAN_INITED(); wchar_t *res = REAL(wcschr)(s, wc, ps); return res; } // wchar_t *wcscpy(wchar_t *dest, const wchar_t *src); INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) { ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; wchar_t *res = REAL(wcscpy)(dest, src); CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1), &stack); return res; } INTERCEPTOR(wchar_t *, wcsncpy, wchar_t *dest, const wchar_t *src, SIZE_T n) { // NOLINT ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; SIZE_T copy_size = REAL(wcsnlen)(src, n); if (copy_size < n) copy_size++; // trailing \0 wchar_t *res = REAL(wcsncpy)(dest, src, n); // NOLINT CopyShadowAndOrigin(dest, src, copy_size * sizeof(wchar_t), &stack); __msan_unpoison(dest + copy_size, (n - copy_size) * sizeof(wchar_t)); return res; } // These interface functions reside here so that they can use // REAL(memset), etc. void __msan_unpoison(const void *a, uptr size) { if (!MEM_IS_APP(a)) return; SetShadow(a, size, 0); } void __msan_poison(const void *a, uptr size) { if (!MEM_IS_APP(a)) return; SetShadow(a, size, __msan::flags()->poison_heap_with_zeroes ? 0 : -1); } void __msan_poison_stack(void *a, uptr size) { if (!MEM_IS_APP(a)) return; SetShadow(a, size, __msan::flags()->poison_stack_with_zeroes ? 
0 : -1); } void __msan_clear_and_unpoison(void *a, uptr size) { REAL(memset)(a, 0, size); SetShadow(a, size, 0); } void *__msan_memcpy(void *dest, const void *src, SIZE_T n) { if (!msan_inited) return internal_memcpy(dest, src, n); if (msan_init_is_running || __msan::IsInSymbolizer()) return REAL(memcpy)(dest, src, n); ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; void *res = REAL(memcpy)(dest, src, n); CopyShadowAndOrigin(dest, src, n, &stack); return res; } void *__msan_memset(void *s, int c, SIZE_T n) { if (!msan_inited) return internal_memset(s, c, n); if (msan_init_is_running) return REAL(memset)(s, c, n); ENSURE_MSAN_INITED(); void *res = REAL(memset)(s, c, n); __msan_unpoison(s, n); return res; } void *__msan_memmove(void *dest, const void *src, SIZE_T n) { if (!msan_inited) return internal_memmove(dest, src, n); if (msan_init_is_running) return REAL(memmove)(dest, src, n); ENSURE_MSAN_INITED(); GET_STORE_STACK_TRACE; void *res = REAL(memmove)(dest, src, n); MoveShadowAndOrigin(dest, src, n, &stack); return res; } void __msan_unpoison_string(const char* s) { if (!MEM_IS_APP(s)) return; __msan_unpoison(s, REAL(strlen)(s) + 1); } namespace __msan { void InitializeInterceptors() { static int inited = 0; CHECK_EQ(inited, 0); InitializeCommonInterceptors(); INTERCEPT_FUNCTION(mmap); MSAN_MAYBE_INTERCEPT_MMAP64; INTERCEPT_FUNCTION(posix_memalign); MSAN_MAYBE_INTERCEPT_MEMALIGN; INTERCEPT_FUNCTION(__libc_memalign); INTERCEPT_FUNCTION(valloc); MSAN_MAYBE_INTERCEPT_PVALLOC; INTERCEPT_FUNCTION(malloc); INTERCEPT_FUNCTION(calloc); INTERCEPT_FUNCTION(realloc); INTERCEPT_FUNCTION(free); MSAN_MAYBE_INTERCEPT_CFREE; INTERCEPT_FUNCTION(malloc_usable_size); MSAN_MAYBE_INTERCEPT_MALLINFO; MSAN_MAYBE_INTERCEPT_MALLOPT; MSAN_MAYBE_INTERCEPT_MALLOC_STATS; INTERCEPT_FUNCTION(fread); MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED; INTERCEPT_FUNCTION(readlink); INTERCEPT_FUNCTION(memccpy); INTERCEPT_FUNCTION(mempcpy); INTERCEPT_FUNCTION(bcopy); INTERCEPT_FUNCTION(wmemset); INTERCEPT_FUNCTION(wmemcpy); INTERCEPT_FUNCTION(wmempcpy); INTERCEPT_FUNCTION(wmemmove); INTERCEPT_FUNCTION(strcpy); // NOLINT INTERCEPT_FUNCTION(stpcpy); // NOLINT INTERCEPT_FUNCTION(strdup); MSAN_MAYBE_INTERCEPT___STRDUP; INTERCEPT_FUNCTION(strncpy); // NOLINT INTERCEPT_FUNCTION(gcvt); INTERCEPT_FUNCTION(strcat); // NOLINT INTERCEPT_FUNCTION(strncat); // NOLINT INTERCEPT_STRTO(strtod); INTERCEPT_STRTO(strtof); INTERCEPT_STRTO(strtold); INTERCEPT_STRTO(strtol); INTERCEPT_STRTO(strtoul); INTERCEPT_STRTO(strtoll); INTERCEPT_STRTO(strtoull); INTERCEPT_STRTO(wcstod); INTERCEPT_STRTO(wcstof); INTERCEPT_STRTO(wcstold); INTERCEPT_STRTO(wcstol); INTERCEPT_STRTO(wcstoul); INTERCEPT_STRTO(wcstoll); INTERCEPT_STRTO(wcstoull); #ifdef SANITIZER_NLDBL_VERSION INTERCEPT_FUNCTION_VER(vswprintf, SANITIZER_NLDBL_VERSION); INTERCEPT_FUNCTION_VER(swprintf, SANITIZER_NLDBL_VERSION); #else INTERCEPT_FUNCTION(vswprintf); INTERCEPT_FUNCTION(swprintf); #endif INTERCEPT_FUNCTION(strxfrm); INTERCEPT_FUNCTION(strxfrm_l); INTERCEPT_FUNCTION(strftime); INTERCEPT_FUNCTION(strftime_l); MSAN_MAYBE_INTERCEPT___STRFTIME_L; INTERCEPT_FUNCTION(wcsftime); INTERCEPT_FUNCTION(wcsftime_l); MSAN_MAYBE_INTERCEPT___WCSFTIME_L; INTERCEPT_FUNCTION(mbtowc); INTERCEPT_FUNCTION(mbrtowc); INTERCEPT_FUNCTION(wcslen); INTERCEPT_FUNCTION(wcsnlen); INTERCEPT_FUNCTION(wcschr); INTERCEPT_FUNCTION(wcscpy); INTERCEPT_FUNCTION(wcsncpy); INTERCEPT_FUNCTION(wcscmp); INTERCEPT_FUNCTION(getenv); INTERCEPT_FUNCTION(setenv); INTERCEPT_FUNCTION(putenv); INTERCEPT_FUNCTION(gettimeofday); INTERCEPT_FUNCTION(fcvt); 
MSAN_MAYBE_INTERCEPT___FXSTAT; MSAN_INTERCEPT_FSTATAT; MSAN_MAYBE_INTERCEPT___FXSTAT64; MSAN_MAYBE_INTERCEPT___FXSTATAT64; INTERCEPT_FUNCTION(pipe); INTERCEPT_FUNCTION(pipe2); INTERCEPT_FUNCTION(socketpair); INTERCEPT_FUNCTION(fgets); MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED; INTERCEPT_FUNCTION(getrlimit); MSAN_MAYBE_INTERCEPT_GETRLIMIT64; MSAN_MAYBE_INTERCEPT_PRLIMIT; MSAN_MAYBE_INTERCEPT_PRLIMIT64; MSAN_INTERCEPT_UNAME; INTERCEPT_FUNCTION(gethostname); MSAN_MAYBE_INTERCEPT_EPOLL_WAIT; MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT; INTERCEPT_FUNCTION(dladdr); INTERCEPT_FUNCTION(dlerror); INTERCEPT_FUNCTION(dl_iterate_phdr); INTERCEPT_FUNCTION(getrusage); INTERCEPT_FUNCTION(sigaction); INTERCEPT_FUNCTION(signal); #if defined(__mips__) INTERCEPT_FUNCTION_VER(pthread_create, "GLIBC_2.2"); #else INTERCEPT_FUNCTION(pthread_create); #endif INTERCEPT_FUNCTION(pthread_key_create); INTERCEPT_FUNCTION(pthread_join); INTERCEPT_FUNCTION(tzset); INTERCEPT_FUNCTION(__cxa_atexit); INTERCEPT_FUNCTION(shmat); INTERCEPT_FUNCTION(fork); INTERCEPT_FUNCTION(openpty); INTERCEPT_FUNCTION(forkpty); inited = 1; } } // namespace __msan Index: vendor/compiler-rt/dist/lib/sanitizer_common/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/CMakeLists.txt (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/CMakeLists.txt (revision 320961) @@ -1,242 +1,246 @@ # Build system for the common Sanitizer runtime support library components. # These components are shared between AddressSanitizer and ThreadSanitizer. set(SANITIZER_SOURCES_NOTERMINATION sanitizer_allocator.cc sanitizer_common.cc sanitizer_deadlock_detector1.cc sanitizer_deadlock_detector2.cc + sanitizer_errno.cc sanitizer_flags.cc sanitizer_flag_parser.cc sanitizer_libc.cc sanitizer_libignore.cc sanitizer_linux.cc sanitizer_linux_s390.cc sanitizer_mac.cc sanitizer_persistent_allocator.cc sanitizer_platform_limits_linux.cc sanitizer_platform_limits_posix.cc sanitizer_posix.cc sanitizer_printf.cc sanitizer_procmaps_common.cc sanitizer_procmaps_freebsd.cc sanitizer_procmaps_linux.cc sanitizer_procmaps_mac.cc sanitizer_stackdepot.cc sanitizer_stacktrace.cc sanitizer_stacktrace_printer.cc sanitizer_stoptheworld_mac.cc sanitizer_suppressions.cc sanitizer_symbolizer.cc sanitizer_symbolizer_libbacktrace.cc sanitizer_symbolizer_mac.cc sanitizer_symbolizer_win.cc sanitizer_tls_get_addr.cc sanitizer_thread_registry.cc sanitizer_win.cc) if(UNIX AND NOT APPLE) list(APPEND SANITIZER_SOURCES_NOTERMINATION sanitizer_linux_x86_64.S) list(APPEND SANITIZER_SOURCES_NOTERMINATION sanitizer_linux_mips64.S) endif() set(SANITIZER_SOURCES ${SANITIZER_SOURCES_NOTERMINATION} sanitizer_termination.cc) # Libc functions stubs. These sources should be linked instead of # SANITIZER_LIBCDEP_SOURCES when sanitizer_common library must not depend on # libc. set(SANITIZER_NOLIBC_SOURCES sanitizer_common_nolibc.cc) set(SANITIZER_LIBCDEP_SOURCES sanitizer_common_libcdep.cc sancov_flags.cc sanitizer_coverage_libcdep_new.cc sanitizer_coverage_win_sections.cc sanitizer_linux_libcdep.cc + sanitizer_mac_libcdep.cc sanitizer_posix_libcdep.cc sanitizer_stacktrace_libcdep.cc sanitizer_stoptheworld_linux_libcdep.cc sanitizer_symbolizer_libcdep.cc sanitizer_symbolizer_posix_libcdep.cc sanitizer_unwind_linux_libcdep.cc) # Explicitly list all sanitizer_common headers. Not all of these are # included in sanitizer_common source files, but we need to depend on # headers when building our custom unit tests. 
set(SANITIZER_HEADERS sanitizer_addrhashmap.h sanitizer_allocator.h sanitizer_allocator_bytemap.h sanitizer_allocator_combined.h sanitizer_allocator_interface.h sanitizer_allocator_internal.h sanitizer_allocator_local_cache.h sanitizer_allocator_primary32.h sanitizer_allocator_primary64.h sanitizer_allocator_secondary.h sanitizer_allocator_size_class_map.h sanitizer_allocator_stats.h sanitizer_atomic.h sanitizer_atomic_clang.h sanitizer_atomic_msvc.h sanitizer_bitvector.h sanitizer_bvgraph.h sanitizer_common.h sanitizer_common_interceptors.inc sanitizer_common_interceptors_ioctl.inc sanitizer_common_interceptors_format.inc sanitizer_common_syscalls.inc sanitizer_deadlock_detector.h sanitizer_deadlock_detector_interface.h + sanitizer_errno.h + sanitizer_errno_codes.h sanitizer_flag_parser.h sanitizer_flags.h sanitizer_flags.inc sanitizer_interface_internal.h sanitizer_internal_defs.h sanitizer_lfstack.h sanitizer_libc.h sanitizer_libignore.h sanitizer_linux.h sanitizer_list.h sanitizer_mac.h sanitizer_mutex.h sanitizer_persistent_allocator.h sanitizer_placement_new.h sanitizer_platform.h sanitizer_platform_interceptors.h sanitizer_platform_limits_posix.h sanitizer_posix.h sanitizer_procmaps.h sanitizer_quarantine.h sanitizer_report_decorator.h sanitizer_stackdepot.h sanitizer_stackdepotbase.h sanitizer_stacktrace.h sanitizer_stacktrace_printer.h sanitizer_stoptheworld.h sanitizer_suppressions.h sanitizer_symbolizer.h sanitizer_symbolizer_internal.h sanitizer_symbolizer_libbacktrace.h sanitizer_symbolizer_mac.h sanitizer_syscall_generic.inc sanitizer_syscall_linux_x86_64.inc sanitizer_syscall_linux_aarch64.inc sanitizer_thread_registry.h sanitizer_win.h) include_directories(..) set(SANITIZER_COMMON_DEFINITIONS) include(CheckIncludeFile) append_have_file_definition(rpc/xdr.h HAVE_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS) append_have_file_definition(tirpc/rpc/xdr.h HAVE_TIRPC_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS) set(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS}) append_rtti_flag(OFF SANITIZER_CFLAGS) append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=570 SANITIZER_CFLAGS) append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors SANITIZER_CFLAGS) if (LLVM_ENABLE_PEDANTIC AND UNIX AND NOT APPLE) # With -pedantic, our .S files raise warnings about empty macro arguments # from __USER_LABEL_PREFIX__ being an empty arg to GLUE(). Unfortunately, # there is no simple way to test for an empty define, nor to disable just # that warning or to disable -pedantic. There is also no simple way to # remove -pedantic from just this file (we'd have to remove from # CMAKE_C*_FLAGS and re-add as a source property to all the non-.S files). 
set_source_files_properties(sanitizer_linux_x86_64.S PROPERTIES COMPILE_FLAGS "-w") set_source_files_properties(sanitizer_linux_mips64.S PROPERTIES COMPILE_FLAGS "-w") endif () if(APPLE) set(OS_OPTION OS ${SANITIZER_COMMON_SUPPORTED_OS}) endif() add_compiler_rt_object_libraries(RTSanitizerCommon ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_SOURCES} CFLAGS ${SANITIZER_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTSanitizerCommonNoTermination ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_SOURCES_NOTERMINATION} CFLAGS ${SANITIZER_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTSanitizerCommonNoLibc ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_NOLIBC_SOURCES} CFLAGS ${SANITIZER_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTSanitizerCommonLibc ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_LIBCDEP_SOURCES} CFLAGS ${SANITIZER_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) if(WIN32) add_compiler_rt_object_libraries(SanitizerCommonWeakInterception ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES sanitizer_win_weak_interception.cc CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DYNAMIC DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(SancovWeakInterception ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES sanitizer_coverage_win_weak_interception.cc CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DYNAMIC DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(SanitizerCommonDllThunk ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES sanitizer_win_dll_thunk.cc CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DLL_THUNK DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(SancovDllThunk ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES sanitizer_coverage_win_dll_thunk.cc sanitizer_coverage_win_sections.cc CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DLL_THUNK DEFS ${SANITIZER_COMMON_DEFINITIONS}) set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DSANITIZER_DYNAMIC_RUNTIME_THUNK") if(MSVC) list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-Zl") elseif(CMAKE_C_COMPILER_ID MATCHES Clang) list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-nodefaultlibs") endif() add_compiler_rt_object_libraries(SanitizerCommonDynamicRuntimeThunk ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES sanitizer_win_dynamic_runtime_thunk.cc CFLAGS ${SANITIZER_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(SancovDynamicRuntimeThunk ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES sanitizer_coverage_win_dynamic_runtime_thunk.cc sanitizer_coverage_win_sections.cc CFLAGS ${SANITIZER_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) endif() # Unit tests for common sanitizer runtime. 
if(COMPILER_RT_INCLUDE_TESTS)
  add_subdirectory(tests)
endif()
Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.h
===================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.h (revision 320960)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.h (revision 320961)
@@ -1,941 +1,942 @@
//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct StackTrace;
struct AddressInfo;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

#if defined(__powerpc__) || defined(__powerpc64__)
const uptr kCacheLineSize = 128;
#else
const uptr kCacheLineSize = 64;
#endif

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

uptr GetPageSize();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();

// Threads
tid_t GetTid();
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates out of memory condition, in that
// case returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                         const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
// that case returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out of memory errors, in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

// Find an available address space.
-uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+                              uptr *largest_gap_found);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
template<typename T>
class InternalScopedBuffer {
 public:
  explicit InternalScopedBuffer(uptr cnt) {
    cnt_ = cnt;
    ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
  }
  ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
  T &operator[](uptr i) { return ptr_[i]; }
  T *data() { return ptr_; }
  uptr size() { return cnt_ * sizeof(T); }

 private:
  T *ptr_;
  uptr cnt_;
  // Disallow copies and moves.
  InternalScopedBuffer(const InternalScopedBuffer &) = delete;
  InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
  InternalScopedBuffer(InternalScopedBuffer &&) = delete;
  InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
};

class InternalScopedString : public InternalScopedBuffer<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalScopedBuffer<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};

// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows to register tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

// IO
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                             \
  do {                                                                  \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__);              \
  } while (0)
#define VPrintf(level, ...)                                             \
  do {                                                                  \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__);              \
  } while (0)

// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;

struct ReportFile {
  void Write(const char *buffer, uptr length);
  bool SupportsColors();
  void SetReportPath(const char *path);

  // Don't use fields directly. They are only declared public to allow
  // aggregate initialization.

  // Protects fields below.
  StaticSpinMutex *mu;
  // Opened file descriptor. Defaults to stderr. It may be equal to
  // kInvalidFd, in which case new file will be opened when necessary.
  fd_t fd;
  // Path prefix of report file, set via __sanitizer_set_report_path.
  char path_prefix[kMaxPathLength];
  // Full path to report, obtained as <path_prefix>.PID
  char full_path[kMaxPathLength];
  // PID of the process that opened fd. If a fork() occurs,
  // the PID of child will be different from fd_pid.
  uptr fd_pid;

 private:
  void ReopenIfNecessary();
};
extern ReportFile report_file;

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

enum FileAccessMode { RdOnly, WrOnly, RdWr };

// Returns kInvalidFd on error.
fd_t OpenFile(const char *filename, FileAccessMode mode,
              error_t *errno_p = nullptr);
void CloseFile(fd_t);

// Return true on success, false on error.
bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
                  uptr *bytes_read = nullptr, error_t *error_p = nullptr);
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
                 uptr *bytes_written = nullptr, error_t *error_p = nullptr);
bool RenameFile(const char *oldpath, const char *newpath,
                error_t *error_p = nullptr);

// Scoped file handle closer.
struct FileCloser {
  explicit FileCloser(fd_t fd) : fd(fd) {}
  ~FileCloser() { CloseFile(fd); }
  fd_t fd;
};

bool SupportsColoredOutput(fd_t fd);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
// Maps given file to virtual memory, and returns pointer to it
// (or NULL if mapping fails). Stores the size of mmaped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirected to the file. The files will always be closed
// in parent process even in case of an error.
// The child process will close all fds after STDERR_FILENO
// before passing control to a program.
pid_t StartSubprocess(const char *filename, const char *const argv[],
                      fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
                      fd_t stderr_fd = kInvalidFd);
// Checks if specified process is still running
bool IsProcessRunning(pid_t pid);
// Waits for the process to finish and returns its exit code.
// Returns -1 in case of an error.
int WaitForProcess(pid_t pid);

u32 GetUid();
void ReExec();
char **GetArgv();
void PrintCmdline();
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
bool SanitizerSetThreadName(const char *name);
// Get the name of the current thread (no more than max_len bytes),
// return true on success. name should have space for at least max_len+1 bytes.
bool SanitizerGetThreadName(char *name, int max_len);

// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in the opposite order. The tools are
// strongly recommended to setup all callbacks during initialization, when there
// is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
const char *DescribeSignalOrException(int signo);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif

INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;
  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_ = Max(initial_capacity, (uptr)1);
    size_ = 0;
    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
  }
  void Destroy() {
    UnmapOrDie(data_, capacity_ * sizeof(T));
  }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity_);
    if (size_ == capacity_) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Resize(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const {
    return capacity_;
  }
  void resize(uptr new_size) {
    Resize(new_size);
    if (new_size > size_) {
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

 private:
  void Resize(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
                                 "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    T *old_data = data_;
    data_ = new_data;
    UnmapOrDie(old_data, capacity_ * sizeof(T));
    capacity_ = new_capacity;
  }

  T *data_;
  uptr capacity_;
  uptr size_;
};

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  explicit InternalMmapVector(uptr initial_capacity) {
    InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow evil constructors.
  InternalMmapVector(const InternalMmapVector&);
  void operator=(const InternalMmapVector&);
};

// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp((*v)[p], (*v)[j]))
        Swap((*v)[j], (*v)[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap((*v)[0], (*v)[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp((*v)[max_ind], (*v)[left]))
        max_ind = left;
      if (right < i && comp((*v)[max_ind], (*v)[right]))
        max_ind = right;
      if (max_ind != j)
        Swap((*v)[j], (*v)[max_ind]);
      else
        break;
    }
  }
}

// Works like std::lower_bound: finds the first element that is not less
// than the val.
template<class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64
};

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
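// Editorial note: a minimal usage sketch of the containers and algorithms
// above (hypothetical snippet, not part of the vendored sources).
// InternalMmapVector grows through MmapOrDie in power-of-two capacity steps,
// InternalSort is an in-place heapsort, and InternalLowerBound mirrors
// std::lower_bound:
//
//   struct UptrLess {
//     bool operator()(uptr a, uptr b) const { return a < b; }
//   };
//   InternalMmapVector<uptr> v(/*initial_capacity=*/4);
//   v.push_back(3); v.push_back(1); v.push_back(2);
//   InternalSort(&v, v.size(), UptrLess());   // v is now {1, 2, 3}.
//   // First element not less than 2; yields index 1.
//   uptr idx = InternalLowerBound(v, 0, v.size(), (uptr)2, UptrLess());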
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;

    AddressRange(uptr beg, uptr end, bool executable, bool writable)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {}
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : modules_(kInitialCapacity) {}
  ~ListOfModules() { clear(); }
  void init();
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }

  InternalMmapVector<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;

  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp,
                bool is_memory_access, WriteFlag write_flag)
      : context(context),
        addr(addr),
        pc(pc),
        sp(sp),
        bp(bp),
        is_memory_access(is_memory_access),
        write_flag(write_flag) {}

  static void DumpAllRegisters(void *context);

  // Creates signal context in a platform-specific manner.
  static SignalContext Create(void *siginfo, void *context);

  // Returns true if the "context" indicates a memory write.
  static WriteFlag GetWriteFlag(void *context);
};

void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);

void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for allocator_release_to_os_interval_ms common flag to
// indicate that sanitizer allocator should not attempt to release memory to
// OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG.
bool GetRandom(void *buffer, uptr length);

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H
Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc
===================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc (revision 320960)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc (revision 320961)
@@ -1,6459 +1,6460 @@
//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// This file should be included into the tool's interceptor file,
// which has to define its own macros:
//   COMMON_INTERCEPTOR_ENTER
//   COMMON_INTERCEPTOR_ENTER_NOIGNORE
//   COMMON_INTERCEPTOR_READ_RANGE
//   COMMON_INTERCEPTOR_WRITE_RANGE
//   COMMON_INTERCEPTOR_INITIALIZE_RANGE
//   COMMON_INTERCEPTOR_DIR_ACQUIRE
//   COMMON_INTERCEPTOR_FD_ACQUIRE
//   COMMON_INTERCEPTOR_FD_RELEASE
//   COMMON_INTERCEPTOR_FD_ACCESS
//   COMMON_INTERCEPTOR_SET_THREAD_NAME
//   COMMON_INTERCEPTOR_ON_DLOPEN
//   COMMON_INTERCEPTOR_ON_EXIT
//   COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
//   COMMON_INTERCEPTOR_MUTEX_POST_LOCK
//   COMMON_INTERCEPTOR_MUTEX_UNLOCK
//   COMMON_INTERCEPTOR_MUTEX_REPAIR
//   COMMON_INTERCEPTOR_SET_PTHREAD_NAME
//   COMMON_INTERCEPTOR_HANDLE_RECVMSG
//   COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
//   COMMON_INTERCEPTOR_MEMSET_IMPL
//   COMMON_INTERCEPTOR_MEMMOVE_IMPL
//   COMMON_INTERCEPTOR_MEMCPY_IMPL
//   COMMON_INTERCEPTOR_COPY_STRING
//   COMMON_INTERCEPTOR_STRNDUP_IMPL
//===----------------------------------------------------------------------===//

#include "interception/interception.h"
#include "sanitizer_addrhashmap.h"
+#include "sanitizer_errno.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_platform_interceptors.h"
#include "sanitizer_tls_get_addr.h"

#include <stdarg.h>

#if SANITIZER_INTERCEPTOR_HOOKS
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
  SANITIZER_INTERFACE_WEAK_DEF(void, f, __VA_ARGS__) {}
#else
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...)
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)
#endif  // SANITIZER_INTERCEPTOR_HOOKS

#if SANITIZER_WINDOWS && !defined(va_copy)
#define va_copy(dst, src) ((dst) = (src))
#endif // _WIN32

#if SANITIZER_FREEBSD
#define pthread_setname_np pthread_set_name_np
#define inet_aton __inet_aton
#define inet_pton __inet_pton
#define iconv __bsd_iconv
#endif

// Platform-specific options.
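// Editorial note: whether memcpy and memmove are distinct is a compile-time
// constant everywhere except Darwin, where the two may be aliases of a single
// implementation and the answer is only known at run time; the memcpy
// interceptor further below therefore dispatches through
// PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE.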
#if SANITIZER_MAC namespace __sanitizer { bool PlatformHasDifferentMemcpyAndMemmove(); } #define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \ (__sanitizer::PlatformHasDifferentMemcpyAndMemmove()) #elif SANITIZER_WINDOWS64 #define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false #else #define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true #endif // SANITIZER_MAC #ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE #define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {} #endif #ifndef COMMON_INTERCEPTOR_UNPOISON_PARAM #define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) {} #endif #ifndef COMMON_INTERCEPTOR_FD_ACCESS #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {} #endif #ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {} #endif #ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {} #endif #ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) {} #endif #ifndef COMMON_INTERCEPTOR_MUTEX_REPAIR #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) {} #endif #ifndef COMMON_INTERCEPTOR_MUTEX_INVALID #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) {} #endif #ifndef COMMON_INTERCEPTOR_HANDLE_RECVMSG #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg)) #endif #ifndef COMMON_INTERCEPTOR_FILE_OPEN #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) {} #endif #ifndef COMMON_INTERCEPTOR_FILE_CLOSE #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) {} #endif #ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {} #endif #ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() {} #endif #ifndef COMMON_INTERCEPTOR_ENTER_NOIGNORE #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, ...) \ COMMON_INTERCEPTOR_ENTER(ctx, __VA_ARGS__) #endif #ifndef COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0) #endif #define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \ common_flags()->strict_string_checks ? 
(REAL(strlen)(s)) + 1 : (n) ) #ifndef COMMON_INTERCEPTOR_ON_DLOPEN #define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \ CheckNoDeepBind(filename, flag); #endif #ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) *begin = *end = 0; #endif #ifndef COMMON_INTERCEPTOR_ACQUIRE #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) {} #endif #ifndef COMMON_INTERCEPTOR_RELEASE #define COMMON_INTERCEPTOR_RELEASE(ctx, u) {} #endif #ifndef COMMON_INTERCEPTOR_USER_CALLBACK_START #define COMMON_INTERCEPTOR_USER_CALLBACK_START() {} #endif #ifndef COMMON_INTERCEPTOR_USER_CALLBACK_END #define COMMON_INTERCEPTOR_USER_CALLBACK_END() {} #endif #ifdef SANITIZER_NLDBL_VERSION #define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \ COMMON_INTERCEPT_FUNCTION_VER(fn, SANITIZER_NLDBL_VERSION) #else #define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \ COMMON_INTERCEPT_FUNCTION(fn) #endif #ifndef COMMON_INTERCEPTOR_MEMSET_IMPL #define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \ { \ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \ return internal_memset(dst, v, size); \ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \ if (common_flags()->intercept_intrin) \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \ return REAL(memset)(dst, v, size); \ } #endif #ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL #define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \ { \ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \ return internal_memmove(dst, src, size); \ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \ if (common_flags()->intercept_intrin) { \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \ } \ return REAL(memmove)(dst, src, size); \ } #endif #ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL #define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \ { \ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \ return internal_memmove(dst, src, size); \ } \ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \ if (common_flags()->intercept_intrin) { \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \ } \ return REAL(memcpy)(dst, src, size); \ } #endif #ifndef COMMON_INTERCEPTOR_COPY_STRING #define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {} #endif #ifndef COMMON_INTERCEPTOR_STRNDUP_IMPL #define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size) \ COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size); \ uptr copy_length = internal_strnlen(s, size); \ char *new_mem = (char *)WRAP(malloc)(copy_length + 1); \ if (common_flags()->intercept_strndup) { \ COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1)); \ } \ COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \ internal_memcpy(new_mem, s, copy_length); \ new_mem[copy_length] = '\0'; \ return new_mem; #endif struct FileMetadata { // For open_memstream(). 
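  // The buffer pointer and size locations registered by the user's
  // open_memstream(&buf, &size) call; libc updates them on fflush/fclose.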
  char **addr;
  SIZE_T *size;
};

struct CommonInterceptorMetadata {
  enum {
    CIMT_INVALID = 0,
    CIMT_FILE
  } type;
  union {
    FileMetadata file;
  };
};

typedef AddrHashMap<CommonInterceptorMetadata, 31051> MetadataHashMap;

static MetadataHashMap *interceptor_metadata_map;

#if SI_NOT_WINDOWS
UNUSED static void SetInterceptorMetadata(__sanitizer_FILE *addr,
                                          const FileMetadata &file) {
  MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr);
  CHECK(h.created());
  h->type = CommonInterceptorMetadata::CIMT_FILE;
  h->file = file;
}

UNUSED static const FileMetadata *GetInterceptorMetadata(
    __sanitizer_FILE *addr) {
  MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr,
                            /* remove */ false,
                            /* create */ false);
  if (h.exists()) {
    CHECK(!h.created());
    CHECK(h->type == CommonInterceptorMetadata::CIMT_FILE);
    return &h->file;
  } else {
    return 0;
  }
}

UNUSED static void DeleteInterceptorMetadata(void *addr) {
  MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr, true);
  CHECK(h.exists());
}
#endif  // SI_NOT_WINDOWS

#if SANITIZER_INTERCEPT_STRLEN
INTERCEPTOR(SIZE_T, strlen, const char *s) {
  // Sometimes strlen is called prior to InitializeCommonInterceptors,
  // in which case the REAL(strlen) typically used in
  // COMMON_INTERCEPTOR_ENTER will fail. We use internal_strlen here
  // to handle that.
  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
    return internal_strlen(s);
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strlen, s);
  SIZE_T result = REAL(strlen)(s);
  if (common_flags()->intercept_strlen)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, result + 1);
  return result;
}
#define INIT_STRLEN COMMON_INTERCEPT_FUNCTION(strlen)
#else
#define INIT_STRLEN
#endif

#if SANITIZER_INTERCEPT_STRNLEN
INTERCEPTOR(SIZE_T, strnlen, const char *s, SIZE_T maxlen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strnlen, s, maxlen);
  SIZE_T length = REAL(strnlen)(s, maxlen);
  if (common_flags()->intercept_strlen)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, Min(length + 1, maxlen));
  return length;
}
#define INIT_STRNLEN COMMON_INTERCEPT_FUNCTION(strnlen)
#else
#define INIT_STRNLEN
#endif

#if SANITIZER_INTERCEPT_STRNDUP
INTERCEPTOR(char*, strndup, const char *s, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
}
#define INIT_STRNDUP COMMON_INTERCEPT_FUNCTION(strndup)
#else
#define INIT_STRNDUP
#endif  // SANITIZER_INTERCEPT_STRNDUP

#if SANITIZER_INTERCEPT___STRNDUP
INTERCEPTOR(char*, __strndup, const char *s, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
}
#define INIT___STRNDUP COMMON_INTERCEPT_FUNCTION(__strndup)
#else
#define INIT___STRNDUP
#endif  // SANITIZER_INTERCEPT___STRNDUP

#if SANITIZER_INTERCEPT_TEXTDOMAIN
INTERCEPTOR(char*, textdomain, const char *domainname) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
  if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
  char *domain = REAL(textdomain)(domainname);
  if (domain) {
    COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
  }
  return domain;
}
#define INIT_TEXTDOMAIN COMMON_INTERCEPT_FUNCTION(textdomain)
#else
#define INIT_TEXTDOMAIN
#endif

#if SANITIZER_INTERCEPT_STRCMP
static inline int CharCmpX(unsigned char c1, unsigned char c2) {
  return (c1 == c2) ? 0 : (c1 < c2) ?
-1 : 1; } DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc, const char *s1, const char *s2, int result) INTERCEPTOR(int, strcmp, const char *s1, const char *s2) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strcmp, s1, s2); unsigned char c1, c2; uptr i; for (i = 0;; i++) { c1 = (unsigned char)s1[i]; c2 = (unsigned char)s2[i]; if (c1 != c2 || c1 == '\0') break; } COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1); COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1); int result = CharCmpX(c1, c2); CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1, s2, result); return result; } DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, uptr called_pc, const char *s1, const char *s2, uptr n, int result) INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) { if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) return internal_strncmp(s1, s2, size); void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size); unsigned char c1 = 0, c2 = 0; uptr i; for (i = 0; i < size; i++) { c1 = (unsigned char)s1[i]; c2 = (unsigned char)s2[i]; if (c1 != c2 || c1 == '\0') break; } uptr i1 = i; uptr i2 = i; if (common_flags()->strict_string_checks) { for (; i1 < size && s1[i1]; i1++) {} for (; i2 < size && s2[i2]; i2++) {} } COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size)); COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size)); int result = CharCmpX(c1, c2); CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1, s2, size, result); return result; } #define INIT_STRCMP COMMON_INTERCEPT_FUNCTION(strcmp) #define INIT_STRNCMP COMMON_INTERCEPT_FUNCTION(strncmp) #else #define INIT_STRCMP #define INIT_STRNCMP #endif #if SANITIZER_INTERCEPT_STRCASECMP static inline int CharCaseCmp(unsigned char c1, unsigned char c2) { int c1_low = ToLower(c1); int c2_low = ToLower(c2); return c1_low - c2_low; } DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, uptr called_pc, const char *s1, const char *s2, int result) INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strcasecmp, s1, s2); unsigned char c1 = 0, c2 = 0; uptr i; for (i = 0;; i++) { c1 = (unsigned char)s1[i]; c2 = (unsigned char)s2[i]; if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break; } COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1); COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1); int result = CharCaseCmp(c1, c2); CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, GET_CALLER_PC(), s1, s2, result); return result; } DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc, const char *s1, const char *s2, uptr size, int result) INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, size); unsigned char c1 = 0, c2 = 0; uptr i; for (i = 0; i < size; i++) { c1 = (unsigned char)s1[i]; c2 = (unsigned char)s2[i]; if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break; } uptr i1 = i; uptr i2 = i; if (common_flags()->strict_string_checks) { for (; i1 < size && s1[i1]; i1++) {} for (; i2 < size && s2[i2]; i2++) {} } COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size)); COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size)); int result = CharCaseCmp(c1, c2); CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(), s1, s2, size, result); return result; } #define INIT_STRCASECMP COMMON_INTERCEPT_FUNCTION(strcasecmp) #define INIT_STRNCASECMP 
COMMON_INTERCEPT_FUNCTION(strncasecmp)
#else
#define INIT_STRCASECMP
#define INIT_STRNCASECMP
#endif

#if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR
static inline void StrstrCheck(void *ctx, char *r, const char *s1,
                               const char *s2) {
  uptr len1 = REAL(strlen)(s1);
  uptr len2 = REAL(strlen)(s2);
  COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
}
#endif

#if SANITIZER_INTERCEPT_STRSTR
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, uptr called_pc,
                              const char *s1, const char *s2, char *result)

INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
    return internal_strstr(s1, s2);
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strstr, s1, s2);
  char *r = REAL(strstr)(s1, s2);
  if (common_flags()->intercept_strstr)
    StrstrCheck(ctx, r, s1, s2);
  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, GET_CALLER_PC(), s1,
                             s2, r);
  return r;
}

#define INIT_STRSTR COMMON_INTERCEPT_FUNCTION(strstr);
#else
#define INIT_STRSTR
#endif

#if SANITIZER_INTERCEPT_STRCASESTR
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, uptr called_pc,
                              const char *s1, const char *s2, char *result)

INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strcasestr, s1, s2);
  char *r = REAL(strcasestr)(s1, s2);
  if (common_flags()->intercept_strstr)
    StrstrCheck(ctx, r, s1, s2);
  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, GET_CALLER_PC(),
                             s1, s2, r);
  return r;
}

#define INIT_STRCASESTR COMMON_INTERCEPT_FUNCTION(strcasestr);
#else
#define INIT_STRCASESTR
#endif

#if SANITIZER_INTERCEPT_STRTOK

INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strtok, str, delimiters);
  if (!common_flags()->intercept_strtok) {
    return REAL(strtok)(str, delimiters);
  }
  if (common_flags()->strict_string_checks) {
    // If strict_string_checks is enabled, we check the whole first argument
    // string on the first call (strtok saves this string in a static buffer
    // for subsequent calls). We do not need to check strtok's result.
    // As the delimiters can change, we check them every call.
    if (str != nullptr) {
      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
    }
    COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
                                  REAL(strlen)(delimiters) + 1);
    return REAL(strtok)(str, delimiters);
  } else {
    // However, when strict_string_checks is disabled we cannot check the
    // whole string on the first call. Instead, we check the result string
    // which is guaranteed to be a NULL-terminated substring of the first
    // argument. We also conservatively check one character of str and the
    // delimiters.
    if (str != nullptr) {
      COMMON_INTERCEPTOR_READ_STRING(ctx, str, 1);
    }
    COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
    char *result = REAL(strtok)(str, delimiters);
    if (result != nullptr) {
      COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
    } else if (str != nullptr) {
      // No delimiters were found, it's safe to assume that the entire str was
      // scanned.
COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1); } return result; } } #define INIT_STRTOK COMMON_INTERCEPT_FUNCTION(strtok) #else #define INIT_STRTOK #endif #if SANITIZER_INTERCEPT_MEMMEM DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc, const void *s1, SIZE_T len1, const void *s2, SIZE_T len2, void *result) INTERCEPTOR(void*, memmem, const void *s1, SIZE_T len1, const void *s2, SIZE_T len2) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, memmem, s1, len1, s2, len2); void *r = REAL(memmem)(s1, len1, s2, len2); if (common_flags()->intercept_memmem) { COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, len1); COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2); } CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, GET_CALLER_PC(), s1, len1, s2, len2, r); return r; } #define INIT_MEMMEM COMMON_INTERCEPT_FUNCTION(memmem); #else #define INIT_MEMMEM #endif // SANITIZER_INTERCEPT_MEMMEM #if SANITIZER_INTERCEPT_STRCHR INTERCEPTOR(char*, strchr, const char *s, int c) { void *ctx; if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) return internal_strchr(s, c); COMMON_INTERCEPTOR_ENTER(ctx, strchr, s, c); char *result = REAL(strchr)(s, c); if (common_flags()->intercept_strchr) { // Keep strlen as macro argument, as macro may ignore it. COMMON_INTERCEPTOR_READ_STRING(ctx, s, (result ? result - s : REAL(strlen)(s)) + 1); } return result; } #define INIT_STRCHR COMMON_INTERCEPT_FUNCTION(strchr) #else #define INIT_STRCHR #endif #if SANITIZER_INTERCEPT_STRCHRNUL INTERCEPTOR(char*, strchrnul, const char *s, int c) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strchrnul, s, c); char *result = REAL(strchrnul)(s, c); uptr len = result - s + 1; if (common_flags()->intercept_strchr) COMMON_INTERCEPTOR_READ_STRING(ctx, s, len); return result; } #define INIT_STRCHRNUL COMMON_INTERCEPT_FUNCTION(strchrnul) #else #define INIT_STRCHRNUL #endif #if SANITIZER_INTERCEPT_STRRCHR INTERCEPTOR(char*, strrchr, const char *s, int c) { void *ctx; if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) return internal_strrchr(s, c); COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c); if (common_flags()->intercept_strchr) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1); return REAL(strrchr)(s, c); } #define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr) #else #define INIT_STRRCHR #endif #if SANITIZER_INTERCEPT_STRSPN INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2); SIZE_T r = REAL(strspn)(s1, s2); if (common_flags()->intercept_strspn) { COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1); COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1); } return r; } INTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2); SIZE_T r = REAL(strcspn)(s1, s2); if (common_flags()->intercept_strspn) { COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1); COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1); } return r; } #define INIT_STRSPN \ COMMON_INTERCEPT_FUNCTION(strspn); \ COMMON_INTERCEPT_FUNCTION(strcspn); #else #define INIT_STRSPN #endif #if SANITIZER_INTERCEPT_STRPBRK INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2); char *r = REAL(strpbrk)(s1, s2); if (common_flags()->intercept_strpbrk) { COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1); COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? 
r - s1 + 1 : REAL(strlen)(s1) + 1); } return r; } #define INIT_STRPBRK COMMON_INTERCEPT_FUNCTION(strpbrk); #else #define INIT_STRPBRK #endif #if SANITIZER_INTERCEPT_MEMSET INTERCEPTOR(void *, memset, void *dst, int v, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size); } #define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset) #else #define INIT_MEMSET #endif #if SANITIZER_INTERCEPT_MEMMOVE INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size); } #define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove) #else #define INIT_MEMMOVE #endif #if SANITIZER_INTERCEPT_MEMCPY INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) { // On OS X, calling internal_memcpy here will cause memory corruptions, // because memcpy and memmove are actually aliases of the same // implementation. We need to use internal_memmove here. // N.B.: If we switch this to internal_ we'll have to use internal_memmove // due to memcpy being an alias of memmove on OS X. void *ctx; if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size); } else { COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size); } } #define INIT_MEMCPY \ do { \ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \ COMMON_INTERCEPT_FUNCTION(memcpy); \ } else { \ ASSIGN_REAL(memcpy, memmove); \ } \ CHECK(REAL(memcpy)); \ } while (false) #else #define INIT_MEMCPY #endif #if SANITIZER_INTERCEPT_MEMCMP DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc, const void *s1, const void *s2, uptr n, int result) INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) { if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) return internal_memcmp(a1, a2, size); void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, memcmp, a1, a2, size); if (common_flags()->intercept_memcmp) { if (common_flags()->strict_memcmp) { // Check the entire regions even if the first bytes of the buffers are // different. COMMON_INTERCEPTOR_READ_RANGE(ctx, a1, size); COMMON_INTERCEPTOR_READ_RANGE(ctx, a2, size); // Fallthrough to REAL(memcmp) below. } else { unsigned char c1 = 0, c2 = 0; const unsigned char *s1 = (const unsigned char*)a1; const unsigned char *s2 = (const unsigned char*)a2; uptr i; for (i = 0; i < size; i++) { c1 = s1[i]; c2 = s2[i]; if (c1 != c2) break; } COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size)); COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size)); int r = CharCmpX(c1, c2); CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1, a2, size, r); return r; } } int result = REAL(memcmp(a1, a2, size)); CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1, a2, size, result); return result; } #define INIT_MEMCMP COMMON_INTERCEPT_FUNCTION(memcmp) #else #define INIT_MEMCMP #endif #if SANITIZER_INTERCEPT_MEMCHR INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) { if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) return internal_memchr(s, c, n); void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n); #if SANITIZER_WINDOWS void *res; if (REAL(memchr)) { res = REAL(memchr)(s, c, n); } else { res = internal_memchr(s, c, n); } #else void *res = REAL(memchr)(s, c, n); #endif uptr len = res ? 
(char *)res - (const char *)s + 1 : n; COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len); return res; } #define INIT_MEMCHR COMMON_INTERCEPT_FUNCTION(memchr) #else #define INIT_MEMCHR #endif #if SANITIZER_INTERCEPT_MEMRCHR INTERCEPTOR(void*, memrchr, const void *s, int c, SIZE_T n) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, memrchr, s, c, n); COMMON_INTERCEPTOR_READ_RANGE(ctx, s, n); return REAL(memrchr)(s, c, n); } #define INIT_MEMRCHR COMMON_INTERCEPT_FUNCTION(memrchr) #else #define INIT_MEMRCHR #endif #if SANITIZER_INTERCEPT_FREXP INTERCEPTOR(double, frexp, double x, int *exp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp); // Assuming frexp() always writes to |exp|. COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp)); double res = REAL(frexp)(x, exp); return res; } #define INIT_FREXP COMMON_INTERCEPT_FUNCTION(frexp); #else #define INIT_FREXP #endif // SANITIZER_INTERCEPT_FREXP #if SANITIZER_INTERCEPT_FREXPF_FREXPL INTERCEPTOR(float, frexpf, float x, int *exp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. float res = REAL(frexpf)(x, exp); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp)); return res; } INTERCEPTOR(long double, frexpl, long double x, int *exp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. long double res = REAL(frexpl)(x, exp); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp)); return res; } #define INIT_FREXPF_FREXPL \ COMMON_INTERCEPT_FUNCTION(frexpf); \ COMMON_INTERCEPT_FUNCTION_LDBL(frexpl) #else #define INIT_FREXPF_FREXPL #endif // SANITIZER_INTERCEPT_FREXPF_FREXPL #if SI_NOT_WINDOWS static void write_iovec(void *ctx, struct __sanitizer_iovec *iovec, SIZE_T iovlen, SIZE_T maxlen) { for (SIZE_T i = 0; i < iovlen && maxlen; ++i) { SSIZE_T sz = Min(iovec[i].iov_len, maxlen); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec[i].iov_base, sz); maxlen -= sz; } } static void read_iovec(void *ctx, struct __sanitizer_iovec *iovec, SIZE_T iovlen, SIZE_T maxlen) { COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec) * iovlen); for (SIZE_T i = 0; i < iovlen && maxlen; ++i) { SSIZE_T sz = Min(iovec[i].iov_len, maxlen); COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec[i].iov_base, sz); maxlen -= sz; } } #endif #if SANITIZER_INTERCEPT_READ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(read)(fd, ptr, count); if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res); if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } #define INIT_READ COMMON_INTERCEPT_FUNCTION(read) #else #define INIT_READ #endif #if SANITIZER_INTERCEPT_FREAD INTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) { // libc file streams can call user-supplied functions, see fopencookie. void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, file); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
SIZE_T res = REAL(fread)(ptr, size, nmemb, file); if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res * size); return res; } #define INIT_FREAD COMMON_INTERCEPT_FUNCTION(fread) #else #define INIT_FREAD #endif #if SANITIZER_INTERCEPT_PREAD INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(pread)(fd, ptr, count, offset); if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res); if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } #define INIT_PREAD COMMON_INTERCEPT_FUNCTION(pread) #else #define INIT_PREAD #endif #if SANITIZER_INTERCEPT_PREAD64 INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(pread64)(fd, ptr, count, offset); if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res); if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } #define INIT_PREAD64 COMMON_INTERCEPT_FUNCTION(pread64) #else #define INIT_PREAD64 #endif #if SANITIZER_INTERCEPT_READV INTERCEPTOR_WITH_SUFFIX(SSIZE_T, readv, int fd, __sanitizer_iovec *iov, int iovcnt) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, readv, fd, iov, iovcnt); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); SSIZE_T res = REAL(readv)(fd, iov, iovcnt); if (res > 0) write_iovec(ctx, iov, iovcnt, res); if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } #define INIT_READV COMMON_INTERCEPT_FUNCTION(readv) #else #define INIT_READV #endif #if SANITIZER_INTERCEPT_PREADV INTERCEPTOR(SSIZE_T, preadv, int fd, __sanitizer_iovec *iov, int iovcnt, OFF_T offset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, preadv, fd, iov, iovcnt, offset); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); SSIZE_T res = REAL(preadv)(fd, iov, iovcnt, offset); if (res > 0) write_iovec(ctx, iov, iovcnt, res); if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } #define INIT_PREADV COMMON_INTERCEPT_FUNCTION(preadv) #else #define INIT_PREADV #endif #if SANITIZER_INTERCEPT_PREADV64 INTERCEPTOR(SSIZE_T, preadv64, int fd, __sanitizer_iovec *iov, int iovcnt, OFF64_T offset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, preadv64, fd, iov, iovcnt, offset); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); SSIZE_T res = REAL(preadv64)(fd, iov, iovcnt, offset); if (res > 0) write_iovec(ctx, iov, iovcnt, res); if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } #define INIT_PREADV64 COMMON_INTERCEPT_FUNCTION(preadv64) #else #define INIT_PREADV64 #endif #if SANITIZER_INTERCEPT_WRITE INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); SSIZE_T res = REAL(write)(fd, ptr, count); // FIXME: this check should be _before_ the call to REAL(write), not after if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res); return res; } #define INIT_WRITE COMMON_INTERCEPT_FUNCTION(write) #else #define INIT_WRITE #endif #if 
SANITIZER_INTERCEPT_FWRITE INTERCEPTOR(SIZE_T, fwrite, const void *p, uptr size, uptr nmemb, void *file) { // libc file streams can call user-supplied functions, see fopencookie. void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, file); SIZE_T res = REAL(fwrite)(p, size, nmemb, file); if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, p, res * size); return res; } #define INIT_FWRITE COMMON_INTERCEPT_FUNCTION(fwrite) #else #define INIT_FWRITE #endif #if SANITIZER_INTERCEPT_PWRITE INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset); if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res); return res; } #define INIT_PWRITE COMMON_INTERCEPT_FUNCTION(pwrite) #else #define INIT_PWRITE #endif #if SANITIZER_INTERCEPT_PWRITE64 INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count, OFF64_T offset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset); if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res); return res; } #define INIT_PWRITE64 COMMON_INTERCEPT_FUNCTION(pwrite64) #else #define INIT_PWRITE64 #endif #if SANITIZER_INTERCEPT_WRITEV INTERCEPTOR_WITH_SUFFIX(SSIZE_T, writev, int fd, __sanitizer_iovec *iov, int iovcnt) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, writev, fd, iov, iovcnt); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); SSIZE_T res = REAL(writev)(fd, iov, iovcnt); if (res > 0) read_iovec(ctx, iov, iovcnt, res); return res; } #define INIT_WRITEV COMMON_INTERCEPT_FUNCTION(writev) #else #define INIT_WRITEV #endif #if SANITIZER_INTERCEPT_PWRITEV INTERCEPTOR(SSIZE_T, pwritev, int fd, __sanitizer_iovec *iov, int iovcnt, OFF_T offset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pwritev, fd, iov, iovcnt, offset); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); SSIZE_T res = REAL(pwritev)(fd, iov, iovcnt, offset); if (res > 0) read_iovec(ctx, iov, iovcnt, res); return res; } #define INIT_PWRITEV COMMON_INTERCEPT_FUNCTION(pwritev) #else #define INIT_PWRITEV #endif #if SANITIZER_INTERCEPT_PWRITEV64 INTERCEPTOR(SSIZE_T, pwritev64, int fd, __sanitizer_iovec *iov, int iovcnt, OFF64_T offset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pwritev64, fd, iov, iovcnt, offset); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); SSIZE_T res = REAL(pwritev64)(fd, iov, iovcnt, offset); if (res > 0) read_iovec(ctx, iov, iovcnt, res); return res; } #define INIT_PWRITEV64 COMMON_INTERCEPT_FUNCTION(pwritev64) #else #define INIT_PWRITEV64 #endif #if SANITIZER_INTERCEPT_PRCTL INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3, // NOLINT unsigned long arg4, unsigned long arg5) { // NOLINT void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5); static const int PR_SET_NAME = 15; int res = REAL(prctl(option, arg2, arg3, arg4, arg5)); if (option == PR_SET_NAME) { char buff[16]; internal_strncpy(buff, (char *)arg2, 15); buff[15] = 0; COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff); } return res; } #define INIT_PRCTL COMMON_INTERCEPT_FUNCTION(prctl) #else #define INIT_PRCTL #endif // SANITIZER_INTERCEPT_PRCTL #if 
SANITIZER_INTERCEPT_TIME INTERCEPTOR(unsigned long, time, unsigned long *t) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, time, t); unsigned long local_t; unsigned long res = REAL(time)(&local_t); if (t && res != (unsigned long)-1) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t)); *t = local_t; } return res; } #define INIT_TIME COMMON_INTERCEPT_FUNCTION(time); #else #define INIT_TIME #endif // SANITIZER_INTERCEPT_TIME #if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS static void unpoison_tm(void *ctx, __sanitizer_tm *tm) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm)); if (tm->tm_zone) { // Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone // can point to shared memory and tsan would report a data race. COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone, REAL(strlen(tm->tm_zone)) + 1); } } INTERCEPTOR(__sanitizer_tm *, localtime, unsigned long *timep) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, localtime, timep); __sanitizer_tm *res = REAL(localtime)(timep); if (res) { COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); unpoison_tm(ctx, res); } return res; } INTERCEPTOR(__sanitizer_tm *, localtime_r, unsigned long *timep, void *result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, localtime_r, timep, result); __sanitizer_tm *res = REAL(localtime_r)(timep, result); if (res) { COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); unpoison_tm(ctx, res); } return res; } INTERCEPTOR(__sanitizer_tm *, gmtime, unsigned long *timep) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, gmtime, timep); __sanitizer_tm *res = REAL(gmtime)(timep); if (res) { COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); unpoison_tm(ctx, res); } return res; } INTERCEPTOR(__sanitizer_tm *, gmtime_r, unsigned long *timep, void *result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, gmtime_r, timep, result); __sanitizer_tm *res = REAL(gmtime_r)(timep, result); if (res) { COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); unpoison_tm(ctx, res); } return res; } INTERCEPTOR(char *, ctime, unsigned long *timep) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. char *res = REAL(ctime)(timep); if (res) { COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); } return res; } INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. char *res = REAL(ctime_r)(timep, result); if (res) { COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); } return res; } INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
char *res = REAL(asctime)(tm); if (res) { COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); } return res; } INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. char *res = REAL(asctime_r)(tm, result); if (res) { COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); } return res; } INTERCEPTOR(long, mktime, __sanitizer_tm *tm) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, mktime, tm); COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_sec, sizeof(tm->tm_sec)); COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_min, sizeof(tm->tm_min)); COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_hour, sizeof(tm->tm_hour)); COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mday, sizeof(tm->tm_mday)); COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mon, sizeof(tm->tm_mon)); COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_year, sizeof(tm->tm_year)); COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_isdst, sizeof(tm->tm_isdst)); long res = REAL(mktime)(tm); if (res != -1) unpoison_tm(ctx, tm); return res; } #define INIT_LOCALTIME_AND_FRIENDS \ COMMON_INTERCEPT_FUNCTION(localtime); \ COMMON_INTERCEPT_FUNCTION(localtime_r); \ COMMON_INTERCEPT_FUNCTION(gmtime); \ COMMON_INTERCEPT_FUNCTION(gmtime_r); \ COMMON_INTERCEPT_FUNCTION(ctime); \ COMMON_INTERCEPT_FUNCTION(ctime_r); \ COMMON_INTERCEPT_FUNCTION(asctime); \ COMMON_INTERCEPT_FUNCTION(asctime_r); \ COMMON_INTERCEPT_FUNCTION(mktime); #else #define INIT_LOCALTIME_AND_FRIENDS #endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS #if SANITIZER_INTERCEPT_STRPTIME INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm); if (format) COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. char *res = REAL(strptime)(s, format, tm); COMMON_INTERCEPTOR_READ_STRING(ctx, s, res ? res - s : 0); if (res && tm) { // Do not call unpoison_tm here, because strptime does not, in fact, // initialize the entire struct tm. For example, tm_zone pointer is left // uninitialized. COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm)); } return res; } #define INIT_STRPTIME COMMON_INTERCEPT_FUNCTION(strptime); #else #define INIT_STRPTIME #endif #if SANITIZER_INTERCEPT_SCANF || SANITIZER_INTERCEPT_PRINTF #include "sanitizer_common_interceptors_format.inc" #define FORMAT_INTERCEPTOR_IMPL(name, vname, ...) \ { \ void *ctx; \ va_list ap; \ va_start(ap, format); \ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \ int res = WRAP(vname)(__VA_ARGS__, ap); \ va_end(ap); \ return res; \ } #endif #if SANITIZER_INTERCEPT_SCANF #define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) 
\ { \ void *ctx; \ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \ va_list aq; \ va_copy(aq, ap); \ int res = REAL(vname)(__VA_ARGS__); \ if (res > 0) \ scanf_common(ctx, res, allowGnuMalloc, format, aq); \ va_end(aq); \ return res; \ } INTERCEPTOR(int, vscanf, const char *format, va_list ap) VSCANF_INTERCEPTOR_IMPL(vscanf, true, format, ap) INTERCEPTOR(int, vsscanf, const char *str, const char *format, va_list ap) VSCANF_INTERCEPTOR_IMPL(vsscanf, true, str, format, ap) INTERCEPTOR(int, vfscanf, void *stream, const char *format, va_list ap) VSCANF_INTERCEPTOR_IMPL(vfscanf, true, stream, format, ap) #if SANITIZER_INTERCEPT_ISOC99_SCANF INTERCEPTOR(int, __isoc99_vscanf, const char *format, va_list ap) VSCANF_INTERCEPTOR_IMPL(__isoc99_vscanf, false, format, ap) INTERCEPTOR(int, __isoc99_vsscanf, const char *str, const char *format, va_list ap) VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap) INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap) VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap) #endif // SANITIZER_INTERCEPT_ISOC99_SCANF INTERCEPTOR(int, scanf, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(scanf, vscanf, format) INTERCEPTOR(int, fscanf, void *stream, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format) INTERCEPTOR(int, sscanf, const char *str, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format) #if SANITIZER_INTERCEPT_ISOC99_SCANF INTERCEPTOR(int, __isoc99_scanf, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format) INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format) INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format) #endif #endif #if SANITIZER_INTERCEPT_SCANF #define INIT_SCANF \ COMMON_INTERCEPT_FUNCTION_LDBL(scanf); \ COMMON_INTERCEPT_FUNCTION_LDBL(sscanf); \ COMMON_INTERCEPT_FUNCTION_LDBL(fscanf); \ COMMON_INTERCEPT_FUNCTION_LDBL(vscanf); \ COMMON_INTERCEPT_FUNCTION_LDBL(vsscanf); \ COMMON_INTERCEPT_FUNCTION_LDBL(vfscanf); #else #define INIT_SCANF #endif #if SANITIZER_INTERCEPT_ISOC99_SCANF #define INIT_ISOC99_SCANF \ COMMON_INTERCEPT_FUNCTION(__isoc99_scanf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_sscanf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf); #else #define INIT_ISOC99_SCANF #endif #if SANITIZER_INTERCEPT_PRINTF #define VPRINTF_INTERCEPTOR_ENTER(vname, ...) \ void *ctx; \ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \ va_list aq; \ va_copy(aq, ap); #define VPRINTF_INTERCEPTOR_RETURN() \ va_end(aq); #define VPRINTF_INTERCEPTOR_IMPL(vname, ...) \ { \ VPRINTF_INTERCEPTOR_ENTER(vname, __VA_ARGS__); \ if (common_flags()->check_printf) \ printf_common(ctx, format, aq); \ int res = REAL(vname)(__VA_ARGS__); \ VPRINTF_INTERCEPTOR_RETURN(); \ return res; \ } // FIXME: under ASan the REAL() call below may write to freed memory and // corrupt its metadata. See // https://github.com/google/sanitizers/issues/321. #define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...) 
\ { \ VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__) \ if (common_flags()->check_printf) { \ printf_common(ctx, format, aq); \ } \ int res = REAL(vname)(str, __VA_ARGS__); \ if (res >= 0) { \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, res + 1); \ } \ VPRINTF_INTERCEPTOR_RETURN(); \ return res; \ } // FIXME: under ASan the REAL() call below may write to freed memory and // corrupt its metadata. See // https://github.com/google/sanitizers/issues/321. #define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...) \ { \ VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__) \ if (common_flags()->check_printf) { \ printf_common(ctx, format, aq); \ } \ int res = REAL(vname)(str, size, __VA_ARGS__); \ if (res >= 0) { \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, Min(size, (SIZE_T)(res + 1))); \ } \ VPRINTF_INTERCEPTOR_RETURN(); \ return res; \ } // FIXME: under ASan the REAL() call below may write to freed memory and // corrupt its metadata. See // https://github.com/google/sanitizers/issues/321. #define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...) \ { \ VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__) \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, strp, sizeof(char *)); \ if (common_flags()->check_printf) { \ printf_common(ctx, format, aq); \ } \ int res = REAL(vname)(strp, __VA_ARGS__); \ if (res >= 0) { \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *strp, res + 1); \ } \ VPRINTF_INTERCEPTOR_RETURN(); \ return res; \ } INTERCEPTOR(int, vprintf, const char *format, va_list ap) VPRINTF_INTERCEPTOR_IMPL(vprintf, format, ap) INTERCEPTOR(int, vfprintf, __sanitizer_FILE *stream, const char *format, va_list ap) VPRINTF_INTERCEPTOR_IMPL(vfprintf, stream, format, ap) INTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format, va_list ap) VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap) #if SANITIZER_INTERCEPT_PRINTF_L INTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc, const char *format, va_list ap) VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap) INTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format) #endif // SANITIZER_INTERCEPT_PRINTF_L INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap) VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap) INTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap) VASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap) #if SANITIZER_INTERCEPT_ISOC99_PRINTF INTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap) VPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap) INTERCEPTOR(int, __isoc99_vfprintf, __sanitizer_FILE *stream, const char *format, va_list ap) VPRINTF_INTERCEPTOR_IMPL(__isoc99_vfprintf, stream, format, ap) INTERCEPTOR(int, __isoc99_vsnprintf, char *str, SIZE_T size, const char *format, va_list ap) VSNPRINTF_INTERCEPTOR_IMPL(__isoc99_vsnprintf, str, size, format, ap) INTERCEPTOR(int, __isoc99_vsprintf, char *str, const char *format, va_list ap) VSPRINTF_INTERCEPTOR_IMPL(__isoc99_vsprintf, str, format, ap) #endif // SANITIZER_INTERCEPT_ISOC99_PRINTF INTERCEPTOR(int, printf, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(printf, vprintf, format) INTERCEPTOR(int, fprintf, __sanitizer_FILE *stream, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(fprintf, vfprintf, stream, format) INTERCEPTOR(int, sprintf, char *str, const char *format, ...) 
// NOLINT FORMAT_INTERCEPTOR_IMPL(sprintf, vsprintf, str, format) // NOLINT INTERCEPTOR(int, snprintf, char *str, SIZE_T size, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(snprintf, vsnprintf, str, size, format) INTERCEPTOR(int, asprintf, char **strp, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format) #if SANITIZER_INTERCEPT_ISOC99_PRINTF INTERCEPTOR(int, __isoc99_printf, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format) INTERCEPTOR(int, __isoc99_fprintf, __sanitizer_FILE *stream, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(__isoc99_fprintf, __isoc99_vfprintf, stream, format) INTERCEPTOR(int, __isoc99_sprintf, char *str, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(__isoc99_sprintf, __isoc99_vsprintf, str, format) INTERCEPTOR(int, __isoc99_snprintf, char *str, SIZE_T size, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size, format) #endif // SANITIZER_INTERCEPT_ISOC99_PRINTF #endif // SANITIZER_INTERCEPT_PRINTF #if SANITIZER_INTERCEPT_PRINTF #define INIT_PRINTF \ COMMON_INTERCEPT_FUNCTION_LDBL(printf); \ COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \ COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \ COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \ COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \ COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \ COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \ COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \ COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \ COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf); #else #define INIT_PRINTF #endif #if SANITIZER_INTERCEPT_PRINTF_L #define INIT_PRINTF_L \ COMMON_INTERCEPT_FUNCTION(snprintf_l); \ COMMON_INTERCEPT_FUNCTION(vsnprintf_l); #else #define INIT_PRINTF_L #endif #if SANITIZER_INTERCEPT_ISOC99_PRINTF #define INIT_ISOC99_PRINTF \ COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_sprintf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_snprintf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_fprintf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_vprintf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_vsprintf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_vsnprintf); \ COMMON_INTERCEPT_FUNCTION(__isoc99_vfprintf); #else #define INIT_ISOC99_PRINTF #endif #if SANITIZER_INTERCEPT_IOCTL #include "sanitizer_common_interceptors_ioctl.inc" INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) { // We need a frame pointer, because we call into ioctl_common_[pre|post] which // can trigger a report and we need to be able to unwind through this // function. On Mac in debug mode we might not have a frame pointer, because // ioctl_common_[pre|post] doesn't get inlined here. ENABLE_FRAME_POINTER; void *ctx; va_list ap; va_start(ap, request); void *arg = va_arg(ap, void *); va_end(ap); COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg); CHECK(ioctl_initialized); // Note: TSan does not use common flags, and they are zero-initialized. // This effectively disables ioctl handling in TSan. if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg); // Although request is unsigned long, the rest of the interceptor uses it // as just "unsigned" to save space, because we know that all values fit in // "unsigned" - they are compile-time constants. 
const ioctl_desc *desc = ioctl_lookup(request); ioctl_desc decoded_desc; if (!desc) { VPrintf(2, "Decoding unknown ioctl 0x%x\n", request); if (!ioctl_decode(request, &decoded_desc)) Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request); else desc = &decoded_desc; } if (desc) ioctl_common_pre(ctx, desc, d, request, arg); int res = REAL(ioctl)(d, request, arg); // FIXME: some ioctls have different return values for success and failure. if (desc && res != -1) ioctl_common_post(ctx, desc, res, d, request, arg); return res; } #define INIT_IOCTL \ ioctl_init(); \ COMMON_INTERCEPT_FUNCTION(ioctl); #else #define INIT_IOCTL #endif #if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS || \ SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT || \ SANITIZER_INTERCEPT_GETPWENT_R || SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) { if (pwd) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd)); if (pwd->pw_name) COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_name, REAL(strlen)(pwd->pw_name) + 1); if (pwd->pw_passwd) COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_passwd, REAL(strlen)(pwd->pw_passwd) + 1); #if !SANITIZER_ANDROID if (pwd->pw_gecos) COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_gecos, REAL(strlen)(pwd->pw_gecos) + 1); #endif #if SANITIZER_MAC if (pwd->pw_class) COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_class, REAL(strlen)(pwd->pw_class) + 1); #endif if (pwd->pw_dir) COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_dir, REAL(strlen)(pwd->pw_dir) + 1); if (pwd->pw_shell) COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_shell, REAL(strlen)(pwd->pw_shell) + 1); } } static void unpoison_group(void *ctx, __sanitizer_group *grp) { if (grp) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp)); if (grp->gr_name) COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_name, REAL(strlen)(grp->gr_name) + 1); if (grp->gr_passwd) COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_passwd, REAL(strlen)(grp->gr_passwd) + 1); char **p = grp->gr_mem; for (; *p; ++p) { COMMON_INTERCEPTOR_INITIALIZE_RANGE(*p, REAL(strlen)(*p) + 1); } COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_mem, (p - grp->gr_mem + 1) * sizeof(*p)); } } #endif // SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS || // SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT || // SANITIZER_INTERCEPT_GETPWENT_R || // SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS #if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name); COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); __sanitizer_passwd *res = REAL(getpwnam)(name); if (res) unpoison_passwd(ctx, res); return res; } INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid); __sanitizer_passwd *res = REAL(getpwuid)(uid); if (res) unpoison_passwd(ctx, res); return res; } INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name); COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); __sanitizer_group *res = REAL(getgrnam)(name); if (res) unpoison_group(ctx, res); return res; } INTERCEPTOR(__sanitizer_group *, getgrgid, u32 gid) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid); __sanitizer_group *res = REAL(getgrgid)(gid); if (res) unpoison_group(ctx, res); return res; } #define INIT_GETPWNAM_AND_FRIENDS \ COMMON_INTERCEPT_FUNCTION(getpwnam); \ COMMON_INTERCEPT_FUNCTION(getpwuid); \ 
COMMON_INTERCEPT_FUNCTION(getgrnam); \ COMMON_INTERCEPT_FUNCTION(getgrgid); #else #define INIT_GETPWNAM_AND_FRIENDS #endif #if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd, char *buf, SIZE_T buflen, __sanitizer_passwd **result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result); COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result); if (!res) { if (result && *result) unpoison_passwd(ctx, *result); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); } if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); return res; } INTERCEPTOR(int, getpwuid_r, u32 uid, __sanitizer_passwd *pwd, char *buf, SIZE_T buflen, __sanitizer_passwd **result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result); if (!res) { if (result && *result) unpoison_passwd(ctx, *result); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); } if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); return res; } INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp, char *buf, SIZE_T buflen, __sanitizer_group **result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result); COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(getgrnam_r)(name, grp, buf, buflen, result); if (!res) { if (result && *result) unpoison_group(ctx, *result); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); } if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); return res; } INTERCEPTOR(int, getgrgid_r, u32 gid, __sanitizer_group *grp, char *buf, SIZE_T buflen, __sanitizer_group **result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result); if (!res) { if (result && *result) unpoison_group(ctx, *result); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); } if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); return res; } #define INIT_GETPWNAM_R_AND_FRIENDS \ COMMON_INTERCEPT_FUNCTION(getpwnam_r); \ COMMON_INTERCEPT_FUNCTION(getpwuid_r); \ COMMON_INTERCEPT_FUNCTION(getgrnam_r); \ COMMON_INTERCEPT_FUNCTION(getgrgid_r); #else #define INIT_GETPWNAM_R_AND_FRIENDS #endif #if SANITIZER_INTERCEPT_GETPWENT INTERCEPTOR(__sanitizer_passwd *, getpwent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpwent, dummy); __sanitizer_passwd *res = REAL(getpwent)(dummy); if (res) unpoison_passwd(ctx, res); return res; } INTERCEPTOR(__sanitizer_group *, getgrent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getgrent, dummy); __sanitizer_group *res = REAL(getgrent)(dummy); if (res) unpoison_group(ctx, res);; return res; } #define INIT_GETPWENT \ COMMON_INTERCEPT_FUNCTION(getpwent); \ COMMON_INTERCEPT_FUNCTION(getgrent); #else #define INIT_GETPWENT #endif #if SANITIZER_INTERCEPT_FGETPWENT INTERCEPTOR(__sanitizer_passwd *, fgetpwent, void *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent, fp); __sanitizer_passwd *res = REAL(fgetpwent)(fp); if (res) unpoison_passwd(ctx, res); return res; } INTERCEPTOR(__sanitizer_group *, fgetgrent, void *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent, fp); __sanitizer_group *res = REAL(fgetgrent)(fp); if (res) unpoison_group(ctx, res); return res; } #define INIT_FGETPWENT \ COMMON_INTERCEPT_FUNCTION(fgetpwent); \ COMMON_INTERCEPT_FUNCTION(fgetgrent); #else #define INIT_FGETPWENT #endif #if SANITIZER_INTERCEPT_GETPWENT_R INTERCEPTOR(int, getpwent_r, __sanitizer_passwd *pwbuf, char *buf, SIZE_T buflen, __sanitizer_passwd **pwbufp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp); if (!res) { if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); } if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp)); return res; } INTERCEPTOR(int, fgetpwent_r, void *fp, __sanitizer_passwd *pwbuf, char *buf, SIZE_T buflen, __sanitizer_passwd **pwbufp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp); if (!res) { if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); } if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp)); return res; } INTERCEPTOR(int, getgrent_r, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen, __sanitizer_group **pwbufp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp); if (!res) { if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); } if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp)); return res; } INTERCEPTOR(int, fgetgrent_r, void *fp, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen, __sanitizer_group **pwbufp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp); if (!res) { if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); } if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp)); return res; } #define INIT_GETPWENT_R \ COMMON_INTERCEPT_FUNCTION(getpwent_r); \ COMMON_INTERCEPT_FUNCTION(fgetpwent_r); \ COMMON_INTERCEPT_FUNCTION(getgrent_r); \ COMMON_INTERCEPT_FUNCTION(fgetgrent_r); #else #define INIT_GETPWENT_R #endif #if SANITIZER_INTERCEPT_SETPWENT // The only thing these interceptors do is disable any nested interceptors. // These functions may open nss modules and call uninstrumented functions from // them, and we don't want things like strlen() to trigger. INTERCEPTOR(void, setpwent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, setpwent, dummy); REAL(setpwent)(dummy); } INTERCEPTOR(void, endpwent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, endpwent, dummy); REAL(endpwent)(dummy); } INTERCEPTOR(void, setgrent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, setgrent, dummy); REAL(setgrent)(dummy); } INTERCEPTOR(void, endgrent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, endgrent, dummy); REAL(endgrent)(dummy); } #define INIT_SETPWENT \ COMMON_INTERCEPT_FUNCTION(setpwent); \ COMMON_INTERCEPT_FUNCTION(endpwent); \ COMMON_INTERCEPT_FUNCTION(setgrent); \ COMMON_INTERCEPT_FUNCTION(endgrent); #else #define INIT_SETPWENT #endif #if SANITIZER_INTERCEPT_CLOCK_GETTIME INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(clock_getres)(clk_id, tp); if (!res && tp) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz); } return res; } INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
int res = REAL(clock_gettime)(clk_id, tp); if (!res) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz); } return res; } INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp); COMMON_INTERCEPTOR_READ_RANGE(ctx, tp, struct_timespec_sz); return REAL(clock_settime)(clk_id, tp); } #define INIT_CLOCK_GETTIME \ COMMON_INTERCEPT_FUNCTION(clock_getres); \ COMMON_INTERCEPT_FUNCTION(clock_gettime); \ COMMON_INTERCEPT_FUNCTION(clock_settime); #else #define INIT_CLOCK_GETTIME #endif #if SANITIZER_INTERCEPT_GETITIMER INTERCEPTOR(int, getitimer, int which, void *curr_value) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(getitimer)(which, curr_value); if (!res && curr_value) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz); } return res; } INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value); if (new_value) COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerval_sz); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(setitimer)(which, new_value, old_value); if (!res && old_value) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz); } return res; } #define INIT_GETITIMER \ COMMON_INTERCEPT_FUNCTION(getitimer); \ COMMON_INTERCEPT_FUNCTION(setitimer); #else #define INIT_GETITIMER #endif #if SANITIZER_INTERCEPT_GLOB static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pglob, sizeof(*pglob)); // +1 for NULL pointer at the end. 
if (pglob->gl_pathv) COMMON_INTERCEPTOR_WRITE_RANGE( ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv)); for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) { char *p = pglob->gl_pathv[i]; COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, REAL(strlen)(p) + 1); } } static THREADLOCAL __sanitizer_glob_t *pglob_copy; static void wrapped_gl_closedir(void *dir) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); pglob_copy->gl_closedir(dir); } static void *wrapped_gl_readdir(void *dir) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); return pglob_copy->gl_readdir(dir); } static void *wrapped_gl_opendir(const char *s) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1); return pglob_copy->gl_opendir(s); } static int wrapped_gl_lstat(const char *s, void *st) { COMMON_INTERCEPTOR_UNPOISON_PARAM(2); COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1); return pglob_copy->gl_lstat(s, st); } static int wrapped_gl_stat(const char *s, void *st) { COMMON_INTERCEPTOR_UNPOISON_PARAM(2); COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1); return pglob_copy->gl_stat(s, st); } static const __sanitizer_glob_t kGlobCopy = { 0, 0, 0, 0, wrapped_gl_closedir, wrapped_gl_readdir, wrapped_gl_opendir, wrapped_gl_lstat, wrapped_gl_stat}; INTERCEPTOR(int, glob, const char *pattern, int flags, int (*errfunc)(const char *epath, int eerrno), __sanitizer_glob_t *pglob) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob); COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0); __sanitizer_glob_t glob_copy; internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy)); if (flags & glob_altdirfunc) { Swap(pglob->gl_closedir, glob_copy.gl_closedir); Swap(pglob->gl_readdir, glob_copy.gl_readdir); Swap(pglob->gl_opendir, glob_copy.gl_opendir); Swap(pglob->gl_lstat, glob_copy.gl_lstat); Swap(pglob->gl_stat, glob_copy.gl_stat); pglob_copy = &glob_copy; } int res = REAL(glob)(pattern, flags, errfunc, pglob); if (flags & glob_altdirfunc) { Swap(pglob->gl_closedir, glob_copy.gl_closedir); Swap(pglob->gl_readdir, glob_copy.gl_readdir); Swap(pglob->gl_opendir, glob_copy.gl_opendir); Swap(pglob->gl_lstat, glob_copy.gl_lstat); Swap(pglob->gl_stat, glob_copy.gl_stat); } pglob_copy = 0; if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob); return res; } INTERCEPTOR(int, glob64, const char *pattern, int flags, int (*errfunc)(const char *epath, int eerrno), __sanitizer_glob_t *pglob) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob); COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0); __sanitizer_glob_t glob_copy; internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy)); if (flags & glob_altdirfunc) { Swap(pglob->gl_closedir, glob_copy.gl_closedir); Swap(pglob->gl_readdir, glob_copy.gl_readdir); Swap(pglob->gl_opendir, glob_copy.gl_opendir); Swap(pglob->gl_lstat, glob_copy.gl_lstat); Swap(pglob->gl_stat, glob_copy.gl_stat); pglob_copy = &glob_copy; } int res = REAL(glob64)(pattern, flags, errfunc, pglob); if (flags & glob_altdirfunc) { Swap(pglob->gl_closedir, glob_copy.gl_closedir); Swap(pglob->gl_readdir, glob_copy.gl_readdir); Swap(pglob->gl_opendir, glob_copy.gl_opendir); Swap(pglob->gl_lstat, glob_copy.gl_lstat); Swap(pglob->gl_stat, glob_copy.gl_stat); } pglob_copy = 0; if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob); return res; } #define INIT_GLOB \ COMMON_INTERCEPT_FUNCTION(glob); \ COMMON_INTERCEPT_FUNCTION(glob64); #else // SANITIZER_INTERCEPT_GLOB #define INIT_GLOB #endif // 
SANITIZER_INTERCEPT_GLOB #if SANITIZER_INTERCEPT_WAIT // According to sys/wait.h, wait(), waitid(), waitpid() may have symbol version // suffixes on Darwin. See the declaration of INTERCEPTOR_WITH_SUFFIX for // details. INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, wait, status); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(wait)(status); if (res != -1 && status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); return res; } // On FreeBSD id_t is always 64-bit wide. #if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, long long id, void *infop, int options) { #else INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop, int options) { #endif void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(waitid)(idtype, id, infop, options); if (res != -1 && infop) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz); return res; } INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(waitpid)(pid, status, options); if (res != -1 && status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); return res; } INTERCEPTOR(int, wait3, int *status, int options, void *rusage) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(wait3)(status, options, rusage); if (res != -1) { if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz); } return res; } #if SANITIZER_ANDROID INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(__wait4)(pid, status, options, rusage); if (res != -1) { if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz); } return res; } #define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(__wait4); #else INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
int res = REAL(wait4)(pid, status, options, rusage); if (res != -1) { if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz); } return res; } #define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(wait4); #endif // SANITIZER_ANDROID #define INIT_WAIT \ COMMON_INTERCEPT_FUNCTION(wait); \ COMMON_INTERCEPT_FUNCTION(waitid); \ COMMON_INTERCEPT_FUNCTION(waitpid); \ COMMON_INTERCEPT_FUNCTION(wait3); #else #define INIT_WAIT #define INIT_WAIT4 #endif #if SANITIZER_INTERCEPT_INET INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, inet_ntop, af, src, dst, size); uptr sz = __sanitizer_in_addr_sz(af); if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz); // FIXME: figure out read size based on the address family. // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. char *res = REAL(inet_ntop)(af, src, dst, size); if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); return res; } INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst); COMMON_INTERCEPTOR_READ_STRING(ctx, src, 0); // FIXME: figure out read size based on the address family. // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(inet_pton)(af, src, dst); if (res == 1) { uptr sz = __sanitizer_in_addr_sz(af); if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz); } return res; } #define INIT_INET \ COMMON_INTERCEPT_FUNCTION(inet_ntop); \ COMMON_INTERCEPT_FUNCTION(inet_pton); #else #define INIT_INET #endif #if SANITIZER_INTERCEPT_INET INTERCEPTOR(int, inet_aton, const char *cp, void *dst) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst); if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(inet_aton)(cp, dst); if (res != 0) { uptr sz = __sanitizer_in_addr_sz(af_inet); if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz); } return res; } #define INIT_INET_ATON COMMON_INTERCEPT_FUNCTION(inet_aton); #else #define INIT_INET_ATON #endif #if SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
#if SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM
INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(pthread_getschedparam)(thread, policy, param);
  if (res == 0) {
    if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));
    if (param) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, sizeof(*param));
  }
  return res;
}
#define INIT_PTHREAD_GETSCHEDPARAM \
  COMMON_INTERCEPT_FUNCTION(pthread_getschedparam);
#else
#define INIT_PTHREAD_GETSCHEDPARAM
#endif

#if SANITIZER_INTERCEPT_GETADDRINFO
INTERCEPTOR(int, getaddrinfo, char *node, char *service,
            struct __sanitizer_addrinfo *hints,
            struct __sanitizer_addrinfo **out) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out);
  if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, REAL(strlen)(node) + 1);
  if (service)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
  if (hints)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(getaddrinfo)(node, service, hints, out);
  if (res == 0 && out) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
    struct __sanitizer_addrinfo *p = *out;
    while (p) {
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
      if (p->ai_addr)
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, p->ai_addrlen);
      if (p->ai_canonname)
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname,
                                       REAL(strlen)(p->ai_canonname) + 1);
      p = p->ai_next;
    }
  }
  return res;
}
#define INIT_GETADDRINFO COMMON_INTERCEPT_FUNCTION(getaddrinfo);
#else
#define INIT_GETADDRINFO
#endif

#if SANITIZER_INTERCEPT_GETNAMEINFO
INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
            unsigned hostlen, char *serv, unsigned servlen, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getnameinfo, sockaddr, salen, host, hostlen,
                           serv, servlen, flags);
  // FIXME: consider adding READ_RANGE(sockaddr, salen)
  // There is padding in in_addr that may make this too noisy
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res =
      REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
  if (res == 0) {
    if (host && hostlen)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, REAL(strlen)(host) + 1);
    if (serv && servlen)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, REAL(strlen)(serv) + 1);
  }
  return res;
}
#define INIT_GETNAMEINFO COMMON_INTERCEPT_FUNCTION(getnameinfo);
#else
#define INIT_GETNAMEINFO
#endif
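The getaddrinfo interceptor walks the result chain through ai_next and marks each node, its ai_addr buffer, and its optional ai_canonname string as written. A caller-side sketch of the same traversal over the standard POSIX types (independent of the __sanitizer_* shadow structs):

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

// Resolve a host and walk the linked list the same way the interceptor
// does: the node itself, then ai_addr, then the optional canonical name.
int print_addresses(const char *host) {
  struct addrinfo hints, *out = NULL;
  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_UNSPEC;
  int rc = getaddrinfo(host, NULL, &hints, &out);
  if (rc != 0) return rc;
  for (struct addrinfo *p = out; p; p = p->ai_next)
    printf("family=%d addrlen=%u canon=%s\n", p->ai_family,
           (unsigned)p->ai_addrlen, p->ai_canonname ? p->ai_canonname : "-");
  freeaddrinfo(out);
  return 0;
}
// ---- end sketch ----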
#if SANITIZER_INTERCEPT_GETSOCKNAME
INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
  int addrlen_in = *addrlen;
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(getsockname)(sock_fd, addr, addrlen);
  if (res == 0) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
  }
  return res;
}
#define INIT_GETSOCKNAME COMMON_INTERCEPT_FUNCTION(getsockname);
#else
#define INIT_GETSOCKNAME
#endif

#if SANITIZER_INTERCEPT_GETHOSTBYNAME || SANITIZER_INTERCEPT_GETHOSTBYNAME_R
static void write_hostent(void *ctx, struct __sanitizer_hostent *h) {
  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent));
  if (h->h_name)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, REAL(strlen)(h->h_name) + 1);
  char **p = h->h_aliases;
  while (*p) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
    ++p;
  }
  COMMON_INTERCEPTOR_WRITE_RANGE(
      ctx, h->h_aliases, (p - h->h_aliases + 1) * sizeof(*h->h_aliases));
  p = h->h_addr_list;
  while (*p) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, h->h_length);
    ++p;
  }
  COMMON_INTERCEPTOR_WRITE_RANGE(
      ctx, h->h_addr_list, (p - h->h_addr_list + 1) * sizeof(*h->h_addr_list));
}
#endif

#if SANITIZER_INTERCEPT_GETHOSTBYNAME
INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname, char *name) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname, name);
  struct __sanitizer_hostent *res = REAL(gethostbyname)(name);
  if (res) write_hostent(ctx, res);
  return res;
}
INTERCEPTOR(struct __sanitizer_hostent *, gethostbyaddr, void *addr, int len,
            int type) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr, addr, len, type);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
  struct __sanitizer_hostent *res = REAL(gethostbyaddr)(addr, len, type);
  if (res) write_hostent(ctx, res);
  return res;
}
INTERCEPTOR(struct __sanitizer_hostent *, gethostent, int fake) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, gethostent, fake);
  struct __sanitizer_hostent *res = REAL(gethostent)(fake);
  if (res) write_hostent(ctx, res);
  return res;
}
INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname2, char *name, int af) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2, name, af);
  struct __sanitizer_hostent *res = REAL(gethostbyname2)(name, af);
  if (res) write_hostent(ctx, res);
  return res;
}
#define INIT_GETHOSTBYNAME                   \
  COMMON_INTERCEPT_FUNCTION(gethostent);     \
  COMMON_INTERCEPT_FUNCTION(gethostbyaddr);  \
  COMMON_INTERCEPT_FUNCTION(gethostbyname);  \
  COMMON_INTERCEPT_FUNCTION(gethostbyname2);
#else
#define INIT_GETHOSTBYNAME
#endif

#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R
INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
            char *buf, SIZE_T buflen, __sanitizer_hostent **result,
            int *h_errnop) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen,
                           result, h_errnop);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
  if (result) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
    if (res == 0 && *result) write_hostent(ctx, *result);
  }
  if (h_errnop)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
  return res;
}
#define INIT_GETHOSTBYNAME_R COMMON_INTERCEPT_FUNCTION(gethostbyname_r);
#else
#define INIT_GETHOSTBYNAME_R
#endif

#if SANITIZER_INTERCEPT_GETHOSTENT_R
INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
            SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,
                           h_errnop);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
  if (result) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
    if (res == 0 && *result) write_hostent(ctx, *result);
  }
  if (h_errnop)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
  return res;
}
#define INIT_GETHOSTENT_R COMMON_INTERCEPT_FUNCTION(gethostent_r);
#else
#define INIT_GETHOSTENT_R
#endif

#if SANITIZER_INTERCEPT_GETHOSTBYADDR_R
INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
            struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
            __sanitizer_hostent **result, int *h_errnop) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf,
                           buflen, result, h_errnop);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
                                  h_errnop);
  if (result) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
    if (res == 0 && *result) write_hostent(ctx, *result);
  }
  if (h_errnop)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
  return res;
}
#define INIT_GETHOSTBYADDR_R COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r);
#else
#define INIT_GETHOSTBYADDR_R
#endif

#if SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
            struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
            __sanitizer_hostent **result, int *h_errnop) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen,
                           result, h_errnop);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res =
      REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
  if (result) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
    if (res == 0 && *result) write_hostent(ctx, *result);
  }
  if (h_errnop)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
  return res;
}
#define INIT_GETHOSTBYNAME2_R COMMON_INTERCEPT_FUNCTION(gethostbyname2_r);
#else
#define INIT_GETHOSTBYNAME2_R
#endif
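write_hostent above has to chase two NULL-terminated pointer arrays (h_aliases and h_addr_list) plus everything they point at: aliases are C strings, while each address entry is exactly h_length raw bytes. A caller-side sketch of the same walk over the standard struct hostent:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <netdb.h>
#include <stdio.h>

// Visits the same byte ranges write_hostent annotates: the struct, the
// name, then both NULL-terminated arrays and every element in them.
void dump_hostent(const struct hostent *h) {
  if (!h) return;
  if (h->h_name) printf("name: %s\n", h->h_name);
  for (char **p = h->h_aliases; p && *p; ++p)
    printf("alias: %s\n", *p);
  for (char **p = h->h_addr_list; p && *p; ++p)
    printf("address entry of %d raw bytes\n", h->h_length);
}
// ---- end sketch ----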
#if SANITIZER_INTERCEPT_GETSOCKOPT
INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
            int *optlen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval,
                           optlen);
  if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);
  if (res == 0)
    if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);
  return res;
}
#define INIT_GETSOCKOPT COMMON_INTERCEPT_FUNCTION(getsockopt);
#else
#define INIT_GETSOCKOPT
#endif

#if SANITIZER_INTERCEPT_ACCEPT
INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, accept, fd, addr, addrlen);
  unsigned addrlen0 = 0;
  if (addrlen) {
    COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
    addrlen0 = *addrlen;
  }
  int fd2 = REAL(accept)(fd, addr, addrlen);
  if (fd2 >= 0) {
    if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
    if (addr && addrlen)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));
  }
  return fd2;
}
#define INIT_ACCEPT COMMON_INTERCEPT_FUNCTION(accept);
#else
#define INIT_ACCEPT
#endif

#if SANITIZER_INTERCEPT_ACCEPT4
INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, accept4, fd, addr, addrlen, f);
  unsigned addrlen0 = 0;
  if (addrlen) {
    COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
    addrlen0 = *addrlen;
  }
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int fd2 = REAL(accept4)(fd, addr, addrlen, f);
  if (fd2 >= 0) {
    if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
    if (addr && addrlen)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));
  }
  return fd2;
}
#define INIT_ACCEPT4 COMMON_INTERCEPT_FUNCTION(accept4);
#else
#define INIT_ACCEPT4
#endif
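Both accept interceptors clamp the written range to Min(*addrlen, addrlen0) because the kernel reports the full source-address length in *addrlen even when it truncated the copy to the caller's original buffer size. A caller-side sketch of that contract, in plain POSIX terms:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <netinet/in.h>
#include <sys/socket.h>

// accept() may set *addrlen larger than the buffer we passed in; only
// min(original buffer size, returned *addrlen) bytes were actually written.
int accept_with_small_buffer(int listen_fd) {
  struct sockaddr_in addr;
  socklen_t len = sizeof(addr);  // size of the buffer we actually own
  socklen_t len_in = len;
  int fd = accept(listen_fd, (struct sockaddr *)&addr, &len);
  if (fd >= 0) {
    socklen_t written = len < len_in ? len : len_in;  // == Min(*addrlen, addrlen0)
    (void)written;  // only this many bytes of addr are initialized
  }
  return fd;
}
// ---- end sketch ----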
#if SANITIZER_INTERCEPT_MODF
INTERCEPTOR(double, modf, double x, double *iptr) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, modf, x, iptr);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  double res = REAL(modf)(x, iptr);
  if (iptr) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
  }
  return res;
}
INTERCEPTOR(float, modff, float x, float *iptr) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, modff, x, iptr);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  float res = REAL(modff)(x, iptr);
  if (iptr) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
  }
  return res;
}
INTERCEPTOR(long double, modfl, long double x, long double *iptr) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, modfl, x, iptr);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  long double res = REAL(modfl)(x, iptr);
  if (iptr) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
  }
  return res;
}
#define INIT_MODF                    \
  COMMON_INTERCEPT_FUNCTION(modf);   \
  COMMON_INTERCEPT_FUNCTION(modff);  \
  COMMON_INTERCEPT_FUNCTION_LDBL(modfl);
#else
#define INIT_MODF
#endif

#if SANITIZER_INTERCEPT_RECVMSG
static void write_msghdr(void *ctx, struct __sanitizer_msghdr *msg,
                         SSIZE_T maxlen) {
  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg, sizeof(*msg));
  if (msg->msg_name && msg->msg_namelen)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_name, msg->msg_namelen);
  if (msg->msg_iov && msg->msg_iovlen)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_iov,
                                   sizeof(*msg->msg_iov) * msg->msg_iovlen);
  write_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);
  if (msg->msg_control && msg->msg_controllen)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_control, msg->msg_controllen);
}

INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,
            int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, recvmsg, fd, msg, flags);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SSIZE_T res = REAL(recvmsg)(fd, msg, flags);
  if (res >= 0) {
    if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
    if (msg) {
      write_msghdr(ctx, msg, res);
      COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg);
    }
  }
  return res;
}
#define INIT_RECVMSG COMMON_INTERCEPT_FUNCTION(recvmsg);
#else
#define INIT_RECVMSG
#endif

#if SANITIZER_INTERCEPT_SENDMSG
static void read_msghdr_control(void *ctx, void *control, uptr controllen) {
  const unsigned kCmsgDataOffset =
      RoundUpTo(sizeof(__sanitizer_cmsghdr), sizeof(uptr));
  char *p = (char *)control;
  char *const control_end = p + controllen;
  while (true) {
    if (p + sizeof(__sanitizer_cmsghdr) > control_end) break;
    __sanitizer_cmsghdr *cmsg = (__sanitizer_cmsghdr *)p;
    COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_len,
                                  sizeof(cmsg->cmsg_len));
    if (p + RoundUpTo(cmsg->cmsg_len, sizeof(uptr)) > control_end) break;
    COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_level,
                                  sizeof(cmsg->cmsg_level));
    COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_type,
                                  sizeof(cmsg->cmsg_type));
    if (cmsg->cmsg_len > kCmsgDataOffset) {
      char *data = p + kCmsgDataOffset;
      unsigned data_len = cmsg->cmsg_len - kCmsgDataOffset;
      if (data_len > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, data, data_len);
    }
    p += RoundUpTo(cmsg->cmsg_len, sizeof(uptr));
  }
}

static void read_msghdr(void *ctx, struct __sanitizer_msghdr *msg,
                        SSIZE_T maxlen) {
#define R(f) \
  COMMON_INTERCEPTOR_READ_RANGE(ctx, &msg->msg_##f, sizeof(msg->msg_##f))
  R(name);
  R(namelen);
  R(iov);
  R(iovlen);
  R(control);
  R(controllen);
  R(flags);
#undef R
  if (msg->msg_name && msg->msg_namelen)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_name, msg->msg_namelen);
  if (msg->msg_iov && msg->msg_iovlen)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_iov,
                                  sizeof(*msg->msg_iov) * msg->msg_iovlen);
  read_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);
  if (msg->msg_control && msg->msg_controllen)
    read_msghdr_control(ctx, msg->msg_control, msg->msg_controllen);
}

INTERCEPTOR(SSIZE_T, sendmsg, int fd, struct __sanitizer_msghdr *msg,
            int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sendmsg, fd, msg, flags);
  if (fd >= 0) {
    COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
  }
  SSIZE_T res = REAL(sendmsg)(fd, msg, flags);
  if (common_flags()->intercept_send && res >= 0 && msg)
    read_msghdr(ctx, msg, res);
  return res;
}
#define INIT_SENDMSG COMMON_INTERCEPT_FUNCTION(sendmsg);
#else
#define INIT_SENDMSG
#endif

#if SANITIZER_INTERCEPT_GETPEERNAME
INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);
  unsigned addr_sz;
  if (addrlen) addr_sz = *addrlen;
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(getpeername)(sockfd, addr, addrlen);
  if (!res && addr && addrlen)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
  return res;
}
#define INIT_GETPEERNAME COMMON_INTERCEPT_FUNCTION(getpeername);
#else
#define INIT_GETPEERNAME
#endif

#if SANITIZER_INTERCEPT_SYSINFO
INTERCEPTOR(int, sysinfo, void *info) {
  void *ctx;
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  COMMON_INTERCEPTOR_ENTER(ctx, sysinfo, info);
  int res = REAL(sysinfo)(info);
  if (!res && info)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, struct_sysinfo_sz);
  return res;
}
#define INIT_SYSINFO COMMON_INTERCEPT_FUNCTION(sysinfo);
#else
#define INIT_SYSINFO
#endif

#if SANITIZER_INTERCEPT_READDIR
INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
  __sanitizer_dirent *res = REAL(opendir)(path);
  if (res) COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
  return res;
}
INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  __sanitizer_dirent *res = REAL(readdir)(dirp);
  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
  return res;
}
INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
            __sanitizer_dirent **result) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, readdir_r, dirp, entry, result);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(readdir_r)(dirp, entry, result);
  if (!res) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
    if (*result)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
  }
  return res;
}
#define INIT_READDIR                   \
  COMMON_INTERCEPT_FUNCTION(opendir);  \
  COMMON_INTERCEPT_FUNCTION(readdir);  \
  COMMON_INTERCEPT_FUNCTION(readdir_r);
#else
#define INIT_READDIR
#endif
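read_msghdr_control above re-implements the standard ancillary-data walk by hand: control messages are laid back to back, each advancing by cmsg_len rounded up to pointer alignment. An equivalent caller-side sketch using the portable CMSG macros, which visit the same byte ranges:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <stdio.h>
#include <sys/socket.h>

// Walk the ancillary data of a msghdr with CMSG_FIRSTHDR/CMSG_NXTHDR,
// touching cmsg_level, cmsg_type, and cmsg_len exactly as the manual
// loop in read_msghdr_control does.
void dump_cmsgs(struct msghdr *msg) {
  for (struct cmsghdr *c = CMSG_FIRSTHDR(msg); c; c = CMSG_NXTHDR(msg, c))
    printf("level=%d type=%d len=%zu\n", c->cmsg_level, c->cmsg_type,
           (size_t)c->cmsg_len);
}
// ---- end sketch ----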
#if SANITIZER_INTERCEPT_READDIR64
INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, readdir64, dirp);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  __sanitizer_dirent64 *res = REAL(readdir64)(dirp);
  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
  return res;
}
INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
            __sanitizer_dirent64 **result) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, readdir64_r, dirp, entry, result);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(readdir64_r)(dirp, entry, result);
  if (!res) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
    if (*result)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
  }
  return res;
}
#define INIT_READDIR64                   \
  COMMON_INTERCEPT_FUNCTION(readdir64);  \
  COMMON_INTERCEPT_FUNCTION(readdir64_r);
#else
#define INIT_READDIR64
#endif

#if SANITIZER_INTERCEPT_PTRACE
INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, ptrace, request, pid, addr, data);
  __sanitizer_iovec local_iovec;
  if (data) {
    if (request == ptrace_setregs)
      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_regs_struct_sz);
    else if (request == ptrace_setfpregs)
      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpregs_struct_sz);
    else if (request == ptrace_setfpxregs)
      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
    else if (request == ptrace_setvfpregs)
      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_vfpregs_struct_sz);
    else if (request == ptrace_setsiginfo)
      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, siginfo_t_sz);
    // Some kernel might zero the iovec::iov_base in case of invalid
    // write access. In this case copy the invalid address for further
    // inspection.
    else if (request == ptrace_setregset || request == ptrace_getregset) {
      __sanitizer_iovec *iovec = (__sanitizer_iovec *)data;
      COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec));
      local_iovec = *iovec;
      if (request == ptrace_setregset)
        COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec->iov_base, iovec->iov_len);
    }
  }
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  uptr res = REAL(ptrace)(request, pid, addr, data);
  if (!res && data) {
    // Note that PEEK* requests assign different meaning to the return value.
    // This function does not handle them (nor does it need to).
    if (request == ptrace_getregs)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_regs_struct_sz);
    else if (request == ptrace_getfpregs)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpregs_struct_sz);
    else if (request == ptrace_getfpxregs)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
    else if (request == ptrace_getvfpregs)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_vfpregs_struct_sz);
    else if (request == ptrace_getsiginfo)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, siginfo_t_sz);
    else if (request == ptrace_geteventmsg)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(unsigned long));
    else if (request == ptrace_getregset) {
      __sanitizer_iovec *iovec = (__sanitizer_iovec *)data;
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec, sizeof(*iovec));
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, local_iovec.iov_base,
                                     local_iovec.iov_len);
    }
  }
  return res;
}
#define INIT_PTRACE COMMON_INTERCEPT_FUNCTION(ptrace);
#else
#define INIT_PTRACE
#endif

#if SANITIZER_INTERCEPT_SETLOCALE
INTERCEPTOR(char *, setlocale, int category, char *locale) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, setlocale, category, locale);
  if (locale)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
  char *res = REAL(setlocale)(category, locale);
  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
  return res;
}
#define INIT_SETLOCALE COMMON_INTERCEPT_FUNCTION(setlocale);
#else
#define INIT_SETLOCALE
#endif

#if SANITIZER_INTERCEPT_GETCWD
INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getcwd, buf, size);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  char *res = REAL(getcwd)(buf, size);
  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
  return res;
}
#define INIT_GETCWD COMMON_INTERCEPT_FUNCTION(getcwd);
#else
#define INIT_GETCWD
#endif

#if SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME
INTERCEPTOR(char *, get_current_dir_name, int fake) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, get_current_dir_name, fake);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  char *res = REAL(get_current_dir_name)(fake);
  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
  return res;
}
#define INIT_GET_CURRENT_DIR_NAME \
  COMMON_INTERCEPT_FUNCTION(get_current_dir_name);
#else
#define INIT_GET_CURRENT_DIR_NAME
#endif

UNUSED static inline void FixRealStrtolEndptr(const char *nptr,
                                              char **endptr) {
  CHECK(endptr);
  if (nptr == *endptr) {
    // No digits were found at strtol call, we need to find out the last
    // symbol accessed by strtoll on our own.
    // We get this symbol by skipping leading blanks and optional +/- sign.
    while (IsSpace(*nptr)) nptr++;
    if (*nptr == '+' || *nptr == '-') nptr++;
    *endptr = const_cast<char *>(nptr);
  }
  CHECK(*endptr >= nptr);
}

UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
                                            char **endptr, char *real_endptr,
                                            int base) {
  if (endptr) {
    *endptr = real_endptr;
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
  }
  // If base has unsupported value, strtol can exit with EINVAL
  // without reading any characters. So do additional checks only
  // if base is valid.
  bool is_valid_base = (base == 0) || (2 <= base && base <= 36);
  if (is_valid_base) {
    FixRealStrtolEndptr(nptr, &real_endptr);
  }
  COMMON_INTERCEPTOR_READ_STRING(ctx, nptr,
                                 is_valid_base ? (real_endptr - nptr) + 1 : 0);
}
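FixRealStrtolEndptr exists because, when no digits are parsed, libc leaves *endptr == nptr even though it peeked past leading whitespace and an optional sign; the helper redoes that skip so the READ_STRING annotation covers every byte strtol actually examined. A worked example of the endptr contract it compensates for:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  const char *s = "  -x";  // spaces and a sign, but no digits
  char *end;
  long v = strtol(s, &end, 10);
  // v == 0 and end == s: libc reports "no conversion", yet it did read
  // the two spaces and the '-' before giving up. FixRealStrtolEndptr
  // advances its copy of end past "  -" so those bytes get checked too.
  printf("v=%ld consumed=%td\n", v, end - s);  // prints: v=0 consumed=0
  return 0;
}
// ---- end sketch ----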
#if SANITIZER_INTERCEPT_STRTOIMAX
INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  char *real_endptr;
  INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
  return res;
}
INTERCEPTOR(INTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  char *real_endptr;
  INTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
  return res;
}
#define INIT_STRTOIMAX                   \
  COMMON_INTERCEPT_FUNCTION(strtoimax);  \
  COMMON_INTERCEPT_FUNCTION(strtoumax);
#else
#define INIT_STRTOIMAX
#endif

#if SANITIZER_INTERCEPT_MBSTOWCS
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, mbstowcs, dest, src, len);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SIZE_T res = REAL(mbstowcs)(dest, src, len);
  if (res != (SIZE_T)-1 && dest) {
    SIZE_T write_cnt = res + (res < len);
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
  }
  return res;
}
INTERCEPTOR(SIZE_T, mbsrtowcs, wchar_t *dest, const char **src, SIZE_T len,
            void *ps) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, mbsrtowcs, dest, src, len, ps);
  if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SIZE_T res = REAL(mbsrtowcs)(dest, src, len, ps);
  if (res != (SIZE_T)(-1) && dest && src) {
    // This function, and several others, may or may not write the terminating
    // \0 character. They write it iff they clear *src.
    SIZE_T write_cnt = res + !*src;
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
  }
  return res;
}
#define INIT_MBSTOWCS                   \
  COMMON_INTERCEPT_FUNCTION(mbstowcs);  \
  COMMON_INTERCEPT_FUNCTION(mbsrtowcs);
#else
#define INIT_MBSTOWCS
#endif
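The write_cnt computation (res + !*src) encodes the documented rule that mbsrtowcs stores a terminating L'\0', not counted in res, only when it consumed the whole source string, which it signals by setting *src to NULL. A short caller-side demonstration:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <stdio.h>
#include <string.h>
#include <wchar.h>

int main(void) {
  const char *src = "abc";
  wchar_t dst[8];
  mbstate_t st;
  memset(&st, 0, sizeof(st));
  size_t res = mbsrtowcs(dst, &src, 8, &st);
  // The whole string fit: res == 3 and src is now NULL, so dst[3] holds
  // L'\0' and four wide chars were written -- res plus one, exactly the
  // "res + !*src" count the interceptor annotates.
  printf("res=%zu src=%p\n", res, (void *)src);
  return 0;
}
// ---- end sketch ----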
#if SANITIZER_INTERCEPT_MBSNRTOWCS
INTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,
            SIZE_T len, void *ps) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, mbsnrtowcs, dest, src, nms, len, ps);
  if (src) {
    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
    if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
  }
  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SIZE_T res = REAL(mbsnrtowcs)(dest, src, nms, len, ps);
  if (res != (SIZE_T)(-1) && dest && src) {
    SIZE_T write_cnt = res + !*src;
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
  }
  return res;
}
#define INIT_MBSNRTOWCS COMMON_INTERCEPT_FUNCTION(mbsnrtowcs);
#else
#define INIT_MBSNRTOWCS
#endif

#if SANITIZER_INTERCEPT_WCSTOMBS
INTERCEPTOR(SIZE_T, wcstombs, char *dest, const wchar_t *src, SIZE_T len) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, wcstombs, dest, src, len);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SIZE_T res = REAL(wcstombs)(dest, src, len);
  if (res != (SIZE_T)-1 && dest) {
    SIZE_T write_cnt = res + (res < len);
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
  }
  return res;
}
INTERCEPTOR(SIZE_T, wcsrtombs, char *dest, const wchar_t **src, SIZE_T len,
            void *ps) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, wcsrtombs, dest, src, len, ps);
  if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SIZE_T res = REAL(wcsrtombs)(dest, src, len, ps);
  if (res != (SIZE_T)-1 && dest && src) {
    SIZE_T write_cnt = res + !*src;
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
  }
  return res;
}
#define INIT_WCSTOMBS                   \
  COMMON_INTERCEPT_FUNCTION(wcstombs);  \
  COMMON_INTERCEPT_FUNCTION(wcsrtombs);
#else
#define INIT_WCSTOMBS
#endif

#if SANITIZER_INTERCEPT_WCSNRTOMBS
INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,
            SIZE_T len, void *ps) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, wcsnrtombs, dest, src, nms, len, ps);
  if (src) {
    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
    if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
  }
  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps);
  if (res != ((SIZE_T)-1) && dest && src) {
    SIZE_T write_cnt = res + !*src;
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
  }
  return res;
}
#define INIT_WCSNRTOMBS COMMON_INTERCEPT_FUNCTION(wcsnrtombs);
#else
#define INIT_WCSNRTOMBS
#endif

#if SANITIZER_INTERCEPT_WCRTOMB
INTERCEPTOR(SIZE_T, wcrtomb, char *dest, wchar_t src, void *ps) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, wcrtomb, dest, src, ps);
  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SIZE_T res = REAL(wcrtomb)(dest, src, ps);
  if (res != ((SIZE_T)-1) && dest) {
    SIZE_T write_cnt = res;
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
  }
  return res;
}
#define INIT_WCRTOMB COMMON_INTERCEPT_FUNCTION(wcrtomb);
#else
#define INIT_WCRTOMB
#endif
#if SANITIZER_INTERCEPT_TCGETATTR
INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, tcgetattr, fd, termios_p);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(tcgetattr)(fd, termios_p);
  if (!res && termios_p)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, termios_p, struct_termios_sz);
  return res;
}
#define INIT_TCGETATTR COMMON_INTERCEPT_FUNCTION(tcgetattr);
#else
#define INIT_TCGETATTR
#endif

#if SANITIZER_INTERCEPT_REALPATH
INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, realpath, path, resolved_path);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
  // Workaround a bug in glibc where dlsym(RTLD_NEXT, ...) returns the oldest
  // version of a versioned symbol. For realpath(), this gives us something
  // (called __old_realpath) that does not handle NULL in the second argument.
  // Handle it as part of the interceptor.
  char *allocated_path = nullptr;
  if (!resolved_path)
    allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1);
  char *res = REAL(realpath)(path, resolved_path);
  if (allocated_path && !res) WRAP(free)(allocated_path);
  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
  return res;
}
#define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);
#else
#define INIT_REALPATH
#endif

#if SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME
INTERCEPTOR(char *, canonicalize_file_name, const char *path) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, canonicalize_file_name, path);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
  char *res = REAL(canonicalize_file_name)(path);
  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
  return res;
}
#define INIT_CANONICALIZE_FILE_NAME \
  COMMON_INTERCEPT_FUNCTION(canonicalize_file_name);
#else
#define INIT_CANONICALIZE_FILE_NAME
#endif

#if SANITIZER_INTERCEPT_CONFSTR
INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, confstr, name, buf, len);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  SIZE_T res = REAL(confstr)(name, buf, len);
  if (buf && res)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res < len ? res : len);
  return res;
}
#define INIT_CONFSTR COMMON_INTERCEPT_FUNCTION(confstr);
#else
#define INIT_CONFSTR
#endif
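The realpath interceptor papers over the glibc quirk described in its comment: dlsym(RTLD_NEXT, "realpath") can resolve to the oldest symbol version, which rejects a NULL second argument, so the interceptor pre-allocates the buffer itself. From the caller's side the modern contract therefore keeps working:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  // Modern realpath() allocates the result when resolved_path is NULL;
  // the interceptor guarantees this even if dlsym handed it the legacy
  // __old_realpath that cannot take NULL.
  char *p = realpath(".", NULL);
  if (p) {
    printf("%s\n", p);
    free(p);
  }
  return 0;
}
// ---- end sketch ----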
#if SANITIZER_INTERCEPT_SCHED_GETAFFINITY
INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sched_getaffinity, pid, cpusetsize, mask);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(sched_getaffinity)(pid, cpusetsize, mask);
  if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
  return res;
}
#define INIT_SCHED_GETAFFINITY COMMON_INTERCEPT_FUNCTION(sched_getaffinity);
#else
#define INIT_SCHED_GETAFFINITY
#endif

#if SANITIZER_INTERCEPT_SCHED_GETPARAM
INTERCEPTOR(int, sched_getparam, int pid, void *param) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param);
  int res = REAL(sched_getparam)(pid, param);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz);
  return res;
}
#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam);
#else
#define INIT_SCHED_GETPARAM
#endif

#if SANITIZER_INTERCEPT_STRERROR
INTERCEPTOR(char *, strerror, int errnum) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
  char *res = REAL(strerror)(errnum);
  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
  return res;
}
#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
#else
#define INIT_STRERROR
#endif

#if SANITIZER_INTERCEPT_STRERROR_R
// There are 2 versions of strerror_r:
//  * POSIX version returns 0 on success, negative error code on failure,
//    writes message to buf.
//  * GNU version returns message pointer, which points to either buf or some
//    static storage.
#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
    SANITIZER_MAC || SANITIZER_ANDROID
// POSIX version. Spec is not clear on whether buf is NULL-terminated.
// At least on OSX, buf contents are valid even when the call fails.
INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(strerror_r)(errnum, buf, buflen);
  SIZE_T sz = internal_strnlen(buf, buflen);
  if (sz < buflen) ++sz;
  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
  return res;
}
#else
// GNU version.
INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  char *res = REAL(strerror_r)(errnum, buf, buflen);
  if (res == buf)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
  else
    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
  return res;
}
#endif  // (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) &&
        // !_GNU_SOURCE || SANITIZER_MAC
#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);
#else
#define INIT_STRERROR_R
#endif

#if SANITIZER_INTERCEPT_XPG_STRERROR_R
INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, __xpg_strerror_r, errnum, buf, buflen);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
  // This version always returns a null-terminated string.
  if (buf && buflen)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
  return res;
}
#define INIT_XPG_STRERROR_R COMMON_INTERCEPT_FUNCTION(__xpg_strerror_r);
#else
#define INIT_XPG_STRERROR_R
#endif

#if SANITIZER_INTERCEPT_SCANDIR
typedef int (*scandir_filter_f)(const struct __sanitizer_dirent *);
typedef int (*scandir_compar_f)(const struct __sanitizer_dirent **,
                                const struct __sanitizer_dirent **);

static THREADLOCAL scandir_filter_f scandir_filter;
static THREADLOCAL scandir_compar_f scandir_compar;

static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
  return scandir_filter(dir);
}

static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
                                  const struct __sanitizer_dirent **b) {
  COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
  return scandir_compar(a, b);
}

INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
            scandir_filter_f filter, scandir_compar_f compar) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
  if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
  scandir_filter = filter;
  scandir_compar = compar;
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(scandir)(dirp, namelist,
                          filter ? wrapped_scandir_filter : nullptr,
                          compar ? wrapped_scandir_compar : nullptr);
  scandir_filter = nullptr;
  scandir_compar = nullptr;
  if (namelist && res > 0) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
    for (int i = 0; i < res; ++i)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
                                     (*namelist)[i]->d_reclen);
  }
  return res;
}
#define INIT_SCANDIR COMMON_INTERCEPT_FUNCTION(scandir);
#else
#define INIT_SCANDIR
#endif
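As the comment above notes, strerror_r has two incompatible ABIs, and which one you compile against depends on feature-test macros. A hedged sketch of a portable caller that tolerates either, using a preprocessor test modeled on the interceptor's (approximate; real detection can be more involved):

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <stdio.h>
#include <string.h>

// Produce a message for errnum in buf, whichever strerror_r variant the
// C library exposed. Undefined feature macros evaluate to 0 in #if.
static const char *portable_strerror_r(int errnum, char *buf, size_t buflen) {
#if (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !defined(_GNU_SOURCE)
  // POSIX variant: returns an int and always writes into buf.
  if (strerror_r(errnum, buf, buflen) != 0)
    snprintf(buf, buflen, "errno %d", errnum);
  return buf;
#else
  // GNU variant: returns a pointer that may be buf or static storage.
  return strerror_r(errnum, buf, buflen);
#endif
}
// ---- end sketch ----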
#if SANITIZER_INTERCEPT_SCANDIR64
typedef int (*scandir64_filter_f)(const struct __sanitizer_dirent64 *);
typedef int (*scandir64_compar_f)(const struct __sanitizer_dirent64 **,
                                  const struct __sanitizer_dirent64 **);

static THREADLOCAL scandir64_filter_f scandir64_filter;
static THREADLOCAL scandir64_compar_f scandir64_compar;

static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
  return scandir64_filter(dir);
}

static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
                                    const struct __sanitizer_dirent64 **b) {
  COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
  return scandir64_compar(a, b);
}

INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
            scandir64_filter_f filter, scandir64_compar_f compar) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
  if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
  scandir64_filter = filter;
  scandir64_compar = compar;
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(scandir64)(dirp, namelist,
                            filter ? wrapped_scandir64_filter : nullptr,
                            compar ? wrapped_scandir64_compar : nullptr);
  scandir64_filter = nullptr;
  scandir64_compar = nullptr;
  if (namelist && res > 0) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
    for (int i = 0; i < res; ++i)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
                                     (*namelist)[i]->d_reclen);
  }
  return res;
}
#define INIT_SCANDIR64 COMMON_INTERCEPT_FUNCTION(scandir64);
#else
#define INIT_SCANDIR64
#endif

#if SANITIZER_INTERCEPT_GETGROUPS
INTERCEPTOR(int, getgroups, int size, u32 *lst) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getgroups, size, lst);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(getgroups)(size, lst);
  if (res >= 0 && lst && size > 0)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
  return res;
}
#define INIT_GETGROUPS COMMON_INTERCEPT_FUNCTION(getgroups);
#else
#define INIT_GETGROUPS
#endif

#if SANITIZER_INTERCEPT_POLL
static void read_pollfd(void *ctx, __sanitizer_pollfd *fds,
                        __sanitizer_nfds_t nfds) {
  for (unsigned i = 0; i < nfds; ++i) {
    COMMON_INTERCEPTOR_READ_RANGE(ctx, &fds[i].fd, sizeof(fds[i].fd));
    COMMON_INTERCEPTOR_READ_RANGE(ctx, &fds[i].events, sizeof(fds[i].events));
  }
}

static void write_pollfd(void *ctx, __sanitizer_pollfd *fds,
                         __sanitizer_nfds_t nfds) {
  for (unsigned i = 0; i < nfds; ++i)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &fds[i].revents,
                                   sizeof(fds[i].revents));
}

INTERCEPTOR(int, poll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
            int timeout) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, poll, fds, nfds, timeout);
  if (fds && nfds) read_pollfd(ctx, fds, nfds);
  int res = COMMON_INTERCEPTOR_BLOCK_REAL(poll)(fds, nfds, timeout);
  if (fds && nfds) write_pollfd(ctx, fds, nfds);
  return res;
}
#define INIT_POLL COMMON_INTERCEPT_FUNCTION(poll);
#else
#define INIT_POLL
#endif

#if SANITIZER_INTERCEPT_PPOLL
INTERCEPTOR(int, ppoll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
            void *timeout_ts, __sanitizer_sigset_t *sigmask) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, ppoll, fds, nfds, timeout_ts, sigmask);
  if (fds && nfds) read_pollfd(ctx, fds, nfds);
  if (timeout_ts)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout_ts, struct_timespec_sz);
  // FIXME: read sigmask when all of sigemptyset, etc are intercepted.
  int res =
      COMMON_INTERCEPTOR_BLOCK_REAL(ppoll)(fds, nfds, timeout_ts, sigmask);
  if (fds && nfds) write_pollfd(ctx, fds, nfds);
  return res;
}
#define INIT_PPOLL COMMON_INTERCEPT_FUNCTION(ppoll);
#else
#define INIT_PPOLL
#endif
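The poll interceptor reads only the fd and events fields of each entry beforehand and writes back only revents afterwards, matching the kernel contract that the other fields are untouched. A caller-side sketch of that field split:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <poll.h>

int wait_readable(int fd, int timeout_ms) {
  struct pollfd pfd;
  pfd.fd = fd;          // read by poll (covered by read_pollfd above)
  pfd.events = POLLIN;  // read by poll
  pfd.revents = 0;      // written back by poll (covered by write_pollfd)
  int n = poll(&pfd, 1, timeout_ms);
  if (n > 0 && (pfd.revents & POLLIN)) return 1;
  return n;  // 0 on timeout, -1 on error
}
// ---- end sketch ----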
#if SANITIZER_INTERCEPT_WORDEXP
INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
  if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(wordexp)(s, p, flags);
  if (!res && p) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
    if (p->we_wordc)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
                                     sizeof(*p->we_wordv) * p->we_wordc);
    for (uptr i = 0; i < p->we_wordc; ++i) {
      char *w = p->we_wordv[i];
      if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, REAL(strlen)(w) + 1);
    }
  }
  return res;
}
#define INIT_WORDEXP COMMON_INTERCEPT_FUNCTION(wordexp);
#else
#define INIT_WORDEXP
#endif

#if SANITIZER_INTERCEPT_SIGWAIT
INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigwait, set, sig);
  // FIXME: read sigset_t when all of sigemptyset, etc are intercepted
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(sigwait)(set, sig);
  if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
  return res;
}
#define INIT_SIGWAIT COMMON_INTERCEPT_FUNCTION(sigwait);
#else
#define INIT_SIGWAIT
#endif

#if SANITIZER_INTERCEPT_SIGWAITINFO
INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigwaitinfo, set, info);
  // FIXME: read sigset_t when all of sigemptyset, etc are intercepted
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(sigwaitinfo)(set, info);
  if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
  return res;
}
#define INIT_SIGWAITINFO COMMON_INTERCEPT_FUNCTION(sigwaitinfo);
#else
#define INIT_SIGWAITINFO
#endif

#if SANITIZER_INTERCEPT_SIGTIMEDWAIT
INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
            void *timeout) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigtimedwait, set, info, timeout);
  if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
  // FIXME: read sigset_t when all of sigemptyset, etc are intercepted
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(sigtimedwait)(set, info, timeout);
  if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
  return res;
}
#define INIT_SIGTIMEDWAIT COMMON_INTERCEPT_FUNCTION(sigtimedwait);
#else
#define INIT_SIGTIMEDWAIT
#endif

#if SANITIZER_INTERCEPT_SIGSETOPS
INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigemptyset, set);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(sigemptyset)(set);
  if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
  return res;
}
INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigfillset, set);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(sigfillset)(set);
  if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
  return res;
}
#define INIT_SIGSETOPS                     \
  COMMON_INTERCEPT_FUNCTION(sigemptyset);  \
  COMMON_INTERCEPT_FUNCTION(sigfillset);
#else
#define INIT_SIGSETOPS
#endif

#if SANITIZER_INTERCEPT_SIGPENDING
INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigpending, set);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(sigpending)(set);
  if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
  return res;
}
#define INIT_SIGPENDING COMMON_INTERCEPT_FUNCTION(sigpending);
#else
#define INIT_SIGPENDING
#endif

#if SANITIZER_INTERCEPT_SIGPROCMASK
INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
            __sanitizer_sigset_t *oldset) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);
  // FIXME: read sigset_t when all of sigemptyset, etc are intercepted
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(sigprocmask)(how, set, oldset);
  if (!res && oldset)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
  return res;
}
#define INIT_SIGPROCMASK COMMON_INTERCEPT_FUNCTION(sigprocmask);
#else
#define INIT_SIGPROCMASK
#endif
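Note that the sigprocmask interceptor annotates only oldset as written; set is an input the interceptors cannot yet check byte-for-byte (see the recurring FIXME about sigemptyset and friends). A caller-side sketch touching both roles:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <signal.h>

// Block SIGINT and remember the previous mask. 'prev' is the only
// out-parameter, which is why the interceptor annotates just it.
int block_sigint(sigset_t *prev) {
  sigset_t s;
  sigemptyset(&s);  // the sigemptyset interceptor marks s fully written
  sigaddset(&s, SIGINT);
  return sigprocmask(SIG_BLOCK, &s, prev);
}
// ---- end sketch ----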
#if SANITIZER_INTERCEPT_BACKTRACE
INTERCEPTOR(int, backtrace, void **buffer, int size) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(backtrace)(buffer, size);
  if (res && buffer)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
  return res;
}
INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
  if (buffer && size)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  char **res = REAL(backtrace_symbols)(buffer, size);
  if (res && size) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
    for (int i = 0; i < size; ++i)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], REAL(strlen(res[i])) + 1);
  }
  return res;
}
#define INIT_BACKTRACE                   \
  COMMON_INTERCEPT_FUNCTION(backtrace);  \
  COMMON_INTERCEPT_FUNCTION(backtrace_symbols);
#else
#define INIT_BACKTRACE
#endif

#if SANITIZER_INTERCEPT__EXIT
INTERCEPTOR(void, _exit, int status) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, _exit, status);
  COMMON_INTERCEPTOR_USER_CALLBACK_START();
  int status1 = COMMON_INTERCEPTOR_ON_EXIT(ctx);
  COMMON_INTERCEPTOR_USER_CALLBACK_END();
  if (status == 0) status = status1;
  REAL(_exit)(status);
}
#define INIT__EXIT COMMON_INTERCEPT_FUNCTION(_exit);
#else
#define INIT__EXIT
#endif

#if SANITIZER_INTERCEPT_PHTREAD_MUTEX
INTERCEPTOR(int, pthread_mutex_lock, void *m) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
  COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
  int res = REAL(pthread_mutex_lock)(m);
  if (res == errno_EOWNERDEAD) COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
  if (res == 0 || res == errno_EOWNERDEAD)
    COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
  if (res == errno_EINVAL) COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
  return res;
}
INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);
  COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
  int res = REAL(pthread_mutex_unlock)(m);
  if (res == errno_EINVAL) COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
  return res;
}
#define INIT_PTHREAD_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(pthread_mutex_lock)
#define INIT_PTHREAD_MUTEX_UNLOCK \
  COMMON_INTERCEPT_FUNCTION(pthread_mutex_unlock)
#else
#define INIT_PTHREAD_MUTEX_LOCK
#define INIT_PTHREAD_MUTEX_UNLOCK
#endif

#if SANITIZER_INTERCEPT_GETMNTENT || SANITIZER_INTERCEPT_GETMNTENT_R
static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
  if (mnt->mnt_fsname)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_fsname,
                                   REAL(strlen)(mnt->mnt_fsname) + 1);
  if (mnt->mnt_dir)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_dir,
                                   REAL(strlen)(mnt->mnt_dir) + 1);
  if (mnt->mnt_type)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_type,
                                   REAL(strlen)(mnt->mnt_type) + 1);
  if (mnt->mnt_opts)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_opts,
                                   REAL(strlen)(mnt->mnt_opts) + 1);
}
#endif

#if SANITIZER_INTERCEPT_GETMNTENT
INTERCEPTOR(__sanitizer_mntent *, getmntent, void *fp) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getmntent, fp);
  __sanitizer_mntent *res = REAL(getmntent)(fp);
  if (res) write_mntent(ctx, res);
  return res;
}
#define INIT_GETMNTENT COMMON_INTERCEPT_FUNCTION(getmntent);
#else
#define INIT_GETMNTENT
#endif

#if SANITIZER_INTERCEPT_GETMNTENT_R
INTERCEPTOR(__sanitizer_mntent *, getmntent_r, void *fp,
            __sanitizer_mntent *mntbuf, char *buf, int buflen) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getmntent_r, fp, mntbuf, buf, buflen);
  __sanitizer_mntent *res = REAL(getmntent_r)(fp, mntbuf, buf, buflen);
  if (res) write_mntent(ctx, res);
  return res;
}
#define INIT_GETMNTENT_R COMMON_INTERCEPT_FUNCTION(getmntent_r);
#else
#define INIT_GETMNTENT_R
#endif
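The pthread_mutex_lock interceptor treats EOWNERDEAD as a successful acquisition (after notifying the tool via MUTEX_REPAIR), mirroring how robust mutexes behave for their callers. A caller-side sketch of that protocol:

// ---- Illustrative sketch; not part of the intercepted file. ----
#include <errno.h>
#include <pthread.h>

// With a robust mutex, EOWNERDEAD means "you now hold the lock, but the
// previous owner died holding it"; the new holder repairs the protected
// state and marks the mutex consistent. This is exactly the success path
// the interceptor models.
int lock_robust(pthread_mutex_t *m) {
  int res = pthread_mutex_lock(m);
  if (res == EOWNERDEAD) {
    pthread_mutex_consistent(m);  // declare the protected state repaired
    res = 0;                      // we do own the mutex now
  }
  return res;
}
// ---- end sketch ----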
#if SANITIZER_INTERCEPT_STATFS
INTERCEPTOR(int, statfs, char *path, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(statfs)(path, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
  return res;
}
INTERCEPTOR(int, fstatfs, int fd, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fstatfs, fd, buf);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(fstatfs)(fd, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
  return res;
}
#define INIT_STATFS                   \
  COMMON_INTERCEPT_FUNCTION(statfs); \
  COMMON_INTERCEPT_FUNCTION(fstatfs);
#else
#define INIT_STATFS
#endif

#if SANITIZER_INTERCEPT_STATFS64
INTERCEPTOR(int, statfs64, char *path, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(statfs64)(path, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
  return res;
}
INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fstatfs64, fd, buf);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(fstatfs64)(fd, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
  return res;
}
#define INIT_STATFS64                   \
  COMMON_INTERCEPT_FUNCTION(statfs64); \
  COMMON_INTERCEPT_FUNCTION(fstatfs64);
#else
#define INIT_STATFS64
#endif

#if SANITIZER_INTERCEPT_STATVFS
INTERCEPTOR(int, statvfs, char *path, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(statvfs)(path, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
  return res;
}
INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(fstatvfs)(fd, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
  return res;
}
#define INIT_STATVFS                   \
  COMMON_INTERCEPT_FUNCTION(statvfs); \
  COMMON_INTERCEPT_FUNCTION(fstatvfs);
#else
#define INIT_STATVFS
#endif
#if SANITIZER_INTERCEPT_STATVFS64
INTERCEPTOR(int, statvfs64, char *path, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(statvfs64)(path, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
  return res;
}
INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs64, fd, buf);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(fstatvfs64)(fd, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
  return res;
}
#define INIT_STATVFS64                   \
  COMMON_INTERCEPT_FUNCTION(statvfs64); \
  COMMON_INTERCEPT_FUNCTION(fstatvfs64);
#else
#define INIT_STATVFS64
#endif

#if SANITIZER_INTERCEPT_INITGROUPS
INTERCEPTOR(int, initgroups, char *user, u32 group) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, initgroups, user, group);
  if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, REAL(strlen)(user) + 1);
  int res = REAL(initgroups)(user, group);
  return res;
}
#define INIT_INITGROUPS COMMON_INTERCEPT_FUNCTION(initgroups);
#else
#define INIT_INITGROUPS
#endif

#if SANITIZER_INTERCEPT_ETHER_NTOA_ATON
INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
  if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
  char *res = REAL(ether_ntoa)(addr);
  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
  return res;
}
INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
  if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
  __sanitizer_ether_addr *res = REAL(ether_aton)(buf);
  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
  return res;
}
#define INIT_ETHER_NTOA_ATON              \
  COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
  COMMON_INTERCEPT_FUNCTION(ether_aton);
#else
#define INIT_ETHER_NTOA_ATON
#endif
int res = REAL(ether_line)(line, addr, hostname); if (!res) { if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr)); if (hostname) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1); } return res; } #define INIT_ETHER_HOST \ COMMON_INTERCEPT_FUNCTION(ether_ntohost); \ COMMON_INTERCEPT_FUNCTION(ether_hostton); \ COMMON_INTERCEPT_FUNCTION(ether_line); #else #define INIT_ETHER_HOST #endif #if SANITIZER_INTERCEPT_ETHER_R INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa_r, addr, buf); if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr)); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. char *res = REAL(ether_ntoa_r)(addr, buf); if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); return res; } INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf, __sanitizer_ether_addr *addr) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr); if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. __sanitizer_ether_addr *res = REAL(ether_aton_r)(buf, addr); if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(*res)); return res; } #define INIT_ETHER_R \ COMMON_INTERCEPT_FUNCTION(ether_ntoa_r); \ COMMON_INTERCEPT_FUNCTION(ether_aton_r); #else #define INIT_ETHER_R #endif #if SANITIZER_INTERCEPT_SHMCTL INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, shmctl, shmid, cmd, buf); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(shmctl)(shmid, cmd, buf); if (res >= 0) { unsigned sz = 0; if (cmd == shmctl_ipc_stat || cmd == shmctl_shm_stat) sz = sizeof(__sanitizer_shmid_ds); else if (cmd == shmctl_ipc_info) sz = struct_shminfo_sz; else if (cmd == shmctl_shm_info) sz = struct_shm_info_sz; if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz); } return res; } #define INIT_SHMCTL COMMON_INTERCEPT_FUNCTION(shmctl); #else #define INIT_SHMCTL #endif #if SANITIZER_INTERCEPT_RANDOM_R INTERCEPTOR(int, random_r, void *buf, u32 *result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, random_r, buf, result); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(random_r)(buf, result); if (!res && result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); return res; } #define INIT_RANDOM_R COMMON_INTERCEPT_FUNCTION(random_r); #else #define INIT_RANDOM_R #endif // FIXME: under ASan the REAL() call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
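// Illustrative sketch: the INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET macro defined
// in the block below stamps out one interceptor per pthread_*attr_get*
// accessor, so e.g. INTERCEPTOR_PTHREAD_ATTR_GET(detachstate, sizeof(int))
// expands to roughly:
#if 0
INTERCEPTOR(int, pthread_attr_getdetachstate, void *attr, void *r) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getdetachstate, attr, r);
  int res = REAL(pthread_attr_getdetachstate)(attr, r);
  // On success, mark the out-parameter as written/initialized.
  if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sizeof(int));
  return res;
}
#endif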
#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \ SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED || \ SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GET || \ SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GET || \ SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GET || \ SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GET #define INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(fn, sz) \ INTERCEPTOR(int, fn, void *attr, void *r) { \ void *ctx; \ COMMON_INTERCEPTOR_ENTER(ctx, fn, attr, r); \ int res = REAL(fn)(attr, r); \ if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \ return res; \ } #define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_attr_get##what, sz) #define INTERCEPTOR_PTHREAD_MUTEXATTR_GET(what, sz) \ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_mutexattr_get##what, sz) #define INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(what, sz) \ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_rwlockattr_get##what, sz) #define INTERCEPTOR_PTHREAD_CONDATTR_GET(what, sz) \ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_condattr_get##what, sz) #define INTERCEPTOR_PTHREAD_BARRIERATTR_GET(what, sz) \ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_barrierattr_get##what, sz) #endif #if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET INTERCEPTOR_PTHREAD_ATTR_GET(detachstate, sizeof(int)) INTERCEPTOR_PTHREAD_ATTR_GET(guardsize, sizeof(SIZE_T)) INTERCEPTOR_PTHREAD_ATTR_GET(schedparam, struct_sched_param_sz) INTERCEPTOR_PTHREAD_ATTR_GET(schedpolicy, sizeof(int)) INTERCEPTOR_PTHREAD_ATTR_GET(scope, sizeof(int)) INTERCEPTOR_PTHREAD_ATTR_GET(stacksize, sizeof(SIZE_T)) INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getstack, attr, addr, size); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(pthread_attr_getstack)(attr, addr, size); if (!res) { if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr)); if (size) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, size, sizeof(*size)); } return res; } // We may need to call the real pthread_attr_getstack from the run-time // in sanitizer_common, but we don't want to include the interception headers // there. So, just define this function here. namespace __sanitizer { extern "C" { int real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) { return REAL(pthread_attr_getstack)(attr, addr, size); } } // extern "C" } // namespace __sanitizer #define INIT_PTHREAD_ATTR_GET \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getdetachstate); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getguardsize); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedparam); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedpolicy); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getscope); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getstacksize); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getstack); #else #define INIT_PTHREAD_ATTR_GET #endif #if SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED INTERCEPTOR_PTHREAD_ATTR_GET(inheritsched, sizeof(int)) #define INIT_PTHREAD_ATTR_GETINHERITSCHED \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getinheritsched); #else #define INIT_PTHREAD_ATTR_GETINHERITSCHED #endif #if SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize, void *cpuset) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getaffinity_np, attr, cpusetsize, cpuset); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata.
See // https://github.com/google/sanitizers/issues/321. int res = REAL(pthread_attr_getaffinity_np)(attr, cpusetsize, cpuset); if (!res && cpusetsize && cpuset) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize); return res; } #define INIT_PTHREAD_ATTR_GETAFFINITY_NP \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getaffinity_np); #else #define INIT_PTHREAD_ATTR_GETAFFINITY_NP #endif #if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED INTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int)) #define INIT_PTHREAD_MUTEXATTR_GETPSHARED \ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getpshared); #else #define INIT_PTHREAD_MUTEXATTR_GETPSHARED #endif #if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int)) #define INIT_PTHREAD_MUTEXATTR_GETTYPE \ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_gettype); #else #define INIT_PTHREAD_MUTEXATTR_GETTYPE #endif #if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL INTERCEPTOR_PTHREAD_MUTEXATTR_GET(protocol, sizeof(int)) #define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL \ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprotocol); #else #define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL #endif #if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING INTERCEPTOR_PTHREAD_MUTEXATTR_GET(prioceiling, sizeof(int)) #define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING \ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprioceiling); #else #define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING #endif #if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust, sizeof(int)) #define INIT_PTHREAD_MUTEXATTR_GETROBUST \ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust); #else #define INIT_PTHREAD_MUTEXATTR_GETROBUST #endif #if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust_np, sizeof(int)) #define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP \ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust_np); #else #define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP #endif #if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(pshared, sizeof(int)) #define INIT_PTHREAD_RWLOCKATTR_GETPSHARED \ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getpshared); #else #define INIT_PTHREAD_RWLOCKATTR_GETPSHARED #endif #if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(kind_np, sizeof(int)) #define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP \ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getkind_np); #else #define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP #endif #if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED INTERCEPTOR_PTHREAD_CONDATTR_GET(pshared, sizeof(int)) #define INIT_PTHREAD_CONDATTR_GETPSHARED \ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getpshared); #else #define INIT_PTHREAD_CONDATTR_GETPSHARED #endif #if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK INTERCEPTOR_PTHREAD_CONDATTR_GET(clock, sizeof(int)) #define INIT_PTHREAD_CONDATTR_GETCLOCK \ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getclock); #else #define INIT_PTHREAD_CONDATTR_GETCLOCK #endif #if SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED INTERCEPTOR_PTHREAD_BARRIERATTR_GET(pshared, sizeof(int)) // !mac !android #define INIT_PTHREAD_BARRIERATTR_GETPSHARED \ COMMON_INTERCEPT_FUNCTION(pthread_barrierattr_getpshared); #else #define INIT_PTHREAD_BARRIERATTR_GETPSHARED #endif #if SANITIZER_INTERCEPT_TMPNAM INTERCEPTOR(char *, tmpnam, char *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, tmpnam, s); char *res = REAL(tmpnam)(s); if (res) { if (s) // FIXME: under ASan the call below may write to 
freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1); else COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1); } return res; } #define INIT_TMPNAM COMMON_INTERCEPT_FUNCTION(tmpnam); #else #define INIT_TMPNAM #endif #if SANITIZER_INTERCEPT_TMPNAM_R INTERCEPTOR(char *, tmpnam_r, char *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, tmpnam_r, s); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. char *res = REAL(tmpnam_r)(s); if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1); return res; } #define INIT_TMPNAM_R COMMON_INTERCEPT_FUNCTION(tmpnam_r); #else #define INIT_TMPNAM_R #endif #if SANITIZER_INTERCEPT_TTYNAME_R INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize); int res = REAL(ttyname_r)(fd, name, namesize); if (res == 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1); return res; } #define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r); #else #define INIT_TTYNAME_R #endif #if SANITIZER_INTERCEPT_TEMPNAM INTERCEPTOR(char *, tempnam, char *dir, char *pfx) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, tempnam, dir, pfx); if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1); if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1); char *res = REAL(tempnam)(dir, pfx); if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1); return res; } #define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam); #else #define INIT_TEMPNAM #endif #if SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name); COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0); COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name); return REAL(pthread_setname_np)(thread, name); } #define INIT_PTHREAD_SETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_setname_np); #else #define INIT_PTHREAD_SETNAME_NP #endif #if SANITIZER_INTERCEPT_SINCOS INTERCEPTOR(void, sincos, double x, double *sin, double *cos) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sincos, x, sin, cos); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. REAL(sincos)(x, sin, cos); if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin)); if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos)); } INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sincosf, x, sin, cos); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. REAL(sincosf)(x, sin, cos); if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin)); if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos)); } INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sincosl, x, sin, cos); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
REAL(sincosl)(x, sin, cos); if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin)); if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos)); } #define INIT_SINCOS \ COMMON_INTERCEPT_FUNCTION(sincos); \ COMMON_INTERCEPT_FUNCTION(sincosf); \ COMMON_INTERCEPT_FUNCTION_LDBL(sincosl); #else #define INIT_SINCOS #endif #if SANITIZER_INTERCEPT_REMQUO INTERCEPTOR(double, remquo, double x, double y, int *quo) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, remquo, x, y, quo); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. double res = REAL(remquo)(x, y, quo); if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo)); return res; } INTERCEPTOR(float, remquof, float x, float y, int *quo) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, remquof, x, y, quo); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. float res = REAL(remquof)(x, y, quo); if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo)); return res; } INTERCEPTOR(long double, remquol, long double x, long double y, int *quo) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, remquol, x, y, quo); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. long double res = REAL(remquol)(x, y, quo); if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo)); return res; } #define INIT_REMQUO \ COMMON_INTERCEPT_FUNCTION(remquo); \ COMMON_INTERCEPT_FUNCTION(remquof); \ COMMON_INTERCEPT_FUNCTION_LDBL(remquol); #else #define INIT_REMQUO #endif #if SANITIZER_INTERCEPT_LGAMMA extern int signgam; INTERCEPTOR(double, lgamma, double x) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, lgamma, x); double res = REAL(lgamma)(x); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam)); return res; } INTERCEPTOR(float, lgammaf, float x) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, lgammaf, x); float res = REAL(lgammaf)(x); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam)); return res; } INTERCEPTOR(long double, lgammal, long double x) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, lgammal, x); long double res = REAL(lgammal)(x); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam)); return res; } #define INIT_LGAMMA \ COMMON_INTERCEPT_FUNCTION(lgamma); \ COMMON_INTERCEPT_FUNCTION(lgammaf); \ COMMON_INTERCEPT_FUNCTION_LDBL(lgammal); #else #define INIT_LGAMMA #endif #if SANITIZER_INTERCEPT_LGAMMA_R INTERCEPTOR(double, lgamma_r, double x, int *signp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, lgamma_r, x, signp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. double res = REAL(lgamma_r)(x, signp); if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp)); return res; } INTERCEPTOR(float, lgammaf_r, float x, int *signp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, lgammaf_r, x, signp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
float res = REAL(lgammaf_r)(x, signp); if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp)); return res; } #define INIT_LGAMMA_R \ COMMON_INTERCEPT_FUNCTION(lgamma_r); \ COMMON_INTERCEPT_FUNCTION(lgammaf_r); #else #define INIT_LGAMMA_R #endif #if SANITIZER_INTERCEPT_LGAMMAL_R INTERCEPTOR(long double, lgammal_r, long double x, int *signp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, lgammal_r, x, signp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. long double res = REAL(lgammal_r)(x, signp); if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp)); return res; } #define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION_LDBL(lgammal_r); #else #define INIT_LGAMMAL_R #endif #if SANITIZER_INTERCEPT_DRAND48_R INTERCEPTOR(int, drand48_r, void *buffer, double *result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, drand48_r, buffer, result); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(drand48_r)(buffer, result); if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); return res; } INTERCEPTOR(int, lrand48_r, void *buffer, long *result) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, lrand48_r, buffer, result); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(lrand48_r)(buffer, result); if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); return res; } #define INIT_DRAND48_R \ COMMON_INTERCEPT_FUNCTION(drand48_r); \ COMMON_INTERCEPT_FUNCTION(lrand48_r); #else #define INIT_DRAND48_R #endif #if SANITIZER_INTERCEPT_RAND_R INTERCEPTOR(int, rand_r, unsigned *seedp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, rand_r, seedp); COMMON_INTERCEPTOR_READ_RANGE(ctx, seedp, sizeof(*seedp)); return REAL(rand_r)(seedp); } #define INIT_RAND_R COMMON_INTERCEPT_FUNCTION(rand_r); #else #define INIT_RAND_R #endif #if SANITIZER_INTERCEPT_GETLINE INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getline, lineptr, n, stream); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(getline)(lineptr, n, stream); if (res > 0) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1); } return res; } // FIXME: under ASan the call below may write to freed memory and corrupt its // metadata. See // https://github.com/google/sanitizers/issues/321. #define GETDELIM_INTERCEPTOR_IMPL(vname) \ { \ void *ctx; \ COMMON_INTERCEPTOR_ENTER(ctx, vname, lineptr, n, delim, stream); \ SSIZE_T res = REAL(vname)(lineptr, n, delim, stream); \ if (res > 0) { \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr)); \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1); \ } \ return res; \ } INTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim, void *stream) GETDELIM_INTERCEPTOR_IMPL(__getdelim) // There's no __getdelim() on FreeBSD so we supply the getdelim() interceptor // with its own body. 
INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim, void *stream) GETDELIM_INTERCEPTOR_IMPL(getdelim) #define INIT_GETLINE \ COMMON_INTERCEPT_FUNCTION(getline); \ COMMON_INTERCEPT_FUNCTION(__getdelim); \ COMMON_INTERCEPT_FUNCTION(getdelim); #else #define INIT_GETLINE #endif #if SANITIZER_INTERCEPT_ICONV INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft, char **outbuf, SIZE_T *outbytesleft) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, iconv, cd, inbuf, inbytesleft, outbuf, outbytesleft); if (inbytesleft) COMMON_INTERCEPTOR_READ_RANGE(ctx, inbytesleft, sizeof(*inbytesleft)); if (inbuf && inbytesleft) COMMON_INTERCEPTOR_READ_RANGE(ctx, *inbuf, *inbytesleft); if (outbytesleft) COMMON_INTERCEPTOR_READ_RANGE(ctx, outbytesleft, sizeof(*outbytesleft)); void *outbuf_orig = outbuf ? *outbuf : nullptr; // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft); if (outbuf && *outbuf > outbuf_orig) { SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig; COMMON_INTERCEPTOR_WRITE_RANGE(ctx, outbuf_orig, sz); } return res; } #define INIT_ICONV COMMON_INTERCEPT_FUNCTION(iconv); #else #define INIT_ICONV #endif #if SANITIZER_INTERCEPT_TIMES INTERCEPTOR(__sanitizer_clock_t, times, void *tms) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, times, tms); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. __sanitizer_clock_t res = REAL(times)(tms); if (res != (__sanitizer_clock_t)-1 && tms) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tms, struct_tms_sz); return res; } #define INIT_TIMES COMMON_INTERCEPT_FUNCTION(times); #else #define INIT_TIMES #endif #if SANITIZER_INTERCEPT_TLS_GET_ADDR #if !SANITIZER_S390 #define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr) // If you see any crashes around this function, there are 2 known issues with // it: 1. __tls_get_addr can be called with mis-aligned stack due to: // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066 // 2. It can be called recursively if sanitizer code uses __tls_get_addr // to access thread local variables (it should not happen normally, // because sanitizers use initial-exec tls model). INTERCEPTOR(void *, __tls_get_addr, void *arg) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg); void *res = REAL(__tls_get_addr)(arg); uptr tls_begin, tls_end; COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end); DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, tls_begin, tls_end); if (dtv) { // New DTLS block has been allocated. COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size); } return res; } #if SANITIZER_PPC // On PowerPC, we also need to intercept __tls_get_addr_opt, which has // mostly the same semantics as __tls_get_addr, but its presence enables // some optimizations in the linker (which are safe to ignore here). extern "C" __attribute__((alias("__interceptor___tls_get_addr"), visibility("default"))) void *__tls_get_addr_opt(void *arg); #endif #else // SANITIZER_S390 // On s390, we have to intercept two functions here: // - __tls_get_addr_internal, which is a glibc-internal function that is like // the usual __tls_get_addr, but returns a TP-relative offset instead of // a proper pointer. It is used by dlsym for TLS symbols.
// - __tls_get_offset, which is like the above, but also takes a GOT-relative // descriptor offset as an argument instead of a pointer. GOT address // is passed in r12, so it's necessary to write it in assembly. This is // the function used by the compiler. extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg)); #define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset) DEFINE_REAL(uptr, __tls_get_offset, void *arg) extern "C" uptr __tls_get_offset(void *arg); extern "C" uptr __interceptor___tls_get_offset(void *arg); INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg); uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset)); uptr tp = reinterpret_cast<uptr>(__builtin_thread_pointer()); void *ptr = reinterpret_cast<void *>(res + tp); uptr tls_begin, tls_end; COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end); DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, ptr, tls_begin, tls_end); if (dtv) { // New DTLS block has been allocated. COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size); } return res; } // We need a hidden symbol aliasing the above, so that we can jump // directly to it from the assembly below. extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"), visibility("hidden"))) uptr __tls_get_addr_hidden(void *arg); // Now carefully intercept __tls_get_offset. asm( ".text\n" // The __intercept_ version has to exist, so that gen_dynamic_list.py // exports our symbol. ".weak __tls_get_offset\n" ".type __tls_get_offset, @function\n" "__tls_get_offset:\n" ".global __interceptor___tls_get_offset\n" ".type __interceptor___tls_get_offset, @function\n" "__interceptor___tls_get_offset:\n" #ifdef __s390x__ "la %r2, 0(%r2,%r12)\n" "jg __tls_get_addr_hidden\n" #else "basr %r3,0\n" "0: la %r2,0(%r2,%r12)\n" "l %r4,1f-0b(%r3)\n" "b 0(%r4,%r3)\n" "1: .long __tls_get_addr_hidden - 0b\n" #endif ".size __interceptor___tls_get_offset, .-__interceptor___tls_get_offset\n" // Assembly wrapper to call REAL(__tls_get_offset)(arg) ".type __tls_get_offset_wrapper, @function\n" "__tls_get_offset_wrapper:\n" #ifdef __s390x__ "sgr %r2,%r12\n" #else "sr %r2,%r12\n" #endif "br %r3\n" ".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\n" ); #endif // SANITIZER_S390 #else #define INIT_TLS_GET_ADDR #endif #if SANITIZER_INTERCEPT_LISTXATTR INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size); if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(listxattr)(path, list, size); // Here and below, size == 0 is a special case where nothing is written to the // buffer, and res contains the desired buffer size. if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res); return res; } INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size); if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(llistxattr)(path, list, size); if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res); return res; } INTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(flistxattr)(fd, list, size); if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res); return res; } #define INIT_LISTXATTR \ COMMON_INTERCEPT_FUNCTION(listxattr); \ COMMON_INTERCEPT_FUNCTION(llistxattr); \ COMMON_INTERCEPT_FUNCTION(flistxattr); #else #define INIT_LISTXATTR #endif #if SANITIZER_INTERCEPT_GETXATTR INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value, SIZE_T size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size); if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(getxattr)(path, name, value, size); if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res); return res; } INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value, SIZE_T size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size); if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(lgetxattr)(path, name, value, size); if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res); return res; } INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value, SIZE_T size) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size); if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. SSIZE_T res = REAL(fgetxattr)(fd, name, value, size); if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res); return res; } #define INIT_GETXATTR \ COMMON_INTERCEPT_FUNCTION(getxattr); \ COMMON_INTERCEPT_FUNCTION(lgetxattr); \ COMMON_INTERCEPT_FUNCTION(fgetxattr); #else #define INIT_GETXATTR #endif #if SANITIZER_INTERCEPT_GETRESID INTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(getresuid)(ruid, euid, suid); if (res >= 0) { if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz); if (euid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, euid, uid_t_sz); if (suid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, suid, uid_t_sz); } return res; } INTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. 
See // https://github.com/google/sanitizers/issues/321. int res = REAL(getresgid)(rgid, egid, sgid); if (res >= 0) { if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz); if (egid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, egid, gid_t_sz); if (sgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sgid, gid_t_sz); } return res; } #define INIT_GETRESID \ COMMON_INTERCEPT_FUNCTION(getresuid); \ COMMON_INTERCEPT_FUNCTION(getresgid); #else #define INIT_GETRESID #endif #if SANITIZER_INTERCEPT_GETIFADDRS // As long as getifaddrs()/freeifaddrs() use calloc()/free(), we don't need to // intercept freeifaddrs(). If that ceases to be the case, we might need to // intercept it to poison the memory again. INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(getifaddrs)(ifap); if (res == 0 && ifap) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *)); __sanitizer_ifaddrs *p = *ifap; while (p) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs)); if (p->ifa_name) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name, REAL(strlen)(p->ifa_name) + 1); if (p->ifa_addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz); if (p->ifa_netmask) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_netmask, struct_sockaddr_sz); // On Linux this is a union, but the other member also points to a // struct sockaddr, so the following is sufficient. if (p->ifa_dstaddr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_dstaddr, struct_sockaddr_sz); // FIXME(smatveev): Unpoison p->ifa_data as well. p = p->ifa_next; } } return res; } #define INIT_GETIFADDRS \ COMMON_INTERCEPT_FUNCTION(getifaddrs); #else #define INIT_GETIFADDRS #endif #if SANITIZER_INTERCEPT_IF_INDEXTONAME INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. char *res = REAL(if_indextoname)(ifindex, ifname); if (res && ifname) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1); return res; } INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname); if (ifname) COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1); return REAL(if_nametoindex)(ifname); } #define INIT_IF_INDEXTONAME \ COMMON_INTERCEPT_FUNCTION(if_indextoname); \ COMMON_INTERCEPT_FUNCTION(if_nametoindex); #else #define INIT_IF_INDEXTONAME #endif #if SANITIZER_INTERCEPT_CAPGET INTERCEPTOR(int, capget, void *hdrp, void *datap) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, capget, hdrp, datap); if (hdrp) COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(capget)(hdrp, datap); if (res == 0 && datap) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz); // We can also return -1 and write to hdrp->version if the version passed in // hdrp->version is unsupported. But that's not a trivial condition to check, // and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent. 
return res; } INTERCEPTOR(int, capset, void *hdrp, const void *datap) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap); if (hdrp) COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz); if (datap) COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz); return REAL(capset)(hdrp, datap); } #define INIT_CAPGET \ COMMON_INTERCEPT_FUNCTION(capget); \ COMMON_INTERCEPT_FUNCTION(capset); #else #define INIT_CAPGET #endif #if SANITIZER_INTERCEPT_AEABI_MEM INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size); } INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size); } INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size); } INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size); } INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size); } INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size); } // Note the argument order. INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) { void *ctx; COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size); } INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) { void *ctx; COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size); } INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) { void *ctx; COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size); } INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size); } INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size); } INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size); } #define INIT_AEABI_MEM \ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8); #else #define INIT_AEABI_MEM #endif // SANITIZER_INTERCEPT_AEABI_MEM #if SANITIZER_INTERCEPT___BZERO INTERCEPTOR(void *, __bzero, void *block, uptr size) { void *ctx; COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size); } #define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero); #else #define INIT___BZERO #endif // SANITIZER_INTERCEPT___BZERO #if SANITIZER_INTERCEPT_FTIME INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
int res = REAL(ftime)(tp); if (tp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp)); return res; } #define INIT_FTIME COMMON_INTERCEPT_FUNCTION(ftime); #else #define INIT_FTIME #endif // SANITIZER_INTERCEPT_FTIME #if SANITIZER_INTERCEPT_XDR INTERCEPTOR(void, xdrmem_create, __sanitizer_XDR *xdrs, uptr addr, unsigned size, int op) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. REAL(xdrmem_create)(xdrs, addr, size, op); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs)); if (op == __sanitizer_XDR_ENCODE) { // It's not obvious how much data individual xdr_ routines write. // Simply unpoison the entire target buffer in advance. COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (void *)addr, size); } } INTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. REAL(xdrstdio_create)(xdrs, file, op); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs)); } // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. #define XDR_INTERCEPTOR(F, T) \ INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) { \ void *ctx; \ COMMON_INTERCEPTOR_ENTER(ctx, F, xdrs, p); \ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) \ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p)); \ int res = REAL(F)(xdrs, p); \ if (res && p && xdrs->x_op == __sanitizer_XDR_DECODE) \ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); \ return res; \ } XDR_INTERCEPTOR(xdr_short, short) XDR_INTERCEPTOR(xdr_u_short, unsigned short) XDR_INTERCEPTOR(xdr_int, int) XDR_INTERCEPTOR(xdr_u_int, unsigned) XDR_INTERCEPTOR(xdr_long, long) XDR_INTERCEPTOR(xdr_u_long, unsigned long) XDR_INTERCEPTOR(xdr_hyper, long long) XDR_INTERCEPTOR(xdr_u_hyper, unsigned long long) XDR_INTERCEPTOR(xdr_longlong_t, long long) XDR_INTERCEPTOR(xdr_u_longlong_t, unsigned long long) XDR_INTERCEPTOR(xdr_int8_t, u8) XDR_INTERCEPTOR(xdr_uint8_t, u8) XDR_INTERCEPTOR(xdr_int16_t, u16) XDR_INTERCEPTOR(xdr_uint16_t, u16) XDR_INTERCEPTOR(xdr_int32_t, u32) XDR_INTERCEPTOR(xdr_uint32_t, u32) XDR_INTERCEPTOR(xdr_int64_t, u64) XDR_INTERCEPTOR(xdr_uint64_t, u64) XDR_INTERCEPTOR(xdr_quad_t, long long) XDR_INTERCEPTOR(xdr_u_quad_t, unsigned long long) XDR_INTERCEPTOR(xdr_bool, bool) XDR_INTERCEPTOR(xdr_enum, int) XDR_INTERCEPTOR(xdr_char, char) XDR_INTERCEPTOR(xdr_u_char, unsigned char) XDR_INTERCEPTOR(xdr_float, float) XDR_INTERCEPTOR(xdr_double, double) // FIXME: intercept xdr_array, opaque, union, vector, reference, pointer, // wrapstring, sizeof INTERCEPTOR(int, xdr_bytes, __sanitizer_XDR *xdrs, char **p, unsigned *sizep, unsigned maxsize) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, xdr_bytes, xdrs, p, sizep, maxsize); if (p && sizep && xdrs->x_op == __sanitizer_XDR_ENCODE) { COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p)); COMMON_INTERCEPTOR_READ_RANGE(ctx, sizep, sizeof(*sizep)); COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, *sizep); } // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize); if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizep, sizeof(*sizep)); if (res && *p && *sizep) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, *sizep); } return res; } INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p, unsigned maxsize) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize); if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) { COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p)); COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1); } // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. int res = REAL(xdr_string)(xdrs, p, maxsize); if (p && xdrs->x_op == __sanitizer_XDR_DECODE) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); if (res && *p) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1); } return res; } #define INIT_XDR \ COMMON_INTERCEPT_FUNCTION(xdrmem_create); \ COMMON_INTERCEPT_FUNCTION(xdrstdio_create); \ COMMON_INTERCEPT_FUNCTION(xdr_short); \ COMMON_INTERCEPT_FUNCTION(xdr_u_short); \ COMMON_INTERCEPT_FUNCTION(xdr_int); \ COMMON_INTERCEPT_FUNCTION(xdr_u_int); \ COMMON_INTERCEPT_FUNCTION(xdr_long); \ COMMON_INTERCEPT_FUNCTION(xdr_u_long); \ COMMON_INTERCEPT_FUNCTION(xdr_hyper); \ COMMON_INTERCEPT_FUNCTION(xdr_u_hyper); \ COMMON_INTERCEPT_FUNCTION(xdr_longlong_t); \ COMMON_INTERCEPT_FUNCTION(xdr_u_longlong_t); \ COMMON_INTERCEPT_FUNCTION(xdr_int8_t); \ COMMON_INTERCEPT_FUNCTION(xdr_uint8_t); \ COMMON_INTERCEPT_FUNCTION(xdr_int16_t); \ COMMON_INTERCEPT_FUNCTION(xdr_uint16_t); \ COMMON_INTERCEPT_FUNCTION(xdr_int32_t); \ COMMON_INTERCEPT_FUNCTION(xdr_uint32_t); \ COMMON_INTERCEPT_FUNCTION(xdr_int64_t); \ COMMON_INTERCEPT_FUNCTION(xdr_uint64_t); \ COMMON_INTERCEPT_FUNCTION(xdr_quad_t); \ COMMON_INTERCEPT_FUNCTION(xdr_u_quad_t); \ COMMON_INTERCEPT_FUNCTION(xdr_bool); \ COMMON_INTERCEPT_FUNCTION(xdr_enum); \ COMMON_INTERCEPT_FUNCTION(xdr_char); \ COMMON_INTERCEPT_FUNCTION(xdr_u_char); \ COMMON_INTERCEPT_FUNCTION(xdr_float); \ COMMON_INTERCEPT_FUNCTION(xdr_double); \ COMMON_INTERCEPT_FUNCTION(xdr_bytes); \ COMMON_INTERCEPT_FUNCTION(xdr_string); #else #define INIT_XDR #endif // SANITIZER_INTERCEPT_XDR #if SANITIZER_INTERCEPT_TSEARCH INTERCEPTOR(void *, tsearch, void *key, void **rootp, int (*compar)(const void *, const void *)) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. void *res = REAL(tsearch)(key, rootp, compar); if (res && *(void **)res == key) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *)); return res; } #define INIT_TSEARCH COMMON_INTERCEPT_FUNCTION(tsearch); #else #define INIT_TSEARCH #endif #if SANITIZER_INTERCEPT_LIBIO_INTERNALS || SANITIZER_INTERCEPT_FOPEN || \ SANITIZER_INTERCEPT_OPEN_MEMSTREAM void unpoison_file(__sanitizer_FILE *fp) { #if SANITIZER_HAS_STRUCT_FILE COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp, sizeof(*fp)); if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end) COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base, fp->_IO_read_end - fp->_IO_read_base); #endif // SANITIZER_HAS_STRUCT_FILE } #endif #if SANITIZER_INTERCEPT_LIBIO_INTERNALS // These guys are called when a .c source is built with -O2. 
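// Illustrative sketch (assumes glibc's stdio implementation): optimized
// builds can expand the getc_unlocked() fast-path macro so that a buffer
// refill calls __uflow() directly instead of going through fgetc(), which
// is why the internal entry points below are intercepted.
#if 0
#include <stdio.h>
int read_first_byte(FILE *f) {
  // With -O2 this can lower to: buffer empty ? __uflow(f) : *read_ptr++.
  return getc_unlocked(f);
}
#endif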
INTERCEPTOR(int, __uflow, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __uflow, fp); int res = REAL(__uflow)(fp); unpoison_file(fp); return res; } INTERCEPTOR(int, __underflow, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __underflow, fp); int res = REAL(__underflow)(fp); unpoison_file(fp); return res; } INTERCEPTOR(int, __overflow, __sanitizer_FILE *fp, int ch) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __overflow, fp, ch); int res = REAL(__overflow)(fp, ch); unpoison_file(fp); return res; } INTERCEPTOR(int, __wuflow, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __wuflow, fp); int res = REAL(__wuflow)(fp); unpoison_file(fp); return res; } INTERCEPTOR(int, __wunderflow, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __wunderflow, fp); int res = REAL(__wunderflow)(fp); unpoison_file(fp); return res; } INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __woverflow, fp, ch); int res = REAL(__woverflow)(fp, ch); unpoison_file(fp); return res; } #define INIT_LIBIO_INTERNALS \ COMMON_INTERCEPT_FUNCTION(__uflow); \ COMMON_INTERCEPT_FUNCTION(__underflow); \ COMMON_INTERCEPT_FUNCTION(__overflow); \ COMMON_INTERCEPT_FUNCTION(__wuflow); \ COMMON_INTERCEPT_FUNCTION(__wunderflow); \ COMMON_INTERCEPT_FUNCTION(__woverflow); #else #define INIT_LIBIO_INTERNALS #endif #if SANITIZER_INTERCEPT_FOPEN INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode); if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1); __sanitizer_FILE *res = REAL(fopen)(path, mode); COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path); if (res) unpoison_file(res); return res; } INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode); COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1); __sanitizer_FILE *res = REAL(fdopen)(fd, mode); if (res) unpoison_file(res); return res; } INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp); if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1); COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp); __sanitizer_FILE *res = REAL(freopen)(path, mode, fp); COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path); if (res) unpoison_file(res); return res; } #define INIT_FOPEN \ COMMON_INTERCEPT_FUNCTION(fopen); \ COMMON_INTERCEPT_FUNCTION(fdopen); \ COMMON_INTERCEPT_FUNCTION(freopen); #else #define INIT_FOPEN #endif #if SANITIZER_INTERCEPT_FOPEN64 INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode); COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1); __sanitizer_FILE *res = REAL(fopen64)(path, mode); COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path); if (res) unpoison_file(res); return res; } INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp); if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1); 
COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp); __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp); COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path); if (res) unpoison_file(res); return res; } #define INIT_FOPEN64 \ COMMON_INTERCEPT_FUNCTION(fopen64); \ COMMON_INTERCEPT_FUNCTION(freopen64); #else #define INIT_FOPEN64 #endif #if SANITIZER_INTERCEPT_OPEN_MEMSTREAM INTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. __sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc); if (res) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc)); unpoison_file(res); FileMetadata file = {ptr, sizeloc}; SetInterceptorMetadata(res, file); } return res; } INTERCEPTOR(__sanitizer_FILE *, open_wmemstream, wchar_t **ptr, SIZE_T *sizeloc) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, open_wmemstream, ptr, sizeloc); __sanitizer_FILE *res = REAL(open_wmemstream)(ptr, sizeloc); if (res) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc)); unpoison_file(res); FileMetadata file = {(char **)ptr, sizeloc}; SetInterceptorMetadata(res, file); } return res; } INTERCEPTOR(__sanitizer_FILE *, fmemopen, void *buf, SIZE_T size, const char *mode) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode); // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. __sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode); if (res) unpoison_file(res); return res; } #define INIT_OPEN_MEMSTREAM \ COMMON_INTERCEPT_FUNCTION(open_memstream); \ COMMON_INTERCEPT_FUNCTION(open_wmemstream); \ COMMON_INTERCEPT_FUNCTION(fmemopen); #else #define INIT_OPEN_MEMSTREAM #endif #if SANITIZER_INTERCEPT_OBSTACK static void initialize_obstack(__sanitizer_obstack *obstack) { COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack, sizeof(*obstack)); if (obstack->chunk) COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack->chunk, sizeof(*obstack->chunk)); } INTERCEPTOR(int, _obstack_begin_1, __sanitizer_obstack *obstack, int sz, int align, void *(*alloc_fn)(uptr arg, uptr sz), void (*free_fn)(uptr arg, void *p)) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin_1, obstack, sz, align, alloc_fn, free_fn); int res = REAL(_obstack_begin_1)(obstack, sz, align, alloc_fn, free_fn); if (res) initialize_obstack(obstack); return res; } INTERCEPTOR(int, _obstack_begin, __sanitizer_obstack *obstack, int sz, int align, void *(*alloc_fn)(uptr sz), void (*free_fn)(void *p)) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin, obstack, sz, align, alloc_fn, free_fn); int res = REAL(_obstack_begin)(obstack, sz, align, alloc_fn, free_fn); if (res) initialize_obstack(obstack); return res; } INTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, _obstack_newchunk, obstack, length); REAL(_obstack_newchunk)(obstack, length); if (obstack->chunk) COMMON_INTERCEPTOR_INITIALIZE_RANGE( obstack->chunk, obstack->next_free - (char *)obstack->chunk); } #define INIT_OBSTACK \ COMMON_INTERCEPT_FUNCTION(_obstack_begin_1); \ COMMON_INTERCEPT_FUNCTION(_obstack_begin); \ COMMON_INTERCEPT_FUNCTION(_obstack_newchunk); #else #define INIT_OBSTACK #endif #if 
SANITIZER_INTERCEPT_FFLUSH INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp); int res = REAL(fflush)(fp); // FIXME: handle fp == NULL if (fp) { const FileMetadata *m = GetInterceptorMetadata(fp); if (m) COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size); } return res; } #define INIT_FFLUSH COMMON_INTERCEPT_FUNCTION(fflush); #else #define INIT_FFLUSH #endif #if SANITIZER_INTERCEPT_FCLOSE INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp); COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp); const FileMetadata *m = GetInterceptorMetadata(fp); int res = REAL(fclose)(fp); if (m) { COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size); DeleteInterceptorMetadata(fp); } return res; } #define INIT_FCLOSE COMMON_INTERCEPT_FUNCTION(fclose); #else #define INIT_FCLOSE #endif #if SANITIZER_INTERCEPT_DLOPEN_DLCLOSE INTERCEPTOR(void*, dlopen, const char *filename, int flag) { void *ctx; COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag); if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0); COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag); void *res = REAL(dlopen)(filename, flag); COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res); return res; } INTERCEPTOR(int, dlclose, void *handle) { void *ctx; COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlclose, handle); int res = REAL(dlclose)(handle); COMMON_INTERCEPTOR_LIBRARY_UNLOADED(); return res; } #define INIT_DLOPEN_DLCLOSE \ COMMON_INTERCEPT_FUNCTION(dlopen); \ COMMON_INTERCEPT_FUNCTION(dlclose); #else #define INIT_DLOPEN_DLCLOSE #endif #if SANITIZER_INTERCEPT_GETPASS INTERCEPTOR(char *, getpass, const char *prompt) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt); if (prompt) COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1); char *res = REAL(getpass)(prompt); if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1); return res; } #define INIT_GETPASS COMMON_INTERCEPT_FUNCTION(getpass); #else #define INIT_GETPASS #endif #if SANITIZER_INTERCEPT_TIMERFD INTERCEPTOR(int, timerfd_settime, int fd, int flags, void *new_value, void *old_value) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, timerfd_settime, fd, flags, new_value, old_value); COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerspec_sz); int res = REAL(timerfd_settime)(fd, flags, new_value, old_value); if (res != -1 && old_value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerspec_sz); return res; } INTERCEPTOR(int, timerfd_gettime, int fd, void *curr_value) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, timerfd_gettime, fd, curr_value); int res = REAL(timerfd_gettime)(fd, curr_value); if (res != -1 && curr_value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerspec_sz); return res; } #define INIT_TIMERFD \ COMMON_INTERCEPT_FUNCTION(timerfd_settime); \ COMMON_INTERCEPT_FUNCTION(timerfd_gettime); #else #define INIT_TIMERFD #endif #if SANITIZER_INTERCEPT_MLOCKX // Linux kernel has a bug that leads to kernel deadlock if a process // maps TBs of memory and then calls mlock(). 
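// Illustrative sketch of the resulting behavior: because sanitizers map
// huge shadow regions, the interceptors below turn the locking calls into
// no-ops that report success, so a caller simply observes:
#if 0
#include <sys/mman.h>
int main(void) {
  int r1 = mlockall(MCL_CURRENT);  // returns 0 under a sanitizer; locks nothing
  int r2 = munlockall();           // returns 0 as well
  return r1 | r2;
}
#endif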
static void MlockIsUnsupported() { static atomic_uint8_t printed; if (atomic_exchange(&printed, 1, memory_order_relaxed)) return; VPrintf(1, "%s ignores mlock/mlockall/munlock/munlockall\n", SanitizerToolName); } INTERCEPTOR(int, mlock, const void *addr, uptr len) { MlockIsUnsupported(); return 0; } INTERCEPTOR(int, munlock, const void *addr, uptr len) { MlockIsUnsupported(); return 0; } INTERCEPTOR(int, mlockall, int flags) { MlockIsUnsupported(); return 0; } INTERCEPTOR(int, munlockall, void) { MlockIsUnsupported(); return 0; } #define INIT_MLOCKX \ COMMON_INTERCEPT_FUNCTION(mlock); \ COMMON_INTERCEPT_FUNCTION(munlock); \ COMMON_INTERCEPT_FUNCTION(mlockall); \ COMMON_INTERCEPT_FUNCTION(munlockall); #else #define INIT_MLOCKX #endif // SANITIZER_INTERCEPT_MLOCKX #if SANITIZER_INTERCEPT_FOPENCOOKIE struct WrappedCookie { void *real_cookie; __sanitizer_cookie_io_functions_t real_io_funcs; }; static uptr wrapped_read(void *cookie, char *buf, uptr size) { COMMON_INTERCEPTOR_UNPOISON_PARAM(3); WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie; __sanitizer_cookie_io_read real_read = wrapped_cookie->real_io_funcs.read; return real_read ? real_read(wrapped_cookie->real_cookie, buf, size) : 0; } static uptr wrapped_write(void *cookie, const char *buf, uptr size) { COMMON_INTERCEPTOR_UNPOISON_PARAM(3); WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie; __sanitizer_cookie_io_write real_write = wrapped_cookie->real_io_funcs.write; return real_write ? real_write(wrapped_cookie->real_cookie, buf, size) : size; } static int wrapped_seek(void *cookie, u64 *offset, int whence) { COMMON_INTERCEPTOR_UNPOISON_PARAM(3); COMMON_INTERCEPTOR_INITIALIZE_RANGE(offset, sizeof(*offset)); WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie; __sanitizer_cookie_io_seek real_seek = wrapped_cookie->real_io_funcs.seek; return real_seek ? real_seek(wrapped_cookie->real_cookie, offset, whence) : -1; } static int wrapped_close(void *cookie) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie; __sanitizer_cookie_io_close real_close = wrapped_cookie->real_io_funcs.close; int res = real_close ? real_close(wrapped_cookie->real_cookie) : 0; InternalFree(wrapped_cookie); return res; } INTERCEPTOR(__sanitizer_FILE *, fopencookie, void *cookie, const char *mode, __sanitizer_cookie_io_functions_t io_funcs) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fopencookie, cookie, mode, io_funcs); WrappedCookie *wrapped_cookie = (WrappedCookie *)InternalAlloc(sizeof(WrappedCookie)); wrapped_cookie->real_cookie = cookie; wrapped_cookie->real_io_funcs = io_funcs; __sanitizer_FILE *res = REAL(fopencookie)(wrapped_cookie, mode, {wrapped_read, wrapped_write, wrapped_seek, wrapped_close}); return res; } #define INIT_FOPENCOOKIE COMMON_INTERCEPT_FUNCTION(fopencookie); #else #define INIT_FOPENCOOKIE #endif // SANITIZER_INTERCEPT_FOPENCOOKIE #if SANITIZER_INTERCEPT_SEM INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value); // Workaround a bug in glibc's "old" semaphore implementation by // zero-initializing the sem_t contents. This has to be done here because // interceptors bind to the lowest symbols version by default, hitting the // buggy code path while the non-sanitized build of the same code works fine. 
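// Illustrative sketch (hypothetical repro): without the zero-fill below,
// fields that the old, lowest-version sem_init never writes would keep
// whatever bytes were already on the stack, confusing newer glibc code
// that reads them.
#if 0
#include <semaphore.h>
void demo(void) {
  sem_t sem;  // stack garbage until initialized
  sem_init(&sem, /*pshared=*/0, /*value=*/1);
  sem_wait(&sem);  // may touch fields the old sem_init skipped
  sem_post(&sem);
  sem_destroy(&sem);
}
#endif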
REAL(memset)(s, 0, sizeof(*s)); int res = REAL(sem_init)(s, pshared, value); return res; } INTERCEPTOR(int, sem_destroy, __sanitizer_sem_t *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sem_destroy, s); int res = REAL(sem_destroy)(s); return res; } INTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sem_wait, s); int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_wait)(s); if (res == 0) { COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s); } return res; } INTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s); int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_trywait)(s); if (res == 0) { COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s); } return res; } INTERCEPTOR(int, sem_timedwait, __sanitizer_sem_t *s, void *abstime) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sem_timedwait, s, abstime); COMMON_INTERCEPTOR_READ_RANGE(ctx, abstime, struct_timespec_sz); int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_timedwait)(s, abstime); if (res == 0) { COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s); } return res; } INTERCEPTOR(int, sem_post, __sanitizer_sem_t *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sem_post, s); COMMON_INTERCEPTOR_RELEASE(ctx, (uptr)s); int res = REAL(sem_post)(s); return res; } INTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sem_getvalue, s, sval); int res = REAL(sem_getvalue)(s, sval); if (res == 0) { COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sval, sizeof(*sval)); } return res; } #define INIT_SEM \ COMMON_INTERCEPT_FUNCTION(sem_init); \ COMMON_INTERCEPT_FUNCTION(sem_destroy); \ COMMON_INTERCEPT_FUNCTION(sem_wait); \ COMMON_INTERCEPT_FUNCTION(sem_trywait); \ COMMON_INTERCEPT_FUNCTION(sem_timedwait); \ COMMON_INTERCEPT_FUNCTION(sem_post); \ COMMON_INTERCEPT_FUNCTION(sem_getvalue); #else #define INIT_SEM #endif // SANITIZER_INTERCEPT_SEM #if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL INTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcancelstate, state, oldstate); int res = REAL(pthread_setcancelstate)(state, oldstate); if (res == 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldstate, sizeof(*oldstate)); return res; } INTERCEPTOR(int, pthread_setcanceltype, int type, int *oldtype) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcanceltype, type, oldtype); int res = REAL(pthread_setcanceltype)(type, oldtype); if (res == 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldtype, sizeof(*oldtype)); return res; } #define INIT_PTHREAD_SETCANCEL \ COMMON_INTERCEPT_FUNCTION(pthread_setcancelstate); \ COMMON_INTERCEPT_FUNCTION(pthread_setcanceltype); #else #define INIT_PTHREAD_SETCANCEL #endif #if SANITIZER_INTERCEPT_MINCORE INTERCEPTOR(int, mincore, void *addr, uptr length, unsigned char *vec) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, mincore, addr, length, vec); int res = REAL(mincore)(addr, length, vec); if (res == 0) { uptr page_size = GetPageSizeCached(); uptr vec_size = ((length + page_size - 1) & (~(page_size - 1))) / page_size; COMMON_INTERCEPTOR_WRITE_RANGE(ctx, vec, vec_size); } return res; } #define INIT_MINCORE COMMON_INTERCEPT_FUNCTION(mincore); #else #define INIT_MINCORE #endif #if SANITIZER_INTERCEPT_PROCESS_VM_READV INTERCEPTOR(SSIZE_T, process_vm_readv, int pid, __sanitizer_iovec *local_iov, uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt, uptr flags) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, process_vm_readv, pid, local_iov, liovcnt, remote_iov, riovcnt, 
flags); SSIZE_T res = REAL(process_vm_readv)(pid, local_iov, liovcnt, remote_iov, riovcnt, flags); if (res > 0) write_iovec(ctx, local_iov, liovcnt, res); return res; } INTERCEPTOR(SSIZE_T, process_vm_writev, int pid, __sanitizer_iovec *local_iov, uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt, uptr flags) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, process_vm_writev, pid, local_iov, liovcnt, remote_iov, riovcnt, flags); SSIZE_T res = REAL(process_vm_writev)(pid, local_iov, liovcnt, remote_iov, riovcnt, flags); if (res > 0) read_iovec(ctx, local_iov, liovcnt, res); return res; } #define INIT_PROCESS_VM_READV \ COMMON_INTERCEPT_FUNCTION(process_vm_readv); \ COMMON_INTERCEPT_FUNCTION(process_vm_writev); #else #define INIT_PROCESS_VM_READV #endif #if SANITIZER_INTERCEPT_CTERMID INTERCEPTOR(char *, ctermid, char *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, ctermid, s); char *res = REAL(ctermid)(s); if (res) { COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1); } return res; } #define INIT_CTERMID COMMON_INTERCEPT_FUNCTION(ctermid); #else #define INIT_CTERMID #endif #if SANITIZER_INTERCEPT_CTERMID_R INTERCEPTOR(char *, ctermid_r, char *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, ctermid_r, s); char *res = REAL(ctermid_r)(s); if (res) { COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1); } return res; } #define INIT_CTERMID_R COMMON_INTERCEPT_FUNCTION(ctermid_r); #else #define INIT_CTERMID_R #endif #if SANITIZER_INTERCEPT_RECV_RECVFROM INTERCEPTOR(SSIZE_T, recv, int fd, void *buf, SIZE_T len, int flags) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, recv, fd, buf, len, flags); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); SSIZE_T res = REAL(recv)(fd, buf, len, flags); if (res > 0) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len)); } if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags, void *srcaddr, int *addrlen) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, recvfrom, fd, buf, len, flags, srcaddr, addrlen); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); SIZE_T srcaddr_sz; if (srcaddr) srcaddr_sz = *addrlen; (void)srcaddr_sz; // prevent "set but not used" warning SSIZE_T res = REAL(recvfrom)(fd, buf, len, flags, srcaddr, addrlen); if (res > 0) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len)); if (srcaddr) COMMON_INTERCEPTOR_INITIALIZE_RANGE(srcaddr, Min((SIZE_T)*addrlen, srcaddr_sz)); } return res; } #define INIT_RECV_RECVFROM \ COMMON_INTERCEPT_FUNCTION(recv); \ COMMON_INTERCEPT_FUNCTION(recvfrom); #else #define INIT_RECV_RECVFROM #endif #if SANITIZER_INTERCEPT_SEND_SENDTO INTERCEPTOR(SSIZE_T, send, int fd, void *buf, SIZE_T len, int flags) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, send, fd, buf, len, flags); if (fd >= 0) { COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); } SSIZE_T res = REAL(send)(fd, buf, len, flags); if (common_flags()->intercept_send && res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len)); return res; } INTERCEPTOR(SSIZE_T, sendto, int fd, void *buf, SIZE_T len, int flags, void *dstaddr, int addrlen) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, sendto, fd, buf, len, flags, dstaddr, addrlen); if (fd >= 0) { COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); } // Can't check dstaddr as it may have uninitialized padding at the end. 
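// (For example, a caller may legitimately pass a sockaddr_in whose sin_zero
// padding bytes were never written; checking all addrlen bytes of dstaddr
// would report a false positive on them.)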
SSIZE_T res = REAL(sendto)(fd, buf, len, flags, dstaddr, addrlen); if (common_flags()->intercept_send && res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len)); return res; } #define INIT_SEND_SENDTO \ COMMON_INTERCEPT_FUNCTION(send); \ COMMON_INTERCEPT_FUNCTION(sendto); #else #define INIT_SEND_SENDTO #endif #if SANITIZER_INTERCEPT_EVENTFD_READ_WRITE INTERCEPTOR(int, eventfd_read, int fd, u64 *value) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, eventfd_read, fd, value); COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); int res = REAL(eventfd_read)(fd, value); if (res == 0) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, sizeof(*value)); if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); } return res; } INTERCEPTOR(int, eventfd_write, int fd, u64 value) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, eventfd_write, fd, value); if (fd >= 0) { COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); } int res = REAL(eventfd_write)(fd, value); return res; } #define INIT_EVENTFD_READ_WRITE \ COMMON_INTERCEPT_FUNCTION(eventfd_read); \ COMMON_INTERCEPT_FUNCTION(eventfd_write) #else #define INIT_EVENTFD_READ_WRITE #endif #if SANITIZER_INTERCEPT_STAT INTERCEPTOR(int, stat, const char *path, void *buf) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, stat, path, buf); if (common_flags()->intercept_stat) COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0); int res = REAL(stat)(path, buf); if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz); return res; } #define INIT_STAT COMMON_INTERCEPT_FUNCTION(stat) #else #define INIT_STAT #endif #if SANITIZER_INTERCEPT___XSTAT INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __xstat, version, path, buf); if (common_flags()->intercept_stat) COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0); int res = REAL(__xstat)(version, path, buf); if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz); return res; } #define INIT___XSTAT COMMON_INTERCEPT_FUNCTION(__xstat) #else #define INIT___XSTAT #endif #if SANITIZER_INTERCEPT___XSTAT64 INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __xstat64, version, path, buf); if (common_flags()->intercept_stat) COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0); int res = REAL(__xstat64)(version, path, buf); if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz); return res; } #define INIT___XSTAT64 COMMON_INTERCEPT_FUNCTION(__xstat64) #else #define INIT___XSTAT64 #endif #if SANITIZER_INTERCEPT___LXSTAT INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __lxstat, version, path, buf); if (common_flags()->intercept_stat) COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0); int res = REAL(__lxstat)(version, path, buf); if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz); return res; } #define INIT___LXSTAT COMMON_INTERCEPT_FUNCTION(__lxstat) #else #define INIT___LXSTAT #endif #if SANITIZER_INTERCEPT___LXSTAT64 INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __lxstat64, version, path, buf); if (common_flags()->intercept_stat) COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0); int res = REAL(__lxstat64)(version, path, buf); if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz); return res; } #define INIT___LXSTAT64 COMMON_INTERCEPT_FUNCTION(__lxstat64) #else #define 
INIT___LXSTAT64 #endif // FIXME: add other *stat interceptor #if SANITIZER_INTERCEPT_UTMP INTERCEPTOR(void *, getutent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getutent, dummy); void *res = REAL(getutent)(dummy); if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz); return res; } INTERCEPTOR(void *, getutid, void *ut) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getutid, ut); void *res = REAL(getutid)(ut); if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz); return res; } INTERCEPTOR(void *, getutline, void *ut) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getutline, ut); void *res = REAL(getutline)(ut); if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz); return res; } #define INIT_UTMP \ COMMON_INTERCEPT_FUNCTION(getutent); \ COMMON_INTERCEPT_FUNCTION(getutid); \ COMMON_INTERCEPT_FUNCTION(getutline); #else #define INIT_UTMP #endif #if SANITIZER_INTERCEPT_UTMPX INTERCEPTOR(void *, getutxent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getutxent, dummy); void *res = REAL(getutxent)(dummy); if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz); return res; } INTERCEPTOR(void *, getutxid, void *ut) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getutxid, ut); void *res = REAL(getutxid)(ut); if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz); return res; } INTERCEPTOR(void *, getutxline, void *ut) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getutxline, ut); void *res = REAL(getutxline)(ut); if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz); return res; } #define INIT_UTMPX \ COMMON_INTERCEPT_FUNCTION(getutxent); \ COMMON_INTERCEPT_FUNCTION(getutxid); \ COMMON_INTERCEPT_FUNCTION(getutxline); #else #define INIT_UTMPX #endif #if SANITIZER_INTERCEPT_GETLOADAVG INTERCEPTOR(int, getloadavg, double *loadavg, int nelem) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getloadavg, loadavg, nelem); int res = REAL(getloadavg)(loadavg, nelem); if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, loadavg, res * sizeof(*loadavg)); return res; } #define INIT_GETLOADAVG \ COMMON_INTERCEPT_FUNCTION(getloadavg); #else #define INIT_GETLOADAVG #endif #if SANITIZER_INTERCEPT_MCHECK_MPROBE INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) { return 0; } INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) { return 0; } INTERCEPTOR(int, mprobe, void *ptr) { return 0; } #endif INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s); SIZE_T res = REAL(wcslen)(s); COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (res + 1)); return res; } INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, wcsnlen, s, n); SIZE_T res = REAL(wcsnlen)(s, n); COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * Min(res + 1, n)); return res; } #define INIT_WCSLEN \ COMMON_INTERCEPT_FUNCTION(wcslen); \ COMMON_INTERCEPT_FUNCTION(wcsnlen); #if SANITIZER_INTERCEPT_WCSCAT INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src); SIZE_T src_size = REAL(wcslen)(src); SIZE_T dst_size = REAL(wcslen)(dst); COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t)); COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size, (src_size + 1) * sizeof(wchar_t)); return REAL(wcscat)(dst, src); // NOLINT } 
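// Note: the write range recorded above starts at dst's old terminating NUL
// (dst + dst_size) and covers src_size + 1 wide characters, i.e. exactly the
// region REAL(wcscat) overwrites (the copied characters plus the new NUL).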
INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n); SIZE_T src_size = REAL(wcsnlen)(src, n); SIZE_T dst_size = REAL(wcslen)(dst); COMMON_INTERCEPTOR_READ_RANGE(ctx, src, Min(src_size + 1, n) * sizeof(wchar_t)); COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size, (src_size + 1) * sizeof(wchar_t)); return REAL(wcsncat)(dst, src, n); // NOLINT } #define INIT_WCSCAT \ COMMON_INTERCEPT_FUNCTION(wcscat); \ COMMON_INTERCEPT_FUNCTION(wcsncat); #else #define INIT_WCSCAT #endif static void InitializeCommonInterceptors() { static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1]; interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap(); INIT_TEXTDOMAIN; INIT_STRLEN; INIT_STRNLEN; INIT_STRNDUP; INIT___STRNDUP; INIT_STRCMP; INIT_STRNCMP; INIT_STRCASECMP; INIT_STRNCASECMP; INIT_STRSTR; INIT_STRCASESTR; INIT_STRCHR; INIT_STRCHRNUL; INIT_STRRCHR; INIT_STRSPN; INIT_STRTOK; INIT_STRPBRK; INIT_MEMSET; INIT_MEMMOVE; INIT_MEMCPY; INIT_MEMCHR; INIT_MEMCMP; INIT_MEMRCHR; INIT_MEMMEM; INIT_READ; INIT_FREAD; INIT_PREAD; INIT_PREAD64; INIT_READV; INIT_PREADV; INIT_PREADV64; INIT_WRITE; INIT_FWRITE; INIT_PWRITE; INIT_PWRITE64; INIT_WRITEV; INIT_PWRITEV; INIT_PWRITEV64; INIT_PRCTL; INIT_LOCALTIME_AND_FRIENDS; INIT_STRPTIME; INIT_SCANF; INIT_ISOC99_SCANF; INIT_PRINTF; INIT_PRINTF_L; INIT_ISOC99_PRINTF; INIT_FREXP; INIT_FREXPF_FREXPL; INIT_GETPWNAM_AND_FRIENDS; INIT_GETPWNAM_R_AND_FRIENDS; INIT_GETPWENT; INIT_FGETPWENT; INIT_GETPWENT_R; INIT_SETPWENT; INIT_CLOCK_GETTIME; INIT_GETITIMER; INIT_TIME; INIT_GLOB; INIT_WAIT; INIT_WAIT4; INIT_INET; INIT_PTHREAD_GETSCHEDPARAM; INIT_GETADDRINFO; INIT_GETNAMEINFO; INIT_GETSOCKNAME; INIT_GETHOSTBYNAME; INIT_GETHOSTBYNAME_R; INIT_GETHOSTBYNAME2_R; INIT_GETHOSTBYADDR_R; INIT_GETHOSTENT_R; INIT_GETSOCKOPT; INIT_ACCEPT; INIT_ACCEPT4; INIT_MODF; INIT_RECVMSG; INIT_SENDMSG; INIT_GETPEERNAME; INIT_IOCTL; INIT_INET_ATON; INIT_SYSINFO; INIT_READDIR; INIT_READDIR64; INIT_PTRACE; INIT_SETLOCALE; INIT_GETCWD; INIT_GET_CURRENT_DIR_NAME; INIT_STRTOIMAX; INIT_MBSTOWCS; INIT_MBSNRTOWCS; INIT_WCSTOMBS; INIT_WCSNRTOMBS; INIT_WCRTOMB; INIT_TCGETATTR; INIT_REALPATH; INIT_CANONICALIZE_FILE_NAME; INIT_CONFSTR; INIT_SCHED_GETAFFINITY; INIT_SCHED_GETPARAM; INIT_STRERROR; INIT_STRERROR_R; INIT_XPG_STRERROR_R; INIT_SCANDIR; INIT_SCANDIR64; INIT_GETGROUPS; INIT_POLL; INIT_PPOLL; INIT_WORDEXP; INIT_SIGWAIT; INIT_SIGWAITINFO; INIT_SIGTIMEDWAIT; INIT_SIGSETOPS; INIT_SIGPENDING; INIT_SIGPROCMASK; INIT_BACKTRACE; INIT__EXIT; INIT_PTHREAD_MUTEX_LOCK; INIT_PTHREAD_MUTEX_UNLOCK; INIT_GETMNTENT; INIT_GETMNTENT_R; INIT_STATFS; INIT_STATFS64; INIT_STATVFS; INIT_STATVFS64; INIT_INITGROUPS; INIT_ETHER_NTOA_ATON; INIT_ETHER_HOST; INIT_ETHER_R; INIT_SHMCTL; INIT_RANDOM_R; INIT_PTHREAD_ATTR_GET; INIT_PTHREAD_ATTR_GETINHERITSCHED; INIT_PTHREAD_ATTR_GETAFFINITY_NP; INIT_PTHREAD_MUTEXATTR_GETPSHARED; INIT_PTHREAD_MUTEXATTR_GETTYPE; INIT_PTHREAD_MUTEXATTR_GETPROTOCOL; INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING; INIT_PTHREAD_MUTEXATTR_GETROBUST; INIT_PTHREAD_MUTEXATTR_GETROBUST_NP; INIT_PTHREAD_RWLOCKATTR_GETPSHARED; INIT_PTHREAD_RWLOCKATTR_GETKIND_NP; INIT_PTHREAD_CONDATTR_GETPSHARED; INIT_PTHREAD_CONDATTR_GETCLOCK; INIT_PTHREAD_BARRIERATTR_GETPSHARED; INIT_TMPNAM; INIT_TMPNAM_R; INIT_TTYNAME_R; INIT_TEMPNAM; INIT_PTHREAD_SETNAME_NP; INIT_SINCOS; INIT_REMQUO; INIT_LGAMMA; INIT_LGAMMA_R; INIT_LGAMMAL_R; INIT_DRAND48_R; INIT_RAND_R; INIT_GETLINE; 
INIT_ICONV; INIT_TIMES; INIT_TLS_GET_ADDR; INIT_LISTXATTR; INIT_GETXATTR; INIT_GETRESID; INIT_GETIFADDRS; INIT_IF_INDEXTONAME; INIT_CAPGET; INIT_AEABI_MEM; INIT___BZERO; INIT_FTIME; INIT_XDR; INIT_TSEARCH; INIT_LIBIO_INTERNALS; INIT_FOPEN; INIT_FOPEN64; INIT_OPEN_MEMSTREAM; INIT_OBSTACK; INIT_FFLUSH; INIT_FCLOSE; INIT_DLOPEN_DLCLOSE; INIT_GETPASS; INIT_TIMERFD; INIT_MLOCKX; INIT_FOPENCOOKIE; INIT_SEM; INIT_PTHREAD_SETCANCEL; INIT_MINCORE; INIT_PROCESS_VM_READV; INIT_CTERMID; INIT_CTERMID_R; INIT_RECV_RECVFROM; INIT_SEND_SENDTO; INIT_STAT; INIT_EVENTFD_READ_WRITE; INIT___XSTAT; INIT___XSTAT64; INIT___LXSTAT; INIT___LXSTAT64; // FIXME: add other *stat interceptors. INIT_UTMP; INIT_UTMPX; INIT_GETLOADAVG; INIT_WCSLEN; INIT_WCSCAT; } Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno.cc (nonexistent) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno.cc (revision 320961) @@ -0,0 +1,35 @@ +//===-- sanitizer_errno.cc --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between sanitizers run-time libraries. +// +// Defines errno to avoid including errno.h and its dependencies into other +// files (e.g. interceptors are not supposed to include any system headers). +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_errno_codes.h" +#include "sanitizer_internal_defs.h" + +#include <errno.h> + +namespace __sanitizer { + +COMPILER_CHECK(errno_ENOMEM == ENOMEM); +COMPILER_CHECK(errno_EBUSY == EBUSY); +COMPILER_CHECK(errno_EINVAL == EINVAL); + +// EOWNERDEAD is not present on some older platforms. +#if defined(EOWNERDEAD) +extern const int errno_EOWNERDEAD = EOWNERDEAD; +#else +extern const int errno_EOWNERDEAD = -1; +#endif + +} // namespace __sanitizer Property changes on: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno.h =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno.h (nonexistent) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno.h (revision 320961) @@ -0,0 +1,35 @@ +//===-- sanitizer_errno.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between sanitizers run-time libraries. +// +// Defines errno to avoid including errno.h and its dependencies into sensitive +// files (e.g. interceptors are not supposed to include any system headers). +// It's ok to use errno.h directly when your file already depends on other +// system includes, though.
+// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_ERRNO_H +#define SANITIZER_ERRNO_H + +#include "sanitizer_errno_codes.h" +#include "sanitizer_platform.h" + +#if SANITIZER_FREEBSD || SANITIZER_MAC +# define __errno_location __error +#elif SANITIZER_ANDROID +# define __errno_location __errno +#endif + +extern "C" int *__errno_location(); + +#define errno (*__errno_location()) + +#endif // SANITIZER_ERRNO_H Property changes on: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno_codes.h =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno_codes.h (nonexistent) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno_codes.h (revision 320961) @@ -0,0 +1,34 @@ +//===-- sanitizer_errno_codes.h ---------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between sanitizers run-time libraries. +// +// Defines errno codes to avoid including errno.h and its dependencies into +// sensitive files (e.g. interceptors are not supposed to include any system +// headers). +// It's ok to use errno.h directly when your file already depends on other +// system includes, though. +// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_ERRNO_CODES_H +#define SANITIZER_ERRNO_CODES_H + +namespace __sanitizer { + +#define errno_ENOMEM 12 +#define errno_EBUSY 16 +#define errno_EINVAL 22 + +// Those might not be present, or their values may differ, on different +// platforms. +extern const int errno_EOWNERDEAD; + +} // namespace __sanitizer + +#endif // SANITIZER_ERRNO_CODES_H Property changes on: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_errno_codes.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.cc (revision 320961) @@ -1,1635 +1,1708 @@ //===-- sanitizer_linux.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries and implements linux-specific functions from // sanitizer_libc.h.
//===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" #include "sanitizer_linux.h" #include "sanitizer_mutex.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" #include "sanitizer_symbolizer.h" #if !SANITIZER_FREEBSD #include <asm/param.h> #endif // For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat' // format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To // access stat from asm/stat.h, without conflicting with definition in // sys/stat.h, we use this trick. #if defined(__mips64) #include <asm/unistd.h> #include <sys/resource.h> #define stat kernel_stat #include <asm/stat.h> #undef stat #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#if SANITIZER_LINUX +#include <sys/utsname.h> +#endif + +#if SANITIZER_LINUX && !SANITIZER_ANDROID +#include <sys/personality.h> +#endif + #if SANITIZER_FREEBSD #include #include #include extern "C" { // <sys/umtx.h> must be included after <sys/time.h> and <sys/types.h> on // FreeBSD 9.2 and 10.0. #include <sys/umtx.h> } extern char **environ; // provided by crt1 #endif // SANITIZER_FREEBSD #if !SANITIZER_ANDROID #include #endif #ifndef __GLIBC_PREREQ #define __GLIBC_PREREQ(x, y) 0 #endif #if SANITIZER_LINUX && __GLIBC_PREREQ(2, 16) # define SANITIZER_USE_GETAUXVAL 1 #else # define SANITIZER_USE_GETAUXVAL 0 #endif #if SANITIZER_USE_GETAUXVAL #include <sys/auxv.h> #endif #if SANITIZER_LINUX // <linux/time.h> struct kernel_timeval { long tv_sec; long tv_usec; }; // <linux/futex.h> is broken on some linux distributions. const int FUTEX_WAIT = 0; const int FUTEX_WAKE = 1; #endif // SANITIZER_LINUX // Are we using 32-bit or 64-bit Linux syscalls? // x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32 // but it still needs to use 64-bit syscalls. #if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \ SANITIZER_WORDSIZE == 64) # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1 #else # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0 #endif #if defined(__x86_64__) || SANITIZER_MIPS64 extern "C" { extern void internal_sigreturn(); } #endif namespace __sanitizer { #if SANITIZER_LINUX && defined(__x86_64__) #include "sanitizer_syscall_linux_x86_64.inc" #elif SANITIZER_LINUX && defined(__aarch64__) #include "sanitizer_syscall_linux_aarch64.inc" #else #include "sanitizer_syscall_generic.inc" #endif // --------------- sanitizer_libc.h #if !SANITIZER_S390 uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd, OFF_T offset) { #if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd, offset); #else // mmap2 specifies file offset in 4096-byte units.
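// (For instance, a byte offset of 8192 is passed to mmap2 as 8192 / 4096 == 2;
// byte offsets that are not 4096-aligned cannot be expressed in these units,
// hence the alignment CHECK below.)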
CHECK(IsAligned(offset, 4096)); return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd, offset / 4096); #endif } #endif // !SANITIZER_S390 uptr internal_munmap(void *addr, uptr length) { return internal_syscall(SYSCALL(munmap), (uptr)addr, length); } int internal_mprotect(void *addr, uptr length, int prot) { return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot); } uptr internal_close(fd_t fd) { return internal_syscall(SYSCALL(close), fd); } uptr internal_open(const char *filename, int flags) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags); #else return internal_syscall(SYSCALL(open), (uptr)filename, flags); #endif } uptr internal_open(const char *filename, int flags, u32 mode) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags, mode); #else return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode); #endif } uptr internal_read(fd_t fd, void *buf, uptr count) { sptr res; HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf, count)); return res; } uptr internal_write(fd_t fd, const void *buf, uptr count) { sptr res; HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf, count)); return res; } uptr internal_ftruncate(fd_t fd, uptr size) { sptr res; HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd, (OFF_T)size)); return res; } #if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD static void stat64_to_stat(struct stat64 *in, struct stat *out) { internal_memset(out, 0, sizeof(*out)); out->st_dev = in->st_dev; out->st_ino = in->st_ino; out->st_mode = in->st_mode; out->st_nlink = in->st_nlink; out->st_uid = in->st_uid; out->st_gid = in->st_gid; out->st_rdev = in->st_rdev; out->st_size = in->st_size; out->st_blksize = in->st_blksize; out->st_blocks = in->st_blocks; out->st_atime = in->st_atime; out->st_mtime = in->st_mtime; out->st_ctime = in->st_ctime; - out->st_ino = in->st_ino; } #endif #if defined(__mips64) static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) { internal_memset(out, 0, sizeof(*out)); out->st_dev = in->st_dev; out->st_ino = in->st_ino; out->st_mode = in->st_mode; out->st_nlink = in->st_nlink; out->st_uid = in->st_uid; out->st_gid = in->st_gid; out->st_rdev = in->st_rdev; out->st_size = in->st_size; out->st_blksize = in->st_blksize; out->st_blocks = in->st_blocks; out->st_atime = in->st_atime_nsec; out->st_mtime = in->st_mtime_nsec; out->st_ctime = in->st_ctime_nsec; - out->st_ino = in->st_ino; } #endif uptr internal_stat(const char *path, void *buf) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS # if defined(__mips64) // For mips64, stat syscall fills buffer in the format of kernel_stat struct kernel_stat kbuf; int res = internal_syscall(SYSCALL(stat), path, &kbuf); kernel_stat_to_stat(&kbuf, (struct stat *)buf); return res; # else return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf); # endif #else struct stat64 buf64; int res = internal_syscall(SYSCALL(stat64), path, &buf64); stat64_to_stat(&buf64, (struct stat *)buf); return res; #endif } uptr internal_lstat(const char *path, void *buf) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 
AT_SYMLINK_NOFOLLOW); #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, AT_SYMLINK_NOFOLLOW); #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS # if SANITIZER_MIPS64 // For mips64, lstat syscall fills buffer in the format of kernel_stat struct kernel_stat kbuf; int res = internal_syscall(SYSCALL(lstat), path, &kbuf); kernel_stat_to_stat(&kbuf, (struct stat *)buf); return res; # else return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf); # endif #else struct stat64 buf64; int res = internal_syscall(SYSCALL(lstat64), path, &buf64); stat64_to_stat(&buf64, (struct stat *)buf); return res; #endif } uptr internal_fstat(fd_t fd, void *buf) { #if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS # if SANITIZER_MIPS64 // For mips64, fstat syscall fills buffer in the format of kernel_stat struct kernel_stat kbuf; int res = internal_syscall(SYSCALL(fstat), fd, &kbuf); kernel_stat_to_stat(&kbuf, (struct stat *)buf); return res; # else return internal_syscall(SYSCALL(fstat), fd, (uptr)buf); # endif #else struct stat64 buf64; int res = internal_syscall(SYSCALL(fstat64), fd, &buf64); stat64_to_stat(&buf64, (struct stat *)buf); return res; #endif } uptr internal_filesize(fd_t fd) { struct stat st; if (internal_fstat(fd, &st)) return -1; return (uptr)st.st_size; } uptr internal_dup2(int oldfd, int newfd) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0); #else return internal_syscall(SYSCALL(dup2), oldfd, newfd); #endif } uptr internal_readlink(const char *path, char *buf, uptr bufsize) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf, bufsize); #else return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize); #endif } uptr internal_unlink(const char *path) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0); #else return internal_syscall(SYSCALL(unlink), (uptr)path); #endif } uptr internal_rename(const char *oldpath, const char *newpath) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD, (uptr)newpath); #else return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath); #endif } uptr internal_sched_yield() { return internal_syscall(SYSCALL(sched_yield)); } void internal__exit(int exitcode) { #if SANITIZER_FREEBSD internal_syscall(SYSCALL(exit), exitcode); #else internal_syscall(SYSCALL(exit_group), exitcode); #endif Die(); // Unreachable. } unsigned int internal_sleep(unsigned int seconds) { struct timespec ts; ts.tv_sec = seconds; ts.tv_nsec = 0; int res = internal_syscall(SYSCALL(nanosleep), &ts, &ts); if (res) return ts.tv_sec; return 0; } uptr internal_execve(const char *filename, char *const argv[], char *const envp[]) { return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv, (uptr)envp); } // ----------------- sanitizer_common.h bool FileExists(const char *filename) { struct stat st; #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0)) #else if (internal_stat(filename, &st)) #endif return false; // Sanity check: filename is a regular file.
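// (For example, stat() succeeds on a directory or a device node, but
// FileExists still returns false for those.)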
return S_ISREG(st.st_mode); } tid_t GetTid() { #if SANITIZER_FREEBSD return (uptr)pthread_self(); #else return internal_syscall(SYSCALL(gettid)); #endif } u64 NanoTime() { #if SANITIZER_FREEBSD timeval tv; #else kernel_timeval tv; #endif internal_memset(&tv, 0, sizeof(tv)); internal_syscall(SYSCALL(gettimeofday), (uptr)&tv, 0); return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000; } // Like getenv, but reads env directly from /proc (on Linux) or parses the // 'environ' array (on FreeBSD) and does not use libc. This function should be // called first inside __asan_init. const char *GetEnv(const char *name) { #if SANITIZER_FREEBSD if (::environ != 0) { uptr NameLen = internal_strlen(name); for (char **Env = ::environ; *Env != 0; Env++) { if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=') return (*Env) + NameLen + 1; } } return 0; // Not found. #elif SANITIZER_LINUX static char *environ; static uptr len; static bool inited; if (!inited) { inited = true; uptr environ_size; if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len)) environ = nullptr; } if (!environ || len == 0) return nullptr; uptr namelen = internal_strlen(name); const char *p = environ; while (*p != '\0') { // will happen at the \0\0 that terminates the buffer // proc file has the format NAME=value\0NAME=value\0NAME=value\0... const char* endp = (char*)internal_memchr(p, '\0', len - (p - environ)); if (!endp) // this entry isn't NUL terminated return nullptr; else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match. return p + namelen + 1; // point after = p = endp + 1; } return nullptr; // Not found. #else #error "Unsupported platform" #endif } #if !SANITIZER_FREEBSD extern "C" { SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end; } #endif #if !SANITIZER_GO && !SANITIZER_FREEBSD static void ReadNullSepFileToArray(const char *path, char ***arr, int arr_size) { char *buff; uptr buff_size; uptr buff_len; *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray"); if (!ReadFileToBuffer(path, &buff, &buff_size, &buff_len, 1024 * 1024)) { (*arr)[0] = nullptr; return; } (*arr)[0] = buff; int count, i; for (count = 1, i = 1; ; i++) { if (buff[i] == 0) { if (buff[i+1] == 0) break; (*arr)[count] = &buff[i+1]; CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible. count++; } } (*arr)[count] = nullptr; } #endif static void GetArgsAndEnv(char ***argv, char ***envp) { #if !SANITIZER_FREEBSD #if !SANITIZER_GO if (&__libc_stack_end) { #endif uptr* stack_end = (uptr*)__libc_stack_end; int argc = *stack_end; *argv = (char**)(stack_end + 1); *envp = (char**)(stack_end + argc + 2); #if !SANITIZER_GO } else { static const int kMaxArgv = 2000, kMaxEnvp = 2000; ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv); ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp); } #endif #else // On FreeBSD, retrieving the argument and environment arrays is done via the // kern.ps_strings sysctl, which returns a pointer to a structure containing // this information. See also . 
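// (The kern.ps_strings sysctl copies out a pointer to the process's
// ps_strings structure; its ps_argvstr and ps_envstr members point at the
// argv and envp vectors, which are read out directly below.)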
ps_strings *pss; size_t sz = sizeof(pss); if (sysctlbyname("kern.ps_strings", &pss, &sz, NULL, 0) == -1) { Printf("sysctl kern.ps_strings failed\n"); Die(); } *argv = pss->ps_argvstr; *envp = pss->ps_envstr; #endif } char **GetArgv() { char **argv, **envp; GetArgsAndEnv(&argv, &envp); return argv; } void ReExec() { char **argv, **envp; GetArgsAndEnv(&argv, &envp); uptr rv = internal_execve("/proc/self/exe", argv, envp); int rverrno; CHECK_EQ(internal_iserror(rv, &rverrno), true); Printf("execve failed, errno %d\n", rverrno); Die(); } enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 }; BlockingMutex::BlockingMutex() { internal_memset(this, 0, sizeof(*this)); } void BlockingMutex::Lock() { CHECK_EQ(owner_, 0); atomic_uint32_t *m = reinterpret_cast(&opaque_storage_); if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked) return; while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) { #if SANITIZER_FREEBSD _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0); #else internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0); #endif } } void BlockingMutex::Unlock() { atomic_uint32_t *m = reinterpret_cast(&opaque_storage_); u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release); CHECK_NE(v, MtxUnlocked); if (v == MtxSleeping) { #if SANITIZER_FREEBSD _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0); #else internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE, 1, 0, 0, 0); #endif } } void BlockingMutex::CheckLocked() { atomic_uint32_t *m = reinterpret_cast(&opaque_storage_); CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed)); } // ----------------- sanitizer_linux.h // The actual size of this structure is specified by d_reclen. // Note that getdents64 uses a different structure format. We only provide the // 32-bit syscall here. struct linux_dirent { #if SANITIZER_X32 || defined(__aarch64__) u64 d_ino; u64 d_off; #else unsigned long d_ino; unsigned long d_off; #endif unsigned short d_reclen; #ifdef __aarch64__ unsigned char d_type; #endif char d_name[256]; }; // Syscall wrappers. 
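// (These thin wrappers return the raw kernel result: failures come back as
// negated errno values, which callers decode with internal_iserror() rather
// than by consulting libc's errno, e.g. as ReExec() does above.)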
uptr internal_ptrace(int request, int pid, void *addr, void *data) { return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr, (uptr)data); } uptr internal_waitpid(int pid, int *status, int options) { return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options, 0 /* rusage */); } uptr internal_getpid() { return internal_syscall(SYSCALL(getpid)); } uptr internal_getppid() { return internal_syscall(SYSCALL(getppid)); } uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL); #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count); #else return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count); #endif } uptr internal_lseek(fd_t fd, OFF_T offset, int whence) { return internal_syscall(SYSCALL(lseek), fd, offset, whence); } #if SANITIZER_LINUX uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) { return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5); } #endif uptr internal_sigaltstack(const struct sigaltstack *ss, struct sigaltstack *oss) { return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss); } int internal_fork() { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS return internal_syscall(SYSCALL(clone), SIGCHLD, 0); #else return internal_syscall(SYSCALL(fork)); #endif } #if SANITIZER_LINUX #define SA_RESTORER 0x04000000 // Doesn't set sa_restorer if the caller did not set it, so use with caution //(see below). int internal_sigaction_norestorer(int signum, const void *act, void *oldact) { __sanitizer_kernel_sigaction_t k_act, k_oldact; internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t)); internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t)); const __sanitizer_sigaction *u_act = (const __sanitizer_sigaction *)act; __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact; if (u_act) { k_act.handler = u_act->handler; k_act.sigaction = u_act->sigaction; internal_memcpy(&k_act.sa_mask, &u_act->sa_mask, sizeof(__sanitizer_kernel_sigset_t)); // Without SA_RESTORER kernel ignores the calls (probably returns EINVAL). k_act.sa_flags = u_act->sa_flags | SA_RESTORER; // FIXME: most often sa_restorer is unset, however the kernel requires it // to point to a valid signal restorer that calls the rt_sigreturn syscall. // If sa_restorer passed to the kernel is NULL, the program may crash upon // signal delivery or fail to unwind the stack in the signal handler. // libc implementation of sigaction() passes its own restorer to // rt_sigaction, so we need to do the same (we'll need to reimplement the // restorers; for x86_64 the restorer address can be obtained from // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact). #if !SANITIZER_ANDROID || !SANITIZER_MIPS32 k_act.sa_restorer = u_act->sa_restorer; #endif } uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum, (uptr)(u_act ? &k_act : nullptr), (uptr)(u_oldact ? 
&k_oldact : nullptr), (uptr)sizeof(__sanitizer_kernel_sigset_t)); if ((result == 0) && u_oldact) { u_oldact->handler = k_oldact.handler; u_oldact->sigaction = k_oldact.sigaction; internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask, sizeof(__sanitizer_kernel_sigset_t)); u_oldact->sa_flags = k_oldact.sa_flags; #if !SANITIZER_ANDROID || !SANITIZER_MIPS32 u_oldact->sa_restorer = k_oldact.sa_restorer; #endif } return result; } // Invokes sigaction via a raw syscall with a restorer, but does not support // all platforms yet. // We disable for Go simply because we have not yet added to buildgo.sh. #if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO int internal_sigaction_syscall(int signum, const void *act, void *oldact) { if (act == nullptr) return internal_sigaction_norestorer(signum, act, oldact); __sanitizer_sigaction u_adjust; internal_memcpy(&u_adjust, act, sizeof(u_adjust)); #if !SANITIZER_ANDROID || !SANITIZER_MIPS32 if (u_adjust.sa_restorer == nullptr) { u_adjust.sa_restorer = internal_sigreturn; } #endif return internal_sigaction_norestorer(signum, (const void *)&u_adjust, oldact); } #endif // defined(__x86_64__) && !SANITIZER_GO #endif // SANITIZER_LINUX uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(sigprocmask), how, set, oldset); #else __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set; __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset; return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0], sizeof(__sanitizer_kernel_sigset_t)); #endif } void internal_sigfillset(__sanitizer_sigset_t *set) { internal_memset(set, 0xff, sizeof(*set)); } void internal_sigemptyset(__sanitizer_sigset_t *set) { internal_memset(set, 0, sizeof(*set)); } #if SANITIZER_LINUX void internal_sigdelset(__sanitizer_sigset_t *set, int signum) { signum -= 1; CHECK_GE(signum, 0); CHECK_LT(signum, sizeof(*set) * 8); __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set; const uptr idx = signum / (sizeof(k_set->sig[0]) * 8); const uptr bit = signum % (sizeof(k_set->sig[0]) * 8); k_set->sig[idx] &= ~(1 << bit); } bool internal_sigismember(__sanitizer_sigset_t *set, int signum) { signum -= 1; CHECK_GE(signum, 0); CHECK_LT(signum, sizeof(*set) * 8); __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set; const uptr idx = signum / (sizeof(k_set->sig[0]) * 8); const uptr bit = signum % (sizeof(k_set->sig[0]) * 8); return k_set->sig[idx] & (1 << bit); } #endif // SANITIZER_LINUX // ThreadLister implementation. ThreadLister::ThreadLister(int pid) : pid_(pid), descriptor_(-1), buffer_(4096), error_(true), entry_((struct linux_dirent *)buffer_.data()), bytes_read_(0) { char task_directory_path[80]; internal_snprintf(task_directory_path, sizeof(task_directory_path), "/proc/%d/task/", pid); uptr openrv = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY); if (internal_iserror(openrv)) { error_ = true; Report("Can't open /proc/%d/task for reading.\n", pid); } else { error_ = false; descriptor_ = openrv; } } int ThreadLister::GetNextTID() { int tid = -1; do { if (error_) return -1; if ((char *)entry_ >= &buffer_[bytes_read_] && !GetDirectoryEntries()) return -1; if (entry_->d_ino != 0 && entry_->d_name[0] >= '0' && entry_->d_name[0] <= '9') { // Found a valid tid. 
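// (Entries whose names do not start with a digit, such as "." and "..", fail
// the check above and are skipped; only numeric task directories yield tids.)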
tid = (int)internal_atoll(entry_->d_name); } entry_ = (struct linux_dirent *)(((char *)entry_) + entry_->d_reclen); } while (tid < 0); return tid; } void ThreadLister::Reset() { if (error_ || descriptor_ < 0) return; internal_lseek(descriptor_, 0, SEEK_SET); } ThreadLister::~ThreadLister() { if (descriptor_ >= 0) internal_close(descriptor_); } bool ThreadLister::error() { return error_; } bool ThreadLister::GetDirectoryEntries() { CHECK_GE(descriptor_, 0); CHECK_NE(error_, true); bytes_read_ = internal_getdents(descriptor_, (struct linux_dirent *)buffer_.data(), buffer_.size()); if (internal_iserror(bytes_read_)) { Report("Can't read directory entries from /proc/%d/task.\n", pid_); error_ = true; return false; } else if (bytes_read_ == 0) { return false; } entry_ = (struct linux_dirent *)buffer_.data(); return true; } +#if SANITIZER_WORDSIZE == 32 +// Take care of unusable kernel area in top gigabyte. +static uptr GetKernelAreaSize() { +#if SANITIZER_LINUX && !SANITIZER_X32 + const uptr gbyte = 1UL << 30; + + // Firstly check if there are writable segments + // mapped to top gigabyte (e.g. stack). + MemoryMappingLayout proc_maps(/*cache_enabled*/true); + MemoryMappedSegment segment; + while (proc_maps.Next(&segment)) { + if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0; + } + +#if !SANITIZER_ANDROID + // Even if nothing is mapped, top Gb may still be accessible + // if we are running on 64-bit kernel. + // Uname may report misleading results if personality type + // is modified (e.g. under schroot) so check this as well. + struct utsname uname_info; + int pers = personality(0xffffffffUL); + if (!(pers & PER_MASK) + && uname(&uname_info) == 0 + && internal_strstr(uname_info.machine, "64")) + return 0; +#endif // SANITIZER_ANDROID + + // Top gigabyte is reserved for kernel. + return gbyte; +#else + return 0; +#endif // SANITIZER_LINUX && !SANITIZER_X32 +} +#endif // SANITIZER_WORDSIZE == 32 + +uptr GetMaxVirtualAddress() { +#if SANITIZER_WORDSIZE == 64 +# if defined(__powerpc64__) || defined(__aarch64__) + // On PowerPC64 we have two different address space layouts: 44- and 46-bit. + // We somehow need to figure out which one we are using now and choose + // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL. + // Note that with 'ulimit -s unlimited' the stack is moved away from the top + // of the address space, so simply checking the stack address is not enough. + // This should (does) work for both PowerPC64 Endian modes. + // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit. + return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1; +# elif defined(__mips64) + return (1ULL << 40) - 1; // 0x000000ffffffffffUL; +# elif defined(__s390x__) + return (1ULL << 53) - 1; // 0x001fffffffffffffUL; +# else + return (1ULL << 47) - 1; // 0x00007fffffffffffUL; +# endif +#else // SANITIZER_WORDSIZE == 32 +# if defined(__s390__) + return (1ULL << 31) - 1; // 0x7fffffff; +# else + uptr res = (1ULL << 32) - 1; // 0xffffffff; + if (!common_flags()->full_address_space) + res -= GetKernelAreaSize(); + CHECK_LT(reinterpret_cast(&res), res); + return res; +# endif +#endif // SANITIZER_WORDSIZE +} + uptr GetPageSize() { // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array. 
#if SANITIZER_ANDROID return 4096; #elif SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) return EXEC_PAGESIZE; #elif SANITIZER_USE_GETAUXVAL return getauxval(AT_PAGESZ); #else return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy. #endif } uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { #if SANITIZER_FREEBSD const int Mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 }; const char *default_module_name = "kern.proc.pathname"; size_t Size = buf_len; bool IsErr = (sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0); int readlink_error = IsErr ? errno : 0; uptr module_name_len = Size; #else const char *default_module_name = "/proc/self/exe"; uptr module_name_len = internal_readlink( default_module_name, buf, buf_len); int readlink_error; bool IsErr = internal_iserror(module_name_len, &readlink_error); #endif if (IsErr) { // We can't read binary name for some reason, assume it's unknown. Report("WARNING: reading executable name failed with errno %d, " "some stack frames may not be symbolized\n", readlink_error); module_name_len = internal_snprintf(buf, buf_len, "%s", default_module_name); CHECK_LT(module_name_len, buf_len); } return module_name_len; } uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) { #if SANITIZER_LINUX char *tmpbuf; uptr tmpsize; uptr tmplen; if (ReadFileToBuffer("/proc/self/cmdline", &tmpbuf, &tmpsize, &tmplen, 1024 * 1024)) { internal_strncpy(buf, tmpbuf, buf_len); UnmapOrDie(tmpbuf, tmpsize); return internal_strlen(buf); } #endif return ReadBinaryName(buf, buf_len); } // Match full names of the form /path/to/base_name{-,.}* bool LibraryNameIs(const char *full_name, const char *base_name) { const char *name = full_name; // Strip path. while (*name != '\0') name++; while (name > full_name && *name != '/') name--; if (*name == '/') name++; uptr base_name_length = internal_strlen(base_name); if (internal_strncmp(name, base_name, base_name_length)) return false; return (name[base_name_length] == '-' || name[base_name_length] == '.'); } #if !SANITIZER_ANDROID // Call cb for each region mapped by map. void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) { CHECK_NE(map, nullptr); #if !SANITIZER_FREEBSD typedef ElfW(Phdr) Elf_Phdr; typedef ElfW(Ehdr) Elf_Ehdr; #endif // !SANITIZER_FREEBSD char *base = (char *)map->l_addr; Elf_Ehdr *ehdr = (Elf_Ehdr *)base; char *phdrs = base + ehdr->e_phoff; char *phdrs_end = phdrs + ehdr->e_phnum * ehdr->e_phentsize; // Find the segment with the minimum base so we can "relocate" the p_vaddr // fields. Typically ET_DYN objects (DSOs) have base of zero and ET_EXEC // objects have a non-zero base. uptr preferred_base = (uptr)-1; for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) { Elf_Phdr *phdr = (Elf_Phdr *)iter; if (phdr->p_type == PT_LOAD && preferred_base > (uptr)phdr->p_vaddr) preferred_base = (uptr)phdr->p_vaddr; } // Compute the delta from the real base to get a relocation delta. sptr delta = (uptr)base - preferred_base; // Now we can figure out what the loader really mapped. for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) { Elf_Phdr *phdr = (Elf_Phdr *)iter; if (phdr->p_type == PT_LOAD) { uptr seg_start = phdr->p_vaddr + delta; uptr seg_end = seg_start + phdr->p_memsz; // None of these values are aligned. We consider the ragged edges of the // load command as defined, since they are mapped from the file. 
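// (With 4 KiB pages, for example, a segment spanning [0x1234, 0x5678) is
// widened below to [0x1000, 0x6000) before cb is invoked.)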
seg_start = RoundDownTo(seg_start, GetPageSizeCached()); seg_end = RoundUpTo(seg_end, GetPageSizeCached()); cb((void *)seg_start, seg_end - seg_start); } } } #endif #if defined(__x86_64__) && SANITIZER_LINUX // We cannot use glibc's clone wrapper, because it messes with the child // task's TLS. It writes the PID and TID of the child task to its thread // descriptor, but in our case the child task shares the thread descriptor with // the parent (because we don't know how to allocate a new thread // descriptor to keep glibc happy). So the stock version of clone(), when // used with CLONE_VM, would end up corrupting the parent's thread descriptor. uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long long res; if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ((unsigned long long *)child_stack)[0] = (uptr)fn; ((unsigned long long *)child_stack)[1] = (uptr)arg; register void *r8 __asm__("r8") = newtls; register int *r10 __asm__("r10") = child_tidptr; __asm__ __volatile__( /* %rax = syscall(%rax = SYSCALL(clone), * %rdi = flags, * %rsi = child_stack, * %rdx = parent_tidptr, * %r8 = new_tls, * %r10 = child_tidptr) */ "syscall\n" /* if (%rax != 0) * return; */ "testq %%rax,%%rax\n" "jnz 1f\n" /* In the child. Terminate unwind chain. */ // XXX: We should also terminate the CFI unwind chain // here. Unfortunately clang 3.2 doesn't support the // necessary CFI directives, so we skip that part. "xorq %%rbp,%%rbp\n" /* Call "fn(arg)". */ "popq %%rax\n" "popq %%rdi\n" "call *%%rax\n" /* Call _exit(%rax). */ "movq %%rax,%%rdi\n" "movq %2,%%rax\n" "syscall\n" /* Return to parent. */ "1:\n" : "=a" (res) : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "S"(child_stack), "D"(flags), "d"(parent_tidptr), "r"(r8), "r"(r10) : "rsp", "memory", "r11", "rcx"); return res; } #elif defined(__mips__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long long res; if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ((unsigned long long *)child_stack)[0] = (uptr)fn; ((unsigned long long *)child_stack)[1] = (uptr)arg; register void *a3 __asm__("$7") = newtls; register int *a4 __asm__("$8") = child_tidptr; // We don't have proper CFI directives here because it requires alot of code // for very marginal benefits. __asm__ __volatile__( /* $v0 = syscall($v0 = __NR_clone, * $a0 = flags, * $a1 = child_stack, * $a2 = parent_tidptr, * $a3 = new_tls, * $a4 = child_tidptr) */ ".cprestore 16;\n" "move $4,%1;\n" "move $5,%2;\n" "move $6,%3;\n" "move $7,%4;\n" /* Store the fifth argument on stack * if we are using 32-bit abi. */ #if SANITIZER_WORDSIZE == 32 "lw %5,16($29);\n" #else "move $8,%5;\n" #endif "li $2,%6;\n" "syscall;\n" /* if ($v0 != 0) * return; */ "bnez $2,1f;\n" /* Call "fn(arg)". */ #if SANITIZER_WORDSIZE == 32 #ifdef __BIG_ENDIAN__ "lw $25,4($29);\n" "lw $4,12($29);\n" #else "lw $25,0($29);\n" "lw $4,8($29);\n" #endif #else "ld $25,0($29);\n" "ld $4,8($29);\n" #endif "jal $25;\n" /* Call _exit($v0). */ "move $4,$2;\n" "li $2,%7;\n" "syscall;\n" /* Return to parent. 
*/ "1:\n" : "=r" (res) : "r"(flags), "r"(child_stack), "r"(parent_tidptr), "r"(a3), "r"(a4), "i"(__NR_clone), "i"(__NR_exit) : "memory", "$29" ); return res; } #elif defined(__aarch64__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long long res; if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ((unsigned long long *)child_stack)[0] = (uptr)fn; ((unsigned long long *)child_stack)[1] = (uptr)arg; register int (*__fn)(void *) __asm__("x0") = fn; register void *__stack __asm__("x1") = child_stack; register int __flags __asm__("x2") = flags; register void *__arg __asm__("x3") = arg; register int *__ptid __asm__("x4") = parent_tidptr; register void *__tls __asm__("x5") = newtls; register int *__ctid __asm__("x6") = child_tidptr; __asm__ __volatile__( "mov x0,x2\n" /* flags */ "mov x2,x4\n" /* ptid */ "mov x3,x5\n" /* tls */ "mov x4,x6\n" /* ctid */ "mov x8,%9\n" /* clone */ "svc 0x0\n" /* if (%r0 != 0) * return %r0; */ "cmp x0, #0\n" "bne 1f\n" /* In the child, now. Call "fn(arg)". */ "ldp x1, x0, [sp], #16\n" "blr x1\n" /* Call _exit(%r0). */ "mov x8, %10\n" "svc 0x0\n" "1:\n" : "=r" (res) : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg), "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit) : "x30", "memory"); return res; } #elif defined(__powerpc64__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long long res; // Stack frame structure. #if SANITIZER_PPC64V1 // Back chain == 0 (SP + 112) // Frame (112 bytes): // Parameter save area (SP + 48), 8 doublewords // TOC save area (SP + 40) // Link editor doubleword (SP + 32) // Compiler doubleword (SP + 24) // LR save area (SP + 16) // CR save area (SP + 8) // Back chain (SP + 0) # define FRAME_SIZE 112 # define FRAME_TOC_SAVE_OFFSET 40 #elif SANITIZER_PPC64V2 // Back chain == 0 (SP + 32) // Frame (32 bytes): // TOC save area (SP + 24) // LR save area (SP + 16) // CR save area (SP + 8) // Back chain (SP + 0) # define FRAME_SIZE 32 # define FRAME_TOC_SAVE_OFFSET 24 #else # error "Unsupported PPC64 ABI" #endif if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); register int (*__fn)(void *) __asm__("r3") = fn; register void *__cstack __asm__("r4") = child_stack; register int __flags __asm__("r5") = flags; register void *__arg __asm__("r6") = arg; register int *__ptidptr __asm__("r7") = parent_tidptr; register void *__newtls __asm__("r8") = newtls; register int *__ctidptr __asm__("r9") = child_tidptr; __asm__ __volatile__( /* fn and arg are saved across the syscall */ "mr 28, %5\n\t" "mr 27, %8\n\t" /* syscall r0 == __NR_clone r3 == flags r4 == child_stack r5 == parent_tidptr r6 == newtls r7 == child_tidptr */ "mr 3, %7\n\t" "mr 5, %9\n\t" "mr 6, %10\n\t" "mr 7, %11\n\t" "li 0, %3\n\t" "sc\n\t" /* Test if syscall was successful */ "cmpdi cr1, 3, 0\n\t" "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t" "bne- cr1, 1f\n\t" /* Set up stack frame */ "li 29, 0\n\t" "stdu 29, -8(1)\n\t" "stdu 1, -%12(1)\n\t" /* Do the function call */ "std 2, %13(1)\n\t" #if SANITIZER_PPC64V1 "ld 0, 0(28)\n\t" "ld 2, 8(28)\n\t" "mtctr 0\n\t" #elif SANITIZER_PPC64V2 "mr 12, 28\n\t" "mtctr 12\n\t" #else # error "Unsupported PPC64 ABI" #endif "mr 3, 27\n\t" "bctrl\n\t" "ld 2, %13(1)\n\t" /* Call _exit(r3) */ "li 0, %4\n\t" "sc\n\t" /* Return to parent */ 
"1:\n\t" "mr %0, 3\n\t" : "=r" (res) : "0" (-1), "i" (EINVAL), "i" (__NR_clone), "i" (__NR_exit), "r" (__fn), "r" (__cstack), "r" (__flags), "r" (__arg), "r" (__ptidptr), "r" (__newtls), "r" (__ctidptr), "i" (FRAME_SIZE), "i" (FRAME_TOC_SAVE_OFFSET) : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29"); return res; } #elif defined(__i386__) && SANITIZER_LINUX uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { int res; if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); child_stack = (char *)child_stack - 7 * sizeof(unsigned int); ((unsigned int *)child_stack)[0] = (uptr)flags; ((unsigned int *)child_stack)[1] = (uptr)0; ((unsigned int *)child_stack)[2] = (uptr)fn; ((unsigned int *)child_stack)[3] = (uptr)arg; __asm__ __volatile__( /* %eax = syscall(%eax = SYSCALL(clone), * %ebx = flags, * %ecx = child_stack, * %edx = parent_tidptr, * %esi = new_tls, * %edi = child_tidptr) */ /* Obtain flags */ "movl (%%ecx), %%ebx\n" /* Do the system call */ "pushl %%ebx\n" "pushl %%esi\n" "pushl %%edi\n" /* Remember the flag value. */ "movl %%ebx, (%%ecx)\n" "int $0x80\n" "popl %%edi\n" "popl %%esi\n" "popl %%ebx\n" /* if (%eax != 0) * return; */ "test %%eax,%%eax\n" "jnz 1f\n" /* terminate the stack frame */ "xorl %%ebp,%%ebp\n" /* Call FN. */ "call *%%ebx\n" #ifdef PIC "call here\n" "here:\n" "popl %%ebx\n" "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n" #endif /* Call exit */ "movl %%eax, %%ebx\n" "movl %2, %%eax\n" "int $0x80\n" "1:\n" : "=a" (res) : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "c"(child_stack), "d"(parent_tidptr), "S"(newtls), "D"(child_tidptr) : "memory"); return res; } #elif defined(__arm__) && SANITIZER_LINUX uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { unsigned int res; if (!fn || !child_stack) return -EINVAL; child_stack = (char *)child_stack - 2 * sizeof(unsigned int); ((unsigned int *)child_stack)[0] = (uptr)fn; ((unsigned int *)child_stack)[1] = (uptr)arg; register int r0 __asm__("r0") = flags; register void *r1 __asm__("r1") = child_stack; register int *r2 __asm__("r2") = parent_tidptr; register void *r3 __asm__("r3") = newtls; register int *r4 __asm__("r4") = child_tidptr; register int r7 __asm__("r7") = __NR_clone; #if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__) # define ARCH_HAS_BX #endif #if __ARM_ARCH > 4 # define ARCH_HAS_BLX #endif #ifdef ARCH_HAS_BX # ifdef ARCH_HAS_BLX # define BLX(R) "blx " #R "\n" # else # define BLX(R) "mov lr, pc; bx " #R "\n" # endif #else # define BLX(R) "mov lr, pc; mov pc," #R "\n" #endif __asm__ __volatile__( /* %r0 = syscall(%r7 = SYSCALL(clone), * %r0 = flags, * %r1 = child_stack, * %r2 = parent_tidptr, * %r3 = new_tls, * %r4 = child_tidptr) */ /* Do the system call */ "swi 0x0\n" /* if (%r0 != 0) * return %r0; */ "cmp r0, #0\n" "bne 1f\n" /* In the child, now. Call "fn(arg)". */ "ldr r0, [sp, #4]\n" "ldr ip, [sp], #8\n" BLX(ip) /* Call _exit(%r0). 
*/ "mov r7, %7\n" "swi 0x0\n" "1:\n" "mov %0, r0\n" : "=r"(res) : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7), "i"(__NR_exit) : "memory"); return res; } #endif // defined(__x86_64__) && SANITIZER_LINUX #if SANITIZER_ANDROID #if __ANDROID_API__ < 21 extern "C" __attribute__((weak)) int dl_iterate_phdr( int (*)(struct dl_phdr_info *, size_t, void *), void *); #endif static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size, void *data) { // Any name starting with "lib" indicates a bug in L where library base names // are returned instead of paths. if (info->dlpi_name && info->dlpi_name[0] == 'l' && info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') { *(bool *)data = true; return 1; } return 0; } static atomic_uint32_t android_api_level; static AndroidApiLevel AndroidDetectApiLevel() { if (!&dl_iterate_phdr) return ANDROID_KITKAT; // K or lower bool base_name_seen = false; dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen); if (base_name_seen) return ANDROID_LOLLIPOP_MR1; // L MR1 return ANDROID_POST_LOLLIPOP; // post-L // Plain L (API level 21) is completely broken wrt ASan and not very // interesting to detect. } AndroidApiLevel AndroidGetApiLevel() { AndroidApiLevel level = (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed); if (level) return level; level = AndroidDetectApiLevel(); atomic_store(&android_api_level, level, memory_order_relaxed); return level; } #endif static HandleSignalMode GetHandleSignalModeImpl(int signum) { switch (signum) { case SIGABRT: return common_flags()->handle_abort; case SIGILL: return common_flags()->handle_sigill; case SIGFPE: return common_flags()->handle_sigfpe; case SIGSEGV: return common_flags()->handle_segv; case SIGBUS: return common_flags()->handle_sigbus; } return kHandleSignalNo; } HandleSignalMode GetHandleSignalMode(int signum) { HandleSignalMode result = GetHandleSignalModeImpl(signum); if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler) return kHandleSignalExclusive; return result; } #if !SANITIZER_GO void *internal_start_thread(void(*func)(void *arg), void *arg) { // Start the thread with signals blocked, otherwise it can steal user signals. __sanitizer_sigset_t set, old; internal_sigfillset(&set); #if SANITIZER_LINUX && !SANITIZER_ANDROID // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked // on any thread, setuid call hangs (see test/tsan/setuid.c). internal_sigdelset(&set, 33); #endif internal_sigprocmask(SIG_SETMASK, &set, &old); void *th; real_pthread_create(&th, nullptr, (void*(*)(void *arg))func, arg); internal_sigprocmask(SIG_SETMASK, &old, nullptr); return th; } void internal_join_thread(void *th) { real_pthread_join(th, nullptr); } #else void *internal_start_thread(void (*func)(void *), void *arg) { return 0; } void internal_join_thread(void *th) {} #endif #if defined(__aarch64__) // Android headers in the older NDK releases miss this definition. 
struct __sanitizer_esr_context { struct _aarch64_ctx head; uint64_t esr; }; static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) { static const u32 kEsrMagic = 0x45535201; u8 *aux = ucontext->uc_mcontext.__reserved; while (true) { _aarch64_ctx *ctx = (_aarch64_ctx *)aux; if (ctx->size == 0) break; if (ctx->magic == kEsrMagic) { *esr = ((__sanitizer_esr_context *)ctx)->esr; return true; } aux += ctx->size; } return false; } #endif SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) { ucontext_t *ucontext = (ucontext_t *)context; #if defined(__x86_64__) || defined(__i386__) static const uptr PF_WRITE = 1U << 1; #if SANITIZER_FREEBSD uptr err = ucontext->uc_mcontext.mc_err; #else uptr err = ucontext->uc_mcontext.gregs[REG_ERR]; #endif return err & PF_WRITE ? WRITE : READ; #elif defined(__arm__) static const uptr FSR_WRITE = 1U << 11; uptr fsr = ucontext->uc_mcontext.error_code; return fsr & FSR_WRITE ? WRITE : READ; #elif defined(__aarch64__) static const u64 ESR_ELx_WNR = 1U << 6; u64 esr; if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN; return esr & ESR_ELx_WNR ? WRITE : READ; #else (void)ucontext; return UNKNOWN; // FIXME: Implement. #endif } void SignalContext::DumpAllRegisters(void *context) { // FIXME: Implement this. } void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { #if defined(__arm__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.arm_pc; *bp = ucontext->uc_mcontext.arm_fp; *sp = ucontext->uc_mcontext.arm_sp; #elif defined(__aarch64__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.pc; *bp = ucontext->uc_mcontext.regs[29]; *sp = ucontext->uc_mcontext.sp; #elif defined(__hppa__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.sc_iaoq[0]; /* GCC uses %r3 whenever a frame pointer is needed. */ *bp = ucontext->uc_mcontext.sc_gr[3]; *sp = ucontext->uc_mcontext.sc_gr[30]; #elif defined(__x86_64__) # if SANITIZER_FREEBSD ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.mc_rip; *bp = ucontext->uc_mcontext.mc_rbp; *sp = ucontext->uc_mcontext.mc_rsp; # else ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.gregs[REG_RIP]; *bp = ucontext->uc_mcontext.gregs[REG_RBP]; *sp = ucontext->uc_mcontext.gregs[REG_RSP]; # endif #elif defined(__i386__) # if SANITIZER_FREEBSD ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.mc_eip; *bp = ucontext->uc_mcontext.mc_ebp; *sp = ucontext->uc_mcontext.mc_esp; # else ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.gregs[REG_EIP]; *bp = ucontext->uc_mcontext.gregs[REG_EBP]; *sp = ucontext->uc_mcontext.gregs[REG_ESP]; # endif #elif defined(__powerpc__) || defined(__powerpc64__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.regs->nip; *sp = ucontext->uc_mcontext.regs->gpr[PT_R1]; // The powerpc{,64}-linux ABIs do not specify r31 as the frame // pointer, but GCC always uses r31 when we need a frame pointer. 
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31]; #elif defined(__sparc__) ucontext_t *ucontext = (ucontext_t*)context; uptr *stk_ptr; # if defined (__arch64__) *pc = ucontext->uc_mcontext.mc_gregs[MC_PC]; *sp = ucontext->uc_mcontext.mc_gregs[MC_O6]; stk_ptr = (uptr *) (*sp + 2047); *bp = stk_ptr[15]; # else *pc = ucontext->uc_mcontext.gregs[REG_PC]; *sp = ucontext->uc_mcontext.gregs[REG_O6]; stk_ptr = (uptr *) *sp; *bp = stk_ptr[15]; # endif #elif defined(__mips__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.pc; *bp = ucontext->uc_mcontext.gregs[30]; *sp = ucontext->uc_mcontext.gregs[29]; #elif defined(__s390__) ucontext_t *ucontext = (ucontext_t*)context; # if defined(__s390x__) *pc = ucontext->uc_mcontext.psw.addr; # else *pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff; # endif *bp = ucontext->uc_mcontext.gregs[11]; *sp = ucontext->uc_mcontext.gregs[15]; #else # error "Unsupported arch" #endif } void MaybeReexec() { // No need to re-exec on Linux. } void PrintModuleMap() { } void CheckNoDeepBind(const char *filename, int flag) { #ifdef RTLD_DEEPBIND if (flag & RTLD_DEEPBIND) { Report( "You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag" " which is incompatibe with sanitizer runtime " "(see https://github.com/google/sanitizers/issues/611 for details" "). If you want to run %s library under sanitizers please remove " "RTLD_DEEPBIND from dlopen flags.\n", filename, filename); Die(); } #endif } -uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) { +uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, + uptr *largest_gap_found) { UNREACHABLE("FindAvailableMemoryRange is not available"); return 0; } bool GetRandom(void *buffer, uptr length) { if (!buffer || !length || length > 256) return false; #if defined(__NR_getrandom) static atomic_uint8_t skip_getrandom_syscall; if (!atomic_load_relaxed(&skip_getrandom_syscall)) { // Up to 256 bytes, getrandom will not be interrupted. uptr res = internal_syscall(SYSCALL(getrandom), buffer, length, 0); int rverrno = 0; if (internal_iserror(res, &rverrno) && rverrno == ENOSYS) atomic_store_relaxed(&skip_getrandom_syscall, 1); else if (res == length) return true; } #endif uptr fd = internal_open("/dev/urandom", O_RDONLY); if (internal_iserror(fd)) return false; // internal_read deals with EINTR. uptr res = internal_read(fd, buffer, length); if (internal_iserror(res)) return false; internal_close(fd); return true; } } // namespace __sanitizer #endif // SANITIZER_FREEBSD || SANITIZER_LINUX Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux_libcdep.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux_libcdep.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux_libcdep.cc (revision 320961) @@ -1,565 +1,562 @@ //===-- sanitizer_linux_libcdep.cc ----------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries and implements linux-specific functions from // sanitizer_libc.h. 
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_freebsd.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include <dlfcn.h>  // for dlsym()
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <syslog.h>
#if SANITIZER_FREEBSD
#include <pthread_np.h>
#include <osreldate.h>
#define pthread_getattr_np pthread_attr_get_np
#endif
#if SANITIZER_LINUX
#include <sys/prctl.h>
#endif
#if SANITIZER_ANDROID
#include <android/api-level.h>
#endif
#if SANITIZER_ANDROID && __ANDROID_API__ < 21
#include <android/log.h>
#endif
#if !SANITIZER_ANDROID
#include <elf.h>
#include <unistd.h>
#endif
namespace __sanitizer {
SANITIZER_WEAK_ATTRIBUTE int real_sigaction(int signum, const void *act, void *oldact);
int internal_sigaction(int signum, const void *act, void *oldact) {
#if !SANITIZER_GO
  if (&real_sigaction) return real_sigaction(signum, act, oldact);
#endif
  return sigaction(signum, (const struct sigaction *)act, (struct sigaction *)oldact);
}
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/true);
-    uptr start, end, offset;
+    MemoryMappedSegment segment;
    uptr prev_end = 0;
-    while (proc_maps.Next(&start, &end, &offset, nullptr, 0,
-                          /* protection */nullptr)) {
-      if ((uptr)&rl < end)
-        break;
-      prev_end = end;
+    while (proc_maps.Next(&segment)) {
+      if ((uptr)&rl < segment.end) break;
+      prev_end = segment.end;
    }
-    CHECK((uptr)&rl >= start && (uptr)&rl < end);
+    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
-    if (stacksize > end - prev_end)
-      stacksize = end - prev_end;
+    if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
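// Three limits interact here: the gap left below the previous mapping, the
// RLIMIT_STACK soft limit, and the hard cap applied just below (needed
// because 'ulimit -s unlimited' makes rl.rlim_cur RLIM_INFINITY).
#if 0  // The same clamping in isolation (sketch; max_stack stands in for
       // the runtime's kMaxThreadStackSize from sanitizer_common.h):
#include <sys/resource.h>
static unsigned long ClampStackSize(unsigned long gap_below_stack,
                                    unsigned long max_stack) {
  struct rlimit rl;
  if (getrlimit(RLIMIT_STACK, &rl) != 0) return max_stack;
  unsigned long size = rl.rlim_cur;                    // may be RLIM_INFINITY
  if (size > gap_below_stack) size = gap_below_stack;  // avoid prior mapping
  if (size > max_stack) size = max_stack;              // cap unlimited stacks
  return size;
}
#endif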
if (stacksize > kMaxThreadStackSize) stacksize = kMaxThreadStackSize; - *stack_top = end; - *stack_bottom = end - stacksize; + *stack_top = segment.end; + *stack_bottom = segment.end - stacksize; return; } pthread_attr_t attr; pthread_attr_init(&attr); CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0); uptr stacksize = 0; void *stackaddr = nullptr; my_pthread_attr_getstack(&attr, &stackaddr, &stacksize); pthread_attr_destroy(&attr); *stack_top = (uptr)stackaddr + stacksize; *stack_bottom = (uptr)stackaddr; } #if !SANITIZER_GO bool SetEnv(const char *name, const char *value) { void *f = dlsym(RTLD_NEXT, "setenv"); if (!f) return false; typedef int(*setenv_ft)(const char *name, const char *value, int overwrite); setenv_ft setenv_f; CHECK_EQ(sizeof(setenv_f), sizeof(f)); internal_memcpy(&setenv_f, &f, sizeof(f)); return setenv_f(name, value, 1) == 0; } #endif bool SanitizerSetThreadName(const char *name) { #ifdef PR_SET_NAME return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT #else return false; #endif } bool SanitizerGetThreadName(char *name, int max_len) { #ifdef PR_GET_NAME char buff[17]; if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT return false; internal_strncpy(name, buff, max_len); name[max_len] = 0; return true; #else return false; #endif } #if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO static uptr g_tls_size; #ifdef __i386__ # define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall)) #else # define DL_INTERNAL_FUNCTION #endif void InitTlsSize() { // all current supported platforms have 16 bytes stack alignment const size_t kStackAlign = 16; typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION; get_tls_func get_tls; void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr)); internal_memcpy(&get_tls, &get_tls_static_info_ptr, sizeof(get_tls_static_info_ptr)); CHECK_NE(get_tls, 0); size_t tls_size = 0; size_t tls_align = 0; get_tls(&tls_size, &tls_align); if (tls_align < kStackAlign) tls_align = kStackAlign; g_tls_size = RoundUpTo(tls_size, tls_align); } #else void InitTlsSize() { } #endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO #if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \ || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) \ || defined(__arm__)) && SANITIZER_LINUX && !SANITIZER_ANDROID // sizeof(struct pthread) from glibc. static atomic_uintptr_t kThreadDescriptorSize; uptr ThreadDescriptorSize() { uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed); if (val) return val; #if defined(__x86_64__) || defined(__i386__) || defined(__arm__) #ifdef _CS_GNU_LIBC_VERSION char buf[64]; uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf)); if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) { char *end; int minor = internal_simple_strtoll(buf + 8, &end, 10); if (end != buf + 8 && (*end == '\0' || *end == '.' || *end == '-')) { int patch = 0; if (*end == '.') // strtoll will return 0 if no valid conversion could be performed patch = internal_simple_strtoll(end + 1, nullptr, 10); /* sizeof(struct pthread) values from various glibc versions. */ if (SANITIZER_X32) val = 1728; // Assume only one particular version for x32. // For ARM sizeof(struct pthread) changed in Glibc 2.23. else if (SANITIZER_ARM) val = minor <= 22 ? 
1120 : 1216; else if (minor <= 3) val = FIRST_32_SECOND_64(1104, 1696); else if (minor == 4) val = FIRST_32_SECOND_64(1120, 1728); else if (minor == 5) val = FIRST_32_SECOND_64(1136, 1728); else if (minor <= 9) val = FIRST_32_SECOND_64(1136, 1712); else if (minor == 10) val = FIRST_32_SECOND_64(1168, 1776); else if (minor == 11 || (minor == 12 && patch == 1)) val = FIRST_32_SECOND_64(1168, 2288); else if (minor <= 13) val = FIRST_32_SECOND_64(1168, 2304); else val = FIRST_32_SECOND_64(1216, 2304); } if (val) atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); return val; } #endif #elif defined(__mips__) // TODO(sagarthakur): add more values as per different glibc versions. val = FIRST_32_SECOND_64(1152, 1776); if (val) atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); return val; #elif defined(__aarch64__) // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22. val = 1776; atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); return val; #elif defined(__powerpc64__) val = 1776; // from glibc.ppc64le 2.20-8.fc21 atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); return val; #elif defined(__s390__) val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22 atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); #endif return 0; } // The offset at which pointer to self is located in the thread descriptor. const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16); uptr ThreadSelfOffset() { return kThreadSelfOffset; } #if defined(__mips__) || defined(__powerpc64__) // TlsPreTcbSize includes size of struct pthread_descr and size of tcb // head structure. It lies before the static tls blocks. static uptr TlsPreTcbSize() { # if defined(__mips__) const uptr kTcbHead = 16; // sizeof (tcbhead_t) # elif defined(__powerpc64__) const uptr kTcbHead = 88; // sizeof (tcbhead_t) # endif const uptr kTlsAlign = 16; const uptr kTlsPreTcbSize = RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign); return kTlsPreTcbSize; } #endif uptr ThreadSelf() { uptr descr_addr; # if defined(__i386__) asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset)); # elif defined(__x86_64__) asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset)); # elif defined(__mips__) // MIPS uses TLS variant I. The thread pointer (in hardware register $29) // points to the end of the TCB + 0x7000. The pthread_descr structure is // immediately in front of the TCB. TlsPreTcbSize() includes the size of the // TCB and the size of pthread_descr. const uptr kTlsTcbOffset = 0x7000; uptr thread_pointer; asm volatile(".set push;\ .set mips64r2;\ rdhwr %0,$29;\ .set pop" : "=r" (thread_pointer)); descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize(); # elif defined(__aarch64__) || defined(__arm__) descr_addr = reinterpret_cast(__builtin_thread_pointer()) - ThreadDescriptorSize(); # elif defined(__s390__) descr_addr = reinterpret_cast(__builtin_thread_pointer()); # elif defined(__powerpc64__) // PPC64LE uses TLS variant I. The thread pointer (in GPR 13) // points to the end of the TCB + 0x7000. The pthread_descr structure is // immediately in front of the TCB. TlsPreTcbSize() includes the size of the // TCB and the size of pthread_descr. 
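// Worked example for ppc64, using the 1776-byte descriptor size recorded
// above: TlsPreTcbSize() = RoundUpTo(1776 + 88, 16) = 1872, so the
// descriptor starts 0x7000 + 1872 bytes below the raw thread pointer.
#if 0  // The same arithmetic as a standalone check (hypothetical helper):
static unsigned long Ppc64PreTcbSize(void) {
  const unsigned long kDescr = 1776, kTcbHead = 88, kTlsAlign = 16;
  return (kDescr + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);  // 1872
}
#endif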
const uptr kTlsTcbOffset = 0x7000; uptr thread_pointer; asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset)); descr_addr = thread_pointer - TlsPreTcbSize(); # else # error "unsupported CPU arch" # endif return descr_addr; } #endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX #if SANITIZER_FREEBSD static void **ThreadSelfSegbase() { void **segbase = 0; # if defined(__i386__) // sysarch(I386_GET_GSBASE, segbase); __asm __volatile("mov %%gs:0, %0" : "=r" (segbase)); # elif defined(__x86_64__) // sysarch(AMD64_GET_FSBASE, segbase); __asm __volatile("movq %%fs:0, %0" : "=r" (segbase)); # else # error "unsupported CPU arch for FreeBSD platform" # endif return segbase; } uptr ThreadSelf() { return (uptr)ThreadSelfSegbase()[2]; } #endif // SANITIZER_FREEBSD #if !SANITIZER_GO static void GetTls(uptr *addr, uptr *size) { #if SANITIZER_LINUX && !SANITIZER_ANDROID # if defined(__x86_64__) || defined(__i386__) || defined(__s390__) *addr = ThreadSelf(); *size = GetTlsSize(); *addr -= *size; *addr += ThreadDescriptorSize(); # elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \ || defined(__arm__) *addr = ThreadSelf(); *size = GetTlsSize(); # else *addr = 0; *size = 0; # endif #elif SANITIZER_FREEBSD void** segbase = ThreadSelfSegbase(); *addr = 0; *size = 0; if (segbase != 0) { // tcbalign = 16 // tls_size = round(tls_static_space, tcbalign); // dtv = segbase[1]; // dtv[2] = segbase - tls_static_space; void **dtv = (void**) segbase[1]; *addr = (uptr) dtv[2]; *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]); } #elif SANITIZER_ANDROID *addr = 0; *size = 0; #else # error "Unknown OS" #endif } #endif #if !SANITIZER_GO uptr GetTlsSize() { #if SANITIZER_FREEBSD || SANITIZER_ANDROID uptr addr, size; GetTls(&addr, &size); return size; #elif defined(__mips__) || defined(__powerpc64__) return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16); #else return g_tls_size; #endif } #endif void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size) { #if SANITIZER_GO // Stub implementation for Go. *stk_addr = *stk_size = *tls_addr = *tls_size = 0; #else GetTls(tls_addr, tls_size); uptr stack_top, stack_bottom; GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); *stk_addr = stack_bottom; *stk_size = stack_top - stack_bottom; if (!main) { // If stack and tls intersect, make them non-intersecting. if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) { CHECK_GT(*tls_addr + *tls_size, *stk_addr); CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size); *stk_size -= *tls_size; *tls_addr = *stk_addr + *stk_size; } } #endif } # if !SANITIZER_FREEBSD typedef ElfW(Phdr) Elf_Phdr; # elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2 # define Elf_Phdr XElf32_Phdr # define dl_phdr_info xdl_phdr_info # define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b)) # endif struct DlIteratePhdrData { InternalMmapVector *modules; bool first; }; static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { DlIteratePhdrData *data = (DlIteratePhdrData*)arg; InternalScopedString module_name(kMaxPathLength); if (data->first) { data->first = false; // First module is the binary itself. 
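// dl_iterate_phdr() reports the main executable first, typically with an
// empty dlpi_name, which is why the first callback invocation falls back to
// ReadBinaryNameCached() below.
#if 0  // Standalone sketch of the same enumeration (glibc API):
#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>
static int PrintModule(struct dl_phdr_info *info, size_t size, void *data) {
  printf("%s @ 0x%lx\n", info->dlpi_name[0] ? info->dlpi_name : "(main)",
         (unsigned long)info->dlpi_addr);
  for (int i = 0; i < info->dlpi_phnum; i++)
    if (info->dlpi_phdr[i].p_type == PT_LOAD)   // a mapped segment
      printf("  PT_LOAD %#lx..%#lx\n",
             (unsigned long)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr),
             (unsigned long)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr +
                             info->dlpi_phdr[i].p_memsz));
  return 0;  // zero keeps the iteration going
}
// Usage: dl_iterate_phdr(PrintModule, 0);
#endif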
ReadBinaryNameCached(module_name.data(), module_name.size()); } else if (info->dlpi_name) { module_name.append("%s", info->dlpi_name); } if (module_name[0] == '\0') return 0; LoadedModule cur_module; cur_module.set(module_name.data(), info->dlpi_addr); for (int i = 0; i < info->dlpi_phnum; i++) { const Elf_Phdr *phdr = &info->dlpi_phdr[i]; if (phdr->p_type == PT_LOAD) { uptr cur_beg = info->dlpi_addr + phdr->p_vaddr; uptr cur_end = cur_beg + phdr->p_memsz; bool executable = phdr->p_flags & PF_X; bool writable = phdr->p_flags & PF_W; cur_module.addAddressRange(cur_beg, cur_end, executable, writable); } } data->modules->push_back(cur_module); return 0; } #if SANITIZER_ANDROID && __ANDROID_API__ < 21 extern "C" __attribute__((weak)) int dl_iterate_phdr( int (*)(struct dl_phdr_info *, size_t, void *), void *); #endif void ListOfModules::init() { clear(); #if SANITIZER_ANDROID && __ANDROID_API__ <= 22 u32 api_level = AndroidGetApiLevel(); // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken. // The runtime check allows the same library to work with // both K and L (and future) Android releases. if (api_level <= ANDROID_LOLLIPOP_MR1) { // L or earlier MemoryMappingLayout memory_mapping(false); memory_mapping.DumpListOfModules(&modules_); return; } #endif DlIteratePhdrData data = {&modules_, true}; dl_iterate_phdr(dl_iterate_phdr_cb, &data); } // getrusage does not give us the current RSS, only the max RSS. // Still, this is better than nothing if /proc/self/statm is not available // for some reason, e.g. due to a sandbox. static uptr GetRSSFromGetrusage() { struct rusage usage; if (getrusage(RUSAGE_SELF, &usage)) // Failed, probably due to a sandbox. return 0; return usage.ru_maxrss << 10; // ru_maxrss is in Kb. } uptr GetRSS() { if (!common_flags()->can_use_proc_maps_statm) return GetRSSFromGetrusage(); fd_t fd = OpenFile("/proc/self/statm", RdOnly); if (fd == kInvalidFd) return GetRSSFromGetrusage(); char buf[64]; uptr len = internal_read(fd, buf, sizeof(buf) - 1); internal_close(fd); if ((sptr)len <= 0) return 0; buf[len] = 0; // The format of the file is: // 1084 89 69 11 0 79 0 // We need the second number which is RSS in pages. char *pos = buf; // Skip the first number. while (*pos >= '0' && *pos <= '9') pos++; // Skip whitespaces. while (!(*pos >= '0' && *pos <= '9') && *pos != 0) pos++; // Read the number. uptr rss = 0; while (*pos >= '0' && *pos <= '9') rss = rss * 10 + *pos++ - '0'; return rss * GetPageSizeCached(); } // 64-bit Android targets don't provide the deprecated __android_log_write. // Starting with the L release, syslog() works and is preferable to // __android_log_write. 
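// The block below picks the logging backend at build time via
// __ANDROID_API__; the same decision made at run time would look like this
// sketch (hypothetical api_level parameter):
#if 0
#include <android/log.h>
#include <syslog.h>
static void LogLine(const char *s, int api_level) {
  if (api_level < 21)                        // K and older: only liblog works
    __android_log_write(ANDROID_LOG_INFO, 0, s);
  else                                       // L and newer: syslog() works
    syslog(LOG_INFO, "%s", s);
}
#endif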
#if SANITIZER_LINUX #if SANITIZER_ANDROID static atomic_uint8_t android_log_initialized; void AndroidLogInit() { openlog(GetProcessName(), 0, LOG_USER); atomic_store(&android_log_initialized, 1, memory_order_release); } static bool ShouldLogAfterPrintf() { return atomic_load(&android_log_initialized, memory_order_acquire); } #else void AndroidLogInit() {} static bool ShouldLogAfterPrintf() { return true; } #endif // SANITIZER_ANDROID void WriteOneLineToSyslog(const char *s) { #if SANITIZER_ANDROID &&__ANDROID_API__ < 21 __android_log_write(ANDROID_LOG_INFO, NULL, s); #else syslog(LOG_INFO, "%s", s); #endif } void LogMessageOnPrintf(const char *str) { if (common_flags()->log_to_syslog && ShouldLogAfterPrintf()) WriteToSyslog(str); } #if SANITIZER_ANDROID && __ANDROID_API__ >= 21 extern "C" void android_set_abort_message(const char *msg); void SetAbortMessage(const char *str) { android_set_abort_message(str); } #else void SetAbortMessage(const char *str) {} #endif #endif // SANITIZER_LINUX } // namespace __sanitizer #endif // SANITIZER_FREEBSD || SANITIZER_LINUX Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac.cc (revision 320961) @@ -1,933 +1,979 @@ //===-- sanitizer_mac.cc --------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between various sanitizers' runtime libraries and // implements OSX-specific functions. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_MAC #include "sanitizer_mac.h" // Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so // the clients will most certainly use 64-bit ones as well. #ifndef _DARWIN_USE_64_BIT_INODE #define _DARWIN_USE_64_BIT_INODE 1 #endif #include #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" #include "sanitizer_placement_new.h" #include "sanitizer_platform_limits_posix.h" #include "sanitizer_procmaps.h" #if !SANITIZER_IOS #include // for _NSGetEnviron #else extern char **environ; #endif #if defined(__has_include) && __has_include() #define SANITIZER_OS_TRACE 1 #include #else #define SANITIZER_OS_TRACE 0 #endif #if !SANITIZER_IOS #include // for _NSGetArgv and _NSGetEnviron #else extern "C" { extern char ***_NSGetArgv(void); } #endif #include #include // for dladdr() #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // From , but we don't have that file on iOS. extern "C" { extern char ***_NSGetArgv(void); extern char ***_NSGetEnviron(void); } // From , but we don't have that file on iOS. extern "C" { extern kern_return_t mach_vm_region_recurse( vm_map_t target_task, mach_vm_address_t *address, mach_vm_size_t *size, natural_t *nesting_depth, vm_region_recurse_info_t info, mach_msg_type_number_t *infoCnt); } namespace __sanitizer { #include "sanitizer_syscall_generic.inc" // Direct syscalls, don't call libmalloc hooks (but not available on 10.6). 
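// __mmap and __munmap are declared weak below, so on OS X releases that do
// not export them (10.6) the address-of test yields null and the wrappers
// fall back to the public libc entry points.
#if 0  // The probe idiom in isolation (hypothetical symbol; sketch only):
extern "C" void *__direct_mmap_entry(void *, unsigned long)
    __attribute__((weak));
static void *MapPreferringDirect(void *addr, unsigned long len) {
  if (&__direct_mmap_entry)        // non-null only where the OS exports it
    return __direct_mmap_entry(addr, len);
  return 0;                        // caller would use the public mmap() here
}
#endif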
extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off) SANITIZER_WEAK_ATTRIBUTE; extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE; // ---------------------- sanitizer_libc.h uptr internal_mmap(void *addr, size_t length, int prot, int flags, int fd, u64 offset) { if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL); if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset); return (uptr)mmap(addr, length, prot, flags, fd, offset); } uptr internal_munmap(void *addr, uptr length) { if (&__munmap) return __munmap(addr, length); return munmap(addr, length); } int internal_mprotect(void *addr, uptr length, int prot) { return mprotect(addr, length, prot); } uptr internal_close(fd_t fd) { return close(fd); } uptr internal_open(const char *filename, int flags) { return open(filename, flags); } uptr internal_open(const char *filename, int flags, u32 mode) { return open(filename, flags, mode); } uptr internal_read(fd_t fd, void *buf, uptr count) { return read(fd, buf, count); } uptr internal_write(fd_t fd, const void *buf, uptr count) { return write(fd, buf, count); } uptr internal_stat(const char *path, void *buf) { return stat(path, (struct stat *)buf); } uptr internal_lstat(const char *path, void *buf) { return lstat(path, (struct stat *)buf); } uptr internal_fstat(fd_t fd, void *buf) { return fstat(fd, (struct stat *)buf); } uptr internal_filesize(fd_t fd) { struct stat st; if (internal_fstat(fd, &st)) return -1; return (uptr)st.st_size; } uptr internal_dup2(int oldfd, int newfd) { return dup2(oldfd, newfd); } uptr internal_readlink(const char *path, char *buf, uptr bufsize) { return readlink(path, buf, bufsize); } uptr internal_unlink(const char *path) { return unlink(path); } uptr internal_sched_yield() { return sched_yield(); } void internal__exit(int exitcode) { _exit(exitcode); } unsigned int internal_sleep(unsigned int seconds) { return sleep(seconds); } uptr internal_getpid() { return getpid(); } int internal_sigaction(int signum, const void *act, void *oldact) { return sigaction(signum, (struct sigaction *)act, (struct sigaction *)oldact); } void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); } uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) { - return sigprocmask(how, set, oldset); + // Don't use sigprocmask here, because it affects all threads. + return pthread_sigmask(how, set, oldset); } // Doesn't call pthread_atfork() handlers (but not available on 10.6). extern "C" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE; int internal_fork() { if (&__fork) return __fork(); return fork(); } int internal_forkpty(int *amaster) { int master, slave; if (openpty(&master, &slave, nullptr, nullptr, nullptr) == -1) return -1; int pid = internal_fork(); if (pid == -1) { close(master); close(slave); return -1; } if (pid == 0) { close(master); if (login_tty(slave) != 0) { // We already forked, there's not much we can do. Let's quit. 
Report("login_tty failed (errno %d)\n", errno); internal__exit(1); } } else { *amaster = master; close(slave); } return pid; } uptr internal_rename(const char *oldpath, const char *newpath) { return rename(oldpath, newpath); } uptr internal_ftruncate(fd_t fd, uptr size) { return ftruncate(fd, size); } uptr internal_execve(const char *filename, char *const argv[], char *const envp[]) { return execve(filename, argv, envp); } uptr internal_waitpid(int pid, int *status, int options) { return waitpid(pid, status, options); } // ----------------- sanitizer_common.h bool FileExists(const char *filename) { struct stat st; if (stat(filename, &st)) return false; // Sanity check: filename is a regular file. return S_ISREG(st.st_mode); } tid_t GetTid() { tid_t tid; pthread_threadid_np(nullptr, &tid); return tid; } void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom) { CHECK(stack_top); CHECK(stack_bottom); uptr stacksize = pthread_get_stacksize_np(pthread_self()); // pthread_get_stacksize_np() returns an incorrect stack size for the main // thread on Mavericks. See // https://github.com/google/sanitizers/issues/261 if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization && stacksize == (1 << 19)) { struct rlimit rl; CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0); // Most often rl.rlim_cur will be the desired 8M. if (rl.rlim_cur < kMaxThreadStackSize) { stacksize = rl.rlim_cur; } else { stacksize = kMaxThreadStackSize; } } void *stackaddr = pthread_get_stackaddr_np(pthread_self()); *stack_top = (uptr)stackaddr; *stack_bottom = *stack_top - stacksize; } char **GetEnviron() { #if !SANITIZER_IOS char ***env_ptr = _NSGetEnviron(); if (!env_ptr) { Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is " "called after libSystem_initializer().\n"); CHECK(env_ptr); } char **environ = *env_ptr; #endif CHECK(environ); return environ; } const char *GetEnv(const char *name) { char **env = GetEnviron(); uptr name_len = internal_strlen(name); while (*env != 0) { uptr len = internal_strlen(*env); if (len > name_len) { const char *p = *env; if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { // Match. return *env + name_len + 1; // String starting after =. } } env++; } return 0; } uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { CHECK_LE(kMaxPathLength, buf_len); // On OS X the executable path is saved to the stack by dyld. Reading it // from there is much faster than calling dladdr, especially for large // binaries with symbols. 
InternalScopedString exe_path(kMaxPathLength); uint32_t size = exe_path.size(); if (_NSGetExecutablePath(exe_path.data(), &size) == 0 && realpath(exe_path.data(), buf) != 0) { return internal_strlen(buf); } return 0; } uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) { return ReadBinaryName(buf, buf_len); } void ReExec() { UNIMPLEMENTED(); } uptr GetPageSize() { return sysconf(_SC_PAGESIZE); } BlockingMutex::BlockingMutex() { internal_memset(this, 0, sizeof(*this)); } void BlockingMutex::Lock() { CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_)); CHECK_EQ(OS_SPINLOCK_INIT, 0); CHECK_EQ(owner_, 0); OSSpinLockLock((OSSpinLock*)&opaque_storage_); } void BlockingMutex::Unlock() { OSSpinLockUnlock((OSSpinLock*)&opaque_storage_); } void BlockingMutex::CheckLocked() { CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0); } u64 NanoTime() { return 0; } uptr GetTlsSize() { return 0; } void InitTlsSize() { } uptr TlsBaseAddr() { uptr segbase = 0; #if defined(__x86_64__) asm("movq %%gs:0,%0" : "=r"(segbase)); #elif defined(__i386__) asm("movl %%gs:0,%0" : "=r"(segbase)); #endif return segbase; } // The size of the tls on darwin does not appear to be well documented, // however the vm memory map suggests that it is 1024 uptrs in size, // with a size of 0x2000 bytes on x86_64 and 0x1000 bytes on i386. uptr TlsSize() { #if defined(__x86_64__) || defined(__i386__) return 1024 * sizeof(uptr); #else return 0; #endif } void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size) { #if !SANITIZER_GO uptr stack_top, stack_bottom; GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); *stk_addr = stack_bottom; *stk_size = stack_top - stack_bottom; *tls_addr = TlsBaseAddr(); *tls_size = TlsSize(); #else *stk_addr = 0; *stk_size = 0; *tls_addr = 0; *tls_size = 0; #endif } void ListOfModules::init() { clear(); MemoryMappingLayout memory_mapping(false); memory_mapping.DumpListOfModules(&modules_); } static HandleSignalMode GetHandleSignalModeImpl(int signum) { switch (signum) { case SIGABRT: return common_flags()->handle_abort; case SIGILL: return common_flags()->handle_sigill; case SIGFPE: return common_flags()->handle_sigfpe; case SIGSEGV: return common_flags()->handle_segv; case SIGBUS: return common_flags()->handle_sigbus; } return kHandleSignalNo; } HandleSignalMode GetHandleSignalMode(int signum) { // Handling fatal signals on watchOS and tvOS devices is disallowed. if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM)) return kHandleSignalNo; HandleSignalMode result = GetHandleSignalModeImpl(signum); if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler) return kHandleSignalExclusive; return result; } MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED; MacosVersion GetMacosVersionInternal() { int mib[2] = { CTL_KERN, KERN_OSRELEASE }; char version[100]; uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]); for (uptr i = 0; i < maxlen; i++) version[i] = '\0'; // Get the version length. 
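// sysctl() is called twice below: first with a null buffer to obtain the
// string length, then to fetch kern.osrelease itself. The Darwin kernel
// major maps onto the marketing version (e.g. "13.x.y" means OS X 10.9
// Mavericks), which is what the switch below decodes digit by digit.
#if 0  // One-shot equivalent with a fixed-size buffer (standalone sketch):
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
int main(void) {
  int mib[2] = {CTL_KERN, KERN_OSRELEASE};
  char rel[64];
  size_t len = sizeof(rel);
  if (sysctl(mib, 2, rel, &len, 0, 0) == 0)
    printf("Darwin %s\n", rel);  // e.g. "13.4.0" on Mavericks
  return 0;
}
#endif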
CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1); CHECK_LT(len, maxlen); CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1); switch (version[0]) { case '9': return MACOS_VERSION_LEOPARD; case '1': { switch (version[1]) { case '0': return MACOS_VERSION_SNOW_LEOPARD; case '1': return MACOS_VERSION_LION; case '2': return MACOS_VERSION_MOUNTAIN_LION; case '3': return MACOS_VERSION_MAVERICKS; case '4': return MACOS_VERSION_YOSEMITE; default: if (IsDigit(version[1])) return MACOS_VERSION_UNKNOWN_NEWER; else return MACOS_VERSION_UNKNOWN; } } default: return MACOS_VERSION_UNKNOWN; } } MacosVersion GetMacosVersion() { atomic_uint32_t *cache = reinterpret_cast(&cached_macos_version); MacosVersion result = static_cast(atomic_load(cache, memory_order_acquire)); if (result == MACOS_VERSION_UNINITIALIZED) { result = GetMacosVersionInternal(); atomic_store(cache, result, memory_order_release); } return result; } bool PlatformHasDifferentMemcpyAndMemmove() { // On OS X 10.7 memcpy() and memmove() are both resolved // into memmove$VARIANT$sse42. // See also https://github.com/google/sanitizers/issues/34. // TODO(glider): need to check dynamically that memcpy() and memmove() are // actually the same function. return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD; } uptr GetRSS() { struct task_basic_info info; unsigned count = TASK_BASIC_INFO_COUNT; kern_return_t result = task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count); if (UNLIKELY(result != KERN_SUCCESS)) { Report("Cannot get task info. Error: %d\n", result); Die(); } return info.resident_size; } void *internal_start_thread(void(*func)(void *arg), void *arg) { // Start the thread with signals blocked, otherwise it can steal user signals. __sanitizer_sigset_t set, old; internal_sigfillset(&set); internal_sigprocmask(SIG_SETMASK, &set, &old); pthread_t th; pthread_create(&th, 0, (void*(*)(void *arg))func, arg); internal_sigprocmask(SIG_SETMASK, &old, 0); return th; } void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); } #if !SANITIZER_GO static BlockingMutex syslog_lock(LINKER_INITIALIZED); #endif void WriteOneLineToSyslog(const char *s) { #if !SANITIZER_GO syslog_lock.CheckLocked(); asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s); #endif } void LogMessageOnPrintf(const char *str) { // Log all printf output to CrashLog. if (common_flags()->abort_on_error) CRAppendCrashLogMessage(str); } void LogFullErrorReport(const char *buffer) { #if !SANITIZER_GO // Log with os_trace. This will make it into the crash log. #if SANITIZER_OS_TRACE if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) { // os_trace requires the message (format parameter) to be a string literal. if (internal_strncmp(SanitizerToolName, "AddressSanitizer", sizeof("AddressSanitizer") - 1) == 0) os_trace("Address Sanitizer reported a failure."); else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer", sizeof("UndefinedBehaviorSanitizer") - 1) == 0) os_trace("Undefined Behavior Sanitizer reported a failure."); else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer", sizeof("ThreadSanitizer") - 1) == 0) os_trace("Thread Sanitizer reported a failure."); else os_trace("Sanitizer tool reported a failure."); if (common_flags()->log_to_syslog) os_trace("Consult syslog for more information."); } #endif // Log to syslog. // The logging on OS X may call pthread_create so we need the threading // environment to be fully initialized. Also, this should never be called when // holding the thread registry lock since that may result in a deadlock. 
If // the reporting thread holds the thread registry mutex, and asl_log waits // for GCD to dispatch a new thread, the process will deadlock, because the // pthread_create wrapper needs to acquire the lock as well. BlockingMutexLock l(&syslog_lock); if (common_flags()->log_to_syslog) WriteToSyslog(buffer); // The report is added to CrashLog as part of logging all of Printf output. #endif } SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) { #if defined(__x86_64__) || defined(__i386__) ucontext_t *ucontext = static_cast(context); return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ; #else return UNKNOWN; #endif } void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { ucontext_t *ucontext = (ucontext_t*)context; # if defined(__aarch64__) *pc = ucontext->uc_mcontext->__ss.__pc; # if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0 *bp = ucontext->uc_mcontext->__ss.__fp; # else *bp = ucontext->uc_mcontext->__ss.__lr; # endif *sp = ucontext->uc_mcontext->__ss.__sp; # elif defined(__x86_64__) *pc = ucontext->uc_mcontext->__ss.__rip; *bp = ucontext->uc_mcontext->__ss.__rbp; *sp = ucontext->uc_mcontext->__ss.__rsp; # elif defined(__arm__) *pc = ucontext->uc_mcontext->__ss.__pc; *bp = ucontext->uc_mcontext->__ss.__r[7]; *sp = ucontext->uc_mcontext->__ss.__sp; # elif defined(__i386__) *pc = ucontext->uc_mcontext->__ss.__eip; *bp = ucontext->uc_mcontext->__ss.__ebp; *sp = ucontext->uc_mcontext->__ss.__esp; # else # error "Unknown architecture" # endif } #if !SANITIZER_GO static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES"; LowLevelAllocator allocator_for_env; // Change the value of the env var |name|, leaking the original value. // If |name_value| is NULL, the variable is deleted from the environment, // otherwise the corresponding "NAME=value" string is replaced with // |name_value|. void LeakyResetEnv(const char *name, const char *name_value) { char **env = GetEnviron(); uptr name_len = internal_strlen(name); while (*env != 0) { uptr len = internal_strlen(*env); if (len > name_len) { const char *p = *env; if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { // Match. if (name_value) { // Replace the old value with the new one. *env = const_cast(name_value); } else { // Shift the subsequent pointers back. char **del = env; do { del[0] = del[1]; } while (*del++); } } } env++; } } SANITIZER_WEAK_CXX_DEFAULT_IMPL bool ReexecDisabled() { return false; } extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber; static const double kMinDyldVersionWithAutoInterposition = 360.0; bool DyldNeedsEnvVariable() { // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users // still may want use them on older systems. On older Darwin platforms, dyld // doesn't export dyldVersionNumber symbol and we simply return true. if (!&dyldVersionNumber) return true; // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via // GetMacosVersion() doesn't work for the simulator. Let's instead check // `dyldVersionNumber`, which is exported by dyld, against a known version // number from the first OS release where this appeared. return dyldVersionNumber < kMinDyldVersionWithAutoInterposition; } void MaybeReexec() { if (ReexecDisabled()) return; // Make sure the dynamic runtime library is preloaded so that the // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec // ourselves. 
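// dladdr() on one of the runtime's own symbols yields the path of the
// sanitizer dylib; the code below then checks whether that dylib already
// appears in DYLD_INSERT_LIBRARIES.
#if 0  // The presence test in isolation (sketch; the real code compares
       // basenames via StripModuleName rather than full paths):
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
static int RuntimeIsPreloaded(void *symbol_in_runtime) {
  Dl_info info;
  if (!dladdr(symbol_in_runtime, &info)) return 0;
  const char *env = getenv("DYLD_INSERT_LIBRARIES");
  return env && strstr(env, info.dli_fname) != 0;
}
#endif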
Dl_info info; RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info)); char *dyld_insert_libraries = const_cast(GetEnv(kDyldInsertLibraries)); uptr old_env_len = dyld_insert_libraries ? internal_strlen(dyld_insert_libraries) : 0; uptr fname_len = internal_strlen(info.dli_fname); const char *dylib_name = StripModuleName(info.dli_fname); uptr dylib_name_len = internal_strlen(dylib_name); bool lib_is_in_env = dyld_insert_libraries && internal_strstr(dyld_insert_libraries, dylib_name); if (DyldNeedsEnvVariable() && !lib_is_in_env) { // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime // library. InternalScopedString program_name(1024); uint32_t buf_size = program_name.size(); _NSGetExecutablePath(program_name.data(), &buf_size); char *new_env = const_cast(info.dli_fname); if (dyld_insert_libraries) { // Append the runtime dylib name to the existing value of // DYLD_INSERT_LIBRARIES. new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2); internal_strncpy(new_env, dyld_insert_libraries, old_env_len); new_env[old_env_len] = ':'; // Copy fname_len and add a trailing zero. internal_strncpy(new_env + old_env_len + 1, info.dli_fname, fname_len + 1); // Ok to use setenv() since the wrappers don't depend on the value of // asan_inited. setenv(kDyldInsertLibraries, new_env, /*overwrite*/1); } else { // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name. setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0); } VReport(1, "exec()-ing the program with\n"); VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env); VReport(1, "to enable wrappers.\n"); execv(program_name.data(), *_NSGetArgv()); // We get here only if execv() failed. Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, " "which is required for the sanitizer to work. We tried to set the " "environment variable and re-execute itself, but execv() failed, " "possibly because of sandbox restrictions. Make sure to launch the " "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env); RAW_CHECK("execv failed" && 0); } // Verify that interceptors really work. We'll use dlsym to locate // "pthread_create", if interceptors are working, it should really point to // "wrap_pthread_create" within our own dylib. Dl_info info_pthread_create; void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create"); RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create)); if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) { Report( "ERROR: Interceptors are not working. This may be because %s is " "loaded too late (e.g. via dlopen). Please launch the executable " "with:\n%s=%s\n", SanitizerToolName, kDyldInsertLibraries, info.dli_fname); RAW_CHECK("interceptors not installed" && 0); } if (!lib_is_in_env) return; // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove // the dylib from the environment variable, because interceptors are installed // and we don't want our children to inherit the variable. uptr env_name_len = internal_strlen(kDyldInsertLibraries); // Allocate memory to hold the previous env var name, its value, the '=' // sign and the '\0' char. char *new_env = (char*)allocator_for_env.Allocate( old_env_len + 2 + env_name_len); RAW_CHECK(new_env); internal_memset(new_env, '\0', old_env_len + 2 + env_name_len); internal_strncpy(new_env, kDyldInsertLibraries, env_name_len); new_env[env_name_len] = '='; char *new_env_pos = new_env + env_name_len + 1; // Iterate over colon-separated pieces of |dyld_insert_libraries|. 
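// The loop below avoids libc string helpers and heap allocation at this
// early point in startup, so it scans the string by hand. Its intent, in
// portable C:
#if 0  // Sketch (assumes |out| is large enough; drops entries whose
       // basename matches |drop_basename|):
#include <string.h>
static void FilterColonList(char *list, const char *drop_basename, char *out) {
  out[0] = '\0';
  char *save = 0;
  for (char *p = strtok_r(list, ":", &save); p;
       p = strtok_r(0, ":", &save)) {
    const char *base = strrchr(p, '/');
    base = base ? base + 1 : p;
    if (strcmp(base, drop_basename) == 0) continue;  // skip our own dylib
    if (out[0]) strcat(out, ":");
    strcat(out, p);
  }
}
#endif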
char *piece_start = dyld_insert_libraries; char *piece_end = NULL; char *old_env_end = dyld_insert_libraries + old_env_len; do { if (piece_start[0] == ':') piece_start++; piece_end = internal_strchr(piece_start, ':'); if (!piece_end) piece_end = dyld_insert_libraries + old_env_len; if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break; uptr piece_len = piece_end - piece_start; char *filename_start = (char *)internal_memrchr(piece_start, '/', piece_len); uptr filename_len = piece_len; if (filename_start) { filename_start += 1; filename_len = piece_len - (filename_start - piece_start); } else { filename_start = piece_start; } // If the current piece isn't the runtime library name, // append it to new_env. if ((dylib_name_len != filename_len) || (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) { if (new_env_pos != new_env + env_name_len + 1) { new_env_pos[0] = ':'; new_env_pos++; } internal_strncpy(new_env_pos, piece_start, piece_len); new_env_pos += piece_len; } // Move on to the next piece. piece_start = piece_end; } while (piece_start < old_env_end); // Can't use setenv() here, because it requires the allocator to be // initialized. // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in // a separate function called after InitializeAllocator(). if (new_env_pos == new_env + env_name_len + 1) new_env = NULL; LeakyResetEnv(kDyldInsertLibraries, new_env); } #endif // SANITIZER_GO char **GetArgv() { return *_NSGetArgv(); } +#if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM +// The task_vm_info struct is normally provided by the macOS SDK, but we need +// fields only available in 10.12+. Declare the struct manually to be able to +// build against older SDKs. +struct __sanitizer_task_vm_info { + uptr _unused[(SANITIZER_WORDSIZE == 32) ? 20 : 19]; + uptr min_address; + uptr max_address; +}; + +uptr GetTaskInfoMaxAddress() { + __sanitizer_task_vm_info vm_info = {{0}, 0, 0}; + mach_msg_type_number_t count = sizeof(vm_info) / sizeof(int); + int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count); + if (err == 0) { + return vm_info.max_address - 1; + } else { + // xnu cannot provide vm address limit + return 0x200000000 - 1; + } +} +#endif + +uptr GetMaxVirtualAddress() { +#if SANITIZER_WORDSIZE == 64 +# if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM + // Get the maximum VM address + static uptr max_vm = GetTaskInfoMaxAddress(); + CHECK(max_vm); + return max_vm; +# else + return (1ULL << 47) - 1; // 0x00007fffffffffffUL; +# endif +#else // SANITIZER_WORDSIZE == 32 + return (1ULL << 32) - 1; // 0xffffffff; +#endif // SANITIZER_WORDSIZE +} + uptr FindAvailableMemoryRange(uptr shadow_size, uptr alignment, - uptr left_padding) { + uptr left_padding, + uptr *largest_gap_found) { typedef vm_region_submap_short_info_data_64_t RegionInfo; enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 }; // Start searching for available memory region past PAGEZERO, which is // 4KB on 32-bit and 4GB on 64-bit. mach_vm_address_t start_address = (SANITIZER_WORDSIZE == 32) ? 
0x000000001000 : 0x000100000000; mach_vm_address_t address = start_address; mach_vm_address_t free_begin = start_address; kern_return_t kr = KERN_SUCCESS; + if (largest_gap_found) *largest_gap_found = 0; while (kr == KERN_SUCCESS) { mach_vm_size_t vmsize = 0; natural_t depth = 0; RegionInfo vminfo; mach_msg_type_number_t count = kRegionInfoSize; kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth, (vm_region_info_t)&vminfo, &count); if (free_begin != address) { // We found a free region [free_begin..address-1]. - uptr shadow_address = RoundUpTo((uptr)free_begin + left_padding, - alignment); - if (shadow_address + shadow_size < (uptr)address) { - return shadow_address; + uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment); + uptr gap_end = RoundDownTo((uptr)address, alignment); + uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0; + if (shadow_size < gap_size) { + return gap_start; + } + + if (largest_gap_found && *largest_gap_found < gap_size) { + *largest_gap_found = gap_size; } } // Move to the next region. address += vmsize; free_begin = address; } // We looked at all free regions and could not find one large enough. return 0; } // FIXME implement on this platform. void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { } void SignalContext::DumpAllRegisters(void *context) { Report("Register values:\n"); ucontext_t *ucontext = (ucontext_t*)context; # define DUMPREG64(r) \ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r); # define DUMPREG32(r) \ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r); # define DUMPREG_(r) Printf(" "); DUMPREG(r); # define DUMPREG__(r) Printf(" "); DUMPREG(r); # define DUMPREG___(r) Printf(" "); DUMPREG(r); # if defined(__x86_64__) # define DUMPREG(r) DUMPREG64(r) DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n"); DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n"); DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n"); DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n"); # elif defined(__i386__) # define DUMPREG(r) DUMPREG32(r) DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n"); DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n"); # elif defined(__aarch64__) # define DUMPREG(r) DUMPREG64(r) DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n"); DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n"); DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n"); DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n"); DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n"); DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n"); DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n"); DUMPREG(x[28]); DUMPREG___(fp); DUMPREG___(lr); DUMPREG___(sp); Printf("\n"); # elif defined(__arm__) # define DUMPREG(r) DUMPREG32(r) DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n"); DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n"); DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n"); DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n"); # else # error "Unknown architecture" # endif # undef DUMPREG64 # undef DUMPREG32 # undef DUMPREG_ # undef DUMPREG__ # undef DUMPREG___ # undef DUMPREG } static inline bool CompareBaseAddress(const 
LoadedModule &a, const LoadedModule &b) { return a.base_address() < b.base_address(); } void FormatUUID(char *out, uptr size, const u8 *uuid) { internal_snprintf(out, size, "<%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-" "%02X%02X%02X%02X%02X%02X>", uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]); } void PrintModuleMap() { Printf("Process module map:\n"); MemoryMappingLayout memory_mapping(false); InternalMmapVector modules(/*initial_capacity*/ 128); memory_mapping.DumpListOfModules(&modules); InternalSort(&modules, modules.size(), CompareBaseAddress); for (uptr i = 0; i < modules.size(); ++i) { char uuid_str[128]; FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid()); Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(), modules[i].max_executable_address(), modules[i].full_name(), ModuleArchToString(modules[i].arch()), uuid_str); } Printf("End of module map.\n"); } void CheckNoDeepBind(const char *filename, int flag) { // Do nothing. } // FIXME: implement on this platform. bool GetRandom(void *buffer, uptr length) { UNIMPLEMENTED(); } } // namespace __sanitizer #endif // SANITIZER_MAC Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac.h =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac.h (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac.h (revision 320961) @@ -1,59 +1,61 @@ //===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between various sanitizers' runtime libraries and // provides definitions for OSX-specific functions. 
//===----------------------------------------------------------------------===// #ifndef SANITIZER_MAC_H #define SANITIZER_MAC_H #include "sanitizer_common.h" #include "sanitizer_platform.h" #if SANITIZER_MAC #include "sanitizer_posix.h" namespace __sanitizer { enum MacosVersion { MACOS_VERSION_UNINITIALIZED = 0, MACOS_VERSION_UNKNOWN, MACOS_VERSION_LEOPARD, MACOS_VERSION_SNOW_LEOPARD, MACOS_VERSION_LION, MACOS_VERSION_MOUNTAIN_LION, MACOS_VERSION_MAVERICKS, MACOS_VERSION_YOSEMITE, MACOS_VERSION_UNKNOWN_NEWER }; MacosVersion GetMacosVersion(); char **GetEnviron(); +void RestrictMemoryToMaxAddress(uptr max_address); + } // namespace __sanitizer extern "C" { static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] = {}; static const char *__crashreporter_info__ __attribute__((__used__)) = &__crashreporter_info_buff__[0]; asm(".desc ___crashreporter_info__, 0x10"); } // extern "C" namespace __sanitizer { static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED); INLINE void CRAppendCrashLogMessage(const char *msg) { BlockingMutexLock l(&crashreporter_info_mutex); internal_strlcat(__crashreporter_info_buff__, msg, sizeof(__crashreporter_info_buff__)); } } // namespace __sanitizer #endif // SANITIZER_MAC #endif // SANITIZER_MAC_H Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac_libcdep.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac_libcdep.cc (nonexistent) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac_libcdep.cc (revision 320961) @@ -0,0 +1,30 @@ +//===-- sanitizer_mac_libcdep.cc ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between various sanitizers' runtime libraries and +// implements OSX-specific functions. +//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" +#if SANITIZER_MAC +#include "sanitizer_mac.h" + +#include + +namespace __sanitizer { + +void RestrictMemoryToMaxAddress(uptr max_address) { + uptr size_to_mmap = GetMaxVirtualAddress() + 1 - max_address; + void *res = MmapFixedNoAccess(max_address, size_to_mmap, "high gap"); + CHECK(res != MAP_FAILED); +} + +} // namespace __sanitizer + +#endif // SANITIZER_MAC Property changes on: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac_libcdep.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.cc (revision 320961) @@ -1,1292 +1,1283 @@ //===-- sanitizer_platform_limits_posix.cc --------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. 
// //===----------------------------------------------------------------------===// // // This file is a part of Sanitizer common code. // // Sizes and layouts of platform-specific POSIX data structures. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC // Tests in this file assume that off_t-dependent data structures match the // libc ABI. For example, struct dirent here is what readdir() function (as // exported from libc) returns, and not the user-facing "dirent", which // depends on _FILE_OFFSET_BITS setting. // To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below. #ifdef _FILE_OFFSET_BITS #undef _FILE_OFFSET_BITS #endif #include #include -#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if !SANITIZER_MAC && !SANITIZER_FREEBSD #include #endif #if !SANITIZER_IOS #include #endif #if !SANITIZER_ANDROID #include #include #include #endif #if SANITIZER_LINUX #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif #if SANITIZER_FREEBSD # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include #define _KERNEL // to declare 'shminfo' structure # include #undef _KERNEL #undef INLINE // to avoid clashes with sanitizers' definitions #endif #if SANITIZER_FREEBSD || SANITIZER_IOS #undef IOC_DIRMASK #endif #if SANITIZER_LINUX || SANITIZER_FREEBSD # include # include # if defined(__mips64) || defined(__aarch64__) || defined(__arm__) # include # ifdef __arm__ typedef struct user_fpregs elf_fpregset_t; # define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/) # if !defined(ARM_VFPREGS_SIZE) # define ARM_VFPREGS_SIZE ARM_VFPREGS_SIZE_ASAN # endif # endif # endif # include #endif #if !SANITIZER_ANDROID #include #include #include #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID #include #include #include #include #include #include #include #if HAVE_RPC_XDR_H # include #elif HAVE_TIRPC_RPC_XDR_H # include #endif #include #include #include #include #include #include #if defined(__mips64) # include #endif #include #include #include #include #include #include #include #include #include #include #include #include #endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_ANDROID #include #include #include #include #endif #if SANITIZER_LINUX #include #include #include #include #endif // SANITIZER_LINUX #if SANITIZER_MAC #include #include #include #endif // Include these after system headers to avoid name clashes and ambiguities. 
#include "sanitizer_internal_defs.h" #include "sanitizer_platform_limits_posix.h" namespace __sanitizer { unsigned struct_utsname_sz = sizeof(struct utsname); unsigned struct_stat_sz = sizeof(struct stat); #if !SANITIZER_IOS && !SANITIZER_FREEBSD unsigned struct_stat64_sz = sizeof(struct stat64); #endif // !SANITIZER_IOS && !SANITIZER_FREEBSD unsigned struct_rusage_sz = sizeof(struct rusage); unsigned struct_tm_sz = sizeof(struct tm); unsigned struct_passwd_sz = sizeof(struct passwd); unsigned struct_group_sz = sizeof(struct group); unsigned siginfo_t_sz = sizeof(siginfo_t); unsigned struct_sigaction_sz = sizeof(struct sigaction); unsigned struct_itimerval_sz = sizeof(struct itimerval); unsigned pthread_t_sz = sizeof(pthread_t); unsigned pthread_cond_t_sz = sizeof(pthread_cond_t); unsigned pid_t_sz = sizeof(pid_t); unsigned timeval_sz = sizeof(timeval); unsigned uid_t_sz = sizeof(uid_t); unsigned gid_t_sz = sizeof(gid_t); unsigned mbstate_t_sz = sizeof(mbstate_t); unsigned sigset_t_sz = sizeof(sigset_t); unsigned struct_timezone_sz = sizeof(struct timezone); unsigned struct_tms_sz = sizeof(struct tms); unsigned struct_sigevent_sz = sizeof(struct sigevent); unsigned struct_sched_param_sz = sizeof(struct sched_param); #if SANITIZER_MAC && !SANITIZER_IOS unsigned struct_statfs64_sz = sizeof(struct statfs64); #endif // SANITIZER_MAC && !SANITIZER_IOS #if !SANITIZER_ANDROID unsigned struct_statfs_sz = sizeof(struct statfs); unsigned struct_sockaddr_sz = sizeof(struct sockaddr); unsigned ucontext_t_sz = sizeof(ucontext_t); #endif // !SANITIZER_ANDROID #if SANITIZER_LINUX unsigned struct_epoll_event_sz = sizeof(struct epoll_event); unsigned struct_sysinfo_sz = sizeof(struct sysinfo); unsigned __user_cap_header_struct_sz = sizeof(struct __user_cap_header_struct); unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct); unsigned struct_new_utsname_sz = sizeof(struct new_utsname); unsigned struct_old_utsname_sz = sizeof(struct old_utsname); unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname); #endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD unsigned struct_rlimit_sz = sizeof(struct rlimit); unsigned struct_timespec_sz = sizeof(struct timespec); unsigned struct_utimbuf_sz = sizeof(struct utimbuf); unsigned struct_itimerspec_sz = sizeof(struct itimerspec); #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID unsigned struct_ustat_sz = sizeof(struct ustat); unsigned struct_rlimit64_sz = sizeof(struct rlimit64); unsigned struct_statvfs64_sz = sizeof(struct statvfs64); #endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID unsigned struct_timex_sz = sizeof(struct timex); unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds); unsigned struct_mq_attr_sz = sizeof(struct mq_attr); unsigned struct_statvfs_sz = sizeof(struct statvfs); #endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID uptr sig_ign = (uptr)SIG_IGN; uptr sig_dfl = (uptr)SIG_DFL; uptr sa_siginfo = (uptr)SA_SIGINFO; #if SANITIZER_LINUX int e_tabsz = (int)E_TABSZ; #endif #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID unsigned struct_shminfo_sz = sizeof(struct shminfo); unsigned struct_shm_info_sz = sizeof(struct shm_info); int shmctl_ipc_stat = (int)IPC_STAT; int shmctl_ipc_info = (int)IPC_INFO; int shmctl_shm_info = (int)SHM_INFO; int shmctl_shm_stat = (int)SHM_STAT; #endif #if !SANITIZER_MAC && !SANITIZER_FREEBSD unsigned struct_utmp_sz = sizeof(struct utmp); #endif 
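The run of sizeof() constants above (and continuing below) is the core of the platform-limits scheme: this translation unit is the one place that includes the real system headers, and it exports each size as a plain unsigned so that interceptors built without those headers can still bounds-check libc objects. A minimal, runnable sketch of the pattern, using illustrative demo_* names rather than the actual sanitizer symbols:

#include <cstdio>
#include <sys/stat.h>

// One translation unit captures the size where the system header is visible;
// consumers treat the value as an opaque byte count.
unsigned demo_struct_stat_sz = sizeof(struct stat);

// A consumer that must not include <sys/stat.h> can still decide whether a
// caller-supplied buffer is large enough to hold a stat(2) result.
static bool BufferFitsStat(unsigned buf_size) {
  return buf_size >= demo_struct_stat_sz;
}

int main() {
  char buf[512];
  std::printf("sizeof(struct stat) = %u, buffer ok = %d\n",
              demo_struct_stat_sz, (int)BufferFitsStat((unsigned)sizeof(buf)));
}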
#if !SANITIZER_ANDROID unsigned struct_utmpx_sz = sizeof(struct utmpx); #endif int map_fixed = MAP_FIXED; int af_inet = (int)AF_INET; int af_inet6 = (int)AF_INET6; uptr __sanitizer_in_addr_sz(int af) { if (af == AF_INET) return sizeof(struct in_addr); else if (af == AF_INET6) return sizeof(struct in6_addr); else return 0; } #if SANITIZER_LINUX unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr)); #elif SANITIZER_FREEBSD unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); #endif #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID int glob_nomatch = GLOB_NOMATCH; int glob_altdirfunc = GLOB_ALTDIRFUNC; #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ defined(__s390__)) #if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs); unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t); #elif defined(__aarch64__) unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state); #elif defined(__s390__) unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct); unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct); #else unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct); #endif // __mips64 || __powerpc64__ || __aarch64__ #if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ defined(__aarch64__) || defined(__arm__) || defined(__s390__) unsigned struct_user_fpxregs_struct_sz = 0; #else unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); #endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__ // || __s390__ #ifdef __arm__ unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE; #else unsigned struct_user_vfpregs_struct_sz = 0; #endif int ptrace_peektext = PTRACE_PEEKTEXT; int ptrace_peekdata = PTRACE_PEEKDATA; int ptrace_peekuser = PTRACE_PEEKUSER; #if (defined(PTRACE_GETREGS) && defined(PTRACE_SETREGS)) || \ (defined(PT_GETREGS) && defined(PT_SETREGS)) int ptrace_getregs = PTRACE_GETREGS; int ptrace_setregs = PTRACE_SETREGS; #else int ptrace_getregs = -1; int ptrace_setregs = -1; #endif #if (defined(PTRACE_GETFPREGS) && defined(PTRACE_SETFPREGS)) || \ (defined(PT_GETFPREGS) && defined(PT_SETFPREGS)) int ptrace_getfpregs = PTRACE_GETFPREGS; int ptrace_setfpregs = PTRACE_SETFPREGS; #else int ptrace_getfpregs = -1; int ptrace_setfpregs = -1; #endif #if (defined(PTRACE_GETFPXREGS) && defined(PTRACE_SETFPXREGS)) || \ (defined(PT_GETFPXREGS) && defined(PT_SETFPXREGS)) int ptrace_getfpxregs = PTRACE_GETFPXREGS; int ptrace_setfpxregs = PTRACE_SETFPXREGS; #else int ptrace_getfpxregs = -1; int ptrace_setfpxregs = -1; #endif // PTRACE_GETFPXREGS/PTRACE_SETFPXREGS #if defined(PTRACE_GETVFPREGS) && defined(PTRACE_SETVFPREGS) int ptrace_getvfpregs = PTRACE_GETVFPREGS; int ptrace_setvfpregs = PTRACE_SETVFPREGS; #else int ptrace_getvfpregs = -1; int ptrace_setvfpregs = -1; #endif int ptrace_geteventmsg = PTRACE_GETEVENTMSG; #if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) || \ (defined(PT_GETSIGINFO) && defined(PT_SETSIGINFO)) int ptrace_getsiginfo = PTRACE_GETSIGINFO; int ptrace_setsiginfo = PTRACE_SETSIGINFO; #else int ptrace_getsiginfo = -1; int ptrace_setsiginfo = -1; #endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO 
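The -1 fallbacks in the ptrace block above (and in the PTRACE_GETREGSET pair just below) encode a simple convention: when a target lacks a given ptrace request, the exported constant takes a value that no real request can equal, so comparisons in common code silently never match. A hedged sketch of that consumption pattern, with demo_* stand-ins only; the real dispatch lives in the ptrace interceptor:

#include <cstdio>

// Stand-ins for the constants defined above. Pretend this target lacks
// PTRACE_GETREGS, so its constant stays at the -1 sentinel.
static const int demo_ptrace_peektext = 1;   // PTRACE_PEEKTEXT is 1 on Linux
static const int demo_ptrace_getregs = -1;   // request unavailable here

// Valid ptrace requests are non-negative, so the -1 sentinel can never match
// and its branch is simply dead code on this target.
static const char *Classify(int request) {
  if (request == demo_ptrace_peektext) return "peektext";
  if (request == demo_ptrace_getregs) return "getregs";
  return "unhandled";
}

int main() {
  std::printf("%s %s\n", Classify(1), Classify(12));  // peektext unhandled
}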
#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET) int ptrace_getregset = PTRACE_GETREGSET; int ptrace_setregset = PTRACE_SETREGSET; #else int ptrace_getregset = -1; int ptrace_setregset = -1; #endif // PTRACE_GETREGSET/PTRACE_SETREGSET #endif unsigned path_max = PATH_MAX; // ioctl arguments unsigned struct_ifreq_sz = sizeof(struct ifreq); unsigned struct_termios_sz = sizeof(struct termios); unsigned struct_winsize_sz = sizeof(struct winsize); #if SANITIZER_LINUX unsigned struct_arpreq_sz = sizeof(struct arpreq); unsigned struct_cdrom_msf_sz = sizeof(struct cdrom_msf); unsigned struct_cdrom_multisession_sz = sizeof(struct cdrom_multisession); unsigned struct_cdrom_read_audio_sz = sizeof(struct cdrom_read_audio); unsigned struct_cdrom_subchnl_sz = sizeof(struct cdrom_subchnl); unsigned struct_cdrom_ti_sz = sizeof(struct cdrom_ti); unsigned struct_cdrom_tocentry_sz = sizeof(struct cdrom_tocentry); unsigned struct_cdrom_tochdr_sz = sizeof(struct cdrom_tochdr); unsigned struct_cdrom_volctrl_sz = sizeof(struct cdrom_volctrl); unsigned struct_ff_effect_sz = sizeof(struct ff_effect); unsigned struct_floppy_drive_params_sz = sizeof(struct floppy_drive_params); unsigned struct_floppy_drive_struct_sz = sizeof(struct floppy_drive_struct); unsigned struct_floppy_fdc_state_sz = sizeof(struct floppy_fdc_state); unsigned struct_floppy_max_errors_sz = sizeof(struct floppy_max_errors); unsigned struct_floppy_raw_cmd_sz = sizeof(struct floppy_raw_cmd); unsigned struct_floppy_struct_sz = sizeof(struct floppy_struct); unsigned struct_floppy_write_errors_sz = sizeof(struct floppy_write_errors); unsigned struct_format_descr_sz = sizeof(struct format_descr); unsigned struct_hd_driveid_sz = sizeof(struct hd_driveid); unsigned struct_hd_geometry_sz = sizeof(struct hd_geometry); unsigned struct_input_absinfo_sz = sizeof(struct input_absinfo); unsigned struct_input_id_sz = sizeof(struct input_id); unsigned struct_mtpos_sz = sizeof(struct mtpos); unsigned struct_rtentry_sz = sizeof(struct rtentry); unsigned struct_termio_sz = sizeof(struct termio); unsigned struct_vt_consize_sz = sizeof(struct vt_consize); unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes); unsigned struct_vt_stat_sz = sizeof(struct vt_stat); #endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD #if SOUND_VERSION >= 0x040000 unsigned struct_copr_buffer_sz = 0; unsigned struct_copr_debug_buf_sz = 0; unsigned struct_copr_msg_sz = 0; #else unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer); unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf); unsigned struct_copr_msg_sz = sizeof(struct copr_msg); #endif unsigned struct_midi_info_sz = sizeof(struct midi_info); unsigned struct_mtget_sz = sizeof(struct mtget); unsigned struct_mtop_sz = sizeof(struct mtop); unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument); unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec); unsigned struct_synth_info_sz = sizeof(struct synth_info); unsigned struct_vt_mode_sz = sizeof(struct vt_mode); #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct); unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor); #if EV_VERSION > (0x010000) unsigned struct_input_keymap_entry_sz = sizeof(struct input_keymap_entry); #else unsigned struct_input_keymap_entry_sz = 0; #endif unsigned struct_ipx_config_data_sz = sizeof(struct ipx_config_data); unsigned struct_kbdiacrs_sz = sizeof(struct kbdiacrs); unsigned 
struct_kbentry_sz = sizeof(struct kbentry); unsigned struct_kbkeycode_sz = sizeof(struct kbkeycode); unsigned struct_kbsentry_sz = sizeof(struct kbsentry); unsigned struct_mtconfiginfo_sz = sizeof(struct mtconfiginfo); unsigned struct_nr_parms_struct_sz = sizeof(struct nr_parms_struct); unsigned struct_scc_modem_sz = sizeof(struct scc_modem); unsigned struct_scc_stat_sz = sizeof(struct scc_stat); unsigned struct_serial_multiport_struct_sz = sizeof(struct serial_multiport_struct); unsigned struct_serial_struct_sz = sizeof(struct serial_struct); unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25); unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc); unsigned struct_unimapinit_sz = sizeof(struct unimapinit); #endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info); unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats); #endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID #if !SANITIZER_ANDROID && !SANITIZER_MAC unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req); #endif const unsigned IOCTL_NOT_PRESENT = 0; unsigned IOCTL_FIOASYNC = FIOASYNC; unsigned IOCTL_FIOCLEX = FIOCLEX; unsigned IOCTL_FIOGETOWN = FIOGETOWN; unsigned IOCTL_FIONBIO = FIONBIO; unsigned IOCTL_FIONCLEX = FIONCLEX; unsigned IOCTL_FIOSETOWN = FIOSETOWN; unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI; unsigned IOCTL_SIOCATMARK = SIOCATMARK; unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI; unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR; unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR; unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF; unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR; unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS; unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC; unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU; unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK; unsigned IOCTL_SIOCGPGRP = SIOCGPGRP; unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR; unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR; unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR; unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS; unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC; unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU; unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK; unsigned IOCTL_SIOCSPGRP = SIOCSPGRP; unsigned IOCTL_TIOCCONS = TIOCCONS; unsigned IOCTL_TIOCEXCL = TIOCEXCL; unsigned IOCTL_TIOCGETD = TIOCGETD; unsigned IOCTL_TIOCGPGRP = TIOCGPGRP; unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ; unsigned IOCTL_TIOCMBIC = TIOCMBIC; unsigned IOCTL_TIOCMBIS = TIOCMBIS; unsigned IOCTL_TIOCMGET = TIOCMGET; unsigned IOCTL_TIOCMSET = TIOCMSET; unsigned IOCTL_TIOCNOTTY = TIOCNOTTY; unsigned IOCTL_TIOCNXCL = TIOCNXCL; unsigned IOCTL_TIOCOUTQ = TIOCOUTQ; unsigned IOCTL_TIOCPKT = TIOCPKT; unsigned IOCTL_TIOCSCTTY = TIOCSCTTY; unsigned IOCTL_TIOCSETD = TIOCSETD; unsigned IOCTL_TIOCSPGRP = TIOCSPGRP; unsigned IOCTL_TIOCSTI = TIOCSTI; unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ; #if ((SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID) unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT; unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT; #endif #if SANITIZER_LINUX unsigned IOCTL_EVIOCGABS = EVIOCGABS(0); unsigned IOCTL_EVIOCGBIT = EVIOCGBIT(0, 0); unsigned IOCTL_EVIOCGEFFECTS = EVIOCGEFFECTS; unsigned IOCTL_EVIOCGID = EVIOCGID; unsigned IOCTL_EVIOCGKEY = EVIOCGKEY(0); unsigned IOCTL_EVIOCGKEYCODE = EVIOCGKEYCODE; unsigned IOCTL_EVIOCGLED = EVIOCGLED(0); unsigned IOCTL_EVIOCGNAME = EVIOCGNAME(0); unsigned IOCTL_EVIOCGPHYS = 
EVIOCGPHYS(0); unsigned IOCTL_EVIOCGRAB = EVIOCGRAB; unsigned IOCTL_EVIOCGREP = EVIOCGREP; unsigned IOCTL_EVIOCGSND = EVIOCGSND(0); unsigned IOCTL_EVIOCGSW = EVIOCGSW(0); unsigned IOCTL_EVIOCGUNIQ = EVIOCGUNIQ(0); unsigned IOCTL_EVIOCGVERSION = EVIOCGVERSION; unsigned IOCTL_EVIOCRMFF = EVIOCRMFF; unsigned IOCTL_EVIOCSABS = EVIOCSABS(0); unsigned IOCTL_EVIOCSFF = EVIOCSFF; unsigned IOCTL_EVIOCSKEYCODE = EVIOCSKEYCODE; unsigned IOCTL_EVIOCSREP = EVIOCSREP; unsigned IOCTL_BLKFLSBUF = BLKFLSBUF; unsigned IOCTL_BLKGETSIZE = BLKGETSIZE; unsigned IOCTL_BLKRAGET = BLKRAGET; unsigned IOCTL_BLKRASET = BLKRASET; unsigned IOCTL_BLKROGET = BLKROGET; unsigned IOCTL_BLKROSET = BLKROSET; unsigned IOCTL_BLKRRPART = BLKRRPART; unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ; unsigned IOCTL_CDROMEJECT = CDROMEJECT; unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW; unsigned IOCTL_CDROMMULTISESSION = CDROMMULTISESSION; unsigned IOCTL_CDROMPAUSE = CDROMPAUSE; unsigned IOCTL_CDROMPLAYMSF = CDROMPLAYMSF; unsigned IOCTL_CDROMPLAYTRKIND = CDROMPLAYTRKIND; unsigned IOCTL_CDROMREADAUDIO = CDROMREADAUDIO; unsigned IOCTL_CDROMREADCOOKED = CDROMREADCOOKED; unsigned IOCTL_CDROMREADMODE1 = CDROMREADMODE1; unsigned IOCTL_CDROMREADMODE2 = CDROMREADMODE2; unsigned IOCTL_CDROMREADRAW = CDROMREADRAW; unsigned IOCTL_CDROMREADTOCENTRY = CDROMREADTOCENTRY; unsigned IOCTL_CDROMREADTOCHDR = CDROMREADTOCHDR; unsigned IOCTL_CDROMRESET = CDROMRESET; unsigned IOCTL_CDROMRESUME = CDROMRESUME; unsigned IOCTL_CDROMSEEK = CDROMSEEK; unsigned IOCTL_CDROMSTART = CDROMSTART; unsigned IOCTL_CDROMSTOP = CDROMSTOP; unsigned IOCTL_CDROMSUBCHNL = CDROMSUBCHNL; unsigned IOCTL_CDROMVOLCTRL = CDROMVOLCTRL; unsigned IOCTL_CDROMVOLREAD = CDROMVOLREAD; unsigned IOCTL_CDROM_GET_UPC = CDROM_GET_UPC; unsigned IOCTL_FDCLRPRM = FDCLRPRM; unsigned IOCTL_FDDEFPRM = FDDEFPRM; unsigned IOCTL_FDFLUSH = FDFLUSH; unsigned IOCTL_FDFMTBEG = FDFMTBEG; unsigned IOCTL_FDFMTEND = FDFMTEND; unsigned IOCTL_FDFMTTRK = FDFMTTRK; unsigned IOCTL_FDGETDRVPRM = FDGETDRVPRM; unsigned IOCTL_FDGETDRVSTAT = FDGETDRVSTAT; unsigned IOCTL_FDGETDRVTYP = FDGETDRVTYP; unsigned IOCTL_FDGETFDCSTAT = FDGETFDCSTAT; unsigned IOCTL_FDGETMAXERRS = FDGETMAXERRS; unsigned IOCTL_FDGETPRM = FDGETPRM; unsigned IOCTL_FDMSGOFF = FDMSGOFF; unsigned IOCTL_FDMSGON = FDMSGON; unsigned IOCTL_FDPOLLDRVSTAT = FDPOLLDRVSTAT; unsigned IOCTL_FDRAWCMD = FDRAWCMD; unsigned IOCTL_FDRESET = FDRESET; unsigned IOCTL_FDSETDRVPRM = FDSETDRVPRM; unsigned IOCTL_FDSETEMSGTRESH = FDSETEMSGTRESH; unsigned IOCTL_FDSETMAXERRS = FDSETMAXERRS; unsigned IOCTL_FDSETPRM = FDSETPRM; unsigned IOCTL_FDTWADDLE = FDTWADDLE; unsigned IOCTL_FDWERRORCLR = FDWERRORCLR; unsigned IOCTL_FDWERRORGET = FDWERRORGET; unsigned IOCTL_HDIO_DRIVE_CMD = HDIO_DRIVE_CMD; unsigned IOCTL_HDIO_GETGEO = HDIO_GETGEO; unsigned IOCTL_HDIO_GET_32BIT = HDIO_GET_32BIT; unsigned IOCTL_HDIO_GET_DMA = HDIO_GET_DMA; unsigned IOCTL_HDIO_GET_IDENTITY = HDIO_GET_IDENTITY; unsigned IOCTL_HDIO_GET_KEEPSETTINGS = HDIO_GET_KEEPSETTINGS; unsigned IOCTL_HDIO_GET_MULTCOUNT = HDIO_GET_MULTCOUNT; unsigned IOCTL_HDIO_GET_NOWERR = HDIO_GET_NOWERR; unsigned IOCTL_HDIO_GET_UNMASKINTR = HDIO_GET_UNMASKINTR; unsigned IOCTL_HDIO_SET_32BIT = HDIO_SET_32BIT; unsigned IOCTL_HDIO_SET_DMA = HDIO_SET_DMA; unsigned IOCTL_HDIO_SET_KEEPSETTINGS = HDIO_SET_KEEPSETTINGS; unsigned IOCTL_HDIO_SET_MULTCOUNT = HDIO_SET_MULTCOUNT; unsigned IOCTL_HDIO_SET_NOWERR = HDIO_SET_NOWERR; unsigned IOCTL_HDIO_SET_UNMASKINTR = HDIO_SET_UNMASKINTR; unsigned IOCTL_MTIOCPOS = MTIOCPOS; unsigned IOCTL_PPPIOCGASYNCMAP 
= PPPIOCGASYNCMAP; unsigned IOCTL_PPPIOCGDEBUG = PPPIOCGDEBUG; unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS; unsigned IOCTL_PPPIOCGUNIT = PPPIOCGUNIT; unsigned IOCTL_PPPIOCGXASYNCMAP = PPPIOCGXASYNCMAP; unsigned IOCTL_PPPIOCSASYNCMAP = PPPIOCSASYNCMAP; unsigned IOCTL_PPPIOCSDEBUG = PPPIOCSDEBUG; unsigned IOCTL_PPPIOCSFLAGS = PPPIOCSFLAGS; unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID; unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU; unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP; unsigned IOCTL_SIOCADDRT = SIOCADDRT; unsigned IOCTL_SIOCDARP = SIOCDARP; unsigned IOCTL_SIOCDELRT = SIOCDELRT; unsigned IOCTL_SIOCDRARP = SIOCDRARP; unsigned IOCTL_SIOCGARP = SIOCGARP; unsigned IOCTL_SIOCGIFENCAP = SIOCGIFENCAP; unsigned IOCTL_SIOCGIFHWADDR = SIOCGIFHWADDR; unsigned IOCTL_SIOCGIFMAP = SIOCGIFMAP; unsigned IOCTL_SIOCGIFMEM = SIOCGIFMEM; unsigned IOCTL_SIOCGIFNAME = SIOCGIFNAME; unsigned IOCTL_SIOCGIFSLAVE = SIOCGIFSLAVE; unsigned IOCTL_SIOCGRARP = SIOCGRARP; unsigned IOCTL_SIOCGSTAMP = SIOCGSTAMP; unsigned IOCTL_SIOCSARP = SIOCSARP; unsigned IOCTL_SIOCSIFENCAP = SIOCSIFENCAP; unsigned IOCTL_SIOCSIFHWADDR = SIOCSIFHWADDR; unsigned IOCTL_SIOCSIFLINK = SIOCSIFLINK; unsigned IOCTL_SIOCSIFMAP = SIOCSIFMAP; unsigned IOCTL_SIOCSIFMEM = SIOCSIFMEM; unsigned IOCTL_SIOCSIFSLAVE = SIOCSIFSLAVE; unsigned IOCTL_SIOCSRARP = SIOCSRARP; # if SOUND_VERSION >= 0x040000 unsigned IOCTL_SNDCTL_COPR_HALT = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_LOAD = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_RCODE = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_RCVMSG = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_RDATA = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_RESET = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_RUN = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_SENDMSG = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_WCODE = IOCTL_NOT_PRESENT; unsigned IOCTL_SNDCTL_COPR_WDATA = IOCTL_NOT_PRESENT; unsigned IOCTL_SOUND_PCM_READ_BITS = IOCTL_NOT_PRESENT; unsigned IOCTL_SOUND_PCM_READ_CHANNELS = IOCTL_NOT_PRESENT; unsigned IOCTL_SOUND_PCM_READ_FILTER = IOCTL_NOT_PRESENT; unsigned IOCTL_SOUND_PCM_READ_RATE = IOCTL_NOT_PRESENT; unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = IOCTL_NOT_PRESENT; unsigned IOCTL_SOUND_PCM_WRITE_FILTER = IOCTL_NOT_PRESENT; # else // SOUND_VERSION unsigned IOCTL_SNDCTL_COPR_HALT = SNDCTL_COPR_HALT; unsigned IOCTL_SNDCTL_COPR_LOAD = SNDCTL_COPR_LOAD; unsigned IOCTL_SNDCTL_COPR_RCODE = SNDCTL_COPR_RCODE; unsigned IOCTL_SNDCTL_COPR_RCVMSG = SNDCTL_COPR_RCVMSG; unsigned IOCTL_SNDCTL_COPR_RDATA = SNDCTL_COPR_RDATA; unsigned IOCTL_SNDCTL_COPR_RESET = SNDCTL_COPR_RESET; unsigned IOCTL_SNDCTL_COPR_RUN = SNDCTL_COPR_RUN; unsigned IOCTL_SNDCTL_COPR_SENDMSG = SNDCTL_COPR_SENDMSG; unsigned IOCTL_SNDCTL_COPR_WCODE = SNDCTL_COPR_WCODE; unsigned IOCTL_SNDCTL_COPR_WDATA = SNDCTL_COPR_WDATA; unsigned IOCTL_SOUND_PCM_READ_BITS = SOUND_PCM_READ_BITS; unsigned IOCTL_SOUND_PCM_READ_CHANNELS = SOUND_PCM_READ_CHANNELS; unsigned IOCTL_SOUND_PCM_READ_FILTER = SOUND_PCM_READ_FILTER; unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE; unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = SOUND_PCM_WRITE_CHANNELS; unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER; #endif // SOUND_VERSION unsigned IOCTL_TCFLSH = TCFLSH; unsigned IOCTL_TCGETA = TCGETA; unsigned IOCTL_TCGETS = TCGETS; unsigned IOCTL_TCSBRK = TCSBRK; unsigned IOCTL_TCSBRKP = TCSBRKP; unsigned IOCTL_TCSETA = TCSETA; unsigned IOCTL_TCSETAF = TCSETAF; unsigned IOCTL_TCSETAW = TCSETAW; unsigned IOCTL_TCSETS = TCSETS; unsigned IOCTL_TCSETSF = TCSETSF; unsigned 
IOCTL_TCSETSW = TCSETSW; unsigned IOCTL_TCXONC = TCXONC; unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS; unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR; unsigned IOCTL_TIOCINQ = TIOCINQ; unsigned IOCTL_TIOCLINUX = TIOCLINUX; unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG; unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR; unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD; unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD; unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS; unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR; unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE; unsigned IOCTL_VT_GETSTATE = VT_GETSTATE; unsigned IOCTL_VT_RESIZE = VT_RESIZE; unsigned IOCTL_VT_RESIZEX = VT_RESIZEX; unsigned IOCTL_VT_SENDSIG = VT_SENDSIG; #endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD unsigned IOCTL_MTIOCGET = MTIOCGET; unsigned IOCTL_MTIOCTOP = MTIOCTOP; unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE; unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS; unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK; unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST; unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET; unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT; unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT; unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED; unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO; unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE; unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC; unsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE; unsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR; unsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO; unsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME; unsigned IOCTL_SNDCTL_SEQ_CTRLRATE = SNDCTL_SEQ_CTRLRATE; unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT; unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT; unsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS; unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS; unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND; unsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC; unsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE; unsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET; unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES; unsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC; unsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI; unsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD; unsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO; unsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL; unsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE; unsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME; unsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT; unsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE; unsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START; unsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP; unsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO; unsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE; unsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM; unsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS; unsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS; unsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD; unsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK; unsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE; unsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN; unsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX; unsigned IOCTL_SOUND_MIXER_READ_LINE = 
SOUND_MIXER_READ_LINE; unsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1; unsigned IOCTL_SOUND_MIXER_READ_LINE2 = SOUND_MIXER_READ_LINE2; unsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3; unsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD; unsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC; unsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE; unsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN; unsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM; unsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV; unsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK; unsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC; unsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER; unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS; unsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH; unsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE; unsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME; unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM; unsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS; unsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD; unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE; unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN; unsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX; unsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE; unsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1; unsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2; unsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3; unsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD; unsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC; unsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE; unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN; unsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM; unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = SOUND_MIXER_WRITE_RECLEV; unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC; unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER; unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH; unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE; unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME; unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE; unsigned IOCTL_VT_GETMODE = VT_GETMODE; unsigned IOCTL_VT_OPENQRY = VT_OPENQRY; unsigned IOCTL_VT_RELDISP = VT_RELDISP; unsigned IOCTL_VT_SETMODE = VT_SETMODE; unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE; #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH; unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT; unsigned IOCTL_CYGETMON = CYGETMON; unsigned IOCTL_CYGETTHRESH = CYGETTHRESH; unsigned IOCTL_CYGETTIMEOUT = CYGETTIMEOUT; unsigned IOCTL_CYSETDEFTHRESH = CYSETDEFTHRESH; unsigned IOCTL_CYSETDEFTIMEOUT = CYSETDEFTIMEOUT; unsigned IOCTL_CYSETTHRESH = CYSETTHRESH; unsigned IOCTL_CYSETTIMEOUT = CYSETTIMEOUT; unsigned IOCTL_EQL_EMANCIPATE = EQL_EMANCIPATE; unsigned IOCTL_EQL_ENSLAVE = EQL_ENSLAVE; unsigned IOCTL_EQL_GETMASTRCFG = EQL_GETMASTRCFG; unsigned IOCTL_EQL_GETSLAVECFG = EQL_GETSLAVECFG; unsigned IOCTL_EQL_SETMASTRCFG = EQL_SETMASTRCFG; unsigned IOCTL_EQL_SETSLAVECFG = EQL_SETSLAVECFG; #if EV_VERSION > (0x010000) unsigned IOCTL_EVIOCGKEYCODE_V2 = EVIOCGKEYCODE_V2; 
unsigned IOCTL_EVIOCGPROP = EVIOCGPROP(0); unsigned IOCTL_EVIOCSKEYCODE_V2 = EVIOCSKEYCODE_V2; #else unsigned IOCTL_EVIOCGKEYCODE_V2 = IOCTL_NOT_PRESENT; unsigned IOCTL_EVIOCGPROP = IOCTL_NOT_PRESENT; unsigned IOCTL_EVIOCSKEYCODE_V2 = IOCTL_NOT_PRESENT; #endif unsigned IOCTL_FS_IOC_GETFLAGS = FS_IOC_GETFLAGS; unsigned IOCTL_FS_IOC_GETVERSION = FS_IOC_GETVERSION; unsigned IOCTL_FS_IOC_SETFLAGS = FS_IOC_SETFLAGS; unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION; unsigned IOCTL_GIO_CMAP = GIO_CMAP; unsigned IOCTL_GIO_FONT = GIO_FONT; unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP; unsigned IOCTL_GIO_UNISCRNMAP = GIO_UNISCRNMAP; unsigned IOCTL_KDADDIO = KDADDIO; unsigned IOCTL_KDDELIO = KDDELIO; unsigned IOCTL_KDGETKEYCODE = KDGETKEYCODE; unsigned IOCTL_KDGKBDIACR = KDGKBDIACR; unsigned IOCTL_KDGKBENT = KDGKBENT; unsigned IOCTL_KDGKBLED = KDGKBLED; unsigned IOCTL_KDGKBMETA = KDGKBMETA; unsigned IOCTL_KDGKBSENT = KDGKBSENT; unsigned IOCTL_KDMAPDISP = KDMAPDISP; unsigned IOCTL_KDSETKEYCODE = KDSETKEYCODE; unsigned IOCTL_KDSIGACCEPT = KDSIGACCEPT; unsigned IOCTL_KDSKBDIACR = KDSKBDIACR; unsigned IOCTL_KDSKBENT = KDSKBENT; unsigned IOCTL_KDSKBLED = KDSKBLED; unsigned IOCTL_KDSKBMETA = KDSKBMETA; unsigned IOCTL_KDSKBSENT = KDSKBSENT; unsigned IOCTL_KDUNMAPDISP = KDUNMAPDISP; unsigned IOCTL_LPABORT = LPABORT; unsigned IOCTL_LPABORTOPEN = LPABORTOPEN; unsigned IOCTL_LPCAREFUL = LPCAREFUL; unsigned IOCTL_LPCHAR = LPCHAR; unsigned IOCTL_LPGETIRQ = LPGETIRQ; unsigned IOCTL_LPGETSTATUS = LPGETSTATUS; unsigned IOCTL_LPRESET = LPRESET; unsigned IOCTL_LPSETIRQ = LPSETIRQ; unsigned IOCTL_LPTIME = LPTIME; unsigned IOCTL_LPWAIT = LPWAIT; unsigned IOCTL_MTIOCGETCONFIG = MTIOCGETCONFIG; unsigned IOCTL_MTIOCSETCONFIG = MTIOCSETCONFIG; unsigned IOCTL_PIO_CMAP = PIO_CMAP; unsigned IOCTL_PIO_FONT = PIO_FONT; unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP; unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR; unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP; unsigned IOCTL_SCSI_IOCTL_GET_IDLUN = SCSI_IOCTL_GET_IDLUN; unsigned IOCTL_SCSI_IOCTL_PROBE_HOST = SCSI_IOCTL_PROBE_HOST; unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE; unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE = SCSI_IOCTL_TAGGED_ENABLE; unsigned IOCTL_SIOCAIPXITFCRT = SIOCAIPXITFCRT; unsigned IOCTL_SIOCAIPXPRISLT = SIOCAIPXPRISLT; unsigned IOCTL_SIOCAX25ADDUID = SIOCAX25ADDUID; unsigned IOCTL_SIOCAX25DELUID = SIOCAX25DELUID; unsigned IOCTL_SIOCAX25GETPARMS = SIOCAX25GETPARMS; unsigned IOCTL_SIOCAX25GETUID = SIOCAX25GETUID; unsigned IOCTL_SIOCAX25NOUID = SIOCAX25NOUID; unsigned IOCTL_SIOCAX25SETPARMS = SIOCAX25SETPARMS; unsigned IOCTL_SIOCDEVPLIP = SIOCDEVPLIP; unsigned IOCTL_SIOCIPXCFGDATA = SIOCIPXCFGDATA; unsigned IOCTL_SIOCNRDECOBS = SIOCNRDECOBS; unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS; unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL; unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS; unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL; unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI; unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI; unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL; #endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP; unsigned IOCTL_KDDISABIO = KDDISABIO; unsigned IOCTL_KDENABIO = KDENABIO; unsigned IOCTL_KDGETLED = KDGETLED; unsigned IOCTL_KDGETMODE = KDGETMODE; unsigned IOCTL_KDGKBMODE = KDGKBMODE; unsigned IOCTL_KDGKBTYPE = KDGKBTYPE; unsigned IOCTL_KDMKTONE = KDMKTONE; unsigned IOCTL_KDSETLED = KDSETLED; unsigned IOCTL_KDSETMODE = KDSETMODE; unsigned 
IOCTL_KDSKBMODE = KDSKBMODE; unsigned IOCTL_KIOCSOUND = KIOCSOUND; unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP; unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE; unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE; #endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID - - const int errno_EINVAL = EINVAL; -// EOWNERDEAD is not present in some older platforms. -#if defined(EOWNERDEAD) - const int errno_EOWNERDEAD = EOWNERDEAD; -#else - const int errno_EOWNERDEAD = -1; -#endif const int si_SEGV_MAPERR = SEGV_MAPERR; const int si_SEGV_ACCERR = SEGV_ACCERR; } // namespace __sanitizer using namespace __sanitizer; COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t)); COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned)); CHECK_TYPE_SIZE(pthread_key_t); #if SANITIZER_LINUX // FIXME: We define those on Linux and Mac, but only check on Linux. COMPILER_CHECK(IOC_NRBITS == _IOC_NRBITS); COMPILER_CHECK(IOC_TYPEBITS == _IOC_TYPEBITS); COMPILER_CHECK(IOC_SIZEBITS == _IOC_SIZEBITS); COMPILER_CHECK(IOC_DIRBITS == _IOC_DIRBITS); COMPILER_CHECK(IOC_NRMASK == _IOC_NRMASK); COMPILER_CHECK(IOC_TYPEMASK == _IOC_TYPEMASK); COMPILER_CHECK(IOC_SIZEMASK == _IOC_SIZEMASK); COMPILER_CHECK(IOC_DIRMASK == _IOC_DIRMASK); COMPILER_CHECK(IOC_NRSHIFT == _IOC_NRSHIFT); COMPILER_CHECK(IOC_TYPESHIFT == _IOC_TYPESHIFT); COMPILER_CHECK(IOC_SIZESHIFT == _IOC_SIZESHIFT); COMPILER_CHECK(IOC_DIRSHIFT == _IOC_DIRSHIFT); COMPILER_CHECK(IOC_NONE == _IOC_NONE); COMPILER_CHECK(IOC_WRITE == _IOC_WRITE); COMPILER_CHECK(IOC_READ == _IOC_READ); COMPILER_CHECK(EVIOC_ABS_MAX == ABS_MAX); COMPILER_CHECK(EVIOC_EV_MAX == EV_MAX); COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678)); COMPILER_CHECK(IOC_DIR(0x12345678) == _IOC_DIR(0x12345678)); COMPILER_CHECK(IOC_NR(0x12345678) == _IOC_NR(0x12345678)); COMPILER_CHECK(IOC_TYPE(0x12345678) == _IOC_TYPE(0x12345678)); #endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD // There are more undocumented fields in dl_phdr_info that we are not interested // in. 
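Everything from here to the end of the file is compile-time ABI validation: each check pins the sanitizer's header-free mirror of a libc type against the real definition, so a silent layout change breaks the build instead of corrupting memory at run time. The real COMPILER_CHECK / CHECK_SIZE_AND_OFFSET macros come from sanitizer_internal_defs.h; the following is a minimal illustrative re-implementation of the idea, assuming C++11 static_assert:

#include <cstddef>

struct libc_like { long a; int b; };  // stands in for the real libc type
struct mirrored  { long a; int b; };  // the sanitizer-side, header-free mirror

// Fires at build time if either the size or the offset of a field drifts.
#define DEMO_CHECK_SIZE_AND_OFFSET(field)                                 \
  static_assert(sizeof(((mirrored *)nullptr)->field) ==                   \
                    sizeof(((libc_like *)nullptr)->field),                \
                "size mismatch for " #field);                             \
  static_assert(offsetof(mirrored, field) == offsetof(libc_like, field),  \
                "offset mismatch for " #field)

DEMO_CHECK_SIZE_AND_OFFSET(a);
DEMO_CHECK_SIZE_AND_OFFSET(b);

int main() {}  // nothing to execute; the checks live entirely at compile time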
COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info)); CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr); CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name); CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr); CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum); #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID CHECK_TYPE_SIZE(glob_t); CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc); CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv); CHECK_SIZE_AND_OFFSET(glob_t, gl_offs); CHECK_SIZE_AND_OFFSET(glob_t, gl_flags); CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir); CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir); CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir); CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat); CHECK_SIZE_AND_OFFSET(glob_t, gl_stat); #endif CHECK_TYPE_SIZE(addrinfo); CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags); CHECK_SIZE_AND_OFFSET(addrinfo, ai_family); CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype); CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol); CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol); CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen); CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname); CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr); CHECK_TYPE_SIZE(hostent); CHECK_SIZE_AND_OFFSET(hostent, h_name); CHECK_SIZE_AND_OFFSET(hostent, h_aliases); CHECK_SIZE_AND_OFFSET(hostent, h_addrtype); CHECK_SIZE_AND_OFFSET(hostent, h_length); CHECK_SIZE_AND_OFFSET(hostent, h_addr_list); CHECK_TYPE_SIZE(iovec); CHECK_SIZE_AND_OFFSET(iovec, iov_base); CHECK_SIZE_AND_OFFSET(iovec, iov_len); CHECK_TYPE_SIZE(msghdr); CHECK_SIZE_AND_OFFSET(msghdr, msg_name); CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen); CHECK_SIZE_AND_OFFSET(msghdr, msg_iov); CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen); CHECK_SIZE_AND_OFFSET(msghdr, msg_control); CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen); CHECK_SIZE_AND_OFFSET(msghdr, msg_flags); CHECK_TYPE_SIZE(cmsghdr); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type); COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent)); CHECK_SIZE_AND_OFFSET(dirent, d_ino); #if SANITIZER_MAC CHECK_SIZE_AND_OFFSET(dirent, d_seekoff); #elif SANITIZER_FREEBSD // There is no 'd_off' field on FreeBSD. #else CHECK_SIZE_AND_OFFSET(dirent, d_off); #endif CHECK_SIZE_AND_OFFSET(dirent, d_reclen); #if SANITIZER_LINUX && !SANITIZER_ANDROID COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64)); CHECK_SIZE_AND_OFFSET(dirent64, d_ino); CHECK_SIZE_AND_OFFSET(dirent64, d_off); CHECK_SIZE_AND_OFFSET(dirent64, d_reclen); #endif CHECK_TYPE_SIZE(ifconf); CHECK_SIZE_AND_OFFSET(ifconf, ifc_len); CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu); CHECK_TYPE_SIZE(pollfd); CHECK_SIZE_AND_OFFSET(pollfd, fd); CHECK_SIZE_AND_OFFSET(pollfd, events); CHECK_SIZE_AND_OFFSET(pollfd, revents); CHECK_TYPE_SIZE(nfds_t); CHECK_TYPE_SIZE(sigset_t); COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction)); // Can't write checks for sa_handler and sa_sigaction due to them being // preprocessor macros. CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask); #ifndef __GLIBC_PREREQ #define __GLIBC_PREREQ(x, y) 0 #endif #if !defined(__s390x__) || __GLIBC_PREREQ (2, 20) // On s390x glibc 2.19 and earlier sa_flags was unsigned long, and sa_resv // didn't exist. 
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags); #endif #if SANITIZER_LINUX && (!SANITIZER_ANDROID || !SANITIZER_MIPS32) CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer); #endif #if SANITIZER_LINUX CHECK_TYPE_SIZE(__sysctl_args); CHECK_SIZE_AND_OFFSET(__sysctl_args, name); CHECK_SIZE_AND_OFFSET(__sysctl_args, nlen); CHECK_SIZE_AND_OFFSET(__sysctl_args, oldval); CHECK_SIZE_AND_OFFSET(__sysctl_args, oldlenp); CHECK_SIZE_AND_OFFSET(__sysctl_args, newval); CHECK_SIZE_AND_OFFSET(__sysctl_args, newlen); CHECK_TYPE_SIZE(__kernel_uid_t); CHECK_TYPE_SIZE(__kernel_gid_t); #if SANITIZER_USES_UID16_SYSCALLS CHECK_TYPE_SIZE(__kernel_old_uid_t); CHECK_TYPE_SIZE(__kernel_old_gid_t); #endif CHECK_TYPE_SIZE(__kernel_off_t); CHECK_TYPE_SIZE(__kernel_loff_t); CHECK_TYPE_SIZE(__kernel_fd_set); #endif #if !SANITIZER_ANDROID CHECK_TYPE_SIZE(wordexp_t); CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc); CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv); CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs); #endif CHECK_TYPE_SIZE(tm); CHECK_SIZE_AND_OFFSET(tm, tm_sec); CHECK_SIZE_AND_OFFSET(tm, tm_min); CHECK_SIZE_AND_OFFSET(tm, tm_hour); CHECK_SIZE_AND_OFFSET(tm, tm_mday); CHECK_SIZE_AND_OFFSET(tm, tm_mon); CHECK_SIZE_AND_OFFSET(tm, tm_year); CHECK_SIZE_AND_OFFSET(tm, tm_wday); CHECK_SIZE_AND_OFFSET(tm, tm_yday); CHECK_SIZE_AND_OFFSET(tm, tm_isdst); CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff); CHECK_SIZE_AND_OFFSET(tm, tm_zone); #if SANITIZER_LINUX CHECK_TYPE_SIZE(mntent); CHECK_SIZE_AND_OFFSET(mntent, mnt_fsname); CHECK_SIZE_AND_OFFSET(mntent, mnt_dir); CHECK_SIZE_AND_OFFSET(mntent, mnt_type); CHECK_SIZE_AND_OFFSET(mntent, mnt_opts); CHECK_SIZE_AND_OFFSET(mntent, mnt_freq); CHECK_SIZE_AND_OFFSET(mntent, mnt_passno); #endif CHECK_TYPE_SIZE(ether_addr); #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID CHECK_TYPE_SIZE(ipc_perm); # if SANITIZER_FREEBSD CHECK_SIZE_AND_OFFSET(ipc_perm, key); CHECK_SIZE_AND_OFFSET(ipc_perm, seq); # else CHECK_SIZE_AND_OFFSET(ipc_perm, __key); CHECK_SIZE_AND_OFFSET(ipc_perm, __seq); # endif CHECK_SIZE_AND_OFFSET(ipc_perm, uid); CHECK_SIZE_AND_OFFSET(ipc_perm, gid); CHECK_SIZE_AND_OFFSET(ipc_perm, cuid); CHECK_SIZE_AND_OFFSET(ipc_perm, cgid); #if !defined(__aarch64__) || !SANITIZER_LINUX || __GLIBC_PREREQ (2, 21) /* On aarch64 glibc 2.20 and earlier provided incorrect mode field. */ CHECK_SIZE_AND_OFFSET(ipc_perm, mode); #endif CHECK_TYPE_SIZE(shmid_ds); CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm); CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz); CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime); CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime); CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime); CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid); CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid); CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch); #endif CHECK_TYPE_SIZE(clock_t); #if SANITIZER_LINUX CHECK_TYPE_SIZE(clockid_t); #endif #if !SANITIZER_ANDROID CHECK_TYPE_SIZE(ifaddrs); CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next); CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name); CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr); CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask); #if SANITIZER_LINUX || SANITIZER_FREEBSD // Compare against the union, because we can't reach into the union in a // compliant way. 
#ifdef ifa_dstaddr #undef ifa_dstaddr #endif # if SANITIZER_FREEBSD CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr); # else COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) == sizeof(((ifaddrs *)nullptr)->ifa_ifu)); COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) == offsetof(ifaddrs, ifa_ifu)); # endif // SANITIZER_FREEBSD #else CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr); #endif // SANITIZER_LINUX CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data); #endif #if SANITIZER_LINUX COMPILER_CHECK(sizeof(__sanitizer_mallinfo) == sizeof(struct mallinfo)); #endif #if !SANITIZER_ANDROID CHECK_TYPE_SIZE(timeb); CHECK_SIZE_AND_OFFSET(timeb, time); CHECK_SIZE_AND_OFFSET(timeb, millitm); CHECK_SIZE_AND_OFFSET(timeb, timezone); CHECK_SIZE_AND_OFFSET(timeb, dstflag); #endif CHECK_TYPE_SIZE(passwd); CHECK_SIZE_AND_OFFSET(passwd, pw_name); CHECK_SIZE_AND_OFFSET(passwd, pw_passwd); CHECK_SIZE_AND_OFFSET(passwd, pw_uid); CHECK_SIZE_AND_OFFSET(passwd, pw_gid); CHECK_SIZE_AND_OFFSET(passwd, pw_dir); CHECK_SIZE_AND_OFFSET(passwd, pw_shell); #if !SANITIZER_ANDROID CHECK_SIZE_AND_OFFSET(passwd, pw_gecos); #endif #if SANITIZER_MAC CHECK_SIZE_AND_OFFSET(passwd, pw_change); CHECK_SIZE_AND_OFFSET(passwd, pw_expire); CHECK_SIZE_AND_OFFSET(passwd, pw_class); #endif CHECK_TYPE_SIZE(group); CHECK_SIZE_AND_OFFSET(group, gr_name); CHECK_SIZE_AND_OFFSET(group, gr_passwd); CHECK_SIZE_AND_OFFSET(group, gr_gid); CHECK_SIZE_AND_OFFSET(group, gr_mem); #if HAVE_RPC_XDR_H || HAVE_TIRPC_RPC_XDR_H CHECK_TYPE_SIZE(XDR); CHECK_SIZE_AND_OFFSET(XDR, x_op); CHECK_SIZE_AND_OFFSET(XDR, x_ops); CHECK_SIZE_AND_OFFSET(XDR, x_public); CHECK_SIZE_AND_OFFSET(XDR, x_private); CHECK_SIZE_AND_OFFSET(XDR, x_base); CHECK_SIZE_AND_OFFSET(XDR, x_handy); COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE); COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE); COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE); #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE)); CHECK_SIZE_AND_OFFSET(FILE, _flags); CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr); CHECK_SIZE_AND_OFFSET(FILE, _IO_read_end); CHECK_SIZE_AND_OFFSET(FILE, _IO_read_base); CHECK_SIZE_AND_OFFSET(FILE, _IO_write_ptr); CHECK_SIZE_AND_OFFSET(FILE, _IO_write_end); CHECK_SIZE_AND_OFFSET(FILE, _IO_write_base); CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_base); CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_end); CHECK_SIZE_AND_OFFSET(FILE, _IO_save_base); CHECK_SIZE_AND_OFFSET(FILE, _IO_backup_base); CHECK_SIZE_AND_OFFSET(FILE, _IO_save_end); CHECK_SIZE_AND_OFFSET(FILE, _markers); CHECK_SIZE_AND_OFFSET(FILE, _chain); CHECK_SIZE_AND_OFFSET(FILE, _fileno); #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk)); CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit); CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev); CHECK_TYPE_SIZE(obstack); CHECK_SIZE_AND_OFFSET(obstack, chunk_size); CHECK_SIZE_AND_OFFSET(obstack, chunk); CHECK_SIZE_AND_OFFSET(obstack, object_base); CHECK_SIZE_AND_OFFSET(obstack, next_free); CHECK_TYPE_SIZE(cookie_io_functions_t); CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, read); CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write); CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek); CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close); #endif #if SANITIZER_LINUX || SANITIZER_FREEBSD CHECK_TYPE_SIZE(sem_t); #endif #if SANITIZER_LINUX && defined(__arm__) COMPILER_CHECK(ARM_VFPREGS_SIZE == ARM_VFPREGS_SIZE_ASAN); #endif #endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC Index: 
vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.h =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.h (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.h (revision 320961) @@ -1,1491 +1,1488 @@ //===-- sanitizer_platform_limits_posix.h ---------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of Sanitizer common code. // // Sizes and layouts of platform-specific POSIX data structures. //===----------------------------------------------------------------------===// #ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H #define SANITIZER_PLATFORM_LIMITS_POSIX_H #include "sanitizer_internal_defs.h" #include "sanitizer_platform.h" #if SANITIZER_FREEBSD // FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that // incorporates the map structure. # define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ ((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544))) // Get sys/_types.h, because that tells us whether 64-bit inodes are // used in struct dirent below. #include #else # define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle)) #endif // !SANITIZER_FREEBSD #ifndef __GLIBC_PREREQ #define __GLIBC_PREREQ(x, y) 0 #endif namespace __sanitizer { extern unsigned struct_utsname_sz; extern unsigned struct_stat_sz; #if !SANITIZER_FREEBSD && !SANITIZER_IOS extern unsigned struct_stat64_sz; #endif extern unsigned struct_rusage_sz; extern unsigned siginfo_t_sz; extern unsigned struct_itimerval_sz; extern unsigned pthread_t_sz; extern unsigned pthread_cond_t_sz; extern unsigned pid_t_sz; extern unsigned timeval_sz; extern unsigned uid_t_sz; extern unsigned gid_t_sz; extern unsigned mbstate_t_sz; extern unsigned struct_timezone_sz; extern unsigned struct_tms_sz; extern unsigned struct_itimerspec_sz; extern unsigned struct_sigevent_sz; extern unsigned struct_sched_param_sz; extern unsigned struct_statfs64_sz; #if !SANITIZER_ANDROID extern unsigned struct_statfs_sz; extern unsigned struct_sockaddr_sz; extern unsigned ucontext_t_sz; #endif // !SANITIZER_ANDROID #if SANITIZER_LINUX #if defined(__x86_64__) const unsigned struct_kernel_stat_sz = 144; const unsigned struct_kernel_stat64_sz = 0; #elif defined(__i386__) const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 96; #elif defined(__arm__) const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 104; #elif defined(__aarch64__) const unsigned struct_kernel_stat_sz = 128; const unsigned struct_kernel_stat64_sz = 104; #elif defined(__powerpc__) && !defined(__powerpc64__) const unsigned struct_kernel_stat_sz = 72; const unsigned struct_kernel_stat64_sz = 104; #elif defined(__powerpc64__) const unsigned struct_kernel_stat_sz = 144; const unsigned struct_kernel_stat64_sz = 104; #elif defined(__mips__) const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID ? 
FIRST_32_SECOND_64(104, 128) : FIRST_32_SECOND_64(160, 216); const unsigned struct_kernel_stat64_sz = 104; #elif defined(__s390__) && !defined(__s390x__) const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 104; #elif defined(__s390x__) const unsigned struct_kernel_stat_sz = 144; const unsigned struct_kernel_stat64_sz = 0; #elif defined(__sparc__) && defined(__arch64__) const unsigned struct___old_kernel_stat_sz = 0; const unsigned struct_kernel_stat_sz = 104; const unsigned struct_kernel_stat64_sz = 144; #elif defined(__sparc__) && !defined(__arch64__) const unsigned struct___old_kernel_stat_sz = 0; const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 104; #endif struct __sanitizer_perf_event_attr { unsigned type; unsigned size; // More fields that vary with the kernel version. }; extern unsigned struct_epoll_event_sz; extern unsigned struct_sysinfo_sz; extern unsigned __user_cap_header_struct_sz; extern unsigned __user_cap_data_struct_sz; extern unsigned struct_new_utsname_sz; extern unsigned struct_old_utsname_sz; extern unsigned struct_oldold_utsname_sz; const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long); #endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD #if defined(__powerpc64__) || defined(__s390__) const unsigned struct___old_kernel_stat_sz = 0; #elif !defined(__sparc__) const unsigned struct___old_kernel_stat_sz = 32; #endif extern unsigned struct_rlimit_sz; extern unsigned struct_utimbuf_sz; extern unsigned struct_timespec_sz; struct __sanitizer_iocb { u64 aio_data; u32 aio_key_or_aio_reserved1; // Simply crazy. u32 aio_reserved1_or_aio_key; // Luckily, we don't need these. u16 aio_lio_opcode; s16 aio_reqprio; u32 aio_fildes; u64 aio_buf; u64 aio_nbytes; s64 aio_offset; u64 aio_reserved2; u64 aio_reserved3; }; struct __sanitizer_io_event { u64 data; u64 obj; u64 res; u64 res2; }; const unsigned iocb_cmd_pread = 0; const unsigned iocb_cmd_pwrite = 1; const unsigned iocb_cmd_preadv = 7; const unsigned iocb_cmd_pwritev = 8; struct __sanitizer___sysctl_args { int *name; int nlen; void *oldval; uptr *oldlenp; void *newval; uptr newlen; unsigned long ___unused[4]; }; const unsigned old_sigset_t_sz = sizeof(unsigned long); struct __sanitizer_sem_t { #if SANITIZER_ANDROID && defined(_LP64) int data[4]; #elif SANITIZER_ANDROID && !defined(_LP64) int data; #elif SANITIZER_LINUX uptr data[4]; #elif SANITIZER_FREEBSD u32 data[4]; #endif }; #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_ANDROID struct __sanitizer_mallinfo { uptr v[10]; }; #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer_mallinfo { int v[10]; }; extern unsigned struct_ustat_sz; extern unsigned struct_rlimit64_sz; extern unsigned struct_statvfs64_sz; struct __sanitizer_ipc_perm { int __key; int uid; int gid; int cuid; int cgid; #ifdef __powerpc__ unsigned mode; unsigned __seq; u64 __unused1; u64 __unused2; #elif defined(__sparc__) #if defined(__arch64__) unsigned mode; unsigned short __pad1; #else unsigned short __pad1; unsigned short mode; unsigned short __pad2; #endif unsigned short __seq; unsigned long long __unused1; unsigned long long __unused2; #elif defined(__mips__) || defined(__aarch64__) || defined(__s390x__) unsigned int mode; unsigned short __seq; unsigned short __pad1; unsigned long __unused1; unsigned long __unused2; #else unsigned short mode; unsigned short __pad1; unsigned short __seq; unsigned short __pad2; #if defined(__x86_64__) && !defined(_LP64) u64 __unused1; u64 __unused2; #else 
unsigned long __unused1; unsigned long __unused2; #endif #endif }; struct __sanitizer_shmid_ds { __sanitizer_ipc_perm shm_perm; #if defined(__sparc__) #if !defined(__arch64__) u32 __pad1; #endif long shm_atime; #if !defined(__arch64__) u32 __pad2; #endif long shm_dtime; #if !defined(__arch64__) u32 __pad3; #endif long shm_ctime; uptr shm_segsz; int shm_cpid; int shm_lpid; unsigned long shm_nattch; unsigned long __glibc_reserved1; unsigned long __glibc_reserved2; #else #ifndef __powerpc__ uptr shm_segsz; #elif !defined(__powerpc64__) uptr __unused0; #endif #if defined(__x86_64__) && !defined(_LP64) u64 shm_atime; u64 shm_dtime; u64 shm_ctime; #else uptr shm_atime; #if !defined(_LP64) && !defined(__mips__) uptr __unused1; #endif uptr shm_dtime; #if !defined(_LP64) && !defined(__mips__) uptr __unused2; #endif uptr shm_ctime; #if !defined(_LP64) && !defined(__mips__) uptr __unused3; #endif #endif #ifdef __powerpc__ uptr shm_segsz; #endif int shm_cpid; int shm_lpid; #if defined(__x86_64__) && !defined(_LP64) u64 shm_nattch; u64 __unused4; u64 __unused5; #else uptr shm_nattch; uptr __unused4; uptr __unused5; #endif #endif }; #elif SANITIZER_FREEBSD struct __sanitizer_ipc_perm { unsigned int cuid; unsigned int cgid; unsigned int uid; unsigned int gid; unsigned short mode; unsigned short seq; long key; }; struct __sanitizer_shmid_ds { __sanitizer_ipc_perm shm_perm; unsigned long shm_segsz; unsigned int shm_lpid; unsigned int shm_cpid; int shm_nattch; unsigned long shm_atime; unsigned long shm_dtime; unsigned long shm_ctime; }; #endif #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID extern unsigned struct_msqid_ds_sz; extern unsigned struct_mq_attr_sz; extern unsigned struct_timex_sz; extern unsigned struct_statvfs_sz; #endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID struct __sanitizer_iovec { void *iov_base; uptr iov_len; }; #if !SANITIZER_ANDROID struct __sanitizer_ifaddrs { struct __sanitizer_ifaddrs *ifa_next; char *ifa_name; unsigned int ifa_flags; void *ifa_addr; // (struct sockaddr *) void *ifa_netmask; // (struct sockaddr *) // This is a union on Linux. 
# ifdef ifa_dstaddr # undef ifa_dstaddr # endif void *ifa_dstaddr; // (struct sockaddr *) void *ifa_data; }; #endif // !SANITIZER_ANDROID #if SANITIZER_MAC typedef unsigned long __sanitizer_pthread_key_t; #else typedef unsigned __sanitizer_pthread_key_t; #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer_XDR { int x_op; void *x_ops; uptr x_public; uptr x_private; uptr x_base; unsigned x_handy; }; const int __sanitizer_XDR_ENCODE = 0; const int __sanitizer_XDR_DECODE = 1; const int __sanitizer_XDR_FREE = 2; #endif struct __sanitizer_passwd { char *pw_name; char *pw_passwd; int pw_uid; int pw_gid; #if SANITIZER_MAC || SANITIZER_FREEBSD long pw_change; char *pw_class; #endif #if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)) char *pw_gecos; #endif char *pw_dir; char *pw_shell; #if SANITIZER_MAC || SANITIZER_FREEBSD long pw_expire; #endif #if SANITIZER_FREEBSD int pw_fields; #endif }; struct __sanitizer_group { char *gr_name; char *gr_passwd; int gr_gid; char **gr_mem; }; #if defined(__x86_64__) && !defined(_LP64) typedef long long __sanitizer_time_t; #else typedef long __sanitizer_time_t; #endif struct __sanitizer_timeb { __sanitizer_time_t time; unsigned short millitm; short timezone; short dstflag; }; struct __sanitizer_ether_addr { u8 octet[6]; }; struct __sanitizer_tm { int tm_sec; int tm_min; int tm_hour; int tm_mday; int tm_mon; int tm_year; int tm_wday; int tm_yday; int tm_isdst; long int tm_gmtoff; const char *tm_zone; }; #if SANITIZER_LINUX struct __sanitizer_mntent { char *mnt_fsname; char *mnt_dir; char *mnt_type; char *mnt_opts; int mnt_freq; int mnt_passno; }; #endif #if SANITIZER_MAC || SANITIZER_FREEBSD struct __sanitizer_msghdr { void *msg_name; unsigned msg_namelen; struct __sanitizer_iovec *msg_iov; unsigned msg_iovlen; void *msg_control; unsigned msg_controllen; int msg_flags; }; struct __sanitizer_cmsghdr { unsigned cmsg_len; int cmsg_level; int cmsg_type; }; #else struct __sanitizer_msghdr { void *msg_name; unsigned msg_namelen; struct __sanitizer_iovec *msg_iov; uptr msg_iovlen; void *msg_control; uptr msg_controllen; int msg_flags; }; struct __sanitizer_cmsghdr { uptr cmsg_len; int cmsg_level; int cmsg_type; }; #endif #if SANITIZER_MAC struct __sanitizer_dirent { unsigned long long d_ino; unsigned long long d_seekoff; unsigned short d_reclen; // more fields that we don't care about }; #elif SANITIZER_FREEBSD struct __sanitizer_dirent { #if defined(__INO64) unsigned long long d_fileno; unsigned long long d_off; #else unsigned int d_fileno; #endif unsigned short d_reclen; // more fields that we don't care about }; #elif SANITIZER_ANDROID || defined(__x86_64__) struct __sanitizer_dirent { unsigned long long d_ino; unsigned long long d_off; unsigned short d_reclen; // more fields that we don't care about }; #else struct __sanitizer_dirent { uptr d_ino; uptr d_off; unsigned short d_reclen; // more fields that we don't care about }; #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer_dirent64 { unsigned long long d_ino; unsigned long long d_off; unsigned short d_reclen; // more fields that we don't care about }; #endif // 'clock_t' is 32 bits wide on x64 FreeBSD #if SANITIZER_FREEBSD typedef int __sanitizer_clock_t; #elif defined(__x86_64__) && !defined(_LP64) typedef long long __sanitizer_clock_t; #else typedef long __sanitizer_clock_t; #endif #if SANITIZER_LINUX typedef int __sanitizer_clockid_t; #endif #if SANITIZER_LINUX || SANITIZER_FREEBSD #if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)\ || defined(__mips__) typedef 
unsigned __sanitizer___kernel_uid_t; typedef unsigned __sanitizer___kernel_gid_t; #else typedef unsigned short __sanitizer___kernel_uid_t; typedef unsigned short __sanitizer___kernel_gid_t; #endif #if defined(__x86_64__) && !defined(_LP64) typedef long long __sanitizer___kernel_off_t; #else typedef long __sanitizer___kernel_off_t; #endif #if defined(__powerpc__) || defined(__mips__) typedef unsigned int __sanitizer___kernel_old_uid_t; typedef unsigned int __sanitizer___kernel_old_gid_t; #else typedef unsigned short __sanitizer___kernel_old_uid_t; typedef unsigned short __sanitizer___kernel_old_gid_t; #endif typedef long long __sanitizer___kernel_loff_t; typedef struct { unsigned long fds_bits[1024 / (8 * sizeof(long))]; } __sanitizer___kernel_fd_set; #endif // This thing depends on the platform. We are only interested in the upper // limit. Verified with a compiler assert in .cc. const int pthread_attr_t_max_sz = 128; union __sanitizer_pthread_attr_t { char size[pthread_attr_t_max_sz]; // NOLINT void *align; }; #if SANITIZER_ANDROID # if SANITIZER_MIPS typedef unsigned long __sanitizer_sigset_t[16/sizeof(unsigned long)]; # else typedef unsigned long __sanitizer_sigset_t; # endif #elif SANITIZER_MAC typedef unsigned __sanitizer_sigset_t; #elif SANITIZER_LINUX struct __sanitizer_sigset_t { // The size is determined by looking at sizeof of real sigset_t on linux. uptr val[128 / sizeof(uptr)]; }; #elif SANITIZER_FREEBSD struct __sanitizer_sigset_t { // uint32_t * 4 unsigned int __bits[4]; }; #endif // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros. #if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64) struct __sanitizer_sigaction { unsigned sa_flags; union { void (*sigaction)(int sig, void *siginfo, void *uctx); void (*handler)(int sig); }; __sanitizer_sigset_t sa_mask; void (*sa_restorer)(); }; #elif SANITIZER_ANDROID && SANITIZER_MIPS32 // check this before WORDSIZE == 32 struct __sanitizer_sigaction { unsigned sa_flags; union { void (*sigaction)(int sig, void *siginfo, void *uctx); void (*handler)(int sig); }; __sanitizer_sigset_t sa_mask; }; #elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32) struct __sanitizer_sigaction { union { void (*sigaction)(int sig, void *siginfo, void *uctx); void (*handler)(int sig); }; __sanitizer_sigset_t sa_mask; uptr sa_flags; void (*sa_restorer)(); }; #else // !SANITIZER_ANDROID struct __sanitizer_sigaction { #if defined(__mips__) && !SANITIZER_FREEBSD unsigned int sa_flags; #endif union { void (*sigaction)(int sig, void *siginfo, void *uctx); void (*handler)(int sig); }; #if SANITIZER_FREEBSD int sa_flags; __sanitizer_sigset_t sa_mask; #else #if defined(__s390x__) int sa_resv; #else __sanitizer_sigset_t sa_mask; #endif #ifndef __mips__ #if defined(__sparc__) #if __GLIBC_PREREQ (2, 20) // On sparc glibc 2.19 and earlier sa_flags was unsigned long. #if defined(__arch64__) // To maintain ABI compatibility on sparc64 when switching to an int, // __glibc_reserved0 was added. 
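// (Sketch of the layout reasoning, assuming an 8-byte long on sparc64: the
// old 'unsigned long sa_flags' occupied 8 bytes, so the int pair
// __glibc_reserved0 + sa_flags declared below preserves both the struct size
// and the offsets of the fields that follow.)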
int __glibc_reserved0; #endif int sa_flags; #else unsigned long sa_flags; #endif #else int sa_flags; #endif #endif #endif #if SANITIZER_LINUX void (*sa_restorer)(); #endif #if defined(__mips__) && (SANITIZER_WORDSIZE == 32) int sa_resv[1]; #endif #if defined(__s390x__) __sanitizer_sigset_t sa_mask; #endif }; #endif // !SANITIZER_ANDROID #if SANITIZER_FREEBSD typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t; #elif defined(__mips__) struct __sanitizer_kernel_sigset_t { uptr sig[2]; }; #else struct __sanitizer_kernel_sigset_t { u8 sig[8]; }; #endif // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros. #if SANITIZER_MIPS struct __sanitizer_kernel_sigaction_t { unsigned int sa_flags; union { void (*handler)(int signo); void (*sigaction)(int signo, void *info, void *ctx); }; __sanitizer_kernel_sigset_t sa_mask; void (*sa_restorer)(void); }; #else struct __sanitizer_kernel_sigaction_t { union { void (*handler)(int signo); void (*sigaction)(int signo, void *info, void *ctx); }; unsigned long sa_flags; void (*sa_restorer)(void); __sanitizer_kernel_sigset_t sa_mask; }; #endif extern uptr sig_ign; extern uptr sig_dfl; extern uptr sa_siginfo; #if SANITIZER_LINUX extern int e_tabsz; #endif extern int af_inet; extern int af_inet6; uptr __sanitizer_in_addr_sz(int af); #if SANITIZER_LINUX || SANITIZER_FREEBSD struct __sanitizer_dl_phdr_info { uptr dlpi_addr; const char *dlpi_name; const void *dlpi_phdr; short dlpi_phnum; }; extern unsigned struct_ElfW_Phdr_sz; #endif struct __sanitizer_addrinfo { int ai_flags; int ai_family; int ai_socktype; int ai_protocol; #if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD unsigned ai_addrlen; char *ai_canonname; void *ai_addr; #else // LINUX unsigned ai_addrlen; void *ai_addr; char *ai_canonname; #endif struct __sanitizer_addrinfo *ai_next; }; struct __sanitizer_hostent { char *h_name; char **h_aliases; int h_addrtype; int h_length; char **h_addr_list; }; struct __sanitizer_pollfd { int fd; short events; short revents; }; #if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD typedef unsigned __sanitizer_nfds_t; #else typedef unsigned long __sanitizer_nfds_t; #endif #if !SANITIZER_ANDROID # if SANITIZER_LINUX struct __sanitizer_glob_t { uptr gl_pathc; char **gl_pathv; uptr gl_offs; int gl_flags; void (*gl_closedir)(void *dirp); void *(*gl_readdir)(void *dirp); void *(*gl_opendir)(const char *); int (*gl_lstat)(const char *, void *); int (*gl_stat)(const char *, void *); }; # elif SANITIZER_FREEBSD struct __sanitizer_glob_t { uptr gl_pathc; uptr gl_matchc; uptr gl_offs; int gl_flags; char **gl_pathv; int (*gl_errfunc)(const char*, int); void (*gl_closedir)(void *dirp); struct dirent *(*gl_readdir)(void *dirp); void *(*gl_opendir)(const char*); int (*gl_lstat)(const char*, void* /* struct stat* */); int (*gl_stat)(const char*, void* /* struct stat* */); }; # endif // SANITIZER_FREEBSD # if SANITIZER_LINUX || SANITIZER_FREEBSD extern int glob_nomatch; extern int glob_altdirfunc; # endif #endif // !SANITIZER_ANDROID extern unsigned path_max; struct __sanitizer_wordexp_t { uptr we_wordc; char **we_wordv; uptr we_offs; #if SANITIZER_FREEBSD char *we_strings; uptr we_nbytes; #endif }; #if SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer_FILE { int _flags; char *_IO_read_ptr; char *_IO_read_end; char *_IO_read_base; char *_IO_write_base; char *_IO_write_ptr; char *_IO_write_end; char *_IO_buf_base; char *_IO_buf_end; char *_IO_save_base; char *_IO_backup_base; char *_IO_save_end; void *_markers; __sanitizer_FILE *_chain; int 
_fileno; }; # define SANITIZER_HAS_STRUCT_FILE 1 #else typedef void __sanitizer_FILE; # define SANITIZER_HAS_STRUCT_FILE 0 #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ defined(__s390__)) extern unsigned struct_user_regs_struct_sz; extern unsigned struct_user_fpregs_struct_sz; extern unsigned struct_user_fpxregs_struct_sz; extern unsigned struct_user_vfpregs_struct_sz; extern int ptrace_peektext; extern int ptrace_peekdata; extern int ptrace_peekuser; extern int ptrace_getregs; extern int ptrace_setregs; extern int ptrace_getfpregs; extern int ptrace_setfpregs; extern int ptrace_getfpxregs; extern int ptrace_setfpxregs; extern int ptrace_getvfpregs; extern int ptrace_setvfpregs; extern int ptrace_getsiginfo; extern int ptrace_setsiginfo; extern int ptrace_getregset; extern int ptrace_setregset; extern int ptrace_geteventmsg; #endif #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID extern unsigned struct_shminfo_sz; extern unsigned struct_shm_info_sz; extern int shmctl_ipc_stat; extern int shmctl_ipc_info; extern int shmctl_shm_info; extern int shmctl_shm_stat; #endif #if !SANITIZER_MAC && !SANITIZER_FREEBSD extern unsigned struct_utmp_sz; #endif #if !SANITIZER_ANDROID extern unsigned struct_utmpx_sz; #endif extern int map_fixed; // ioctl arguments struct __sanitizer_ifconf { int ifc_len; union { void *ifcu_req; } ifc_ifcu; #if SANITIZER_MAC } __attribute__((packed)); #else }; #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer__obstack_chunk { char *limit; struct __sanitizer__obstack_chunk *prev; }; struct __sanitizer_obstack { long chunk_size; struct __sanitizer__obstack_chunk *chunk; char *object_base; char *next_free; uptr more_fields[7]; }; typedef uptr (*__sanitizer_cookie_io_read)(void *cookie, char *buf, uptr size); typedef uptr (*__sanitizer_cookie_io_write)(void *cookie, const char *buf, uptr size); typedef int (*__sanitizer_cookie_io_seek)(void *cookie, u64 *offset, int whence); typedef int (*__sanitizer_cookie_io_close)(void *cookie); struct __sanitizer_cookie_io_functions_t { __sanitizer_cookie_io_read read; __sanitizer_cookie_io_write write; __sanitizer_cookie_io_seek seek; __sanitizer_cookie_io_close close; }; #endif #define IOC_NRBITS 8 #define IOC_TYPEBITS 8 #if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \ defined(__sparc__) #define IOC_SIZEBITS 13 #define IOC_DIRBITS 3 #define IOC_NONE 1U #define IOC_WRITE 4U #define IOC_READ 2U #else #define IOC_SIZEBITS 14 #define IOC_DIRBITS 2 #define IOC_NONE 0U #define IOC_WRITE 1U #define IOC_READ 2U #endif #define IOC_NRMASK ((1 << IOC_NRBITS) - 1) #define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1) #define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1) #if defined(IOC_DIRMASK) #undef IOC_DIRMASK #endif #define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1) #define IOC_NRSHIFT 0 #define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS) #define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS) #define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS) #define EVIOC_EV_MAX 0x1f #define EVIOC_ABS_MAX 0x3f #define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK) #define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK) #define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK) #if defined(__sparc__) // In sparc the 14 bits SIZE field overlaps with the // least significant bit of DIR, so either IOC_READ or // IOC_WRITE shall be 1 in order to get a non-zero SIZE. 
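// Concretely (an illustrative decode, not from the original sources): bits
// 31..29 of 'nr' hold DIR, while the 14-bit SIZE field spans bits 29..16, so
// the two overlap in bit 29. Masking DIR with (IOC_READ | IOC_WRITE) == 6U
// tests whether the request transfers any data; only then does the sparc
// branch of IOC_SIZE below extract ((nr >> 16) & 0x3fff) as a real size.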
#define IOC_SIZE(nr) \ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff)) #else #define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK) #endif extern unsigned struct_ifreq_sz; extern unsigned struct_termios_sz; extern unsigned struct_winsize_sz; #if SANITIZER_LINUX extern unsigned struct_arpreq_sz; extern unsigned struct_cdrom_msf_sz; extern unsigned struct_cdrom_multisession_sz; extern unsigned struct_cdrom_read_audio_sz; extern unsigned struct_cdrom_subchnl_sz; extern unsigned struct_cdrom_ti_sz; extern unsigned struct_cdrom_tocentry_sz; extern unsigned struct_cdrom_tochdr_sz; extern unsigned struct_cdrom_volctrl_sz; extern unsigned struct_ff_effect_sz; extern unsigned struct_floppy_drive_params_sz; extern unsigned struct_floppy_drive_struct_sz; extern unsigned struct_floppy_fdc_state_sz; extern unsigned struct_floppy_max_errors_sz; extern unsigned struct_floppy_raw_cmd_sz; extern unsigned struct_floppy_struct_sz; extern unsigned struct_floppy_write_errors_sz; extern unsigned struct_format_descr_sz; extern unsigned struct_hd_driveid_sz; extern unsigned struct_hd_geometry_sz; extern unsigned struct_input_absinfo_sz; extern unsigned struct_input_id_sz; extern unsigned struct_mtpos_sz; extern unsigned struct_termio_sz; extern unsigned struct_vt_consize_sz; extern unsigned struct_vt_sizes_sz; extern unsigned struct_vt_stat_sz; #endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD extern unsigned struct_copr_buffer_sz; extern unsigned struct_copr_debug_buf_sz; extern unsigned struct_copr_msg_sz; extern unsigned struct_midi_info_sz; extern unsigned struct_mtget_sz; extern unsigned struct_mtop_sz; extern unsigned struct_rtentry_sz; extern unsigned struct_sbi_instrument_sz; extern unsigned struct_seq_event_rec_sz; extern unsigned struct_synth_info_sz; extern unsigned struct_vt_mode_sz; #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID extern unsigned struct_ax25_parms_struct_sz; extern unsigned struct_cyclades_monitor_sz; extern unsigned struct_input_keymap_entry_sz; extern unsigned struct_ipx_config_data_sz; extern unsigned struct_kbdiacrs_sz; extern unsigned struct_kbentry_sz; extern unsigned struct_kbkeycode_sz; extern unsigned struct_kbsentry_sz; extern unsigned struct_mtconfiginfo_sz; extern unsigned struct_nr_parms_struct_sz; extern unsigned struct_scc_modem_sz; extern unsigned struct_scc_stat_sz; extern unsigned struct_serial_multiport_struct_sz; extern unsigned struct_serial_struct_sz; extern unsigned struct_sockaddr_ax25_sz; extern unsigned struct_unimapdesc_sz; extern unsigned struct_unimapinit_sz; #endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID extern unsigned struct_audio_buf_info_sz; extern unsigned struct_ppp_stats_sz; #endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID #if !SANITIZER_ANDROID && !SANITIZER_MAC extern unsigned struct_sioc_sg_req_sz; extern unsigned struct_sioc_vif_req_sz; #endif // ioctl request identifiers // A special value to mark ioctls that are not present on the target platform, // when it can not be determined without including any system headers. 
extern const unsigned IOCTL_NOT_PRESENT; extern unsigned IOCTL_FIOASYNC; extern unsigned IOCTL_FIOCLEX; extern unsigned IOCTL_FIOGETOWN; extern unsigned IOCTL_FIONBIO; extern unsigned IOCTL_FIONCLEX; extern unsigned IOCTL_FIOSETOWN; extern unsigned IOCTL_SIOCADDMULTI; extern unsigned IOCTL_SIOCATMARK; extern unsigned IOCTL_SIOCDELMULTI; extern unsigned IOCTL_SIOCGIFADDR; extern unsigned IOCTL_SIOCGIFBRDADDR; extern unsigned IOCTL_SIOCGIFCONF; extern unsigned IOCTL_SIOCGIFDSTADDR; extern unsigned IOCTL_SIOCGIFFLAGS; extern unsigned IOCTL_SIOCGIFMETRIC; extern unsigned IOCTL_SIOCGIFMTU; extern unsigned IOCTL_SIOCGIFNETMASK; extern unsigned IOCTL_SIOCGPGRP; extern unsigned IOCTL_SIOCSIFADDR; extern unsigned IOCTL_SIOCSIFBRDADDR; extern unsigned IOCTL_SIOCSIFDSTADDR; extern unsigned IOCTL_SIOCSIFFLAGS; extern unsigned IOCTL_SIOCSIFMETRIC; extern unsigned IOCTL_SIOCSIFMTU; extern unsigned IOCTL_SIOCSIFNETMASK; extern unsigned IOCTL_SIOCSPGRP; extern unsigned IOCTL_TIOCCONS; extern unsigned IOCTL_TIOCEXCL; extern unsigned IOCTL_TIOCGETD; extern unsigned IOCTL_TIOCGPGRP; extern unsigned IOCTL_TIOCGWINSZ; extern unsigned IOCTL_TIOCMBIC; extern unsigned IOCTL_TIOCMBIS; extern unsigned IOCTL_TIOCMGET; extern unsigned IOCTL_TIOCMSET; extern unsigned IOCTL_TIOCNOTTY; extern unsigned IOCTL_TIOCNXCL; extern unsigned IOCTL_TIOCOUTQ; extern unsigned IOCTL_TIOCPKT; extern unsigned IOCTL_TIOCSCTTY; extern unsigned IOCTL_TIOCSETD; extern unsigned IOCTL_TIOCSPGRP; extern unsigned IOCTL_TIOCSTI; extern unsigned IOCTL_TIOCSWINSZ; #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID extern unsigned IOCTL_SIOCGETSGCNT; extern unsigned IOCTL_SIOCGETVIFCNT; #endif #if SANITIZER_LINUX extern unsigned IOCTL_EVIOCGABS; extern unsigned IOCTL_EVIOCGBIT; extern unsigned IOCTL_EVIOCGEFFECTS; extern unsigned IOCTL_EVIOCGID; extern unsigned IOCTL_EVIOCGKEY; extern unsigned IOCTL_EVIOCGKEYCODE; extern unsigned IOCTL_EVIOCGLED; extern unsigned IOCTL_EVIOCGNAME; extern unsigned IOCTL_EVIOCGPHYS; extern unsigned IOCTL_EVIOCGRAB; extern unsigned IOCTL_EVIOCGREP; extern unsigned IOCTL_EVIOCGSND; extern unsigned IOCTL_EVIOCGSW; extern unsigned IOCTL_EVIOCGUNIQ; extern unsigned IOCTL_EVIOCGVERSION; extern unsigned IOCTL_EVIOCRMFF; extern unsigned IOCTL_EVIOCSABS; extern unsigned IOCTL_EVIOCSFF; extern unsigned IOCTL_EVIOCSKEYCODE; extern unsigned IOCTL_EVIOCSREP; extern unsigned IOCTL_BLKFLSBUF; extern unsigned IOCTL_BLKGETSIZE; extern unsigned IOCTL_BLKRAGET; extern unsigned IOCTL_BLKRASET; extern unsigned IOCTL_BLKROGET; extern unsigned IOCTL_BLKROSET; extern unsigned IOCTL_BLKRRPART; extern unsigned IOCTL_CDROMAUDIOBUFSIZ; extern unsigned IOCTL_CDROMEJECT; extern unsigned IOCTL_CDROMEJECT_SW; extern unsigned IOCTL_CDROMMULTISESSION; extern unsigned IOCTL_CDROMPAUSE; extern unsigned IOCTL_CDROMPLAYMSF; extern unsigned IOCTL_CDROMPLAYTRKIND; extern unsigned IOCTL_CDROMREADAUDIO; extern unsigned IOCTL_CDROMREADCOOKED; extern unsigned IOCTL_CDROMREADMODE1; extern unsigned IOCTL_CDROMREADMODE2; extern unsigned IOCTL_CDROMREADRAW; extern unsigned IOCTL_CDROMREADTOCENTRY; extern unsigned IOCTL_CDROMREADTOCHDR; extern unsigned IOCTL_CDROMRESET; extern unsigned IOCTL_CDROMRESUME; extern unsigned IOCTL_CDROMSEEK; extern unsigned IOCTL_CDROMSTART; extern unsigned IOCTL_CDROMSTOP; extern unsigned IOCTL_CDROMSUBCHNL; extern unsigned IOCTL_CDROMVOLCTRL; extern unsigned IOCTL_CDROMVOLREAD; extern unsigned IOCTL_CDROM_GET_UPC; extern unsigned IOCTL_FDCLRPRM; extern unsigned IOCTL_FDDEFPRM; extern unsigned IOCTL_FDFLUSH; extern 
unsigned IOCTL_FDFMTBEG; extern unsigned IOCTL_FDFMTEND; extern unsigned IOCTL_FDFMTTRK; extern unsigned IOCTL_FDGETDRVPRM; extern unsigned IOCTL_FDGETDRVSTAT; extern unsigned IOCTL_FDGETDRVTYP; extern unsigned IOCTL_FDGETFDCSTAT; extern unsigned IOCTL_FDGETMAXERRS; extern unsigned IOCTL_FDGETPRM; extern unsigned IOCTL_FDMSGOFF; extern unsigned IOCTL_FDMSGON; extern unsigned IOCTL_FDPOLLDRVSTAT; extern unsigned IOCTL_FDRAWCMD; extern unsigned IOCTL_FDRESET; extern unsigned IOCTL_FDSETDRVPRM; extern unsigned IOCTL_FDSETEMSGTRESH; extern unsigned IOCTL_FDSETMAXERRS; extern unsigned IOCTL_FDSETPRM; extern unsigned IOCTL_FDTWADDLE; extern unsigned IOCTL_FDWERRORCLR; extern unsigned IOCTL_FDWERRORGET; extern unsigned IOCTL_HDIO_DRIVE_CMD; extern unsigned IOCTL_HDIO_GETGEO; extern unsigned IOCTL_HDIO_GET_32BIT; extern unsigned IOCTL_HDIO_GET_DMA; extern unsigned IOCTL_HDIO_GET_IDENTITY; extern unsigned IOCTL_HDIO_GET_KEEPSETTINGS; extern unsigned IOCTL_HDIO_GET_MULTCOUNT; extern unsigned IOCTL_HDIO_GET_NOWERR; extern unsigned IOCTL_HDIO_GET_UNMASKINTR; extern unsigned IOCTL_HDIO_SET_32BIT; extern unsigned IOCTL_HDIO_SET_DMA; extern unsigned IOCTL_HDIO_SET_KEEPSETTINGS; extern unsigned IOCTL_HDIO_SET_MULTCOUNT; extern unsigned IOCTL_HDIO_SET_NOWERR; extern unsigned IOCTL_HDIO_SET_UNMASKINTR; extern unsigned IOCTL_MTIOCPOS; extern unsigned IOCTL_PPPIOCGASYNCMAP; extern unsigned IOCTL_PPPIOCGDEBUG; extern unsigned IOCTL_PPPIOCGFLAGS; extern unsigned IOCTL_PPPIOCGUNIT; extern unsigned IOCTL_PPPIOCGXASYNCMAP; extern unsigned IOCTL_PPPIOCSASYNCMAP; extern unsigned IOCTL_PPPIOCSDEBUG; extern unsigned IOCTL_PPPIOCSFLAGS; extern unsigned IOCTL_PPPIOCSMAXCID; extern unsigned IOCTL_PPPIOCSMRU; extern unsigned IOCTL_PPPIOCSXASYNCMAP; extern unsigned IOCTL_SIOCDARP; extern unsigned IOCTL_SIOCDRARP; extern unsigned IOCTL_SIOCGARP; extern unsigned IOCTL_SIOCGIFENCAP; extern unsigned IOCTL_SIOCGIFHWADDR; extern unsigned IOCTL_SIOCGIFMAP; extern unsigned IOCTL_SIOCGIFMEM; extern unsigned IOCTL_SIOCGIFNAME; extern unsigned IOCTL_SIOCGIFSLAVE; extern unsigned IOCTL_SIOCGRARP; extern unsigned IOCTL_SIOCGSTAMP; extern unsigned IOCTL_SIOCSARP; extern unsigned IOCTL_SIOCSIFENCAP; extern unsigned IOCTL_SIOCSIFHWADDR; extern unsigned IOCTL_SIOCSIFLINK; extern unsigned IOCTL_SIOCSIFMAP; extern unsigned IOCTL_SIOCSIFMEM; extern unsigned IOCTL_SIOCSIFSLAVE; extern unsigned IOCTL_SIOCSRARP; extern unsigned IOCTL_SNDCTL_COPR_HALT; extern unsigned IOCTL_SNDCTL_COPR_LOAD; extern unsigned IOCTL_SNDCTL_COPR_RCODE; extern unsigned IOCTL_SNDCTL_COPR_RCVMSG; extern unsigned IOCTL_SNDCTL_COPR_RDATA; extern unsigned IOCTL_SNDCTL_COPR_RESET; extern unsigned IOCTL_SNDCTL_COPR_RUN; extern unsigned IOCTL_SNDCTL_COPR_SENDMSG; extern unsigned IOCTL_SNDCTL_COPR_WCODE; extern unsigned IOCTL_SNDCTL_COPR_WDATA; extern unsigned IOCTL_TCFLSH; extern unsigned IOCTL_TCGETA; extern unsigned IOCTL_TCGETS; extern unsigned IOCTL_TCSBRK; extern unsigned IOCTL_TCSBRKP; extern unsigned IOCTL_TCSETA; extern unsigned IOCTL_TCSETAF; extern unsigned IOCTL_TCSETAW; extern unsigned IOCTL_TCSETS; extern unsigned IOCTL_TCSETSF; extern unsigned IOCTL_TCSETSW; extern unsigned IOCTL_TCXONC; extern unsigned IOCTL_TIOCGLCKTRMIOS; extern unsigned IOCTL_TIOCGSOFTCAR; extern unsigned IOCTL_TIOCINQ; extern unsigned IOCTL_TIOCLINUX; extern unsigned IOCTL_TIOCSERCONFIG; extern unsigned IOCTL_TIOCSERGETLSR; extern unsigned IOCTL_TIOCSERGWILD; extern unsigned IOCTL_TIOCSERSWILD; extern unsigned IOCTL_TIOCSLCKTRMIOS; extern unsigned IOCTL_TIOCSSOFTCAR; extern unsigned 
IOCTL_VT_DISALLOCATE; extern unsigned IOCTL_VT_GETSTATE; extern unsigned IOCTL_VT_RESIZE; extern unsigned IOCTL_VT_RESIZEX; extern unsigned IOCTL_VT_SENDSIG; #endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD extern unsigned IOCTL_MTIOCGET; extern unsigned IOCTL_MTIOCTOP; extern unsigned IOCTL_SIOCADDRT; extern unsigned IOCTL_SIOCDELRT; extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE; extern unsigned IOCTL_SNDCTL_DSP_GETFMTS; extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK; extern unsigned IOCTL_SNDCTL_DSP_POST; extern unsigned IOCTL_SNDCTL_DSP_RESET; extern unsigned IOCTL_SNDCTL_DSP_SETFMT; extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT; extern unsigned IOCTL_SNDCTL_DSP_SPEED; extern unsigned IOCTL_SNDCTL_DSP_STEREO; extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE; extern unsigned IOCTL_SNDCTL_DSP_SYNC; extern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE; extern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR; extern unsigned IOCTL_SNDCTL_MIDI_INFO; extern unsigned IOCTL_SNDCTL_MIDI_PRETIME; extern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE; extern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT; extern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT; extern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS; extern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS; extern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND; extern unsigned IOCTL_SNDCTL_SEQ_PANIC; extern unsigned IOCTL_SNDCTL_SEQ_PERCMODE; extern unsigned IOCTL_SNDCTL_SEQ_RESET; extern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES; extern unsigned IOCTL_SNDCTL_SEQ_SYNC; extern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI; extern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD; extern unsigned IOCTL_SNDCTL_SYNTH_INFO; extern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL; extern unsigned IOCTL_SNDCTL_TMR_CONTINUE; extern unsigned IOCTL_SNDCTL_TMR_METRONOME; extern unsigned IOCTL_SNDCTL_TMR_SELECT; extern unsigned IOCTL_SNDCTL_TMR_SOURCE; extern unsigned IOCTL_SNDCTL_TMR_START; extern unsigned IOCTL_SNDCTL_TMR_STOP; extern unsigned IOCTL_SNDCTL_TMR_TEMPO; extern unsigned IOCTL_SNDCTL_TMR_TIMEBASE; extern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM; extern unsigned IOCTL_SOUND_MIXER_READ_BASS; extern unsigned IOCTL_SOUND_MIXER_READ_CAPS; extern unsigned IOCTL_SOUND_MIXER_READ_CD; extern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK; extern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE; extern unsigned IOCTL_SOUND_MIXER_READ_IGAIN; extern unsigned IOCTL_SOUND_MIXER_READ_IMIX; extern unsigned IOCTL_SOUND_MIXER_READ_LINE1; extern unsigned IOCTL_SOUND_MIXER_READ_LINE2; extern unsigned IOCTL_SOUND_MIXER_READ_LINE3; extern unsigned IOCTL_SOUND_MIXER_READ_LINE; extern unsigned IOCTL_SOUND_MIXER_READ_LOUD; extern unsigned IOCTL_SOUND_MIXER_READ_MIC; extern unsigned IOCTL_SOUND_MIXER_READ_MUTE; extern unsigned IOCTL_SOUND_MIXER_READ_OGAIN; extern unsigned IOCTL_SOUND_MIXER_READ_PCM; extern unsigned IOCTL_SOUND_MIXER_READ_RECLEV; extern unsigned IOCTL_SOUND_MIXER_READ_RECMASK; extern unsigned IOCTL_SOUND_MIXER_READ_RECSRC; extern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER; extern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS; extern unsigned IOCTL_SOUND_MIXER_READ_SYNTH; extern unsigned IOCTL_SOUND_MIXER_READ_TREBLE; extern unsigned IOCTL_SOUND_MIXER_READ_VOLUME; extern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM; extern unsigned IOCTL_SOUND_MIXER_WRITE_BASS; extern unsigned IOCTL_SOUND_MIXER_WRITE_CD; extern unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE; extern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN; extern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX; extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1; extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2; extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3; extern unsigned 
IOCTL_SOUND_MIXER_WRITE_LINE; extern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD; extern unsigned IOCTL_SOUND_MIXER_WRITE_MIC; extern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE; extern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN; extern unsigned IOCTL_SOUND_MIXER_WRITE_PCM; extern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV; extern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC; extern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER; extern unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH; extern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE; extern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME; extern unsigned IOCTL_SOUND_PCM_READ_BITS; extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS; extern unsigned IOCTL_SOUND_PCM_READ_FILTER; extern unsigned IOCTL_SOUND_PCM_READ_RATE; extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS; extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER; extern unsigned IOCTL_VT_ACTIVATE; extern unsigned IOCTL_VT_GETMODE; extern unsigned IOCTL_VT_OPENQRY; extern unsigned IOCTL_VT_RELDISP; extern unsigned IOCTL_VT_SETMODE; extern unsigned IOCTL_VT_WAITACTIVE; #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID extern unsigned IOCTL_CYGETDEFTHRESH; extern unsigned IOCTL_CYGETDEFTIMEOUT; extern unsigned IOCTL_CYGETMON; extern unsigned IOCTL_CYGETTHRESH; extern unsigned IOCTL_CYGETTIMEOUT; extern unsigned IOCTL_CYSETDEFTHRESH; extern unsigned IOCTL_CYSETDEFTIMEOUT; extern unsigned IOCTL_CYSETTHRESH; extern unsigned IOCTL_CYSETTIMEOUT; extern unsigned IOCTL_EQL_EMANCIPATE; extern unsigned IOCTL_EQL_ENSLAVE; extern unsigned IOCTL_EQL_GETMASTRCFG; extern unsigned IOCTL_EQL_GETSLAVECFG; extern unsigned IOCTL_EQL_SETMASTRCFG; extern unsigned IOCTL_EQL_SETSLAVECFG; extern unsigned IOCTL_EVIOCGKEYCODE_V2; extern unsigned IOCTL_EVIOCGPROP; extern unsigned IOCTL_EVIOCSKEYCODE_V2; extern unsigned IOCTL_FS_IOC_GETFLAGS; extern unsigned IOCTL_FS_IOC_GETVERSION; extern unsigned IOCTL_FS_IOC_SETFLAGS; extern unsigned IOCTL_FS_IOC_SETVERSION; extern unsigned IOCTL_GIO_CMAP; extern unsigned IOCTL_GIO_FONT; extern unsigned IOCTL_GIO_UNIMAP; extern unsigned IOCTL_GIO_UNISCRNMAP; extern unsigned IOCTL_KDADDIO; extern unsigned IOCTL_KDDELIO; extern unsigned IOCTL_KDGETKEYCODE; extern unsigned IOCTL_KDGKBDIACR; extern unsigned IOCTL_KDGKBENT; extern unsigned IOCTL_KDGKBLED; extern unsigned IOCTL_KDGKBMETA; extern unsigned IOCTL_KDGKBSENT; extern unsigned IOCTL_KDMAPDISP; extern unsigned IOCTL_KDSETKEYCODE; extern unsigned IOCTL_KDSIGACCEPT; extern unsigned IOCTL_KDSKBDIACR; extern unsigned IOCTL_KDSKBENT; extern unsigned IOCTL_KDSKBLED; extern unsigned IOCTL_KDSKBMETA; extern unsigned IOCTL_KDSKBSENT; extern unsigned IOCTL_KDUNMAPDISP; extern unsigned IOCTL_LPABORT; extern unsigned IOCTL_LPABORTOPEN; extern unsigned IOCTL_LPCAREFUL; extern unsigned IOCTL_LPCHAR; extern unsigned IOCTL_LPGETIRQ; extern unsigned IOCTL_LPGETSTATUS; extern unsigned IOCTL_LPRESET; extern unsigned IOCTL_LPSETIRQ; extern unsigned IOCTL_LPTIME; extern unsigned IOCTL_LPWAIT; extern unsigned IOCTL_MTIOCGETCONFIG; extern unsigned IOCTL_MTIOCSETCONFIG; extern unsigned IOCTL_PIO_CMAP; extern unsigned IOCTL_PIO_FONT; extern unsigned IOCTL_PIO_UNIMAP; extern unsigned IOCTL_PIO_UNIMAPCLR; extern unsigned IOCTL_PIO_UNISCRNMAP; extern unsigned IOCTL_SCSI_IOCTL_GET_IDLUN; extern unsigned IOCTL_SCSI_IOCTL_PROBE_HOST; extern unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE; extern unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE; extern unsigned IOCTL_SIOCAIPXITFCRT; extern unsigned IOCTL_SIOCAIPXPRISLT; extern unsigned IOCTL_SIOCAX25ADDUID; extern unsigned IOCTL_SIOCAX25DELUID; 
extern unsigned IOCTL_SIOCAX25GETPARMS; extern unsigned IOCTL_SIOCAX25GETUID; extern unsigned IOCTL_SIOCAX25NOUID; extern unsigned IOCTL_SIOCAX25SETPARMS; extern unsigned IOCTL_SIOCDEVPLIP; extern unsigned IOCTL_SIOCIPXCFGDATA; extern unsigned IOCTL_SIOCNRDECOBS; extern unsigned IOCTL_SIOCNRGETPARMS; extern unsigned IOCTL_SIOCNRRTCTL; extern unsigned IOCTL_SIOCNRSETPARMS; extern unsigned IOCTL_SNDCTL_DSP_GETISPACE; extern unsigned IOCTL_SNDCTL_DSP_GETOSPACE; extern unsigned IOCTL_TIOCGSERIAL; extern unsigned IOCTL_TIOCSERGETMULTI; extern unsigned IOCTL_TIOCSERSETMULTI; extern unsigned IOCTL_TIOCSSERIAL; #endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID extern unsigned IOCTL_GIO_SCRNMAP; extern unsigned IOCTL_KDDISABIO; extern unsigned IOCTL_KDENABIO; extern unsigned IOCTL_KDGETLED; extern unsigned IOCTL_KDGETMODE; extern unsigned IOCTL_KDGKBMODE; extern unsigned IOCTL_KDGKBTYPE; extern unsigned IOCTL_KDMKTONE; extern unsigned IOCTL_KDSETLED; extern unsigned IOCTL_KDSETMODE; extern unsigned IOCTL_KDSKBMODE; extern unsigned IOCTL_KIOCSOUND; extern unsigned IOCTL_PIO_SCRNMAP; #endif - extern const int errno_EINVAL; - extern const int errno_EOWNERDEAD; - extern const int si_SEGV_MAPERR; extern const int si_SEGV_ACCERR; } // namespace __sanitizer #define CHECK_TYPE_SIZE(TYPE) \ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE)) #define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \ sizeof(((CLASS *) NULL)->MEMBER)); \ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \ offsetof(CLASS, MEMBER)) // For sigaction, which is a function and struct at the same time, // and thus requires explicit "struct" in sizeof() expression. #define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \ sizeof(((struct CLASS *) NULL)->MEMBER)); \ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \ offsetof(struct CLASS, MEMBER)) #endif Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_posix.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_posix.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_posix.cc (revision 320961) @@ -1,408 +1,325 @@ //===-- sanitizer_posix.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries and implements POSIX-specific functions from // sanitizer_posix.h. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_POSIX #include "sanitizer_common.h" #include "sanitizer_libc.h" #include "sanitizer_posix.h" #include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" #include <errno.h> #include <fcntl.h> #include <signal.h> #include <sys/mman.h> -#if SANITIZER_LINUX -#include <sys/personality.h> -#endif - -#if SANITIZER_LINUX && !SANITIZER_ANDROID -#include <sys/utsname.h> -#endif - #if SANITIZER_FREEBSD // The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before // that, it was never implemented. So just define it to zero.
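// (With MAP_NORESERVE defined to zero, 'MAP_ANON | MAP_NORESERVE' in
// MmapNoReserveOrDie below degenerates to plain MAP_ANON, which is the
// intended no-op behavior on FreeBSD.)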
#undef MAP_NORESERVE #define MAP_NORESERVE 0 #endif namespace __sanitizer { // ------------- sanitizer_common.h uptr GetMmapGranularity() { return GetPageSize(); } -#if SANITIZER_WORDSIZE == 32 -// Take care of unusable kernel area in top gigabyte. -static uptr GetKernelAreaSize() { -#if SANITIZER_LINUX && !SANITIZER_X32 - const uptr gbyte = 1UL << 30; - - // Firstly check if there are writable segments - // mapped to top gigabyte (e.g. stack). - MemoryMappingLayout proc_maps(/*cache_enabled*/true); - uptr end, prot; - while (proc_maps.Next(/*start*/nullptr, &end, - /*offset*/nullptr, /*filename*/nullptr, - /*filename_size*/0, &prot)) { - if ((end >= 3 * gbyte) - && (prot & MemoryMappingLayout::kProtectionWrite) != 0) - return 0; - } - -#if !SANITIZER_ANDROID - // Even if nothing is mapped, top Gb may still be accessible - // if we are running on 64-bit kernel. - // Uname may report misleading results if personality type - // is modified (e.g. under schroot) so check this as well. - struct utsname uname_info; - int pers = personality(0xffffffffUL); - if (!(pers & PER_MASK) - && uname(&uname_info) == 0 - && internal_strstr(uname_info.machine, "64")) - return 0; -#endif // SANITIZER_ANDROID - - // Top gigabyte is reserved for kernel. - return gbyte; -#else - return 0; -#endif // SANITIZER_LINUX && !SANITIZER_X32 -} -#endif // SANITIZER_WORDSIZE == 32 - -uptr GetMaxVirtualAddress() { -#if SANITIZER_WORDSIZE == 64 -# if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM - // Ideally, we would derive the upper bound from MACH_VM_MAX_ADDRESS. The - // upper bound can change depending on the device. - return 0x200000000 - 1; -# elif defined(__powerpc64__) || defined(__aarch64__) - // On PowerPC64 we have two different address space layouts: 44- and 46-bit. - // We somehow need to figure out which one we are using now and choose - // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL. - // Note that with 'ulimit -s unlimited' the stack is moved away from the top - // of the address space, so simply checking the stack address is not enough. - // This should (does) work for both PowerPC64 Endian modes. - // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit. 
- return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1; -# elif defined(__mips64) - return (1ULL << 40) - 1; // 0x000000ffffffffffUL; -# elif defined(__s390x__) - return (1ULL << 53) - 1; // 0x001fffffffffffffUL; -# else - return (1ULL << 47) - 1; // 0x00007fffffffffffUL; -# endif -#else // SANITIZER_WORDSIZE == 32 -# if defined(__s390__) - return (1ULL << 31) - 1; // 0x7fffffff; -# else - uptr res = (1ULL << 32) - 1; // 0xffffffff; - if (!common_flags()->full_address_space) - res -= GetKernelAreaSize(); - CHECK_LT(reinterpret_cast(&res), res); - return res; -# endif -#endif // SANITIZER_WORDSIZE -} - void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) { size = RoundUpTo(size, GetPageSizeCached()); uptr res = internal_mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); int reserrno; if (UNLIKELY(internal_iserror(res, &reserrno))) ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report); IncreaseTotalMmap(size); return (void *)res; } void UnmapOrDie(void *addr, uptr size) { if (!addr || !size) return; uptr res = internal_munmap(addr, size); if (UNLIKELY(internal_iserror(res))) { Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n", SanitizerToolName, size, size, addr); CHECK("unable to unmap" && 0); } DecreaseTotalMmap(size); } void *MmapOrDieOnFatalError(uptr size, const char *mem_type) { size = RoundUpTo(size, GetPageSizeCached()); uptr res = internal_mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); int reserrno; if (UNLIKELY(internal_iserror(res, &reserrno))) { if (reserrno == ENOMEM) return nullptr; ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno); } IncreaseTotalMmap(size); return (void *)res; } // We want to map a chunk of address space aligned to 'alignment'. -// We do it by maping a bit more and then unmaping redundant pieces. +// We do it by mapping a bit more and then unmapping redundant pieces. // We probably can do it with fewer syscalls in some OS-dependent way. 
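// Worked example (hypothetical addresses, for illustration only): with
// size == alignment == 0x10000, map_size is 0x20000. If internal_mmap
// returns map_res == 0x7f0000012000, the first aligned address inside the
// mapping is res == 0x7f0000020000; the code below then unmaps the
// 0xe000-byte head [map_res, res) and the 0x2000-byte tail
// [res + size, map_end), leaving exactly one aligned, size-byte chunk.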
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, const char *mem_type) { CHECK(IsPowerOfTwo(size)); CHECK(IsPowerOfTwo(alignment)); uptr map_size = size + alignment; uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type); if (UNLIKELY(!map_res)) return nullptr; uptr map_end = map_res + map_size; uptr res = map_res; if (!IsAligned(res, alignment)) { res = (map_res + alignment - 1) & ~(alignment - 1); UnmapOrDie((void*)map_res, res - map_res); } uptr end = res + size; if (end != map_end) UnmapOrDie((void*)end, map_end - end); return (void*)res; } void *MmapNoReserveOrDie(uptr size, const char *mem_type) { uptr PageSize = GetPageSizeCached(); uptr p = internal_mmap(nullptr, RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0); int reserrno; if (UNLIKELY(internal_iserror(p, &reserrno))) ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno); IncreaseTotalMmap(size); return (void *)p; } void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem) { uptr PageSize = GetPageSizeCached(); uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)), RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0); int reserrno; if (UNLIKELY(internal_iserror(p, &reserrno))) { if (tolerate_enomem && reserrno == ENOMEM) return nullptr; char mem_type[40]; internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx", fixed_addr); ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno); } IncreaseTotalMmap(size); return (void *)p; } void *MmapFixedOrDie(uptr fixed_addr, uptr size) { return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/); } void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) { return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/); } bool MprotectNoAccess(uptr addr, uptr size) { return 0 == internal_mprotect((void*)addr, size, PROT_NONE); } bool MprotectReadOnly(uptr addr, uptr size) { return 0 == internal_mprotect((void *)addr, size, PROT_READ); } fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) { int flags; switch (mode) { case RdOnly: flags = O_RDONLY; break; case WrOnly: flags = O_WRONLY | O_CREAT; break; case RdWr: flags = O_RDWR | O_CREAT; break; } fd_t res = internal_open(filename, flags, 0660); if (internal_iserror(res, errno_p)) return kInvalidFd; return res; } void CloseFile(fd_t fd) { internal_close(fd); } bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read, error_t *error_p) { uptr res = internal_read(fd, buff, buff_size); if (internal_iserror(res, error_p)) return false; if (bytes_read) *bytes_read = res; return true; } bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written, error_t *error_p) { uptr res = internal_write(fd, buff, buff_size); if (internal_iserror(res, error_p)) return false; if (bytes_written) *bytes_written = res; return true; } bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) { uptr res = internal_rename(oldpath, newpath); return !internal_iserror(res, error_p); } void *MapFileToMemory(const char *file_name, uptr *buff_size) { fd_t fd = OpenFile(file_name, RdOnly); CHECK(fd != kInvalidFd); uptr fsize = internal_filesize(fd); CHECK_NE(fsize, (uptr)-1); CHECK_GT(fsize, 0); *buff_size = RoundUpTo(fsize, GetPageSizeCached()); uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0); return internal_iserror(map) ? 
nullptr : (void *)map; } void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) { uptr flags = MAP_SHARED; if (addr) flags |= MAP_FIXED; uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset); int mmap_errno = 0; if (internal_iserror(p, &mmap_errno)) { Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n", fd, (long long)offset, size, p, mmap_errno); return nullptr; } return (void *)p; } static inline bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2, uptr end2) { CHECK(start1 <= end1); CHECK(start2 <= end2); return (end1 < start2) || (end2 < start1); } // FIXME: this is thread-unsafe, but should not cause problems most of the time. // When the shadow is mapped only a single thread usually exists (plus maybe // several worker threads on Mac, which aren't expected to map big chunks of // memory). bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { MemoryMappingLayout proc_maps(/*cache_enabled*/true); - uptr start, end; - while (proc_maps.Next(&start, &end, - /*offset*/nullptr, /*filename*/nullptr, - /*filename_size*/0, /*protection*/nullptr)) { - if (start == end) continue; // Empty range. - CHECK_NE(0, end); - if (!IntervalsAreSeparate(start, end - 1, range_start, range_end)) + MemoryMappedSegment segment; + while (proc_maps.Next(&segment)) { + if (segment.start == segment.end) continue; // Empty range. + CHECK_NE(0, segment.end); + if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start, + range_end)) return false; } return true; } void DumpProcessMap() { MemoryMappingLayout proc_maps(/*cache_enabled*/true); - uptr start, end; const sptr kBufSize = 4095; char *filename = (char*)MmapOrDie(kBufSize, __func__); + MemoryMappedSegment segment(filename, kBufSize); Report("Process memory map follows:\n"); - while (proc_maps.Next(&start, &end, /* file_offset */nullptr, - filename, kBufSize, /* protection */nullptr)) { - Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename); + while (proc_maps.Next(&segment)) { + Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end, + segment.filename); } Report("End of process memory map.\n"); UnmapOrDie(filename, kBufSize); } const char *GetPwd() { return GetEnv("PWD"); } bool IsPathSeparator(const char c) { return c == '/'; } bool IsAbsolutePath(const char *path) { return path != nullptr && IsPathSeparator(path[0]); } void ReportFile::Write(const char *buffer, uptr length) { SpinMutexLock l(mu); static const char *kWriteError = "ReportFile::Write() can't output requested buffer!\n"; ReopenIfNecessary(); if (length != internal_write(fd, buffer, length)) { internal_write(fd, kWriteError, internal_strlen(kWriteError)); Die(); } } bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) { - uptr s, e, off, prot; - InternalScopedString buff(kMaxPathLength); MemoryMappingLayout proc_maps(/*cache_enabled*/false); - while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) { - if ((prot & MemoryMappingLayout::kProtectionExecute) != 0 - && internal_strcmp(module, buff.data()) == 0) { - *start = s; - *end = e; + InternalScopedString buff(kMaxPathLength); + MemoryMappedSegment segment(buff.data(), kMaxPathLength); + while (proc_maps.Next(&segment)) { + if (segment.IsExecutable() && + internal_strcmp(module, segment.filename) == 0) { + *start = segment.start; + *end = segment.end; return true; } } return false; } SignalContext SignalContext::Create(void *siginfo, void *context) { auto si = (siginfo_t *)siginfo; uptr addr = 
(uptr)si->si_addr; uptr pc, sp, bp; GetPcSpBp(context, &pc, &sp, &bp); WriteFlag write_flag = GetWriteFlag(context); bool is_memory_access = si->si_signo == SIGSEGV; return SignalContext(context, addr, pc, sp, bp, is_memory_access, write_flag); } const char *DescribeSignalOrException(int signo) { switch (signo) { case SIGFPE: return "FPE"; case SIGILL: return "ILL"; case SIGABRT: return "ABRT"; case SIGSEGV: return "SEGV"; case SIGBUS: return "BUS"; } return "UNKNOWN SIGNAL"; } } // namespace __sanitizer #endif // SANITIZER_POSIX Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps.h =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps.h (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps.h (revision 320961) @@ -1,103 +1,118 @@ //===-- sanitizer_procmaps.h ------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and ThreadSanitizer. // // Information about the process mappings. //===----------------------------------------------------------------------===// #ifndef SANITIZER_PROCMAPS_H #define SANITIZER_PROCMAPS_H #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_mutex.h" namespace __sanitizer { #if SANITIZER_FREEBSD || SANITIZER_LINUX struct ProcSelfMapsBuff { char *data; uptr mmaped_size; uptr len; }; // Reads process memory map in an OS-specific way. void ReadProcMaps(ProcSelfMapsBuff *proc_maps); #endif // SANITIZER_FREEBSD || SANITIZER_LINUX +// Memory protection masks. +static const uptr kProtectionRead = 1; +static const uptr kProtectionWrite = 2; +static const uptr kProtectionExecute = 4; +static const uptr kProtectionShared = 8; + +struct MemoryMappedSegment { + MemoryMappedSegment(char *buff = nullptr, uptr size = 0) + : filename(buff), filename_size(size) {} + ~MemoryMappedSegment() {} + + bool IsReadable() { return protection & kProtectionRead; } + bool IsWritable() { return protection & kProtectionWrite; } + bool IsExecutable() { return protection & kProtectionExecute; } + bool IsShared() { return protection & kProtectionShared; } + + uptr start; + uptr end; + uptr offset; + char *filename; // owned by caller + uptr filename_size; + uptr protection; + ModuleArch arch; + u8 uuid[kModuleUUIDSize]; +}; + class MemoryMappingLayout { public: explicit MemoryMappingLayout(bool cache_enabled); ~MemoryMappingLayout(); - bool Next(uptr *start, uptr *end, uptr *offset, char filename[], - uptr filename_size, uptr *protection, ModuleArch *arch = nullptr, - u8 *uuid = nullptr); + bool Next(MemoryMappedSegment *segment); void Reset(); // In some cases, e.g. when running under a sandbox on Linux, ASan is unable // to obtain the memory mappings. It should fall back to pre-cached data // instead of aborting. static void CacheMemoryMappings(); // Adds all mapped objects into a vector. void DumpListOfModules(InternalMmapVector<LoadedModule> *modules); - // Memory protection masks.
- static const uptr kProtectionRead = 1; - static const uptr kProtectionWrite = 2; - static const uptr kProtectionExecute = 4; - static const uptr kProtectionShared = 8; - private: void LoadFromCache(); // FIXME: Hide implementation details for different platforms in // platform-specific files. # if SANITIZER_FREEBSD || SANITIZER_LINUX ProcSelfMapsBuff proc_self_maps_; const char *current_; // Static mappings cache. static ProcSelfMapsBuff cached_proc_self_maps_; static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_. # elif SANITIZER_MAC template <u32 kLCSegment, typename SegmentCommand> - bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset, char filename[], - uptr filename_size, ModuleArch *arch, u8 *uuid, - uptr *protection); - void GetSegmentAddrRange(uptr *start, uptr *end, uptr vmaddr, uptr vmsize); + bool NextSegmentLoad(MemoryMappedSegment *segment); int current_image_; u32 current_magic_; u32 current_filetype_; ModuleArch current_arch_; u8 current_uuid_[kModuleUUIDSize]; int current_load_cmd_count_; char *current_load_cmd_addr_; bool current_instrumented_; # endif }; typedef void (*fill_profile_f)(uptr start, uptr rss, bool file, /*out*/uptr *stats, uptr stats_size); // Parse the contents of /proc/self/smaps and generate a memory profile. // |cb| is a tool-specific callback that fills the |stats| array containing // |stats_size| elements. void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size); // Returns code range for the specified module. bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end); bool IsDecimal(char c); uptr ParseDecimal(const char **p); bool IsHex(char c); uptr ParseHex(const char **p); } // namespace __sanitizer #endif // SANITIZER_PROCMAPS_H Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_common.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_common.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_common.cc (revision 320961) @@ -1,176 +1,174 @@ //===-- sanitizer_procmaps_common.cc --------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Information about the process mappings (common parts). //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" namespace __sanitizer { // Linker initialized. ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_; StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized. static int TranslateDigit(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } // Parse a number and promote 'p' up to the first non-digit character.
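// Usage sketch (hypothetical buffer, mirroring a /proc/self/maps line):
//   const char *p = "7f6e2000-7f6f3000 r-xp ...";
//   uptr beg = ParseHex(&p);  // beg == 0x7f6e2000, p now points at '-'
//   p++;                      // skip the separator
//   uptr end = ParseHex(&p);  // end == 0x7f6f3000, p now at the space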
static uptr ParseNumber(const char **p, int base) { uptr n = 0; int d; CHECK(base >= 2 && base <= 16); while ((d = TranslateDigit(**p)) >= 0 && d < base) { n = n * base + d; (*p)++; } return n; } bool IsDecimal(char c) { int d = TranslateDigit(c); return d >= 0 && d < 10; } uptr ParseDecimal(const char **p) { return ParseNumber(p, 10); } bool IsHex(char c) { int d = TranslateDigit(c); return d >= 0 && d < 16; } uptr ParseHex(const char **p) { return ParseNumber(p, 16); } MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) { ReadProcMaps(&proc_self_maps_); if (cache_enabled) { if (proc_self_maps_.mmaped_size == 0) { LoadFromCache(); CHECK_GT(proc_self_maps_.len, 0); } } else { CHECK_GT(proc_self_maps_.mmaped_size, 0); } Reset(); // FIXME: in the future we may want to cache the mappings on demand only. if (cache_enabled) CacheMemoryMappings(); } MemoryMappingLayout::~MemoryMappingLayout() { // Only unmap the buffer if it is different from the cached one. Otherwise // it will be unmapped when the cache is refreshed. if (proc_self_maps_.data != cached_proc_self_maps_.data) { UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size); } } void MemoryMappingLayout::Reset() { current_ = proc_self_maps_.data; } // static void MemoryMappingLayout::CacheMemoryMappings() { SpinMutexLock l(&cache_lock_); // Don't invalidate the cache if the mappings are unavailable. ProcSelfMapsBuff old_proc_self_maps; old_proc_self_maps = cached_proc_self_maps_; ReadProcMaps(&cached_proc_self_maps_); if (cached_proc_self_maps_.mmaped_size == 0) { cached_proc_self_maps_ = old_proc_self_maps; } else { if (old_proc_self_maps.mmaped_size) { UnmapOrDie(old_proc_self_maps.data, old_proc_self_maps.mmaped_size); } } } void MemoryMappingLayout::LoadFromCache() { SpinMutexLock l(&cache_lock_); if (cached_proc_self_maps_.data) { proc_self_maps_ = cached_proc_self_maps_; } } void MemoryMappingLayout::DumpListOfModules( InternalMmapVector<LoadedModule> *modules) { Reset(); - uptr cur_beg, cur_end, cur_offset, prot; InternalScopedString module_name(kMaxPathLength); - for (uptr i = 0; Next(&cur_beg, &cur_end, &cur_offset, module_name.data(), - module_name.size(), &prot); - i++) { - const char *cur_name = module_name.data(); + MemoryMappedSegment segment(module_name.data(), module_name.size()); + for (uptr i = 0; Next(&segment); i++) { + const char *cur_name = segment.filename; if (cur_name[0] == '\0') continue; // Don't subtract 'cur_beg' from the first entry: // * If a binary is compiled w/o -pie, then the first entry in // process maps is likely the binary itself (all dynamic libs // are mapped higher in address space). For such a binary, // instruction offset in binary coincides with the actual // instruction address in virtual memory (as code section // is mapped to a fixed memory range). // * If a binary is compiled with -pie, all the modules are // mapped high at address space (in particular, higher than // shadow memory of the tool), so the module can't be the // first entry. - uptr base_address = (i ? cur_beg : 0) - cur_offset; + uptr base_address = (i ?
segment.start : 0) - segment.offset; LoadedModule cur_module; cur_module.set(cur_name, base_address); - cur_module.addAddressRange(cur_beg, cur_end, prot & kProtectionExecute, - prot & kProtectionWrite); + cur_module.addAddressRange(segment.start, segment.end, + segment.IsExecutable(), segment.IsWritable()); modules->push_back(cur_module); } } void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { char *smaps = nullptr; uptr smaps_cap = 0; uptr smaps_len = 0; if (!ReadFileToBuffer("/proc/self/smaps", &smaps, &smaps_cap, &smaps_len)) return; uptr start = 0; bool file = false; const char *pos = smaps; while (pos < smaps + smaps_len) { if (IsHex(pos[0])) { start = ParseHex(&pos); for (; *pos != '/' && *pos > '\n'; pos++) {} file = *pos == '/'; } else if (internal_strncmp(pos, "Rss:", 4) == 0) { while (!IsDecimal(*pos)) pos++; uptr rss = ParseDecimal(&pos) * 1024; cb(start, rss, file, stats, stats_size); } while (*pos++ != '\n') {} } UnmapOrDie(smaps, smaps_cap); } } // namespace __sanitizer #endif // SANITIZER_FREEBSD || SANITIZER_LINUX Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc (revision 320961) @@ -1,90 +1,81 @@ //===-- sanitizer_procmaps_freebsd.cc -------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Information about the process mappings (FreeBSD-specific parts). //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_FREEBSD #include "sanitizer_common.h" #include "sanitizer_freebsd.h" #include "sanitizer_procmaps.h" #include <unistd.h> #include <sys/sysctl.h> #include <sys/user.h> // Fix 'kinfo_vmentry' definition on FreeBSD prior v9.2 in 32-bit mode.
#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) # include <osreldate.h> # if __FreeBSD_version <= 902001 // v9.2 # define kinfo_vmentry xkinfo_vmentry # endif #endif namespace __sanitizer { void ReadProcMaps(ProcSelfMapsBuff *proc_maps) { const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() }; size_t Size = 0; int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0); CHECK_EQ(Err, 0); CHECK_GT(Size, 0); size_t MmapedSize = Size * 4 / 3; void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()"); Size = MmapedSize; Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0); CHECK_EQ(Err, 0); proc_maps->data = (char*)VmMap; proc_maps->mmaped_size = MmapedSize; proc_maps->len = Size; } -bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size, - uptr *protection, ModuleArch *arch, u8 *uuid) { - CHECK(!arch && "not implemented"); - CHECK(!uuid && "not implemented"); +bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) { char *last = proc_self_maps_.data + proc_self_maps_.len; if (current_ >= last) return false; - uptr dummy; - if (!start) start = &dummy; - if (!end) end = &dummy; - if (!offset) offset = &dummy; - if (!protection) protection = &dummy; struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_; - *start = (uptr)VmEntry->kve_start; - *end = (uptr)VmEntry->kve_end; - *offset = (uptr)VmEntry->kve_offset; + segment->start = (uptr)VmEntry->kve_start; + segment->end = (uptr)VmEntry->kve_end; + segment->offset = (uptr)VmEntry->kve_offset; - *protection = 0; + segment->protection = 0; if ((VmEntry->kve_protection & KVME_PROT_READ) != 0) - *protection |= kProtectionRead; + segment->protection |= kProtectionRead; if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0) - *protection |= kProtectionWrite; + segment->protection |= kProtectionWrite; if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0) - *protection |= kProtectionExecute; + segment->protection |= kProtectionExecute; - if (filename != NULL && filename_size > 0) { - internal_snprintf(filename, - Min(filename_size, (uptr)PATH_MAX), - "%s", VmEntry->kve_path); + if (segment->filename != NULL && segment->filename_size > 0) { + internal_snprintf(segment->filename, + Min(segment->filename_size, (uptr)PATH_MAX), "%s", + VmEntry->kve_path); } current_ += VmEntry->kve_structsize; return true; } } // namespace __sanitizer #endif // SANITIZER_FREEBSD Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_linux.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_linux.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_linux.cc (revision 320961) @@ -1,92 +1,77 @@ //===-- sanitizer_procmaps_linux.cc ---------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Information about the process mappings (Linux-specific parts).
//===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_procmaps.h" namespace __sanitizer { void ReadProcMaps(ProcSelfMapsBuff *proc_maps) { ReadFileToBuffer("/proc/self/maps", &proc_maps->data, &proc_maps->mmaped_size, &proc_maps->len); } static bool IsOneOf(char c, char c1, char c2) { return c == c1 || c == c2; } -bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size, - uptr *protection, ModuleArch *arch, u8 *uuid) { - CHECK(!arch && "not implemented"); - CHECK(!uuid && "not implemented"); +bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) { char *last = proc_self_maps_.data + proc_self_maps_.len; if (current_ >= last) return false; - uptr dummy; - if (!start) start = &dummy; - if (!end) end = &dummy; - if (!offset) offset = &dummy; - if (!protection) protection = &dummy; char *next_line = (char*)internal_memchr(current_, '\n', last - current_); if (next_line == 0) next_line = last; // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar - *start = ParseHex(&current_); + segment->start = ParseHex(&current_); CHECK_EQ(*current_++, '-'); - *end = ParseHex(&current_); + segment->end = ParseHex(&current_); CHECK_EQ(*current_++, ' '); CHECK(IsOneOf(*current_, '-', 'r')); - *protection = 0; - if (*current_++ == 'r') - *protection |= kProtectionRead; + segment->protection = 0; + if (*current_++ == 'r') segment->protection |= kProtectionRead; CHECK(IsOneOf(*current_, '-', 'w')); - if (*current_++ == 'w') - *protection |= kProtectionWrite; + if (*current_++ == 'w') segment->protection |= kProtectionWrite; CHECK(IsOneOf(*current_, '-', 'x')); - if (*current_++ == 'x') - *protection |= kProtectionExecute; + if (*current_++ == 'x') segment->protection |= kProtectionExecute; CHECK(IsOneOf(*current_, 's', 'p')); - if (*current_++ == 's') - *protection |= kProtectionShared; + if (*current_++ == 's') segment->protection |= kProtectionShared; CHECK_EQ(*current_++, ' '); - *offset = ParseHex(&current_); + segment->offset = ParseHex(&current_); CHECK_EQ(*current_++, ' '); ParseHex(&current_); CHECK_EQ(*current_++, ':'); ParseHex(&current_); CHECK_EQ(*current_++, ' '); while (IsDecimal(*current_)) current_++; // Qemu may lack the trailing space. // https://github.com/google/sanitizers/issues/160 // CHECK_EQ(*current_++, ' '); // Skip spaces. while (current_ < next_line && *current_ == ' ') current_++; // Fill in the filename. - uptr i = 0; - while (current_ < next_line) { - if (filename && i < filename_size - 1) - filename[i++] = *current_; - current_++; + if (segment->filename) { + uptr len = Min((uptr)(next_line - current_), segment->filename_size - 1); + internal_strncpy(segment->filename, current_, len); + segment->filename[len] = 0; } - if (filename && i < filename_size) - filename[i] = 0; + current_ = next_line + 1; return true; } } // namespace __sanitizer #endif // SANITIZER_LINUX Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_mac.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_mac.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_mac.cc (revision 320961) @@ -1,345 +1,314 @@ //===-- sanitizer_procmaps_mac.cc -----------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License.
See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Information about the process mappings (Mac-specific parts). //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_MAC #include "sanitizer_common.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" #include <mach-o/dyld.h> #include <mach-o/loader.h> #include <mach/mach.h> // These are not available in older macOS SDKs. #ifndef CPU_SUBTYPE_X86_64_H #define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */ #endif #ifndef CPU_SUBTYPE_ARM_V7S #define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */ #endif #ifndef CPU_SUBTYPE_ARM_V7K #define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12) #endif #ifndef CPU_TYPE_ARM64 #define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64) #endif namespace __sanitizer { MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) { Reset(); } MemoryMappingLayout::~MemoryMappingLayout() { } // More information about Mach-O headers can be found in mach-o/loader.h // Each Mach-O image has a header (mach_header or mach_header_64) starting with // a magic number, and a list of linker load commands directly following the // header. // A load command is at least two 32-bit words: the command type and the // command size in bytes. We're interested only in segment load commands // (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped // into the task's address space. // The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or // segment_command_64 correspond to the memory address, memory size and the // file offset of the current memory segment. // Because these fields are taken from the images as is, one needs to add // _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime. void MemoryMappingLayout::Reset() { // Count down from the top. // TODO(glider): as per man 3 dyld, iterating over the headers with // _dyld_image_count is thread-unsafe. We need to register callbacks for // adding and removing images which will invalidate the MemoryMappingLayout // state. current_image_ = _dyld_image_count(); current_load_cmd_count_ = -1; current_load_cmd_addr_ = 0; current_magic_ = 0; current_filetype_ = 0; current_arch_ = kModuleArchUnknown; internal_memset(current_uuid_, 0, kModuleUUIDSize); } // The dyld load address should be unchanged throughout process execution, // and it is expensive to compute once many libraries have been loaded, // so cache it here and do not reset. static mach_header *dyld_hdr = 0; static const char kDyldPath[] = "/usr/lib/dyld"; static const int kDyldImageIdx = -1; // static void MemoryMappingLayout::CacheMemoryMappings() { // No-op on Mac for now. } void MemoryMappingLayout::LoadFromCache() { // No-op on Mac for now. } +// _dyld_get_image_header() and related APIs don't report dyld itself. +// We work around this by manually recursing through the memory map +// until we hit a Mach header matching dyld instead. These recurse +// calls are expensive, but the first memory map generation occurs +// early in the process, when dyld is one of the only images loaded, +// so it will be hit after only a few iterations.
+static mach_header *get_dyld_image_header() { + mach_port_name_t port; + if (task_for_pid(mach_task_self(), internal_getpid(), &port) != + KERN_SUCCESS) { + return nullptr; + } + + unsigned depth = 1; + vm_size_t size = 0; + vm_address_t address = 0; + kern_return_t err = KERN_SUCCESS; + mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; + + while (true) { + struct vm_region_submap_info_64 info; + err = vm_region_recurse_64(port, &address, &size, &depth, + (vm_region_info_t)&info, &count); + if (err != KERN_SUCCESS) return nullptr; + + if (size >= sizeof(mach_header) && info.protection & kProtectionRead) { + mach_header *hdr = (mach_header *)address; + if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) && + hdr->filetype == MH_DYLINKER) { + return hdr; + } + } + address += size; + } +} + +const mach_header *get_dyld_hdr() { + if (!dyld_hdr) dyld_hdr = get_dyld_image_header(); + + return dyld_hdr; +} + // Next and NextSegmentLoad were inspired by base/sysinfo.cc in // Google Perftools, https://github.com/gperftools/gperftools. // NextSegmentLoad scans the current image for the next segment load command // and returns the start and end addresses and file offset of the corresponding // segment. // Note that the segment addresses are not necessarily sorted. template <u32 kLCSegment, typename SegmentCommand> -bool MemoryMappingLayout::NextSegmentLoad(uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size, - ModuleArch *arch, u8 *uuid, - uptr *protection) { +bool MemoryMappingLayout::NextSegmentLoad(MemoryMappedSegment *segment) { const char *lc = current_load_cmd_addr_; current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize; if (((const load_command *)lc)->cmd == kLCSegment) { const SegmentCommand* sc = (const SegmentCommand *)lc; - GetSegmentAddrRange(start, end, sc->vmaddr, sc->vmsize); - if (protection) { - // Return the initial protection. - *protection = sc->initprot; + + if (current_image_ == kDyldImageIdx) { + // vmaddr is masked with 0xfffff because on macOS versions < 10.12, + // it contains an absolute address rather than an offset for dyld. + // To make matters even more complicated, this absolute address + // isn't actually the absolute segment address, but the offset portion + // of the address is accurate when combined with the dyld base address, + // and the mask will give just this offset. + segment->start = (sc->vmaddr & 0xfffff) + (uptr)get_dyld_hdr(); + segment->end = (sc->vmaddr & 0xfffff) + sc->vmsize + (uptr)get_dyld_hdr(); + } else { + const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_); + segment->start = sc->vmaddr + dlloff; + segment->end = sc->vmaddr + sc->vmsize + dlloff; } - if (offset) { - if (current_filetype_ == /*MH_EXECUTE*/ 0x2) { - *offset = sc->vmaddr; - } else { - *offset = sc->fileoff; - } + + // Return the initial protection. + segment->protection = sc->initprot; + segment->offset = + (current_filetype_ == /*MH_EXECUTE*/ 0x2) ? sc->vmaddr : sc->fileoff; + if (segment->filename) { + const char *src = (current_image_ == kDyldImageIdx) + ?
kDyldPath + : _dyld_get_image_name(current_image_); + internal_strncpy(segment->filename, src, segment->filename_size); } - if (filename) { - if (current_image_ == kDyldImageIdx) { - internal_strncpy(filename, kDyldPath, filename_size); - } else { - internal_strncpy(filename, _dyld_get_image_name(current_image_), - filename_size); - } - } - if (arch) { - *arch = current_arch_; - } - if (uuid) { - internal_memcpy(uuid, current_uuid_, kModuleUUIDSize); - } + segment->arch = current_arch_; + internal_memcpy(segment->uuid, current_uuid_, kModuleUUIDSize); return true; } return false; } ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) { cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK; switch (cputype) { case CPU_TYPE_I386: return kModuleArchI386; case CPU_TYPE_X86_64: if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64; if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H; CHECK(0 && "Invalid subtype of x86_64"); return kModuleArchUnknown; case CPU_TYPE_ARM: if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6; if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7; if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S; if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K; CHECK(0 && "Invalid subtype of ARM"); return kModuleArchUnknown; case CPU_TYPE_ARM64: return kModuleArchARM64; default: CHECK(0 && "Invalid CPU type"); return kModuleArchUnknown; } } static const load_command *NextCommand(const load_command *lc) { return (const load_command *)((char *)lc + lc->cmdsize); } static void FindUUID(const load_command *first_lc, u8 *uuid_output) { for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) { if (lc->cmd != LC_UUID) continue; const uuid_command *uuid_lc = (const uuid_command *)lc; const uint8_t *uuid = &uuid_lc->uuid[0]; internal_memcpy(uuid_output, uuid, kModuleUUIDSize); return; } } static bool IsModuleInstrumented(const load_command *first_lc) { for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) { if (lc->cmd != LC_LOAD_DYLIB) continue; const dylib_command *dylib_lc = (const dylib_command *)lc; uint32_t dylib_name_offset = dylib_lc->dylib.name.offset; const char *dylib_name = ((const char *)dylib_lc) + dylib_name_offset; dylib_name = StripModuleName(dylib_name); if (dylib_name != 0 && (internal_strstr(dylib_name, "libclang_rt."))) { return true; } } return false; } -// _dyld_get_image_header() and related APIs don't report dyld itself. -// We work around this by manually recursing through the memory map -// until we hit a Mach header matching dyld instead. These recurse -// calls are expensive, but the first memory map generation occurs -// early in the process, when dyld is one of the only images loaded, -// so it will be hit after only a few iterations. 
-static mach_header *get_dyld_image_header() { - mach_port_name_t port; - if (task_for_pid(mach_task_self(), internal_getpid(), &port) != - KERN_SUCCESS) { - return nullptr; - } - - unsigned depth = 1; - vm_size_t size = 0; - vm_address_t address = 0; - kern_return_t err = KERN_SUCCESS; - mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; - - while (true) { - struct vm_region_submap_info_64 info; - err = vm_region_recurse_64(port, &address, &size, &depth, - (vm_region_info_t)&info, &count); - if (err != KERN_SUCCESS) return nullptr; - - if (size >= sizeof(mach_header) && - info.protection & MemoryMappingLayout::kProtectionRead) { - mach_header *hdr = (mach_header *)address; - if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) && - hdr->filetype == MH_DYLINKER) { - return hdr; - } - } - address += size; - } -} - -const mach_header *get_dyld_hdr() { - if (!dyld_hdr) dyld_hdr = get_dyld_image_header(); - - return dyld_hdr; -} - -void MemoryMappingLayout::GetSegmentAddrRange(uptr *start, uptr *end, - uptr vmaddr, uptr vmsize) { - if (current_image_ == kDyldImageIdx) { - // vmaddr is masked with 0xfffff because on macOS versions < 10.12, - // it contains an absolute address rather than an offset for dyld. - // To make matters even more complicated, this absolute address - // isn't actually the absolute segment address, but the offset portion - // of the address is accurate when combined with the dyld base address, - // and the mask will give just this offset. - if (start) *start = (vmaddr & 0xfffff) + (uptr)get_dyld_hdr(); - if (end) *end = (vmaddr & 0xfffff) + vmsize + (uptr)get_dyld_hdr(); - } else { - const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_); - if (start) *start = vmaddr + dlloff; - if (end) *end = vmaddr + vmsize + dlloff; - } -} - -bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size, - uptr *protection, ModuleArch *arch, u8 *uuid) { +bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) { for (; current_image_ >= kDyldImageIdx; current_image_--) { const mach_header *hdr = (current_image_ == kDyldImageIdx) ? get_dyld_hdr() : _dyld_get_image_header(current_image_); if (!hdr) continue; if (current_load_cmd_count_ < 0) { // Set up for this image; current_load_cmd_count_ = hdr->ncmds; current_magic_ = hdr->magic; current_filetype_ = hdr->filetype; current_arch_ = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype); switch (current_magic_) { #ifdef MH_MAGIC_64 case MH_MAGIC_64: { current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64); break; } #endif case MH_MAGIC: { current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header); break; } default: { continue; } } FindUUID((const load_command *)current_load_cmd_addr_, &current_uuid_[0]); current_instrumented_ = IsModuleInstrumented((const load_command *)current_load_cmd_addr_); } for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) { switch (current_magic_) { // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64. #ifdef MH_MAGIC_64 case MH_MAGIC_64: { if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>( - start, end, offset, filename, filename_size, arch, uuid, - protection)) + segment)) return true; break; } #endif case MH_MAGIC: { - if (NextSegmentLoad<LC_SEGMENT, struct segment_command>( - start, end, offset, filename, filename_size, arch, uuid, - protection)) + if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(segment)) return true; break; } } } // If we get here, no more load_cmd's in this image talk about // segments. Go on to the next image.
} return false; } void MemoryMappingLayout::DumpListOfModules( InternalMmapVector<LoadedModule> *modules) { Reset(); - uptr cur_beg, cur_end, prot; - ModuleArch cur_arch; - u8 cur_uuid[kModuleUUIDSize]; InternalScopedString module_name(kMaxPathLength); - for (uptr i = 0; Next(&cur_beg, &cur_end, 0, module_name.data(), - module_name.size(), &prot, &cur_arch, &cur_uuid[0]); - i++) { - const char *cur_name = module_name.data(); - if (cur_name[0] == '\0') - continue; + MemoryMappedSegment segment(module_name.data(), kMaxPathLength); + for (uptr i = 0; Next(&segment); i++) { + if (segment.filename[0] == '\0') continue; LoadedModule *cur_module = nullptr; if (!modules->empty() && - 0 == internal_strcmp(cur_name, modules->back().full_name())) { + 0 == internal_strcmp(segment.filename, modules->back().full_name())) { cur_module = &modules->back(); } else { modules->push_back(LoadedModule()); cur_module = &modules->back(); - cur_module->set(cur_name, cur_beg, cur_arch, cur_uuid, - current_instrumented_); + cur_module->set(segment.filename, segment.start, segment.arch, + segment.uuid, current_instrumented_); } - cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute, - prot & kProtectionWrite); + cur_module->addAddressRange(segment.start, segment.end, + segment.IsExecutable(), segment.IsWritable()); } } } // namespace __sanitizer #endif // SANITIZER_MAC Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc (revision 320961) @@ -1,142 +1,143 @@ //===-- sanitizer_stacktrace_libcdep.cc -----------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries. //===----------------------------------------------------------------------===// #include "sanitizer_common.h" #include "sanitizer_placement_new.h" #include "sanitizer_stacktrace.h" #include "sanitizer_stacktrace_printer.h" #include "sanitizer_symbolizer.h" namespace __sanitizer { void StackTrace::Print() const { if (trace == nullptr || size == 0) { Printf(" <empty stack>\n\n"); return; } InternalScopedString frame_desc(GetPageSizeCached() * 2); InternalScopedString dedup_token(GetPageSizeCached()); int dedup_frames = common_flags()->dedup_token_length; uptr frame_num = 0; for (uptr i = 0; i < size && trace[i]; i++) { // PCs in stack traces are actually the return addresses, that is, // addresses of the next instructions after the call.
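// (A hedged aside: on x86, for instance, a call pushes the address of the
// instruction that follows it, so the return address recorded in trace[i]
// points one instruction past the call site. Stepping back even a single
// byte, e.g. uptr call_pc = trace[i] - 1;, lands inside the call instruction
// itself, which is the effect GetPreviousInstructionPc approximates below.)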
uptr pc = GetPreviousInstructionPc(trace[i]); SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc); CHECK(frames); for (SymbolizedStack *cur = frames; cur; cur = cur->next) { frame_desc.clear(); RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++, cur->info, common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix); Printf("%s\n", frame_desc.data()); if (dedup_frames-- > 0) { if (dedup_token.length()) dedup_token.append("--"); - dedup_token.append(cur->info.function); + if (cur->info.function != nullptr) + dedup_token.append(cur->info.function); } } frames->ClearAll(); } // Always print a trailing empty line after stack trace. Printf("\n"); if (dedup_token.length()) Printf("DEDUP_TOKEN: %s\n", dedup_token.data()); } void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top, uptr stack_bottom, bool request_fast_unwind) { top_frame_bp = (max_depth > 0) ? bp : 0; // Avoid doing any work for small max_depth. if (max_depth == 0) { size = 0; return; } if (max_depth == 1) { size = 1; trace_buffer[0] = pc; return; } if (!WillUseFastUnwind(request_fast_unwind)) { #if SANITIZER_CAN_SLOW_UNWIND if (context) SlowUnwindStackWithContext(pc, context, max_depth); else SlowUnwindStack(pc, max_depth); #else UNREACHABLE("slow unwind requested but not available"); #endif } else { FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth); } } static int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len, uptr *pc_offset) { const char *found_module_name = nullptr; bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC( pc, &found_module_name, pc_offset); if (!ok) return false; if (module_name && module_name_len) { internal_strncpy(module_name, found_module_name, module_name_len); module_name[module_name_len - 1] = '\x00'; } return true; } } // namespace __sanitizer using namespace __sanitizer; extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf, uptr out_buf_size) { if (!out_buf_size) return; pc = StackTrace::GetPreviousInstructionPc(pc); SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc); if (!frame) { internal_strncpy(out_buf, "<can't symbolize>", out_buf_size); out_buf[out_buf_size - 1] = 0; return; } InternalScopedString frame_desc(GetPageSizeCached()); RenderFrame(&frame_desc, fmt, 0, frame->info, common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix); internal_strncpy(out_buf, frame_desc.data(), out_buf_size); out_buf[out_buf_size - 1] = 0; } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_symbolize_global(uptr data_addr, const char *fmt, char *out_buf, uptr out_buf_size) { if (!out_buf_size) return; out_buf[0] = 0; DataInfo DI; if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return; InternalScopedString data_desc(GetPageSizeCached()); RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix); internal_strncpy(out_buf, data_desc.data(), out_buf_size); out_buf[out_buf_size - 1] = 0; } SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_module_and_offset_for_pc( // NOLINT uptr pc, char *module_name, uptr module_name_len, uptr *pc_offset) { return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len, pc_offset); } } // extern "C" Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_win.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_win.cc (revision 320960) +++
vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_win.cc (revision 320961) @@ -1,1029 +1,1030 @@ //===-- sanitizer_win.cc --------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries and implements windows-specific functions from // sanitizer_libc.h. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" #if SANITIZER_WINDOWS #define WIN32_LEAN_AND_MEAN #define NOGDI #include <windows.h> #include <io.h> #include <psapi.h> #include <stdlib.h> #include "sanitizer_common.h" #include "sanitizer_dbghelp.h" #include "sanitizer_libc.h" #include "sanitizer_mutex.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" #include "sanitizer_symbolizer.h" #include "sanitizer_win_defs.h" // A macro to tell the compiler that this part of the code cannot be reached, // if the compiler supports this feature. Since we're using this in // code that is called when terminating the process, the expansion of the // macro should not terminate the process to avoid infinite recursion. #if defined(__clang__) # define BUILTIN_UNREACHABLE() __builtin_unreachable() #elif defined(__GNUC__) && \ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)) # define BUILTIN_UNREACHABLE() __builtin_unreachable() #elif defined(_MSC_VER) # define BUILTIN_UNREACHABLE() __assume(0) #else # define BUILTIN_UNREACHABLE() #endif namespace __sanitizer { #include "sanitizer_syscall_generic.inc" // --------------------- sanitizer_common.h uptr GetPageSize() { SYSTEM_INFO si; GetSystemInfo(&si); return si.dwPageSize; } uptr GetMmapGranularity() { SYSTEM_INFO si; GetSystemInfo(&si); return si.dwAllocationGranularity; } uptr GetMaxVirtualAddress() { SYSTEM_INFO si; GetSystemInfo(&si); return (uptr)si.lpMaximumApplicationAddress; } bool FileExists(const char *filename) { return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES; } uptr internal_getpid() { return GetProcessId(GetCurrentProcess()); } // In contrast to POSIX, on Windows GetCurrentThreadId() // returns a system-unique identifier. tid_t GetTid() { return GetCurrentThreadId(); } uptr GetThreadSelf() { return GetTid(); } #if !SANITIZER_GO void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom) { CHECK(stack_top); CHECK(stack_bottom); MEMORY_BASIC_INFORMATION mbi; CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0); // FIXME: is it possible for the stack to not be a single allocation? // Are these values what ASan expects to get (reserved, not committed; // including stack guard page) ? *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize; *stack_bottom = (uptr)mbi.AllocationBase; } #endif // #if !SANITIZER_GO void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) { void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (rv == 0) ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError(), raw_report); return rv; } void UnmapOrDie(void *addr, uptr size) { if (!size || !addr) return; MEMORY_BASIC_INFORMATION mbi; CHECK(VirtualQuery(addr, &mbi, sizeof(mbi))); // MEM_RELEASE can only be used to unmap whole regions previously mapped with // VirtualAlloc.
So we first try MEM_RELEASE since it is better, and if that // fails try MEM_DECOMMIT. if (VirtualFree(addr, 0, MEM_RELEASE) == 0) { if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) { Report("ERROR: %s failed to " "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n", SanitizerToolName, size, size, addr, GetLastError()); CHECK("unable to unmap" && 0); } } } static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type, const char *mmap_type) { error_t last_error = GetLastError(); if (last_error == ERROR_NOT_ENOUGH_MEMORY) return nullptr; ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error); } void *MmapOrDieOnFatalError(uptr size, const char *mem_type) { void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (rv == 0) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate"); return rv; } // We want to map a chunk of address space aligned to 'alignment'. void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, const char *mem_type) { CHECK(IsPowerOfTwo(size)); CHECK(IsPowerOfTwo(alignment)); // Windows will align our allocations to at least 64K. alignment = Max(alignment, GetMmapGranularity()); uptr mapped_addr = (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (!mapped_addr) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); // If we got it right on the first try, return. Otherwise, unmap it and go to // the slow path. if (IsAligned(mapped_addr, alignment)) return (void*)mapped_addr; if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0) ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError()); // If we didn't get an aligned address, overallocate, find an aligned address, // unmap, and try to allocate at that aligned address. int retries = 0; const int kMaxRetries = 10; for (; retries < kMaxRetries && (mapped_addr == 0 || !IsAligned(mapped_addr, alignment)); retries++) { // Overallocate size + alignment bytes. mapped_addr = (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS); if (!mapped_addr) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); // Find the aligned address. uptr aligned_addr = RoundUpTo(mapped_addr, alignment); // Free the overallocation. if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0) ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError()); // Attempt to allocate exactly the number of bytes we need at the aligned // address. This may fail for a number of reasons, in which case we continue // the loop. mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); } // Fail if we can't make this work quickly. if (retries == kMaxRetries && mapped_addr == 0) return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned"); return (void *)mapped_addr; } void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { // FIXME: is this really "NoReserve"? On Win32 this does not matter much, // but on Win64 it does. (void)name; // unsupported #if !SANITIZER_GO && SANITIZER_WINDOWS64 // On asan/Windows64, use MEM_COMMIT would result in error // 1455:ERROR_COMMITMENT_LIMIT. // Asan uses exception handler to commit page on demand. 
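// (Hedged sketch of that reserve-then-commit-on-fault pattern; the handler
// name is illustrative and this is simplified, e.g. it skips checking the
// exception code for EXCEPTION_ACCESS_VIOLATION:
//   LONG WINAPI ShadowFaultHandler(EXCEPTION_POINTERS *info) {
//     void *addr = (void *)info->ExceptionRecord->ExceptionInformation[1];
//     return VirtualAlloc(addr, 1, MEM_COMMIT, PAGE_READWRITE)
//                ? EXCEPTION_CONTINUE_EXECUTION
//                : EXCEPTION_CONTINUE_SEARCH;
//   }
// registered up front via AddVectoredExceptionHandler.)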
void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE); #else void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); #endif if (p == 0) Report("ERROR: %s failed to " "allocate %p (%zd) bytes at %p (error code: %d)\n", SanitizerToolName, size, size, fixed_addr, GetLastError()); return p; } // Memory space mapped by 'MmapFixedOrDie' must have been reserved by // 'MmapFixedNoAccess'. void *MmapFixedOrDie(uptr fixed_addr, uptr size) { void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_COMMIT, PAGE_READWRITE); if (p == 0) { char mem_type[30]; internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx", fixed_addr); ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError()); } return p; } void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) { void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_COMMIT, PAGE_READWRITE); if (p == 0) { char mem_type[30]; internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx", fixed_addr); return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate"); } return p; } void *MmapNoReserveOrDie(uptr size, const char *mem_type) { // FIXME: make this really NoReserve? return MmapOrDie(size, mem_type); } void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) { (void)name; // unsupported void *res = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_NOACCESS); if (res == 0) Report("WARNING: %s failed to " "mprotect %p (%zd) bytes at %p (error code: %d)\n", SanitizerToolName, size, size, fixed_addr, GetLastError()); return res; } void *MmapNoAccess(uptr size) { void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS); if (res == 0) Report("WARNING: %s failed to " "mprotect %p (%zd) bytes (error code: %d)\n", SanitizerToolName, size, size, GetLastError()); return res; } bool MprotectNoAccess(uptr addr, uptr size) { DWORD old_protection; return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection); } void ReleaseMemoryPagesToOS(uptr beg, uptr end) { // This is almost useless on 32-bits. // FIXME: add madvise-analog when we move to 64-bits. } void NoHugePagesInRegion(uptr addr, uptr size) { // FIXME: probably similar to ReleaseMemoryToOS. } void DontDumpShadowMemory(uptr addr, uptr length) { // This is almost useless on 32-bits. // FIXME: add madvise-analog when we move to 64-bits. } -uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) { +uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, + uptr *largest_gap_found) { uptr address = 0; while (true) { MEMORY_BASIC_INFORMATION info; if (!::VirtualQuery((void*)address, &info, sizeof(info))) return 0; if (info.State == MEM_FREE) { uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding, alignment); if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize) return shadow_address; } // Move to the next region. 
address = (uptr)info.BaseAddress + info.RegionSize; } return 0; } bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { MEMORY_BASIC_INFORMATION mbi; CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi))); return mbi.Protect == PAGE_NOACCESS && (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end; } void *MapFileToMemory(const char *file_name, uptr *buff_size) { UNIMPLEMENTED(); } void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) { UNIMPLEMENTED(); } static const int kMaxEnvNameLength = 128; static const DWORD kMaxEnvValueLength = 32767; namespace { struct EnvVariable { char name[kMaxEnvNameLength]; char value[kMaxEnvValueLength]; }; } // namespace static const int kEnvVariables = 5; static EnvVariable env_vars[kEnvVariables]; static int num_env_vars; const char *GetEnv(const char *name) { // Note: this implementation caches the values of the environment variables // and limits their quantity. for (int i = 0; i < num_env_vars; i++) { if (0 == internal_strcmp(name, env_vars[i].name)) return env_vars[i].value; } CHECK_LT(num_env_vars, kEnvVariables); DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value, kMaxEnvValueLength); if (rv > 0 && rv < kMaxEnvValueLength) { CHECK_LT(internal_strlen(name), kMaxEnvNameLength); internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength); num_env_vars++; return env_vars[num_env_vars - 1].value; } return 0; } const char *GetPwd() { UNIMPLEMENTED(); } u32 GetUid() { UNIMPLEMENTED(); } namespace { struct ModuleInfo { const char *filepath; uptr base_address; uptr end_address; }; #if !SANITIZER_GO int CompareModulesBase(const void *pl, const void *pr) { const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr; if (l->base_address < r->base_address) return -1; return l->base_address > r->base_address; } #endif } // namespace #if !SANITIZER_GO void DumpProcessMap() { Report("Dumping process modules:\n"); ListOfModules modules; modules.init(); uptr num_modules = modules.size(); InternalScopedBuffer<ModuleInfo> module_infos(num_modules); for (size_t i = 0; i < num_modules; ++i) { module_infos[i].filepath = modules[i].full_name(); module_infos[i].base_address = modules[i].ranges().front()->beg; module_infos[i].end_address = modules[i].ranges().back()->end; } qsort(module_infos.data(), num_modules, sizeof(ModuleInfo), CompareModulesBase); for (size_t i = 0; i < num_modules; ++i) { const ModuleInfo &mi = module_infos[i]; if (mi.end_address != 0) { Printf("\t%p-%p %s\n", mi.base_address, mi.end_address, mi.filepath[0] ? mi.filepath : "[no name]"); } else if (mi.filepath[0]) { Printf("\t??\?-??? %s\n", mi.filepath); } else { Printf("\t???\n"); } } } #endif void PrintModuleMap() { } void DisableCoreDumperIfNecessary() { // Do nothing. } void ReExec() { UNIMPLEMENTED(); } void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) { } bool StackSizeIsUnlimited() { UNIMPLEMENTED(); } void SetStackSizeLimitInBytes(uptr limit) { UNIMPLEMENTED(); } bool AddressSpaceIsUnlimited() { UNIMPLEMENTED(); } void SetAddressSpaceUnlimited() { UNIMPLEMENTED(); } bool IsPathSeparator(const char c) { return c == '\\' || c == '/'; } bool IsAbsolutePath(const char *path) { UNIMPLEMENTED(); } void SleepForSeconds(int seconds) { Sleep(seconds * 1000); } void SleepForMillis(int millis) { Sleep(millis); } u64 NanoTime() { return 0; } void Abort() { internal__exit(3); } #if !SANITIZER_GO // Read the file to extract the ImageBase field from the PE header.
If ASLR is // disabled and this virtual address is available, the loader will typically // load the image at this address. Therefore, we call it the preferred base. Any // addresses in the DWARF typically assume that the object has been loaded at // this address. static uptr GetPreferredBase(const char *modname) { fd_t fd = OpenFile(modname, RdOnly, nullptr); if (fd == kInvalidFd) return 0; FileCloser closer(fd); // Read just the DOS header. IMAGE_DOS_HEADER dos_header; uptr bytes_read; if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) || bytes_read != sizeof(dos_header)) return 0; // The file should start with the right signature. if (dos_header.e_magic != IMAGE_DOS_SIGNATURE) return 0; // The layout at e_lfanew is: // "PE\0\0" // IMAGE_FILE_HEADER // IMAGE_OPTIONAL_HEADER // Seek to e_lfanew and read all that data. char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)]; if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) == INVALID_SET_FILE_POINTER) return 0; if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) || bytes_read != sizeof(buf)) return 0; // Check for "PE\0\0" before the PE header. char *pe_sig = &buf[0]; if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0) return 0; // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted. IMAGE_OPTIONAL_HEADER *pe_header = (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER)); // Check for more magic in the PE header. if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC) return 0; // Finally, return the ImageBase. return (uptr)pe_header->ImageBase; } void ListOfModules::init() { clear(); HANDLE cur_process = GetCurrentProcess(); // Query the list of modules. Start by assuming there are no more than 256 // modules and retry if that's not sufficient. HMODULE *hmodules = 0; uptr modules_buffer_size = sizeof(HMODULE) * 256; DWORD bytes_required; while (!hmodules) { hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__); CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size, &bytes_required)); if (bytes_required > modules_buffer_size) { // Either there turned out to be more than 256 hmodules, or new hmodules // could have loaded since the last try. Retry. UnmapOrDie(hmodules, modules_buffer_size); hmodules = 0; modules_buffer_size = bytes_required; } } // |num_modules| is the number of modules actually present, size_t num_modules = bytes_required / sizeof(HMODULE); for (size_t i = 0; i < num_modules; ++i) { HMODULE handle = hmodules[i]; MODULEINFO mi; if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi))) continue; // Get the UTF-16 path and convert to UTF-8. wchar_t modname_utf16[kMaxPathLength]; int modname_utf16_len = GetModuleFileNameW(handle, modname_utf16, kMaxPathLength); if (modname_utf16_len == 0) modname_utf16[0] = '\0'; char module_name[kMaxPathLength]; int module_name_len = ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1, &module_name[0], kMaxPathLength, NULL, NULL); module_name[module_name_len] = '\0'; uptr base_address = (uptr)mi.lpBaseOfDll; uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage; // Adjust the base address of the module so that we get a VA instead of an // RVA when computing the module offset. This helps llvm-symbolizer find the // right DWARF CU. In the common case that the image is loaded at its // preferred address, we will now print normal virtual addresses.
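// (Worked example with made-up numbers: if the PE header's ImageBase is
// 0x140000000 and the loader placed the module at 0x7ff712340000, then
// adjusted_base below is 0x7ff712340000 - 0x140000000, and an offset
// computed as pc - adjusted_base comes out as 0x140000000 + RVA, i.e. the
// virtual address the DWARF was written against.)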
uptr preferred_base = GetPreferredBase(&module_name[0]); uptr adjusted_base = base_address - preferred_base; LoadedModule cur_module; cur_module.set(module_name, adjusted_base); // We add the whole module as one single address range. cur_module.addAddressRange(base_address, end_address, /*executable*/ true, /*writable*/ true); modules_.push_back(cur_module); } UnmapOrDie(hmodules, modules_buffer_size); }; // We can't use atexit() directly at __asan_init time as the CRT is not fully // initialized at this point. Place the functions into a vector and use // atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers). InternalMmapVectorNoCtor<void (*)(void)> atexit_functions; int Atexit(void (*function)(void)) { atexit_functions.push_back(function); return 0; } static int RunAtexit() { int ret = 0; for (uptr i = 0; i < atexit_functions.size(); ++i) { ret |= atexit(atexit_functions[i]); } return ret; } #pragma section(".CRT$XID", long, read) // NOLINT __declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit; #endif // ------------------ sanitizer_libc.h fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) { // FIXME: Use the wide variants to handle Unicode filenames. fd_t res; if (mode == RdOnly) { res = CreateFileA(filename, GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr); } else if (mode == WrOnly) { res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr); } else { UNIMPLEMENTED(); } CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd); CHECK(res != kStderrFd || kStderrFd == kInvalidFd); if (res == kInvalidFd && last_error) *last_error = GetLastError(); return res; } void CloseFile(fd_t fd) { CloseHandle(fd); } bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read, error_t *error_p) { CHECK(fd != kInvalidFd); // bytes_read can't be passed directly to ReadFile: // uptr is unsigned long long on 64-bit Windows. unsigned long num_read_long; bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr); if (!success && error_p) *error_p = GetLastError(); if (bytes_read) *bytes_read = num_read_long; return success; } bool SupportsColoredOutput(fd_t fd) { // FIXME: support colored output. return false; } bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written, error_t *error_p) { CHECK(fd != kInvalidFd); // Handle null optional parameters. error_t dummy_error; error_p = error_p ? error_p : &dummy_error; uptr dummy_bytes_written; bytes_written = bytes_written ? bytes_written : &dummy_bytes_written; // Initialize output parameters in case we fail. *error_p = 0; *bytes_written = 0; // Map the conventional Unix fds 1 and 2 to Windows handles. They might be // closed, in which case this will fail. if (fd == kStdoutFd || fd == kStderrFd) { fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE); if (fd == 0) { *error_p = ERROR_INVALID_HANDLE; return false; } } DWORD bytes_written_32; if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) { *error_p = GetLastError(); return false; } else { *bytes_written = bytes_written_32; return true; } } bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) { UNIMPLEMENTED(); } uptr internal_sched_yield() { Sleep(0); return 0; } void internal__exit(int exitcode) { // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
// The debugger doesn't stop on TerminateProcess like it does on ExitProcess, // so add our own breakpoint here. if (::IsDebuggerPresent()) __debugbreak(); TerminateProcess(GetCurrentProcess(), exitcode); BUILTIN_UNREACHABLE(); } uptr internal_ftruncate(fd_t fd, uptr size) { UNIMPLEMENTED(); } uptr GetRSS() { return 0; } void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; } void internal_join_thread(void *th) { } // ---------------------- BlockingMutex ---------------- {{{1 const uptr LOCK_UNINITIALIZED = 0; const uptr LOCK_READY = (uptr)-1; BlockingMutex::BlockingMutex(LinkerInitialized li) { // FIXME: see comments in BlockingMutex::Lock() for the details. CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED); CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_)); InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_); owner_ = LOCK_READY; } BlockingMutex::BlockingMutex() { CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_)); InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_); owner_ = LOCK_READY; } void BlockingMutex::Lock() { if (owner_ == LOCK_UNINITIALIZED) { // FIXME: hm, global BlockingMutex objects are not initialized?!? // This might be a side effect of the clang+cl+link Frankenbuild... new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1)); // FIXME: If it turns out the linker doesn't invoke our // constructors, we should probably manually Lock/Unlock all the global // locks while we're starting in one thread to avoid double-init races. } EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_); CHECK_EQ(owner_, LOCK_READY); owner_ = GetThreadSelf(); } void BlockingMutex::Unlock() { CHECK_EQ(owner_, GetThreadSelf()); owner_ = LOCK_READY; LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_); } void BlockingMutex::CheckLocked() { CHECK_EQ(owner_, GetThreadSelf()); } uptr GetTlsSize() { return 0; } void InitTlsSize() { } void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size) { #if SANITIZER_GO *stk_addr = 0; *stk_size = 0; *tls_addr = 0; *tls_size = 0; #else uptr stack_top, stack_bottom; GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); *stk_addr = stack_bottom; *stk_size = stack_top - stack_bottom; *tls_addr = 0; *tls_size = 0; #endif } #if !SANITIZER_GO void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) { CHECK_GE(max_depth, 2); // FIXME: CaptureStackBackTrace might be too slow for us. // FIXME: Compare with StackWalk64. // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax), (void**)trace, 0); if (size == 0) return; // Skip the RTL frames by searching for the PC in the stacktrace. 
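// (Illustration with made-up frames: if CaptureStackBackTrace returned
// [RtlpUnwind, RtlRaiseException, pc, caller1, caller2], LocatePcInTrace(pc)
// would yield index 2 and PopStackFrames(2) would discard the two RTL frames
// so the trace begins at |pc|; the frame names are illustrative only.)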
uptr pc_location = LocatePcInTrace(pc); PopStackFrames(pc_location); } void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, u32 max_depth) { CONTEXT ctx = *(CONTEXT *)context; STACKFRAME64 stack_frame; memset(&stack_frame, 0, sizeof(stack_frame)); InitializeDbgHelpIfNeeded(); size = 0; #if defined(_WIN64) int machine_type = IMAGE_FILE_MACHINE_AMD64; stack_frame.AddrPC.Offset = ctx.Rip; stack_frame.AddrFrame.Offset = ctx.Rbp; stack_frame.AddrStack.Offset = ctx.Rsp; #else int machine_type = IMAGE_FILE_MACHINE_I386; stack_frame.AddrPC.Offset = ctx.Eip; stack_frame.AddrFrame.Offset = ctx.Ebp; stack_frame.AddrStack.Offset = ctx.Esp; #endif stack_frame.AddrPC.Mode = AddrModeFlat; stack_frame.AddrFrame.Mode = AddrModeFlat; stack_frame.AddrStack.Mode = AddrModeFlat; while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(), &stack_frame, &ctx, NULL, SymFunctionTableAccess64, SymGetModuleBase64, NULL) && size < Min(max_depth, kStackTraceMax)) { trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset; } } #endif // #if !SANITIZER_GO void ReportFile::Write(const char *buffer, uptr length) { SpinMutexLock l(mu); ReopenIfNecessary(); if (!WriteToFile(fd, buffer, length)) { // stderr may be closed, but we may be able to print to the debugger // instead. This is the case when launching a program from Visual Studio, // and the following routine should write to its console. OutputDebugStringA(buffer); } } void SetAlternateSignalStack() { // FIXME: Decide what to do on Windows. } void UnsetAlternateSignalStack() { // FIXME: Decide what to do on Windows. } void InstallDeadlySignalHandlers(SignalHandlerType handler) { (void)handler; // FIXME: Decide what to do on Windows. } HandleSignalMode GetHandleSignalMode(int signum) { // FIXME: Decide what to do on Windows. return kHandleSignalNo; } // Check based on flags if we should handle this exception. bool IsHandledDeadlyException(DWORD exceptionCode) { switch (exceptionCode) { case EXCEPTION_ACCESS_VIOLATION: case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: case EXCEPTION_STACK_OVERFLOW: case EXCEPTION_DATATYPE_MISALIGNMENT: case EXCEPTION_IN_PAGE_ERROR: return common_flags()->handle_segv; case EXCEPTION_ILLEGAL_INSTRUCTION: case EXCEPTION_PRIV_INSTRUCTION: case EXCEPTION_BREAKPOINT: return common_flags()->handle_sigill; case EXCEPTION_FLT_DENORMAL_OPERAND: case EXCEPTION_FLT_DIVIDE_BY_ZERO: case EXCEPTION_FLT_INEXACT_RESULT: case EXCEPTION_FLT_INVALID_OPERATION: case EXCEPTION_FLT_OVERFLOW: case EXCEPTION_FLT_STACK_CHECK: case EXCEPTION_FLT_UNDERFLOW: case EXCEPTION_INT_DIVIDE_BY_ZERO: case EXCEPTION_INT_OVERFLOW: return common_flags()->handle_sigfpe; } return false; } const char *DescribeSignalOrException(int signo) { unsigned code = signo; // Get the string description of the exception if this is a known deadly // exception. 
switch (code) { case EXCEPTION_ACCESS_VIOLATION: return "access-violation"; case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: return "array-bounds-exceeded"; case EXCEPTION_STACK_OVERFLOW: return "stack-overflow"; case EXCEPTION_DATATYPE_MISALIGNMENT: return "datatype-misalignment"; case EXCEPTION_IN_PAGE_ERROR: return "in-page-error"; case EXCEPTION_ILLEGAL_INSTRUCTION: return "illegal-instruction"; case EXCEPTION_PRIV_INSTRUCTION: return "priv-instruction"; case EXCEPTION_BREAKPOINT: return "breakpoint"; case EXCEPTION_FLT_DENORMAL_OPERAND: return "flt-denormal-operand"; case EXCEPTION_FLT_DIVIDE_BY_ZERO: return "flt-divide-by-zero"; case EXCEPTION_FLT_INEXACT_RESULT: return "flt-inexact-result"; case EXCEPTION_FLT_INVALID_OPERATION: return "flt-invalid-operation"; case EXCEPTION_FLT_OVERFLOW: return "flt-overflow"; case EXCEPTION_FLT_STACK_CHECK: return "flt-stack-check"; case EXCEPTION_FLT_UNDERFLOW: return "flt-underflow"; case EXCEPTION_INT_DIVIDE_BY_ZERO: return "int-divide-by-zero"; case EXCEPTION_INT_OVERFLOW: return "int-overflow"; } return "unknown exception"; } bool IsAccessibleMemoryRange(uptr beg, uptr size) { SYSTEM_INFO si; GetNativeSystemInfo(&si); uptr page_size = si.dwPageSize; uptr page_mask = ~(page_size - 1); for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask; page <= end;) { MEMORY_BASIC_INFORMATION info; if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info)) return false; if (info.Protect == 0 || info.Protect == PAGE_NOACCESS || info.Protect == PAGE_EXECUTE) return false; if (info.RegionSize == 0) return false; page += info.RegionSize; } return true; } SignalContext SignalContext::Create(void *siginfo, void *context) { EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; CONTEXT *context_record = (CONTEXT *)context; uptr pc = (uptr)exception_record->ExceptionAddress; #ifdef _WIN64 uptr bp = (uptr)context_record->Rbp; uptr sp = (uptr)context_record->Rsp; #else uptr bp = (uptr)context_record->Ebp; uptr sp = (uptr)context_record->Esp; #endif uptr access_addr = exception_record->ExceptionInformation[1]; // The contents of this array are documented at // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx // The first element indicates read as 0, write as 1, or execute as 8. The // second element is the faulting address. WriteFlag write_flag = SignalContext::UNKNOWN; switch (exception_record->ExceptionInformation[0]) { case 0: write_flag = SignalContext::READ; break; case 1: write_flag = SignalContext::WRITE; break; case 8: write_flag = SignalContext::UNKNOWN; break; } bool is_memory_access = write_flag != SignalContext::UNKNOWN; return SignalContext(context, access_addr, pc, sp, bp, is_memory_access, write_flag); } void SignalContext::DumpAllRegisters(void *context) { // FIXME: Implement this. } uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { // FIXME: Actually implement this function. CHECK_GT(buf_len, 0); buf[0] = 0; return 0; } uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) { return ReadBinaryName(buf, buf_len); } void CheckVMASize() { // Do nothing. } void MaybeReexec() { // No need to re-exec on Windows. } char **GetArgv() { // FIXME: Actually implement this function. return 0; } pid_t StartSubprocess(const char *program, const char *const argv[], fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) { // FIXME: implement on this platform // Should be implemented based on // SymbolizerProcess::StarAtSymbolizerSubprocess // from lib/sanitizer_common/sanitizer_symbolizer_win.cc. 
return -1; } bool IsProcessRunning(pid_t pid) { // FIXME: implement on this platform. return false; } int WaitForProcess(pid_t pid) { return -1; } // FIXME implement on this platform. void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { } void CheckNoDeepBind(const char *filename, int flag) { // Do nothing. } // FIXME: implement on this platform. bool GetRandom(void *buffer, uptr length) { UNIMPLEMENTED(); } } // namespace __sanitizer #endif // _WIN32 Index: vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp (revision 320960) +++ vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp (revision 320961) @@ -1,723 +1,723 @@ //===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Scudo Hardened Allocator implementation. /// It uses the sanitizer_common allocator as a base and aims at mitigating /// heap corruption vulnerabilities. It provides a checksum-guarded chunk /// header, a delayed free list, and additional sanity checks. /// //===----------------------------------------------------------------------===// #include "scudo_allocator.h" #include "scudo_crc32.h" #include "scudo_tls.h" #include "scudo_utils.h" #include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_quarantine.h" #include <errno.h> #include <string.h> namespace __scudo { // Global static cookie, initialized at start-up. static uptr Cookie; // We default to software CRC32 if the alternatives are not supported, either // at compilation or at runtime. static atomic_uint8_t HashAlgorithm = { CRC32Software }; INLINE u32 computeCRC32(uptr Crc, uptr Value, uptr *Array, uptr ArraySize) { // If the hardware CRC32 feature is defined here, it was enabled everywhere, // as opposed to only for scudo_crc32.cpp. This means that other hardware // specific instructions were likely emitted at other places, and as a // result there is no reason to not use it here. #if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) Crc = CRC32_INTRINSIC(Crc, Value); for (uptr i = 0; i < ArraySize; i++) Crc = CRC32_INTRINSIC(Crc, Array[i]); return Crc; #else if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) { Crc = computeHardwareCRC32(Crc, Value); for (uptr i = 0; i < ArraySize; i++) Crc = computeHardwareCRC32(Crc, Array[i]); return Crc; } Crc = computeSoftwareCRC32(Crc, Value); for (uptr i = 0; i < ArraySize; i++) Crc = computeSoftwareCRC32(Crc, Array[i]); return Crc; #endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) } static ScudoBackendAllocator &getBackendAllocator(); struct ScudoChunk : UnpackedHeader { // We can't use the offset member of the chunk itself, as we would double // fetch it without any guarantee that it wouldn't have been tampered with. // To prevent this, we work with a local copy of the header. void *getAllocBeg(UnpackedHeader *Header) { return reinterpret_cast<void *>( reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog)); } // Returns the usable size for a chunk, meaning the number of bytes from the // beginning of the user data to the end of the backend allocated chunk.
uptr getUsableSize(UnpackedHeader *Header) { uptr Size = getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header), Header->FromPrimary); if (Size == 0) return 0; return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog); } // Compute the checksum of the Chunk pointer and its ChunkHeader. u16 computeChecksum(UnpackedHeader *Header) const { UnpackedHeader ZeroChecksumHeader = *Header; ZeroChecksumHeader.Checksum = 0; uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)]; memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder)); u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HeaderHolder, ARRAY_SIZE(HeaderHolder)); return static_cast<u16>(Crc); } // Checks the validity of a chunk by verifying its checksum. It doesn't // incur termination in the event of an invalid chunk. bool isValid() { UnpackedHeader NewUnpackedHeader; const AtomicPackedHeader *AtomicHeader = reinterpret_cast<const AtomicPackedHeader *>(this); PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader); NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader); return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader)); } // Nulls out a chunk header. When returning the chunk to the backend, there // is no need to store a valid ChunkAvailable header, as this would be // computationally expensive. Zeroing out serves the same purpose by making // the header invalid. In the extremely rare event where 0 would be a valid // checksum for the chunk, the state of the chunk is ChunkAvailable anyway. COMPILER_CHECK(ChunkAvailable == 0); void eraseHeader() { PackedHeader NullPackedHeader = 0; AtomicPackedHeader *AtomicHeader = reinterpret_cast<AtomicPackedHeader *>(this); atomic_store_relaxed(AtomicHeader, NullPackedHeader); } // Loads and unpacks the header, verifying the checksum in the process. void loadHeader(UnpackedHeader *NewUnpackedHeader) const { const AtomicPackedHeader *AtomicHeader = reinterpret_cast<const AtomicPackedHeader *>(this); PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader); *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader); if (UNLIKELY(NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader))) { dieWithMessage("ERROR: corrupted chunk header at address %p\n", this); } } // Packs and stores the header, computing the checksum in the process. void storeHeader(UnpackedHeader *NewUnpackedHeader) { NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader); PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); AtomicPackedHeader *AtomicHeader = reinterpret_cast<AtomicPackedHeader *>(this); atomic_store_relaxed(AtomicHeader, NewPackedHeader); } // Packs and stores the header, computing the checksum in the process. We // compare the current header with the expected provided one to ensure that // we are not being raced by a corruption occurring in another thread.
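The checksum scheme above can be summarized in a short self-contained sketch: the checksum field is zeroed before hashing, and the chunk address is mixed in so that a header copied to a different address fails verification. FNV-1a stands in for CRC32 purely to keep the sketch dependency-free, and the field layout is illustrative, not Scudo's actual UnpackedHeader:

#include <cstdint>

struct Header { uint64_t Bits; uint16_t Checksum; };

// Stand-in byte hash (FNV-1a); the real code uses CRC32.
static uint32_t HashBytes(uint32_t H, const void *Data, size_t Size) {
  const unsigned char *P = static_cast<const unsigned char *>(Data);
  for (size_t I = 0; I < Size; I++) { H ^= P[I]; H *= 16777619u; }
  return H;
}

uint16_t ComputeChecksum(const void *Chunk, Header H, uint32_t Cookie) {
  H.Checksum = 0;  // never hash the field being computed
  uint32_t Crc = HashBytes(Cookie, &Chunk, sizeof(Chunk));  // bind to address
  Crc = HashBytes(Crc, &H.Bits, sizeof(H.Bits));            // header payload
  Crc = HashBytes(Crc, &H.Checksum, sizeof(H.Checksum));    // zeroed field
  return static_cast<uint16_t>(Crc);
}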
void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader, UnpackedHeader *OldUnpackedHeader) { NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader); PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader); AtomicPackedHeader *AtomicHeader = reinterpret_cast<AtomicPackedHeader *>(this); if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader, &OldPackedHeader, NewPackedHeader, memory_order_relaxed))) { dieWithMessage("ERROR: race on chunk header at address %p\n", this); } } }; ScudoChunk *getScudoChunk(uptr UserBeg) { return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize); } struct AllocatorOptions { u32 QuarantineSizeMb; u32 ThreadLocalQuarantineSizeKb; bool MayReturnNull; s32 ReleaseToOSIntervalMs; bool DeallocationTypeMismatch; bool DeleteSizeMismatch; bool ZeroContents; void setFrom(const Flags *f, const CommonFlags *cf); void copyTo(Flags *f, CommonFlags *cf) const; }; void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) { MayReturnNull = cf->allocator_may_return_null; ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms; QuarantineSizeMb = f->QuarantineSizeMb; ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb; DeallocationTypeMismatch = f->DeallocationTypeMismatch; DeleteSizeMismatch = f->DeleteSizeMismatch; ZeroContents = f->ZeroContents; } void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const { cf->allocator_may_return_null = MayReturnNull; cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs; f->QuarantineSizeMb = QuarantineSizeMb; f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb; f->DeallocationTypeMismatch = DeallocationTypeMismatch; f->DeleteSizeMismatch = DeleteSizeMismatch; f->ZeroContents = ZeroContents; } static void initScudoInternal(const AllocatorOptions &Options); static bool ScudoInitIsRunning = false; void initScudo() { SanitizerToolName = "Scudo"; CHECK(!ScudoInitIsRunning && "Scudo init calls itself!"); ScudoInitIsRunning = true; // Check if hardware CRC32 is supported in the binary and by the platform, if // so, opt for the CRC32 hardware version of the checksum. if (computeHardwareCRC32 && testCPUFeature(CRC32CPUFeature)) atomic_store_relaxed(&HashAlgorithm, CRC32Hardware); initFlags(); AllocatorOptions Options; Options.setFrom(getFlags(), common_flags()); initScudoInternal(Options); // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use. ScudoInitIsRunning = false; } struct QuarantineCallback { explicit QuarantineCallback(AllocatorCache *Cache) : Cache_(Cache) {} // Chunk recycling function, returns a quarantined chunk to the backend, // first making sure it hasn't been tampered with. void Recycle(ScudoChunk *Chunk) { UnpackedHeader Header; Chunk->loadHeader(&Header); if (UNLIKELY(Header.State != ChunkQuarantine)) { dieWithMessage("ERROR: invalid chunk state when recycling address %p\n", Chunk); } Chunk->eraseHeader(); void *Ptr = Chunk->getAllocBeg(&Header); getBackendAllocator().Deallocate(Cache_, Ptr, Header.FromPrimary); } // Internal quarantine allocation and deallocation functions. We first check // that the batches are indeed serviced by the Primary. // TODO(kostyak): figure out the best way to protect the batches.
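The compare-exchange pattern used by compareExchangeHeader above is worth isolating: the store succeeds only if the header still holds the previously loaded value, so corruption or an unexpected race surfaces as a failed exchange instead of being silently overwritten. A minimal sketch with std::atomic, using invented names:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

using PackedHeader = uint64_t;

void UpdateHeaderOrDie(std::atomic<PackedHeader> *Atomic,
                       PackedHeader Expected, PackedHeader Desired) {
  // On failure, compare_exchange_strong reloads the current value into
  // Expected; the runtime above treats any mismatch as fatal.
  if (!Atomic->compare_exchange_strong(Expected, Desired,
                                       std::memory_order_relaxed)) {
    fprintf(stderr, "race on chunk header\n");
    abort();
  }
}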
COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize); void *Allocate(uptr Size) { return getBackendAllocator().Allocate(Cache_, Size, MinAlignment, true); } void Deallocate(void *Ptr) { getBackendAllocator().Deallocate(Cache_, Ptr, true); } AllocatorCache *Cache_; }; typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine; typedef ScudoQuarantine::Cache ScudoQuarantineCache; COMPILER_CHECK(sizeof(ScudoQuarantineCache) <= sizeof(ScudoThreadContext::QuarantineCachePlaceHolder)); AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) { return &ThreadContext->Cache; } ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) { return reinterpret_cast< ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder); } -Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) { +ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) { return &ThreadContext->Prng; } struct ScudoAllocator { static const uptr MaxAllowedMallocSize = FIRST_32_SECOND_64(2UL << 30, 1ULL << 40); typedef ReturnNullOrDieOnFailure FailureHandler; ScudoBackendAllocator BackendAllocator; ScudoQuarantine AllocatorQuarantine; // The fallback caches are used when the thread local caches have been // 'destroyed' on thread tear-down. They are protected by a Mutex as they can // be accessed by different threads. StaticSpinMutex FallbackMutex; AllocatorCache FallbackAllocatorCache; ScudoQuarantineCache FallbackQuarantineCache; - Xorshift128Plus FallbackPrng; + ScudoPrng FallbackPrng; bool DeallocationTypeMismatch; bool ZeroContents; bool DeleteSizeMismatch; explicit ScudoAllocator(LinkerInitialized) : AllocatorQuarantine(LINKER_INITIALIZED), FallbackQuarantineCache(LINKER_INITIALIZED) {} void init(const AllocatorOptions &Options) { // Verify that the header offset field can hold the maximum offset. In the // case of the Secondary allocator, it takes care of alignment and the // offset will always be 0. In the case of the Primary, the worst case // scenario happens in the last size class, when the backend allocation // would already be aligned on the requested alignment, which would happen // to be the maximum alignment that would fit in that size class. As a // result, the maximum offset will be at most the maximum alignment for the // last size class minus the header size, in multiples of MinAlignment. UnpackedHeader Header = {}; uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex( SizeClassMap::kMaxSize - MinAlignment); uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog; Header.Offset = MaxOffset; if (Header.Offset != MaxOffset) { dieWithMessage("ERROR: the maximum possible offset doesn't fit in the " "header\n"); } // Verify that we can fit the maximum size or amount of unused bytes in the // header. Given that the Secondary fits the allocation to a page, the worst // case scenario happens in the Primary. It will depend on the second to // last and last class sizes, as well as the dynamic base for the Primary. // The following is an over-approximation that works for our needs.
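The init() checks above rely on a simple bitfield trick that generalizes well: assign the largest value a packed field must represent, read it back, and fail fast at start-up if it was truncated. A minimal sketch, with field widths that are illustrative rather than Scudo's real header layout:

#include <cstdio>
#include <cstdlib>

struct PackedFields {
  unsigned long long Offset : 16;
  unsigned long long SizeOrUnusedBytes : 19;
};

void CheckOffsetFits(unsigned long long MaxOffset) {
  PackedFields F = {};
  F.Offset = MaxOffset;  // silently truncates if the field is too narrow
  if (F.Offset != MaxOffset) {
    fprintf(stderr, "maximum offset doesn't fit in the header\n");
    abort();
  }
}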
uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1; Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes; if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) { dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in " "the header\n"); } DeallocationTypeMismatch = Options.DeallocationTypeMismatch; DeleteSizeMismatch = Options.DeleteSizeMismatch; ZeroContents = Options.ZeroContents; SetAllocatorMayReturnNull(Options.MayReturnNull); BackendAllocator.Init(Options.ReleaseToOSIntervalMs); AllocatorQuarantine.Init( static_cast<uptr>(Options.QuarantineSizeMb) << 20, static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10); BackendAllocator.InitCache(&FallbackAllocatorCache); - FallbackPrng.initFromURandom(); - Cookie = FallbackPrng.getNext(); + FallbackPrng.init(); + Cookie = FallbackPrng.getU64(); } // Helper function that checks for a valid Scudo chunk. nullptr isn't. bool isValidPointer(const void *UserPtr) { initThreadMaybe(); if (UNLIKELY(!UserPtr)) return false; uptr UserBeg = reinterpret_cast<uptr>(UserPtr); if (!IsAligned(UserBeg, MinAlignment)) return false; return getScudoChunk(UserBeg)->isValid(); } // Allocates a chunk. void *allocate(uptr Size, uptr Alignment, AllocType Type, bool ForceZeroContents = false) { initThreadMaybe(); if (UNLIKELY(Alignment > MaxAlignment)) return FailureHandler::OnBadRequest(); if (UNLIKELY(Alignment < MinAlignment)) Alignment = MinAlignment; if (UNLIKELY(Size >= MaxAllowedMallocSize)) return FailureHandler::OnBadRequest(); if (UNLIKELY(Size == 0)) Size = 1; uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize; uptr AlignedSize = (Alignment > MinAlignment) ? NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize; if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) return FailureHandler::OnBadRequest(); // Primary and Secondary backed allocations have a different treatment. We // deal with alignment requirements of Primary serviced allocations here, // but the Secondary will take care of its own alignment needs. bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment); void *Ptr; - uptr Salt; + u8 Salt; uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize; uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment; ScudoThreadContext *ThreadContext = getThreadContextAndLock(); if (LIKELY(ThreadContext)) { - Salt = getPrng(ThreadContext)->getNext(); + Salt = getPrng(ThreadContext)->getU8(); Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext), AllocationSize, AllocationAlignment, FromPrimary); ThreadContext->unlock(); } else { SpinMutexLock l(&FallbackMutex); - Salt = FallbackPrng.getNext(); + Salt = FallbackPrng.getU8(); Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize, AllocationAlignment, FromPrimary); } if (UNLIKELY(!Ptr)) return FailureHandler::OnOOM(); // If requested, we will zero out the entire contents of the returned chunk. if ((ForceZeroContents || ZeroContents) && FromPrimary) memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary)); UnpackedHeader Header = {}; uptr AllocBeg = reinterpret_cast<uptr>(Ptr); uptr UserBeg = AllocBeg + AlignedChunkHeaderSize; if (UNLIKELY(!IsAligned(UserBeg, Alignment))) { // Since the Secondary takes care of alignment, a non-aligned pointer // means it is from the Primary. It is also the only case where the offset // field of the header would be non-zero.
CHECK(FromPrimary); UserBeg = RoundUpTo(UserBeg, Alignment); uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg; Header.Offset = Offset >> MinAlignmentLog; } CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize); Header.State = ChunkAllocated; Header.AllocType = Type; if (FromPrimary) { Header.FromPrimary = FromPrimary; Header.SizeOrUnusedBytes = Size; } else { // The secondary fits the allocations to a page, so the amount of unused // bytes is the difference between the end of the user allocation and the // next page boundary. uptr PageSize = GetPageSizeCached(); uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1); if (TrailingBytes) Header.SizeOrUnusedBytes = PageSize - TrailingBytes; } Header.Salt = static_cast<u8>(Salt); getScudoChunk(UserBeg)->storeHeader(&Header); void *UserPtr = reinterpret_cast<void *>(UserBeg); // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size); return UserPtr; } // Place a chunk in the quarantine. In the event of a zero-sized quarantine, // we directly deallocate the chunk, otherwise the flow would lead to the // chunk being loaded (and checked) twice, and stored (and checksummed) once, // with no additional security value. void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header, uptr Size) { bool FromPrimary = Header->FromPrimary; bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0); if (BypassQuarantine) { Chunk->eraseHeader(); void *Ptr = Chunk->getAllocBeg(Header); ScudoThreadContext *ThreadContext = getThreadContextAndLock(); if (LIKELY(ThreadContext)) { getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr, FromPrimary); ThreadContext->unlock(); } else { SpinMutexLock Lock(&FallbackMutex); getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr, FromPrimary); } } else { UnpackedHeader NewHeader = *Header; NewHeader.State = ChunkQuarantine; Chunk->compareExchangeHeader(&NewHeader, Header); ScudoThreadContext *ThreadContext = getThreadContextAndLock(); if (LIKELY(ThreadContext)) { AllocatorQuarantine.Put(getQuarantineCache(ThreadContext), QuarantineCallback( getAllocatorCache(ThreadContext)), Chunk, Size); ThreadContext->unlock(); } else { SpinMutexLock l(&FallbackMutex); AllocatorQuarantine.Put(&FallbackQuarantineCache, QuarantineCallback(&FallbackAllocatorCache), Chunk, Size); } } } // Deallocates a Chunk, which means adding it to the delayed free list (or // Quarantine). void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) { initThreadMaybe(); // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr); if (UNLIKELY(!UserPtr)) return; uptr UserBeg = reinterpret_cast<uptr>(UserPtr); if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) { dieWithMessage("ERROR: attempted to deallocate a chunk not properly " "aligned at address %p\n", UserPtr); } ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader OldHeader; Chunk->loadHeader(&OldHeader); if (UNLIKELY(OldHeader.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when deallocating address " "%p\n", UserPtr); } if (DeallocationTypeMismatch) { // The deallocation type has to match the allocation one. if (OldHeader.AllocType != Type) { // With the exception of memalign'd Chunks, that can still be free'd. if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) { dieWithMessage("ERROR: allocation type mismatch on address %p\n", UserPtr); } } } uptr Size = OldHeader.FromPrimary ?
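The alignment handling in allocate() above reduces to a small amount of pointer arithmetic: round the user pointer up to the requested alignment, then record how far it moved, in MinAlignment units, so deallocation can walk back to the backend pointer. A self-contained sketch with stand-in constants:

#include <cstdint>

constexpr uintptr_t kHeaderSize = 16;      // stand-in for AlignedChunkHeaderSize
constexpr uintptr_t kMinAlignmentLog = 4;  // stand-in for MinAlignmentLog

uintptr_t RoundUpTo(uintptr_t X, uintptr_t Align) {
  return (X + Align - 1) & ~(Align - 1);   // Align must be a power of two
}

uintptr_t PlaceUserPointer(uintptr_t AllocBeg, uintptr_t Alignment,
                           uintptr_t *OffsetUnits) {
  uintptr_t UserBeg = RoundUpTo(AllocBeg + kHeaderSize, Alignment);
  // Distance from the backend allocation to the header, in MinAlignment
  // units; this is what the header's Offset field stores.
  *OffsetUnits = (UserBeg - kHeaderSize - AllocBeg) >> kMinAlignmentLog;
  return UserBeg;
}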
OldHeader.SizeOrUnusedBytes : Chunk->getUsableSize(&OldHeader) - OldHeader.SizeOrUnusedBytes; if (DeleteSizeMismatch) { if (DeleteSize && DeleteSize != Size) { dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n", UserPtr); } } // If a small memory amount was allocated with a larger alignment, we want // to take that into account. Otherwise the Quarantine would be filled with // tiny chunks, taking a lot of VA memory. This is an approximation of the // usable size, that allows us to not call GetActuallyAllocatedSize. uptr LiableSize = Size + (OldHeader.Offset << MinAlignment); quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize); } // Reallocates a chunk. We can save on a new allocation if the new requested // size still fits in the chunk. void *reallocate(void *OldPtr, uptr NewSize) { initThreadMaybe(); uptr UserBeg = reinterpret_cast<uptr>(OldPtr); if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) { dieWithMessage("ERROR: attempted to reallocate a chunk not properly " "aligned at address %p\n", OldPtr); } ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader OldHeader; Chunk->loadHeader(&OldHeader); if (UNLIKELY(OldHeader.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when reallocating address " "%p\n", OldPtr); } if (UNLIKELY(OldHeader.AllocType != FromMalloc)) { dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n", OldPtr); } uptr UsableSize = Chunk->getUsableSize(&OldHeader); // The new size still fits in the current chunk, and the size difference // is reasonable. if (NewSize <= UsableSize && (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) { UnpackedHeader NewHeader = OldHeader; NewHeader.SizeOrUnusedBytes = OldHeader.FromPrimary ? NewSize : UsableSize - NewSize; Chunk->compareExchangeHeader(&NewHeader, &OldHeader); return OldPtr; } // Otherwise, we have to allocate a new chunk and copy the contents of the // old one. void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); if (NewPtr) { uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes : UsableSize - OldHeader.SizeOrUnusedBytes; memcpy(NewPtr, OldPtr, Min(NewSize, OldSize)); quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize); } return NewPtr; } // Helper function that returns the actual usable size of a chunk. uptr getUsableSize(const void *Ptr) { initThreadMaybe(); if (UNLIKELY(!Ptr)) return 0; uptr UserBeg = reinterpret_cast<uptr>(Ptr); ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader Header; Chunk->loadHeader(&Header); // Getting the usable size of a chunk only makes sense if it's allocated.
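The in-place path of reallocate() above boils down to one predicate: the new size must fit, and the wasted tail must stay below half of the largest primary size class, so shrinking a wildly oversized chunk still reallocates. A one-function sketch with an illustrative constant:

#include <cstddef>

constexpr size_t kMaxSizeClass = 1 << 17;  // stand-in for SizeClassMap::kMaxSize

bool CanReallocInPlace(size_t UsableSize, size_t NewSize) {
  return NewSize <= UsableSize &&
         (UsableSize - NewSize) < (kMaxSizeClass / 2);
}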
if (UNLIKELY(Header.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when sizing address %p\n", Ptr); } return Chunk->getUsableSize(&Header); } void *calloc(uptr NMemB, uptr Size) { initThreadMaybe(); if (CheckForCallocOverflow(NMemB, Size)) return FailureHandler::OnBadRequest(); return allocate(NMemB * Size, MinAlignment, FromMalloc, true); } void commitBack(ScudoThreadContext *ThreadContext) { AllocatorCache *Cache = getAllocatorCache(ThreadContext); AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext), QuarantineCallback(Cache)); BackendAllocator.DestroyCache(Cache); } uptr getStats(AllocatorStat StatType) { initThreadMaybe(); uptr stats[AllocatorStatCount]; BackendAllocator.GetStats(stats); return stats[StatType]; } }; static ScudoAllocator Instance(LINKER_INITIALIZED); static ScudoBackendAllocator &getBackendAllocator() { return Instance.BackendAllocator; } static void initScudoInternal(const AllocatorOptions &Options) { Instance.init(Options); } void ScudoThreadContext::init() { getBackendAllocator().InitCache(&Cache); - Prng.initFromURandom(); + Prng.init(); memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder)); } void ScudoThreadContext::commitBack() { Instance.commitBack(this); } void *scudoMalloc(uptr Size, AllocType Type) { return Instance.allocate(Size, MinAlignment, Type); } void scudoFree(void *Ptr, AllocType Type) { Instance.deallocate(Ptr, 0, Type); } void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) { Instance.deallocate(Ptr, Size, Type); } void *scudoRealloc(void *Ptr, uptr Size) { if (!Ptr) return Instance.allocate(Size, MinAlignment, FromMalloc); if (Size == 0) { Instance.deallocate(Ptr, 0, FromMalloc); return nullptr; } return Instance.reallocate(Ptr, Size); } void *scudoCalloc(uptr NMemB, uptr Size) { return Instance.calloc(NMemB, Size); } void *scudoValloc(uptr Size) { return Instance.allocate(Size, GetPageSizeCached(), FromMemalign); } void *scudoPvalloc(uptr Size) { uptr PageSize = GetPageSizeCached(); Size = RoundUpTo(Size, PageSize); if (Size == 0) { // pvalloc(0) should allocate one page. Size = PageSize; } return Instance.allocate(Size, PageSize, FromMemalign); } void *scudoMemalign(uptr Alignment, uptr Size) { if (UNLIKELY(!IsPowerOfTwo(Alignment))) return ScudoAllocator::FailureHandler::OnBadRequest(); return Instance.allocate(Size, Alignment, FromMemalign); } int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) { if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Alignment % sizeof(void *)) != 0)) { *MemPtr = ScudoAllocator::FailureHandler::OnBadRequest(); return EINVAL; } *MemPtr = Instance.allocate(Size, Alignment, FromMemalign); if (!*MemPtr) return ENOMEM; return 0; } void *scudoAlignedAlloc(uptr Alignment, uptr Size) { // Alignment must be a power of 2, Size must be a multiple of Alignment. 
if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Size & (Alignment - 1)) != 0)) return ScudoAllocator::FailureHandler::OnBadRequest(); return Instance.allocate(Size, Alignment, FromMalloc); } uptr scudoMallocUsableSize(void *Ptr) { return Instance.getUsableSize(Ptr); } } // namespace __scudo using namespace __scudo; // MallocExtension helper functions uptr __sanitizer_get_current_allocated_bytes() { return Instance.getStats(AllocatorStatAllocated); } uptr __sanitizer_get_heap_size() { return Instance.getStats(AllocatorStatMapped); } uptr __sanitizer_get_free_bytes() { return 1; } uptr __sanitizer_get_unmapped_bytes() { return 1; } uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } int __sanitizer_get_ownership(const void *Ptr) { return Instance.isValidPointer(Ptr); } uptr __sanitizer_get_allocated_size(const void *Ptr) { return Instance.getUsableSize(Ptr); } Index: vendor/compiler-rt/dist/lib/scudo/scudo_tls.h =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_tls.h (revision 320960) +++ vendor/compiler-rt/dist/lib/scudo/scudo_tls.h (revision 320961) @@ -1,47 +1,47 @@ //===-- scudo_tls.h ---------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Scudo thread local structure definition. /// Implementation will differ based on the thread local storage primitives /// offered by the underlying platform. /// //===----------------------------------------------------------------------===// #ifndef SCUDO_TLS_H_ #define SCUDO_TLS_H_ #include "scudo_allocator.h" #include "scudo_utils.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_platform.h" namespace __scudo { // Platform specific base thread context definitions. #include "scudo_tls_context_android.inc" #include "scudo_tls_context_linux.inc" struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform { AllocatorCache Cache; - Xorshift128Plus Prng; + ScudoPrng Prng; uptr QuarantineCachePlaceHolder[4]; void init(); void commitBack(); }; void initThread(); // Platform specific fastpath function definitions. #include "scudo_tls_android.inc" #include "scudo_tls_linux.inc" } // namespace __scudo #endif // SCUDO_TLS_H_ Index: vendor/compiler-rt/dist/lib/scudo/scudo_utils.cpp =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_utils.cpp (revision 320960) +++ vendor/compiler-rt/dist/lib/scudo/scudo_utils.cpp (revision 320961) @@ -1,162 +1,126 @@ //===-- scudo_utils.cpp -----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Platform specific utility functions. /// //===----------------------------------------------------------------------===// #include "scudo_utils.h" #include <errno.h> #include <fcntl.h> #include <stdarg.h> #include <unistd.h> #if defined(__x86_64__) || defined(__i386__) # include <cpuid.h> #endif #if defined(__arm__) || defined(__aarch64__) # include <sys/auxv.h> #endif // TODO(kostyak): remove __sanitizer *Printf uses in favor of our own less // complicated string formatting code.
The following is a // temporary workaround to be able to use __sanitizer::VSNPrintf. namespace __sanitizer { extern int VSNPrintf(char *buff, int buff_length, const char *format, va_list args); } // namespace __sanitizer namespace __scudo { FORMAT(1, 2) void NORETURN dieWithMessage(const char *Format, ...) { // Our messages are tiny, 256 characters is more than enough. char Message[256]; va_list Args; va_start(Args, Format); __sanitizer::VSNPrintf(Message, sizeof(Message), Format, Args); va_end(Args); RawWrite(Message); Die(); } #if defined(__x86_64__) || defined(__i386__) // i386 and x86_64 specific code to detect CRC32 hardware support via CPUID. // CRC32 requires the SSE 4.2 instruction set. typedef struct { u32 Eax; u32 Ebx; u32 Ecx; u32 Edx; } CPUIDRegs; static void getCPUID(CPUIDRegs *Regs, u32 Level) { __get_cpuid(Level, &Regs->Eax, &Regs->Ebx, &Regs->Ecx, &Regs->Edx); } CPUIDRegs getCPUFeatures() { CPUIDRegs VendorRegs = {}; getCPUID(&VendorRegs, 0); bool IsIntel = (VendorRegs.Ebx == signature_INTEL_ebx) && (VendorRegs.Edx == signature_INTEL_edx) && (VendorRegs.Ecx == signature_INTEL_ecx); bool IsAMD = (VendorRegs.Ebx == signature_AMD_ebx) && (VendorRegs.Edx == signature_AMD_edx) && (VendorRegs.Ecx == signature_AMD_ecx); // Default to an empty feature set if not on a supported CPU. CPUIDRegs FeaturesRegs = {}; if (IsIntel || IsAMD) { getCPUID(&FeaturesRegs, 1); } return FeaturesRegs; } #ifndef bit_SSE4_2 # define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines. #endif bool testCPUFeature(CPUFeature Feature) { CPUIDRegs FeaturesRegs = getCPUFeatures(); switch (Feature) { case CRC32CPUFeature: // CRC32 is provided by SSE 4.2. return !!(FeaturesRegs.Ecx & bit_SSE4_2); default: break; } return false; } #elif defined(__arm__) || defined(__aarch64__) // For ARM and AArch64, hardware CRC32 support is indicated in the // AT_HWCAP auxiliary vector. #ifndef HWCAP_CRC32 # define HWCAP_CRC32 (1<<7) // HWCAP_CRC32 is missing on older platforms. #endif bool testCPUFeature(CPUFeature Feature) { uptr HWCap = getauxval(AT_HWCAP); switch (Feature) { case CRC32CPUFeature: return !!(HWCap & HWCAP_CRC32); default: break; } return false; } #else bool testCPUFeature(CPUFeature Feature) { return false; } #endif // defined(__x86_64__) || defined(__i386__) -// readRetry will attempt to read Count bytes from the Fd specified, and if -// interrupted will retry to read additional bytes to reach Count. -static ssize_t readRetry(int Fd, u8 *Buffer, size_t Count) { - ssize_t AmountRead = 0; - while (static_cast<size_t>(AmountRead) < Count) { - ssize_t Result = read(Fd, Buffer + AmountRead, Count - AmountRead); - if (Result > 0) - AmountRead += Result; - else if (!Result) - break; - else if (errno != EINTR) { - AmountRead = -1; - break; - } - } - return AmountRead; -} - -static void fillRandom(u8 *Data, ssize_t Size) { - int Fd = open("/dev/urandom", O_RDONLY); - if (Fd < 0) { - dieWithMessage("ERROR: failed to open /dev/urandom.\n"); - } - bool Success = readRetry(Fd, Data, Size) == Size; - close(Fd); - if (!Success) { - dieWithMessage("ERROR: failed to read enough data from /dev/urandom.\n"); - } -} - -// Seeds the xorshift state with /dev/urandom. -// TODO(kostyak): investigate using getrandom() if available.
-void Xorshift128Plus::initFromURandom() { - fillRandom(reinterpret_cast<u8 *>(State), sizeof(State)); -} - } // namespace __scudo Index: vendor/compiler-rt/dist/lib/scudo/scudo_utils.h =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_utils.h (revision 320960) +++ vendor/compiler-rt/dist/lib/scudo/scudo_utils.h (revision 320961) @@ -1,58 +1,93 @@ //===-- scudo_utils.h -------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Header for scudo_utils.cpp. /// //===----------------------------------------------------------------------===// #ifndef SCUDO_UTILS_H_ #define SCUDO_UTILS_H_ #include <string.h> #include "sanitizer_common/sanitizer_common.h" namespace __scudo { template <typename Dest, typename Source> inline Dest bit_cast(const Source& source) { static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!"); Dest dest; memcpy(&dest, &source, sizeof(dest)); return dest; } void NORETURN dieWithMessage(const char *Format, ...); enum CPUFeature { CRC32CPUFeature = 0, MaxCPUFeature, }; bool testCPUFeature(CPUFeature feature); -// Tiny PRNG based on https://en.wikipedia.org/wiki/Xorshift#xorshift.2B -// The state (128 bits) will be stored in thread local storage. -struct Xorshift128Plus { +INLINE u64 rotl(const u64 X, int K) { + return (X << K) | (X >> (64 - K)); +} + +// XoRoShiRo128+ PRNG (http://xoroshiro.di.unimi.it/). +struct XoRoShiRo128Plus { public: - void initFromURandom(); - u64 getNext() { - u64 x = State[0]; - const u64 y = State[1]; - State[0] = y; - x ^= x << 23; - State[1] = x ^ y ^ (x >> 17) ^ (y >> 26); - return State[1] + y; + void init() { + if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(State), sizeof(State)))) { + // Early processes (eg: init) do not have /dev/urandom yet, but we still + // have to provide them with some degree of entropy. Not having a secure + // seed is not as problematic for them, as they are less likely to be + // the target of heap based vulnerabilities exploitation attempts. + State[0] = NanoTime(); + State[1] = 0; + } + fillCache(); } + u8 getU8() { + if (UNLIKELY(isCacheEmpty())) + fillCache(); + const u8 Result = static_cast<u8>(CachedBytes & 0xff); + CachedBytes >>= 8; + CachedBytesAvailable--; + return Result; + } + u64 getU64() { return next(); } + private: + u8 CachedBytesAvailable; + u64 CachedBytes; u64 State[2]; + u64 next() { + const u64 S0 = State[0]; + u64 S1 = State[1]; + const u64 Result = S0 + S1; + S1 ^= S0; + State[0] = rotl(S0, 55) ^ S1 ^ (S1 << 14); + State[1] = rotl(S1, 36); + return Result; + } + bool isCacheEmpty() { + return CachedBytesAvailable == 0; + } + void fillCache() { + CachedBytes = next(); + CachedBytesAvailable = sizeof(CachedBytes); + } }; + +typedef XoRoShiRo128Plus ScudoPrng; } // namespace __scudo #endif // SCUDO_UTILS_H_ Index: vendor/compiler-rt/dist/lib/tsan/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/lib/tsan/CMakeLists.txt (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/CMakeLists.txt (revision 320961) @@ -1,238 +1,238 @@ # Build for the ThreadSanitizer runtime support library. include_directories(..)
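For reference, the xoroshiro128+ step introduced in scudo_utils.h above is compact enough to restate as a standalone sketch, byte cache included: one 64-bit draw serves eight byte-sized requests. The fixed seed here is purely illustrative; the runtime seeds from GetRandom() with a NanoTime() fallback:

#include <cstdint>

struct TinyPrng {
  // Arbitrary nonzero seed, for illustration only.
  uint64_t State[2] = {0x9E3779B97F4A7C15ull, 0xBF58476D1CE4E5B9ull};
  uint64_t Cached = 0;
  unsigned CachedAvail = 0;

  static uint64_t Rotl(uint64_t X, int K) { return (X << K) | (X >> (64 - K)); }

  uint64_t Next() {  // xoroshiro128+: rotation constants 55, 14, 36
    const uint64_t S0 = State[0];
    uint64_t S1 = State[1];
    const uint64_t Result = S0 + S1;
    S1 ^= S0;
    State[0] = Rotl(S0, 55) ^ S1 ^ (S1 << 14);
    State[1] = Rotl(S1, 36);
    return Result;
  }

  uint8_t NextByte() {
    if (CachedAvail == 0) { Cached = Next(); CachedAvail = 8; }
    const uint8_t B = static_cast<uint8_t>(Cached & 0xff);
    Cached >>= 8;
    CachedAvail--;
    return B;
  }
};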
set(TSAN_CFLAGS ${SANITIZER_COMMON_CFLAGS}) # SANITIZER_COMMON_CFLAGS contains -fPIC, but it's performance-critical for # TSan runtime to be built with -fPIE to reduce the number of register spills. append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE TSAN_CFLAGS) append_rtti_flag(OFF TSAN_CFLAGS) if(COMPILER_RT_TSAN_DEBUG_OUTPUT) # Add extra debug information to TSan runtime. This configuration is rarely # used, but we need to support it so that debug output will not bitrot. list(APPEND TSAN_CFLAGS -DTSAN_COLLECT_STATS=1 -DTSAN_DEBUG_OUTPUT=2) endif() set(TSAN_RTL_CFLAGS ${TSAN_CFLAGS}) append_list_if(COMPILER_RT_HAS_MSSE3_FLAG -msse3 TSAN_RTL_CFLAGS) append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=530 TSAN_RTL_CFLAGS) append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors TSAN_RTL_CFLAGS) set(TSAN_SOURCES rtl/tsan_clock.cc rtl/tsan_debugging.cc rtl/tsan_external.cc rtl/tsan_fd.cc rtl/tsan_flags.cc rtl/tsan_ignoreset.cc rtl/tsan_interceptors.cc rtl/tsan_interface.cc rtl/tsan_interface_ann.cc rtl/tsan_interface_atomic.cc rtl/tsan_interface_java.cc rtl/tsan_malloc_mac.cc rtl/tsan_md5.cc rtl/tsan_mman.cc rtl/tsan_mutex.cc rtl/tsan_mutexset.cc rtl/tsan_preinit.cc rtl/tsan_report.cc rtl/tsan_rtl.cc rtl/tsan_rtl_mutex.cc rtl/tsan_rtl_proc.cc rtl/tsan_rtl_report.cc rtl/tsan_rtl_thread.cc rtl/tsan_stack_trace.cc rtl/tsan_stat.cc rtl/tsan_suppressions.cc rtl/tsan_symbolize.cc rtl/tsan_sync.cc) set(TSAN_CXX_SOURCES rtl/tsan_new_delete.cc) if(APPLE) list(APPEND TSAN_SOURCES rtl/tsan_interceptors_mac.cc rtl/tsan_libdispatch_mac.cc rtl/tsan_platform_mac.cc rtl/tsan_platform_posix.cc) elseif(UNIX) # Assume Linux list(APPEND TSAN_SOURCES rtl/tsan_platform_linux.cc rtl/tsan_platform_posix.cc) endif() set(TSAN_HEADERS rtl/tsan_clock.h rtl/tsan_defs.h rtl/tsan_dense_alloc.h rtl/tsan_fd.h rtl/tsan_flags.h rtl/tsan_flags.inc rtl/tsan_ignoreset.h rtl/tsan_interceptors.h rtl/tsan_interface_ann.h rtl/tsan_interface.h rtl/tsan_interface_inl.h rtl/tsan_interface_java.h rtl/tsan_mman.h rtl/tsan_mutex.h rtl/tsan_mutexset.h rtl/tsan_platform.h rtl/tsan_report.h rtl/tsan_rtl.h rtl/tsan_stack_trace.h rtl/tsan_stat.h rtl/tsan_suppressions.h rtl/tsan_symbolize.h rtl/tsan_sync.h rtl/tsan_trace.h rtl/tsan_update_shadow_word_inl.h rtl/tsan_vector.h) set(TSAN_RUNTIME_LIBRARIES) add_compiler_rt_component(tsan) if(APPLE) - set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S) + set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S rtl/tsan_rtl_aarch64.S) # Xcode will try to compile this file as C ('clang -x c'), and that will fail. if (${CMAKE_GENERATOR} STREQUAL "Xcode") enable_language(ASM) else() # Pass ASM file directly to the C++ compiler. set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES LANGUAGE C) endif() add_weak_symbols("ubsan" WEAK_SYMBOL_LINK_FLAGS) add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS) add_compiler_rt_runtime(clang_rt.tsan SHARED OS ${TSAN_SUPPORTED_OS} ARCHS ${TSAN_SUPPORTED_ARCH} SOURCES ${TSAN_SOURCES} ${TSAN_CXX_SOURCES} ${TSAN_ASM_SOURCES} OBJECT_LIBS RTInterception RTSanitizerCommon RTSanitizerCommonLibc RTUbsan CFLAGS ${TSAN_RTL_CFLAGS} LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS} PARENT_TARGET tsan) add_compiler_rt_object_libraries(RTTsan_dynamic OS ${TSAN_SUPPORTED_OS} ARCHS ${TSAN_SUPPORTED_ARCH} SOURCES ${TSAN_SOURCES} ${TSAN_CXX_SOURCES} ${TSAN_ASM_SOURCES} CFLAGS ${TSAN_RTL_CFLAGS}) # Build and check Go runtime. 
set(BUILDGO_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/go/buildgo.sh) add_custom_target(GotsanRuntimeCheck COMMAND env "CC=${CMAKE_C_COMPILER} ${OSX_SYSROOT_FLAG}" IN_TMPDIR=1 SILENT=1 ${BUILDGO_SCRIPT} DEPENDS tsan ${BUILDGO_SCRIPT} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/go COMMENT "Checking TSan Go runtime..." VERBATIM) else() foreach(arch ${TSAN_SUPPORTED_ARCH}) if(arch STREQUAL "x86_64") set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S) # Pass ASM file directly to the C++ compiler. set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES LANGUAGE C) # Sanity check for Go runtime. set(BUILDGO_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/go/buildgo.sh) add_custom_target(GotsanRuntimeCheck COMMAND env "CC=${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_ARG1}" IN_TMPDIR=1 SILENT=1 ${BUILDGO_SCRIPT} DEPENDS clang_rt.tsan-${arch} ${BUILDGO_SCRIPT} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/go COMMENT "Checking TSan Go runtime..." VERBATIM) elseif(arch STREQUAL "aarch64") set(TSAN_ASM_SOURCES rtl/tsan_rtl_aarch64.S) # Pass ASM file directly to the C++ compiler. set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES LANGUAGE C) elseif(arch MATCHES "powerpc64|powerpc64le") set(TSAN_ASM_SOURCES rtl/tsan_rtl_ppc64.S) # Pass ASM file directly to the C++ compiler. set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES LANGUAGE C) elseif(arch MATCHES "mips64|mips64le") set(TSAN_ASM_SOURCES rtl/tsan_rtl_mips64.S) # Pass ASM file directly to the C++ compiler. set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES LANGUAGE C) else() set(TSAN_ASM_SOURCES) endif() add_compiler_rt_runtime(clang_rt.tsan STATIC ARCHS ${arch} SOURCES ${TSAN_SOURCES} ${TSAN_ASM_SOURCES} $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> $<TARGET_OBJECTS:RTUbsan.${arch}> CFLAGS ${TSAN_RTL_CFLAGS}) add_compiler_rt_runtime(clang_rt.tsan_cxx STATIC ARCHS ${arch} SOURCES ${TSAN_CXX_SOURCES} $<TARGET_OBJECTS:RTUbsan_cxx.${arch}> CFLAGS ${TSAN_RTL_CFLAGS}) list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-${arch} clang_rt.tsan_cxx-${arch}) add_sanitizer_rt_symbols(clang_rt.tsan ARCHS ${arch} EXTRA rtl/tsan.syms.extra) add_sanitizer_rt_symbols(clang_rt.tsan_cxx ARCHS ${arch} EXTRA rtl/tsan.syms.extra) add_dependencies(tsan clang_rt.tsan-${arch} clang_rt.tsan_cxx-${arch} clang_rt.tsan-${arch}-symbols clang_rt.tsan_cxx-${arch}-symbols) endforeach() endif() # Make sure that non-platform-specific files don't include any system headers. # FreeBSD does not install a number of Clang-provided headers for the compiler # in the base system due to incompatibilities between FreeBSD's and Clang's # versions. As a workaround do not use --sysroot=. on FreeBSD until this is # addressed. if(COMPILER_RT_HAS_SYSROOT_FLAG AND NOT CMAKE_SYSTEM_NAME MATCHES "FreeBSD") file(GLOB _tsan_generic_sources rtl/tsan*) file(GLOB _tsan_platform_sources rtl/tsan*posix* rtl/tsan*mac* rtl/tsan*linux*) list(REMOVE_ITEM _tsan_generic_sources ${_tsan_platform_sources}) set_source_files_properties(${_tsan_generic_sources} PROPERTIES COMPILE_FLAGS "--sysroot=.") endif() # Build libcxx instrumented with TSan.
if(COMPILER_RT_HAS_LIBCXX_SOURCES AND COMPILER_RT_TEST_COMPILER_ID STREQUAL "Clang") set(libcxx_tsan_deps) foreach(arch ${TSAN_SUPPORTED_ARCH}) get_target_flags_for_arch(${arch} TARGET_CFLAGS) set(LIBCXX_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/libcxx_tsan_${arch}) add_custom_libcxx(libcxx_tsan_${arch} ${LIBCXX_PREFIX} DEPS ${TSAN_RUNTIME_LIBRARIES} CFLAGS ${TARGET_CFLAGS} -fsanitize=thread) list(APPEND libcxx_tsan_deps libcxx_tsan_${arch}) endforeach() add_custom_target(libcxx_tsan DEPENDS ${libcxx_tsan_deps}) endif() if(COMPILER_RT_INCLUDE_TESTS) add_subdirectory(tests) endif() Index: vendor/compiler-rt/dist/lib/tsan/check_analyze.sh =================================================================== --- vendor/compiler-rt/dist/lib/tsan/check_analyze.sh (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/check_analyze.sh (revision 320961) @@ -1,48 +1,48 @@ #!/bin/bash # # Script that checks that critical functions in TSan runtime have correct number # of push/pop/rsp instructions to verify that runtime is efficient enough. set -u if [[ "$#" != 1 ]]; then echo "Usage: $0 /path/to/binary/built/with/tsan" exit 1 fi SCRIPTDIR=$(dirname $0) RES=$(${SCRIPTDIR}/analyze_libtsan.sh $1) PrintRes() { printf "%s\n" "$RES" } PrintRes check() { res=$(PrintRes | egrep "$1 .* $2 $3; ") if [ "$res" == "" ]; then echo FAILED $1 must contain $2 $3 exit 1 fi } for f in write1 write2 write4 write8; do check $f rsp 1 check $f push 2 check $f pop 12 done for f in read1 read2 read4 read8; do check $f rsp 1 - check $f push 4 - check $f pop 4 + check $f push 3 + check $f pop 3 done for f in func_entry func_exit; do check $f rsp 0 check $f push 0 check $f pop 0 check $f call 1 # TraceSwitch() done echo LGTM Index: vendor/compiler-rt/dist/lib/tsan/dd/dd_interceptors.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/dd/dd_interceptors.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/dd/dd_interceptors.cc (revision 320961) @@ -1,330 +1,329 @@ //===-- dd_interceptors.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. 
// //===----------------------------------------------------------------------===// #include "dd_rtl.h" #include "interception/interception.h" #include "sanitizer_common/sanitizer_procmaps.h" #include <pthread.h> #include <stdlib.h> using namespace __dsan; __attribute__((tls_model("initial-exec"))) static __thread Thread *thr; __attribute__((tls_model("initial-exec"))) static __thread volatile int initing; static bool inited; static uptr g_data_start; static uptr g_data_end; static bool InitThread() { if (initing) return false; if (thr != 0) return true; initing = true; if (!inited) { inited = true; Initialize(); } thr = (Thread*)InternalAlloc(sizeof(*thr)); internal_memset(thr, 0, sizeof(*thr)); ThreadInit(thr); initing = false; return true; } INTERCEPTOR(int, pthread_mutex_destroy, pthread_mutex_t *m) { InitThread(); MutexDestroy(thr, (uptr)m); return REAL(pthread_mutex_destroy)(m); } INTERCEPTOR(int, pthread_mutex_lock, pthread_mutex_t *m) { InitThread(); MutexBeforeLock(thr, (uptr)m, true); int res = REAL(pthread_mutex_lock)(m); MutexAfterLock(thr, (uptr)m, true, false); return res; } INTERCEPTOR(int, pthread_mutex_trylock, pthread_mutex_t *m) { InitThread(); int res = REAL(pthread_mutex_trylock)(m); if (res == 0) MutexAfterLock(thr, (uptr)m, true, true); return res; } INTERCEPTOR(int, pthread_mutex_unlock, pthread_mutex_t *m) { InitThread(); MutexBeforeUnlock(thr, (uptr)m, true); return REAL(pthread_mutex_unlock)(m); } INTERCEPTOR(int, pthread_spin_destroy, pthread_spinlock_t *m) { InitThread(); int res = REAL(pthread_spin_destroy)(m); MutexDestroy(thr, (uptr)m); return res; } INTERCEPTOR(int, pthread_spin_lock, pthread_spinlock_t *m) { InitThread(); MutexBeforeLock(thr, (uptr)m, true); int res = REAL(pthread_spin_lock)(m); MutexAfterLock(thr, (uptr)m, true, false); return res; } INTERCEPTOR(int, pthread_spin_trylock, pthread_spinlock_t *m) { InitThread(); int res = REAL(pthread_spin_trylock)(m); if (res == 0) MutexAfterLock(thr, (uptr)m, true, true); return res; } INTERCEPTOR(int, pthread_spin_unlock, pthread_spinlock_t *m) { InitThread(); MutexBeforeUnlock(thr, (uptr)m, true); return REAL(pthread_spin_unlock)(m); } INTERCEPTOR(int, pthread_rwlock_destroy, pthread_rwlock_t *m) { InitThread(); MutexDestroy(thr, (uptr)m); return REAL(pthread_rwlock_destroy)(m); } INTERCEPTOR(int, pthread_rwlock_rdlock, pthread_rwlock_t *m) { InitThread(); MutexBeforeLock(thr, (uptr)m, false); int res = REAL(pthread_rwlock_rdlock)(m); MutexAfterLock(thr, (uptr)m, false, false); return res; } INTERCEPTOR(int, pthread_rwlock_tryrdlock, pthread_rwlock_t *m) { InitThread(); int res = REAL(pthread_rwlock_tryrdlock)(m); if (res == 0) MutexAfterLock(thr, (uptr)m, false, true); return res; } INTERCEPTOR(int, pthread_rwlock_timedrdlock, pthread_rwlock_t *m, const timespec *abstime) { InitThread(); int res = REAL(pthread_rwlock_timedrdlock)(m, abstime); if (res == 0) MutexAfterLock(thr, (uptr)m, false, true); return res; } INTERCEPTOR(int, pthread_rwlock_wrlock, pthread_rwlock_t *m) { InitThread(); MutexBeforeLock(thr, (uptr)m, true); int res = REAL(pthread_rwlock_wrlock)(m); MutexAfterLock(thr, (uptr)m, true, false); return res; } INTERCEPTOR(int, pthread_rwlock_trywrlock, pthread_rwlock_t *m) { InitThread(); int res = REAL(pthread_rwlock_trywrlock)(m); if (res == 0) MutexAfterLock(thr, (uptr)m, true, true); return res; } INTERCEPTOR(int, pthread_rwlock_timedwrlock, pthread_rwlock_t *m, const timespec *abstime) { InitThread(); int res = REAL(pthread_rwlock_timedwrlock)(m, abstime); if (res == 0) MutexAfterLock(thr, (uptr)m, true,
true); return res; } INTERCEPTOR(int, pthread_rwlock_unlock, pthread_rwlock_t *m) { InitThread(); MutexBeforeUnlock(thr, (uptr)m, true); // note: not necessary write unlock return REAL(pthread_rwlock_unlock)(m); } static pthread_cond_t *init_cond(pthread_cond_t *c, bool force = false) { atomic_uintptr_t *p = (atomic_uintptr_t*)c; uptr cond = atomic_load(p, memory_order_acquire); if (!force && cond != 0) return (pthread_cond_t*)cond; void *newcond = malloc(sizeof(pthread_cond_t)); internal_memset(newcond, 0, sizeof(pthread_cond_t)); if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond, memory_order_acq_rel)) return (pthread_cond_t*)newcond; free(newcond); return (pthread_cond_t*)cond; } INTERCEPTOR(int, pthread_cond_init, pthread_cond_t *c, const pthread_condattr_t *a) { InitThread(); pthread_cond_t *cond = init_cond(c, true); return REAL(pthread_cond_init)(cond, a); } INTERCEPTOR(int, pthread_cond_wait, pthread_cond_t *c, pthread_mutex_t *m) { InitThread(); pthread_cond_t *cond = init_cond(c); MutexBeforeUnlock(thr, (uptr)m, true); MutexBeforeLock(thr, (uptr)m, true); int res = REAL(pthread_cond_wait)(cond, m); MutexAfterLock(thr, (uptr)m, true, false); return res; } INTERCEPTOR(int, pthread_cond_timedwait, pthread_cond_t *c, pthread_mutex_t *m, const timespec *abstime) { InitThread(); pthread_cond_t *cond = init_cond(c); MutexBeforeUnlock(thr, (uptr)m, true); MutexBeforeLock(thr, (uptr)m, true); int res = REAL(pthread_cond_timedwait)(cond, m, abstime); MutexAfterLock(thr, (uptr)m, true, false); return res; } INTERCEPTOR(int, pthread_cond_signal, pthread_cond_t *c) { InitThread(); pthread_cond_t *cond = init_cond(c); return REAL(pthread_cond_signal)(cond); } INTERCEPTOR(int, pthread_cond_broadcast, pthread_cond_t *c) { InitThread(); pthread_cond_t *cond = init_cond(c); return REAL(pthread_cond_broadcast)(cond); } INTERCEPTOR(int, pthread_cond_destroy, pthread_cond_t *c) { InitThread(); pthread_cond_t *cond = init_cond(c); int res = REAL(pthread_cond_destroy)(cond); free(cond); atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed); return res; } // for symbolizer INTERCEPTOR(char*, realpath, const char *path, char *resolved_path) { InitThread(); return REAL(realpath)(path, resolved_path); } INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) { InitThread(); return REAL(read)(fd, ptr, count); } INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) { InitThread(); return REAL(pread)(fd, ptr, count, offset); } extern "C" { void __dsan_before_mutex_lock(uptr m, int writelock) { if (!InitThread()) return; MutexBeforeLock(thr, m, writelock); } void __dsan_after_mutex_lock(uptr m, int writelock, int trylock) { if (!InitThread()) return; MutexAfterLock(thr, m, writelock, trylock); } void __dsan_before_mutex_unlock(uptr m, int writelock) { if (!InitThread()) return; MutexBeforeUnlock(thr, m, writelock); } void __dsan_mutex_destroy(uptr m) { if (!InitThread()) return; // if (m >= g_data_start && m < g_data_end) // return; MutexDestroy(thr, m); } } // extern "C" namespace __dsan { static void InitDataSeg() { MemoryMappingLayout proc_maps(true); - uptr start, end, offset; char name[128]; + MemoryMappedSegment segment(name, ARRAY_SIZE(name)); bool prev_is_data = false; - while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), - /*protection*/ 0)) { - bool is_data = offset != 0 && name[0] != 0; + while (proc_maps.Next(&segment)) { + bool is_data = segment.offset != 0 && segment.filename[0] != 0; // BSS may get merged with [heap] in /proc/self/maps. 
This is not very // reliable. - bool is_bss = offset == 0 && - (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data; - if (g_data_start == 0 && is_data) - g_data_start = start; - if (is_bss) - g_data_end = end; + bool is_bss = segment.offset == 0 && + (segment.filename[0] == 0 || + internal_strcmp(segment.filename, "[heap]") == 0) && + prev_is_data; + if (g_data_start == 0 && is_data) g_data_start = segment.start; + if (is_bss) g_data_end = segment.end; prev_is_data = is_data; } VPrintf(1, "guessed data_start=%p data_end=%p\n", g_data_start, g_data_end); CHECK_LT(g_data_start, g_data_end); CHECK_GE((uptr)&g_data_start, g_data_start); CHECK_LT((uptr)&g_data_start, g_data_end); } void InitializeInterceptors() { INTERCEPT_FUNCTION(pthread_mutex_destroy); INTERCEPT_FUNCTION(pthread_mutex_lock); INTERCEPT_FUNCTION(pthread_mutex_trylock); INTERCEPT_FUNCTION(pthread_mutex_unlock); INTERCEPT_FUNCTION(pthread_spin_destroy); INTERCEPT_FUNCTION(pthread_spin_lock); INTERCEPT_FUNCTION(pthread_spin_trylock); INTERCEPT_FUNCTION(pthread_spin_unlock); INTERCEPT_FUNCTION(pthread_rwlock_destroy); INTERCEPT_FUNCTION(pthread_rwlock_rdlock); INTERCEPT_FUNCTION(pthread_rwlock_tryrdlock); INTERCEPT_FUNCTION(pthread_rwlock_timedrdlock); INTERCEPT_FUNCTION(pthread_rwlock_wrlock); INTERCEPT_FUNCTION(pthread_rwlock_trywrlock); INTERCEPT_FUNCTION(pthread_rwlock_timedwrlock); INTERCEPT_FUNCTION(pthread_rwlock_unlock); INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2"); // for symbolizer INTERCEPT_FUNCTION(realpath); INTERCEPT_FUNCTION(read); INTERCEPT_FUNCTION(pread); InitDataSeg(); } } // namespace __dsan Index: vendor/compiler-rt/dist/lib/tsan/go/buildgo.sh =================================================================== --- vendor/compiler-rt/dist/lib/tsan/go/buildgo.sh (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/go/buildgo.sh (revision 320961) @@ -1,136 +1,136 @@ #!/bin/sh set -e SRCS=" tsan_go.cc ../rtl/tsan_clock.cc ../rtl/tsan_external.cc ../rtl/tsan_flags.cc ../rtl/tsan_interface_atomic.cc ../rtl/tsan_md5.cc ../rtl/tsan_mutex.cc ../rtl/tsan_report.cc ../rtl/tsan_rtl.cc ../rtl/tsan_rtl_mutex.cc ../rtl/tsan_rtl_report.cc ../rtl/tsan_rtl_thread.cc ../rtl/tsan_rtl_proc.cc ../rtl/tsan_stack_trace.cc ../rtl/tsan_stat.cc ../rtl/tsan_suppressions.cc ../rtl/tsan_sync.cc ../../sanitizer_common/sanitizer_allocator.cc ../../sanitizer_common/sanitizer_common.cc ../../sanitizer_common/sanitizer_common_libcdep.cc ../../sanitizer_common/sanitizer_deadlock_detector2.cc ../../sanitizer_common/sanitizer_flag_parser.cc ../../sanitizer_common/sanitizer_flags.cc ../../sanitizer_common/sanitizer_libc.cc ../../sanitizer_common/sanitizer_persistent_allocator.cc ../../sanitizer_common/sanitizer_printf.cc ../../sanitizer_common/sanitizer_suppressions.cc ../../sanitizer_common/sanitizer_thread_registry.cc ../../sanitizer_common/sanitizer_stackdepot.cc ../../sanitizer_common/sanitizer_stacktrace.cc ../../sanitizer_common/sanitizer_symbolizer.cc ../../sanitizer_common/sanitizer_termination.cc " if [ "`uname -a | grep Linux`" != "" ]; then SUFFIX="linux_amd64" OSCFLAGS="-fPIC -ffreestanding -Wno-maybe-uninitialized -Wno-unused-const-variable -Werror -Wno-unknown-warning-option" OSLDFLAGS="-lpthread -fPIC 
-fpie" SRCS=" $SRCS ../rtl/tsan_platform_linux.cc ../../sanitizer_common/sanitizer_posix.cc ../../sanitizer_common/sanitizer_posix_libcdep.cc ../../sanitizer_common/sanitizer_procmaps_common.cc ../../sanitizer_common/sanitizer_procmaps_linux.cc ../../sanitizer_common/sanitizer_linux.cc ../../sanitizer_common/sanitizer_linux_libcdep.cc ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc " elif [ "`uname -a | grep FreeBSD`" != "" ]; then SUFFIX="freebsd_amd64" OSCFLAGS="-fno-strict-aliasing -fPIC -Werror" OSLDFLAGS="-lpthread -fPIC -fpie" SRCS=" $SRCS ../rtl/tsan_platform_linux.cc ../../sanitizer_common/sanitizer_posix.cc ../../sanitizer_common/sanitizer_posix_libcdep.cc ../../sanitizer_common/sanitizer_procmaps_common.cc ../../sanitizer_common/sanitizer_procmaps_freebsd.cc ../../sanitizer_common/sanitizer_linux.cc ../../sanitizer_common/sanitizer_linux_libcdep.cc ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc " elif [ "`uname -a | grep Darwin`" != "" ]; then SUFFIX="darwin_amd64" - OSCFLAGS="-fPIC -Wno-unused-const-variable -Wno-unknown-warning-option -mmacosx-version-min=10.7" + OSCFLAGS="-fPIC -Wno-unused-const-variable -Wno-unknown-warning-option -isysroot $(xcodebuild -version -sdk macosx Path) -mmacosx-version-min=10.7" OSLDFLAGS="-lpthread -fPIC -fpie -mmacosx-version-min=10.7" SRCS=" $SRCS ../rtl/tsan_platform_mac.cc ../../sanitizer_common/sanitizer_mac.cc ../../sanitizer_common/sanitizer_posix.cc ../../sanitizer_common/sanitizer_posix_libcdep.cc ../../sanitizer_common/sanitizer_procmaps_mac.cc " elif [ "`uname -a | grep MINGW`" != "" ]; then SUFFIX="windows_amd64" OSCFLAGS="-Wno-error=attributes -Wno-attributes -Wno-unused-const-variable -Wno-unknown-warning-option" OSLDFLAGS="" SRCS=" $SRCS ../rtl/tsan_platform_windows.cc ../../sanitizer_common/sanitizer_win.cc " else echo Unknown platform exit 1 fi CC=${CC:-gcc} IN_TMPDIR=${IN_TMPDIR:-0} SILENT=${SILENT:-0} if [ $IN_TMPDIR != "0" ]; then DIR=$(mktemp -qd /tmp/gotsan.XXXXXXXXXX) cleanup() { rm -rf $DIR } trap cleanup EXIT else DIR=. fi SRCS="$SRCS $ADD_SRCS" rm -f $DIR/gotsan.cc for F in $SRCS; do cat $F >> $DIR/gotsan.cc done FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS" if [ "$DEBUG" = "" ]; then FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -msse3 -fomit-frame-pointer" else FLAGS="$FLAGS -DSANITIZER_DEBUG=1 -g" fi if [ "$SILENT" != "1" ]; then echo $CC gotsan.cc -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS fi $CC $DIR/gotsan.cc -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS $CC $OSCFLAGS test.c $DIR/race_$SUFFIX.syso -m64 -g -o $DIR/test $OSLDFLAGS export GORACE="exitcode=0 atexit_sleep_ms=0" if [ "$SILENT" != "1" ]; then $DIR/test else $DIR/test 2>/dev/null fi Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_clock.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_clock.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_clock.cc (revision 320961) @@ -1,427 +1,426 @@ //===-- tsan_clock.cc -----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
// //===----------------------------------------------------------------------===// #include "tsan_clock.h" #include "tsan_rtl.h" #include "sanitizer_common/sanitizer_placement_new.h" // SyncClock and ThreadClock implement vector clocks for sync variables // (mutexes, atomic variables, file descriptors, etc.) and threads, respectively. // ThreadClock contains a fixed-size vector clock for the maximum number of threads. // SyncClock contains a growable vector clock for the currently necessary number of // threads. // Together they implement a very simple model of operations, namely: // // void ThreadClock::acquire(const SyncClock *src) { // for (int i = 0; i < kMaxThreads; i++) // clock[i] = max(clock[i], src->clock[i]); // } // // void ThreadClock::release(SyncClock *dst) const { // for (int i = 0; i < kMaxThreads; i++) // dst->clock[i] = max(dst->clock[i], clock[i]); // } // // void ThreadClock::ReleaseStore(SyncClock *dst) const { // for (int i = 0; i < kMaxThreads; i++) // dst->clock[i] = clock[i]; // } // // void ThreadClock::acq_rel(SyncClock *dst) { // acquire(dst); // release(dst); // } // // Conformance to this model is extensively verified in tsan_clock_test.cc. // However, the implementation is significantly more complex. The complexity // allows implementing important classes of use cases in O(1) instead of O(N). // // The use cases are: // 1. Singleton/once atomic that has a single release-store operation followed // by zillions of acquire-loads (the acquire-load is O(1)). // 2. Thread-local mutex (both lock and unlock can be O(1)). // 3. Leaf mutex (unlock is O(1)). // 4. A mutex shared by 2 threads (both lock and unlock can be O(1)). // 5. An atomic with a single writer (writes can be O(1)). // The implementation dynamically adapts to the workload. So if an atomic is in // a read-only phase, these reads will be O(1); if it later switches to a read/write // phase, the implementation will correctly handle that by switching to O(N). // // Thread-safety note: all const operations on SyncClock's are conducted under // a shared lock; all non-const operations on SyncClock's are conducted under // an exclusive lock; ThreadClock's are private to respective threads and so // do not need any protection. // // Description of ThreadClock state: // clk_ - fixed size vector clock. // nclk_ - effective size of the vector clock (the rest is zeros). // tid_ - index of the thread associated with the clock ("current thread"). // last_acquire_ - current thread time when it acquired something from // other threads. // // Description of SyncClock state: // clk_ - variable size vector clock, the low kClkBits hold the timestamp, // the remaining bits hold the "acquired" flag (the actual value is the thread's // reused counter); // if acquired == thr->reused_, then the respective thread has already // acquired this clock (except possibly dirty_tids_). // dirty_tids_ - holds up to two indices in the vector clock that other threads // need to acquire regardless of the "acquired" flag value; // release_store_tid_ - denotes that the clock state is the result of a // release-store operation by the thread with release_store_tid_ index. // release_store_reused_ - reuse count of release_store_tid_. // We don't have ThreadState in these methods, so this is an ugly hack that // works only in C++.
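The reference model quoted in the comment above can also be written as a tiny self-contained C++ sketch, which may help when reading the optimized code that follows. The names here (model::Clock, kMaxThreads = 4) are illustrative only and do not exist in the runtime; the real implementation replaces these O(N) loops with the O(1) fast paths described in the comment.

#include <algorithm>
#include <cstdint>

namespace model {
constexpr int kMaxThreads = 4;  // illustrative; the runtime supports far more

struct Clock {
  uint64_t clk[kMaxThreads] = {};  // one logical-time slot per thread
};

// acquire: the reading thread pulls each component up to the sync clock.
inline void Acquire(Clock *thr, const Clock &src) {
  for (int i = 0; i < kMaxThreads; i++)
    thr->clk[i] = std::max(thr->clk[i], src.clk[i]);
}

// release: the writing thread pushes each component into the sync clock.
inline void Release(const Clock &thr, Clock *dst) {
  for (int i = 0; i < kMaxThreads; i++)
    dst->clk[i] = std::max(dst->clk[i], thr.clk[i]);
}

// release-store: overwrite the sync clock with the thread clock.
inline void ReleaseStore(const Clock &thr, Clock *dst) {
  for (int i = 0; i < kMaxThreads; i++)
    dst->clk[i] = thr.clk[i];
}
}  // namespace model

In the real SyncClock each slot additionally carries the "reused" bits described above, which is what lets acquire() and release() short-circuit to the O(1) paths.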
#if !SANITIZER_GO # define CPP_STAT_INC(typ) StatInc(cur_thread(), typ) #else # define CPP_STAT_INC(typ) (void)0 #endif namespace __tsan { ThreadClock::ThreadClock(unsigned tid, unsigned reused) : tid_(tid) , reused_(reused + 1) { // 0 has special meaning CHECK_LT(tid, kMaxTidInClock); CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits); nclk_ = tid_ + 1; last_acquire_ = 0; internal_memset(clk_, 0, sizeof(clk_)); clk_[tid_].reused = reused_; } +void ThreadClock::ResetCached(ClockCache *c) { +} + void ThreadClock::acquire(ClockCache *c, const SyncClock *src) { DCHECK_LE(nclk_, kMaxTid); DCHECK_LE(src->size_, kMaxTid); CPP_STAT_INC(StatClockAcquire); // Check if it's empty -> no need to do anything. const uptr nclk = src->size_; if (nclk == 0) { CPP_STAT_INC(StatClockAcquireEmpty); return; } // Check if we've already acquired src after the last release operation on src bool acquired = false; if (nclk > tid_) { - CPP_STAT_INC(StatClockAcquireLarge); if (src->elem(tid_).reused == reused_) { - CPP_STAT_INC(StatClockAcquireRepeat); for (unsigned i = 0; i < kDirtyTids; i++) { unsigned tid = src->dirty_tids_[i]; if (tid != kInvalidTid) { u64 epoch = src->elem(tid).epoch; if (clk_[tid].epoch < epoch) { clk_[tid].epoch = epoch; acquired = true; } } } if (acquired) { CPP_STAT_INC(StatClockAcquiredSomething); last_acquire_ = clk_[tid_].epoch; } return; } } // O(N) acquire. CPP_STAT_INC(StatClockAcquireFull); nclk_ = max(nclk_, nclk); for (uptr i = 0; i < nclk; i++) { u64 epoch = src->elem(i).epoch; if (clk_[i].epoch < epoch) { clk_[i].epoch = epoch; acquired = true; } } // Remember that this thread has acquired this clock. if (nclk > tid_) src->elem(tid_).reused = reused_; if (acquired) { CPP_STAT_INC(StatClockAcquiredSomething); last_acquire_ = clk_[tid_].epoch; } } void ThreadClock::release(ClockCache *c, SyncClock *dst) const { DCHECK_LE(nclk_, kMaxTid); DCHECK_LE(dst->size_, kMaxTid); if (dst->size_ == 0) { // ReleaseStore will correctly set release_store_tid_, // which can be important for future operations. ReleaseStore(c, dst); return; } CPP_STAT_INC(StatClockRelease); // Check if we need to resize dst. if (dst->size_ < nclk_) dst->Resize(c, nclk_); // Check if we had not acquired anything from other threads // since the last release on dst. If so, we need to update // only dst->elem(tid_). if (dst->elem(tid_).epoch > last_acquire_) { UpdateCurrentThread(dst); if (dst->release_store_tid_ != tid_ || dst->release_store_reused_ != reused_) dst->release_store_tid_ = kInvalidTid; return; } // O(N) release. CPP_STAT_INC(StatClockReleaseFull); // First, remember whether we've acquired dst. bool acquired = IsAlreadyAcquired(dst); if (acquired) CPP_STAT_INC(StatClockReleaseAcquired); // Update dst->clk_. for (uptr i = 0; i < nclk_; i++) { ClockElem &ce = dst->elem(i); ce.epoch = max(ce.epoch, clk_[i].epoch); ce.reused = 0; } // Clear 'acquired' flag in the remaining elements. if (nclk_ < dst->size_) CPP_STAT_INC(StatClockReleaseClearTail); for (uptr i = nclk_; i < dst->size_; i++) dst->elem(i).reused = 0; for (unsigned i = 0; i < kDirtyTids; i++) dst->dirty_tids_[i] = kInvalidTid; dst->release_store_tid_ = kInvalidTid; dst->release_store_reused_ = 0; // If we've acquired dst, remember this fact, // so that we don't need to acquire it on next acquire. if (acquired) dst->elem(tid_).reused = reused_; } void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const { DCHECK_LE(nclk_, kMaxTid); DCHECK_LE(dst->size_, kMaxTid); CPP_STAT_INC(StatClockStore); // Check if we need to resize dst. 
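// The release_store_tid_ check below is the O(1) fast path of ReleaseStore:
// if this thread also performed the previous release-store into dst (tid and
// reuse count match) and has not acquired anything from other threads since
// then (its epoch recorded in dst is newer than last_acquire_), only the
// current thread's element can have changed, so UpdateCurrentThread suffices
// and the O(N) copy further below is skipped.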
if (dst->size_ < nclk_) dst->Resize(c, nclk_); if (dst->release_store_tid_ == tid_ && dst->release_store_reused_ == reused_ && dst->elem(tid_).epoch > last_acquire_) { CPP_STAT_INC(StatClockStoreFast); UpdateCurrentThread(dst); return; } // O(N) release-store. CPP_STAT_INC(StatClockStoreFull); for (uptr i = 0; i < nclk_; i++) { ClockElem &ce = dst->elem(i); ce.epoch = clk_[i].epoch; ce.reused = 0; } // Clear the tail of dst->clk_. if (nclk_ < dst->size_) { for (uptr i = nclk_; i < dst->size_; i++) { ClockElem &ce = dst->elem(i); ce.epoch = 0; ce.reused = 0; } CPP_STAT_INC(StatClockStoreTail); } for (unsigned i = 0; i < kDirtyTids; i++) dst->dirty_tids_[i] = kInvalidTid; dst->release_store_tid_ = tid_; dst->release_store_reused_ = reused_; // Remember that we don't need to acquire it in the future. dst->elem(tid_).reused = reused_; } void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) { CPP_STAT_INC(StatClockAcquireRelease); acquire(c, dst); ReleaseStore(c, dst); } // Updates only a single element related to the current thread in dst->clk_. void ThreadClock::UpdateCurrentThread(SyncClock *dst) const { // Update the thread's time, but preserve the 'acquired' flag. dst->elem(tid_).epoch = clk_[tid_].epoch; for (unsigned i = 0; i < kDirtyTids; i++) { if (dst->dirty_tids_[i] == tid_) { - CPP_STAT_INC(StatClockReleaseFast1); + CPP_STAT_INC(StatClockReleaseFast); return; } if (dst->dirty_tids_[i] == kInvalidTid) { - CPP_STAT_INC(StatClockReleaseFast2); + CPP_STAT_INC(StatClockReleaseFast); dst->dirty_tids_[i] = tid_; return; } } // Reset all 'acquired' flags, O(N). CPP_STAT_INC(StatClockReleaseSlow); for (uptr i = 0; i < dst->size_; i++) dst->elem(i).reused = 0; for (unsigned i = 0; i < kDirtyTids; i++) dst->dirty_tids_[i] = kInvalidTid; } // Checks whether the current thread has already acquired src. bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const { if (src->elem(tid_).reused != reused_) return false; for (unsigned i = 0; i < kDirtyTids; i++) { unsigned tid = src->dirty_tids_[i]; if (tid != kInvalidTid) { u64 epoch = src->elem(tid).epoch; if (clk_[tid].epoch < src->elem(tid).epoch) return false; } } return true; } -void SyncClock::Resize(ClockCache *c, uptr nclk) { - CPP_STAT_INC(StatClockReleaseResize); - if (RoundUpTo(nclk, ClockBlock::kClockCount) <= - RoundUpTo(size_, ClockBlock::kClockCount)) { - // Growing within the same block. - // Memory is already allocated, just increase the size. - size_ = nclk; - return; - } - if (nclk <= ClockBlock::kClockCount) { - // Grow from 0 to one-level table. - CHECK_EQ(size_, 0); - CHECK_EQ(tab_, 0); - CHECK_EQ(tab_idx_, 0); - size_ = nclk; - tab_idx_ = ctx->clock_alloc.Alloc(c); - tab_ = ctx->clock_alloc.Map(tab_idx_); - internal_memset(tab_, 0, sizeof(*tab_)); - return; - } - // Growing two-level table. - if (size_ == 0) { - // Allocate first level table. - tab_idx_ = ctx->clock_alloc.Alloc(c); - tab_ = ctx->clock_alloc.Map(tab_idx_); - internal_memset(tab_, 0, sizeof(*tab_)); - } else if (size_ <= ClockBlock::kClockCount) { - // Transform one-level table to two-level table. - u32 old = tab_idx_; - tab_idx_ = ctx->clock_alloc.Alloc(c); - tab_ = ctx->clock_alloc.Map(tab_idx_); - internal_memset(tab_, 0, sizeof(*tab_)); - tab_->table[0] = old; - } - // At this point we have first level table allocated. - // Add second level tables as necessary.
- for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount); - i < nclk; i += ClockBlock::kClockCount) { - u32 idx = ctx->clock_alloc.Alloc(c); - ClockBlock *cb = ctx->clock_alloc.Map(idx); - internal_memset(cb, 0, sizeof(*cb)); - CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0); - tab_->table[i/ClockBlock::kClockCount] = idx; - } - size_ = nclk; -} - // Sets a single element in the vector clock. // This function is called only from weird places like AcquireGlobal. -void ThreadClock::set(unsigned tid, u64 v) { +void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) { DCHECK_LT(tid, kMaxTid); DCHECK_GE(v, clk_[tid].epoch); clk_[tid].epoch = v; if (nclk_ <= tid) nclk_ = tid + 1; last_acquire_ = clk_[tid_].epoch; } void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) { printf("clock=["); for (uptr i = 0; i < nclk_; i++) printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch); printf("] reused=["); for (uptr i = 0; i < nclk_; i++) printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused); printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_); } -SyncClock::SyncClock() - : release_store_tid_(kInvalidTid) - , release_store_reused_() - , tab_() - , tab_idx_() - , size_() { - for (uptr i = 0; i < kDirtyTids; i++) - dirty_tids_[i] = kInvalidTid; +SyncClock::SyncClock() { + ResetImpl(); } SyncClock::~SyncClock() { // Reset must be called before dtor. CHECK_EQ(size_, 0); CHECK_EQ(tab_, 0); CHECK_EQ(tab_idx_, 0); } void SyncClock::Reset(ClockCache *c) { if (size_ == 0) { // nothing } else if (size_ <= ClockBlock::kClockCount) { // One-level table. ctx->clock_alloc.Free(c, tab_idx_); } else { // Two-level table. for (uptr i = 0; i < size_; i += ClockBlock::kClockCount) ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]); ctx->clock_alloc.Free(c, tab_idx_); } + ResetImpl(); +} + +void SyncClock::ResetImpl() { tab_ = 0; tab_idx_ = 0; size_ = 0; release_store_tid_ = kInvalidTid; release_store_reused_ = 0; for (uptr i = 0; i < kDirtyTids; i++) dirty_tids_[i] = kInvalidTid; +} + +void SyncClock::Resize(ClockCache *c, uptr nclk) { + CPP_STAT_INC(StatClockReleaseResize); + if (RoundUpTo(nclk, ClockBlock::kClockCount) <= + RoundUpTo(size_, ClockBlock::kClockCount)) { + // Growing within the same block. + // Memory is already allocated, just increase the size. + size_ = nclk; + return; + } + if (nclk <= ClockBlock::kClockCount) { + // Grow from 0 to one-level table. + CHECK_EQ(size_, 0); + CHECK_EQ(tab_, 0); + CHECK_EQ(tab_idx_, 0); + size_ = nclk; + tab_idx_ = ctx->clock_alloc.Alloc(c); + tab_ = ctx->clock_alloc.Map(tab_idx_); + internal_memset(tab_, 0, sizeof(*tab_)); + return; + } + // Growing two-level table. + if (size_ == 0) { + // Allocate first level table. + tab_idx_ = ctx->clock_alloc.Alloc(c); + tab_ = ctx->clock_alloc.Map(tab_idx_); + internal_memset(tab_, 0, sizeof(*tab_)); + } else if (size_ <= ClockBlock::kClockCount) { + // Transform one-level table to two-level table. + u32 old = tab_idx_; + tab_idx_ = ctx->clock_alloc.Alloc(c); + tab_ = ctx->clock_alloc.Map(tab_idx_); + internal_memset(tab_, 0, sizeof(*tab_)); + tab_->table[0] = old; + } + // At this point we have first level table allocated. + // Add second level tables as necessary. 
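+ // (Each second-level table entry is one 512-byte ClockBlock holding 64
+ // ClockElem slots; the first level stores u32 DenseSlabAlloc indices
+ // rather than raw pointers, so entries stay 4 bytes wide.)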
+ for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount); + i < nclk; i += ClockBlock::kClockCount) { + u32 idx = ctx->clock_alloc.Alloc(c); + ClockBlock *cb = ctx->clock_alloc.Map(idx); + internal_memset(cb, 0, sizeof(*cb)); + CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0); + tab_->table[i/ClockBlock::kClockCount] = idx; + } + size_ = nclk; } ClockElem &SyncClock::elem(unsigned tid) const { DCHECK_LT(tid, size_); if (size_ <= ClockBlock::kClockCount) return tab_->clock[tid]; u32 idx = tab_->table[tid / ClockBlock::kClockCount]; ClockBlock *cb = ctx->clock_alloc.Map(idx); return cb->clock[tid % ClockBlock::kClockCount]; } void SyncClock::DebugDump(int(*printf)(const char *s, ...)) { printf("clock=["); for (uptr i = 0; i < size_; i++) printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch); printf("] reused=["); for (uptr i = 0; i < size_; i++) printf("%s%llu", i == 0 ? "" : ",", elem(i).reused); printf("] release_store_tid=%d/%d dirty_tids=%d/%d", release_store_tid_, release_store_reused_, dirty_tids_[0], dirty_tids_[1]); } } // namespace __tsan Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_clock.h =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_clock.h (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_clock.h (revision 320961) @@ -1,129 +1,131 @@ //===-- tsan_clock.h --------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #ifndef TSAN_CLOCK_H #define TSAN_CLOCK_H #include "tsan_defs.h" #include "tsan_dense_alloc.h" namespace __tsan { struct ClockElem { u64 epoch : kClkBits; u64 reused : 64 - kClkBits; }; struct ClockBlock { static const uptr kSize = 512; static const uptr kTableSize = kSize / sizeof(u32); static const uptr kClockCount = kSize / sizeof(ClockElem); union { u32 table[kTableSize]; ClockElem clock[kClockCount]; }; ClockBlock() { } }; typedef DenseSlabAlloc ClockAlloc; typedef DenseSlabAllocCache ClockCache; // The clock that lives in sync variables (mutexes, atomics, etc). class SyncClock { public: SyncClock(); ~SyncClock(); uptr size() const { return size_; } u64 get(unsigned tid) const { return elem(tid).epoch; } void Resize(ClockCache *c, uptr nclk); void Reset(ClockCache *c); void DebugDump(int(*printf)(const char *s, ...)); private: friend struct ThreadClock; static const uptr kDirtyTids = 2; unsigned release_store_tid_; unsigned release_store_reused_; unsigned dirty_tids_[kDirtyTids]; // tab_ contains indirect pointer to a 512b block using DenseSlabAlloc. // If size_ <= 64, then tab_ points to an array with 64 ClockElem's. // Otherwise, tab_ points to an array with 128 u32 elements, // each pointing to the second-level 512b block with 64 ClockElem's. ClockBlock *tab_; u32 tab_idx_; u32 size_; + void ResetImpl(); ClockElem &elem(unsigned tid) const; }; // The clock that lives in threads. 
struct ThreadClock { public: typedef DenseSlabAllocCache Cache; explicit ThreadClock(unsigned tid, unsigned reused = 0); u64 get(unsigned tid) const { DCHECK_LT(tid, kMaxTidInClock); return clk_[tid].epoch; } - void set(unsigned tid, u64 v); + void set(ClockCache *c, unsigned tid, u64 v); void set(u64 v) { DCHECK_GE(v, clk_[tid_].epoch); clk_[tid_].epoch = v; } void tick() { clk_[tid_].epoch++; } uptr size() const { return nclk_; } void acquire(ClockCache *c, const SyncClock *src); void release(ClockCache *c, SyncClock *dst) const; void acq_rel(ClockCache *c, SyncClock *dst); void ReleaseStore(ClockCache *c, SyncClock *dst) const; + void ResetCached(ClockCache *c); void DebugReset(); void DebugDump(int(*printf)(const char *s, ...)); private: static const uptr kDirtyTids = SyncClock::kDirtyTids; const unsigned tid_; const unsigned reused_; u64 last_acquire_; uptr nclk_; ClockElem clk_[kMaxTidInClock]; bool IsAlreadyAcquired(const SyncClock *src) const; void UpdateCurrentThread(SyncClock *dst) const; }; } // namespace __tsan #endif // TSAN_CLOCK_H Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_dense_alloc.h =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_dense_alloc.h (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_dense_alloc.h (revision 320961) @@ -1,137 +1,142 @@ //===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // A DenseSlabAlloc is a freelist-based allocator of fixed-size objects. // DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc. // The only difference from traditional slab allocators is that DenseSlabAlloc // allocates/frees indices of objects and provides functionality to map // the index onto the real pointer. The index is u32, that is, 2 times smaller // than uptr (hence the Dense prefix). //===----------------------------------------------------------------------===// #ifndef TSAN_DENSE_ALLOC_H #define TSAN_DENSE_ALLOC_H #include "sanitizer_common/sanitizer_common.h" #include "tsan_defs.h" #include "tsan_mutex.h" namespace __tsan { class DenseSlabAllocCache { static const uptr kSize = 128; typedef u32 IndexT; uptr pos; IndexT cache[kSize]; template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc; }; template<typename T, uptr kL1Size, uptr kL2Size> class DenseSlabAlloc { public: typedef DenseSlabAllocCache Cache; typedef typename Cache::IndexT IndexT; - DenseSlabAlloc() { + explicit DenseSlabAlloc(const char *name) { // Check that kL1Size and kL2Size are sane. CHECK_EQ(kL1Size & (kL1Size - 1), 0); CHECK_EQ(kL2Size & (kL2Size - 1), 0); CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size); // Check that it makes sense to use the dense alloc.
CHECK_GE(sizeof(T), sizeof(IndexT)); internal_memset(map_, 0, sizeof(map_)); freelist_ = 0; fillpos_ = 0; + name_ = name; } ~DenseSlabAlloc() { for (uptr i = 0; i < kL1Size; i++) { if (map_[i] != 0) UnmapOrDie(map_[i], kL2Size * sizeof(T)); } } IndexT Alloc(Cache *c) { if (c->pos == 0) Refill(c); return c->cache[--c->pos]; } void Free(Cache *c, IndexT idx) { DCHECK_NE(idx, 0); if (c->pos == Cache::kSize) Drain(c); c->cache[c->pos++] = idx; } T *Map(IndexT idx) { DCHECK_NE(idx, 0); DCHECK_LE(idx, kL1Size * kL2Size); return &map_[idx / kL2Size][idx % kL2Size]; } void FlushCache(Cache *c) { SpinMutexLock lock(&mtx_); while (c->pos) { IndexT idx = c->cache[--c->pos]; *(IndexT*)Map(idx) = freelist_; freelist_ = idx; } } void InitCache(Cache *c) { c->pos = 0; internal_memset(c->cache, 0, sizeof(c->cache)); } private: T *map_[kL1Size]; SpinMutex mtx_; IndexT freelist_; uptr fillpos_; + const char *name_; void Refill(Cache *c) { SpinMutexLock lock(&mtx_); if (freelist_ == 0) { if (fillpos_ == kL1Size) { - Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n"); + Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n", + name_, kL1Size, kL2Size); Die(); } - T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator"); + VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", + name_, fillpos_, kL1Size, kL2Size); + T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_); // Reserve 0 as invalid index. IndexT start = fillpos_ == 0 ? 1 : 0; for (IndexT i = start; i < kL2Size; i++) { new(batch + i) T; *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size; } *(IndexT*)(batch + kL2Size - 1) = 0; freelist_ = fillpos_ * kL2Size + start; map_[fillpos_++] = batch; } for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) { IndexT idx = freelist_; c->cache[c->pos++] = idx; freelist_ = *(IndexT*)Map(idx); } } void Drain(Cache *c) { SpinMutexLock lock(&mtx_); for (uptr i = 0; i < Cache::kSize / 2; i++) { IndexT idx = c->cache[--c->pos]; *(IndexT*)Map(idx) = freelist_; freelist_ = idx; } } }; } // namespace __tsan #endif // TSAN_DENSE_ALLOC_H Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.cc (revision 320961) @@ -1,2637 +1,2636 @@ //===-- tsan_interceptors.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
// // FIXME: move as many interceptors as possible into // sanitizer_common/sanitizer_common_interceptors.inc //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" #include "interception/interception.h" #include "tsan_interceptors.h" #include "tsan_interface.h" #include "tsan_platform.h" #include "tsan_suppressions.h" #include "tsan_rtl.h" #include "tsan_mman.h" #include "tsan_fd.h" using namespace __tsan; // NOLINT #if SANITIZER_FREEBSD || SANITIZER_MAC -#define __errno_location __error #define stdout __stdoutp #define stderr __stderrp #endif #if SANITIZER_ANDROID -#define __errno_location __errno #define mallopt(a, b) #endif #ifdef __mips__ const int kSigCount = 129; #else const int kSigCount = 65; #endif struct my_siginfo_t { // The size is determined by looking at sizeof of real siginfo_t on linux. u64 opaque[128 / sizeof(u64)]; }; #ifdef __mips__ struct ucontext_t { u64 opaque[768 / sizeof(u64) + 1]; }; #else struct ucontext_t { // The size is determined by looking at sizeof of real ucontext_t on linux. u64 opaque[936 / sizeof(u64) + 1]; }; #endif #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 #define PTHREAD_ABI_BASE "GLIBC_2.3.2" #elif defined(__aarch64__) || SANITIZER_PPC64V2 #define PTHREAD_ABI_BASE "GLIBC_2.17" #endif extern "C" int pthread_attr_init(void *attr); extern "C" int pthread_attr_destroy(void *attr); DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *) extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize); extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v)); extern "C" int pthread_setspecific(unsigned key, const void *v); DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *) DECLARE_REAL(int, fflush, __sanitizer_FILE *fp) DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size) DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr) extern "C" void *pthread_self(); extern "C" void _exit(int status); -extern "C" int *__errno_location(); extern "C" int fileno_unlocked(void *stream); extern "C" int dirfd(void *dirp); #if !SANITIZER_FREEBSD && !SANITIZER_ANDROID extern "C" int mallopt(int param, int value); #endif extern __sanitizer_FILE *stdout, *stderr; #if !SANITIZER_FREEBSD && !SANITIZER_MAC const int PTHREAD_MUTEX_RECURSIVE = 1; const int PTHREAD_MUTEX_RECURSIVE_NP = 1; #else const int PTHREAD_MUTEX_RECURSIVE = 2; const int PTHREAD_MUTEX_RECURSIVE_NP = 2; #endif -const int EINVAL = 22; -const int EBUSY = 16; -const int EOWNERDEAD = 130; #if !SANITIZER_FREEBSD && !SANITIZER_MAC const int EPOLL_CTL_ADD = 1; #endif const int SIGILL = 4; const int SIGABRT = 6; const int SIGFPE = 8; const int SIGSEGV = 11; const int SIGPIPE = 13; const int SIGTERM = 15; #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC const int SIGBUS = 10; const int SIGSYS = 12; #else const int SIGBUS = 7; const int SIGSYS = 31; #endif void *const MAP_FAILED = (void*)-1; #if !SANITIZER_MAC const int PTHREAD_BARRIER_SERIAL_THREAD = -1; #endif const int MAP_FIXED = 0x10; typedef long long_t; // NOLINT // From /usr/include/unistd.h # define F_ULOCK 0 /* 
Unlock a previously locked region. */ # define F_LOCK 1 /* Lock a region for exclusive use. */ # define F_TLOCK 2 /* Test and lock a region for exclusive use. */ # define F_TEST 3 /* Test a region for other processes locks. */ -#define errno (*__errno_location()) - typedef void (*sighandler_t)(int sig); typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx); #if SANITIZER_ANDROID struct sigaction_t { u32 sa_flags; union { sighandler_t sa_handler; sigactionhandler_t sa_sigaction; }; __sanitizer_sigset_t sa_mask; void (*sa_restorer)(); }; #else struct sigaction_t { #ifdef __mips__ u32 sa_flags; #endif union { sighandler_t sa_handler; sigactionhandler_t sa_sigaction; }; #if SANITIZER_FREEBSD int sa_flags; __sanitizer_sigset_t sa_mask; #elif SANITIZER_MAC __sanitizer_sigset_t sa_mask; int sa_flags; #else __sanitizer_sigset_t sa_mask; #ifndef __mips__ int sa_flags; #endif void (*sa_restorer)(); #endif }; #endif const sighandler_t SIG_DFL = (sighandler_t)0; const sighandler_t SIG_IGN = (sighandler_t)1; const sighandler_t SIG_ERR = (sighandler_t)-1; #if SANITIZER_FREEBSD || SANITIZER_MAC const int SA_SIGINFO = 0x40; const int SIG_SETMASK = 3; #elif defined(__mips__) const int SA_SIGINFO = 8; const int SIG_SETMASK = 3; #else const int SA_SIGINFO = 4; const int SIG_SETMASK = 2; #endif #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \ (!cur_thread()->is_inited) static sigaction_t sigactions[kSigCount]; namespace __tsan { struct SignalDesc { bool armed; bool sigaction; my_siginfo_t siginfo; ucontext_t ctx; }; struct ThreadSignalContext { int int_signal_send; atomic_uintptr_t in_blocking_func; atomic_uintptr_t have_pending_signals; SignalDesc pending_signals[kSigCount]; // emptyset and oldset are too big for stack. __sanitizer_sigset_t emptyset; __sanitizer_sigset_t oldset; }; // The object is 64-byte aligned, because we want hot data to be located in // a single cache line if possible (it's accessed in every interceptor). 
static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)]; LibIgnore *libignore() { return reinterpret_cast(&libignore_placeholder[0]); } void InitializeLibIgnore() { const SuppressionContext &supp = *Suppressions(); const uptr n = supp.SuppressionCount(); for (uptr i = 0; i < n; i++) { const Suppression *s = supp.SuppressionAt(i); if (0 == internal_strcmp(s->type, kSuppressionLib)) libignore()->AddIgnoredLibrary(s->templ); } if (flags()->ignore_noninstrumented_modules) libignore()->IgnoreNoninstrumentedModules(true); libignore()->OnLibraryLoaded(0); } } // namespace __tsan static ThreadSignalContext *SigCtx(ThreadState *thr) { ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx; if (ctx == 0 && !thr->is_dead) { ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext"); MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx)); thr->signal_ctx = ctx; } return ctx; } #if !SANITIZER_MAC static unsigned g_thread_finalize_key; #endif ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc) : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) { Initialize(thr); if (!thr_->is_inited) return; if (!thr_->ignore_interceptors) FuncEntry(thr, pc); DPrintf("#%d: intercept %s()\n", thr_->tid, fname); ignoring_ = !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses || libignore()->IsIgnored(pc, &in_ignored_lib_)); EnableIgnores(); } ScopedInterceptor::~ScopedInterceptor() { if (!thr_->is_inited) return; DisableIgnores(); if (!thr_->ignore_interceptors) { ProcessPendingSignals(thr_); FuncExit(thr_); CheckNoLocks(thr_); } } void ScopedInterceptor::EnableIgnores() { if (ignoring_) { - ThreadIgnoreBegin(thr_, pc_, false); + ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false); if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++; if (in_ignored_lib_) { DCHECK(!thr_->in_ignored_lib); thr_->in_ignored_lib = true; } } } void ScopedInterceptor::DisableIgnores() { if (ignoring_) { ThreadIgnoreEnd(thr_, pc_); if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--; if (in_ignored_lib_) { DCHECK(thr_->in_ignored_lib); thr_->in_ignored_lib = false; } } } #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func) #if SANITIZER_FREEBSD # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func) #else # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver) #endif #define READ_STRING_OF_LEN(thr, pc, s, len, n) \ MemoryAccessRange((thr), (pc), (uptr)(s), \ common_flags()->strict_string_checks ? (len) + 1 : (n), false) #define READ_STRING(thr, pc, s, n) \ READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n)) #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name)) struct BlockingCall { explicit BlockingCall(ThreadState *thr) : thr(thr) , ctx(SigCtx(thr)) { for (;;) { atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed); if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0) break; atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed); ProcessPendingSignals(thr); } // When we are in a "blocking call", we process signals asynchronously // (right when they arrive). In this context we do not expect to be // executing any user/runtime code. The known interceptor sequence when // this is not true is: pthread_join -> munmap(stack). It's fine // to ignore munmap in this case -- we handle stack shadow separately. 
thr->ignore_interceptors++; } ~BlockingCall() { thr->ignore_interceptors--; atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed); } ThreadState *thr; ThreadSignalContext *ctx; }; TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) { SCOPED_TSAN_INTERCEPTOR(sleep, sec); unsigned res = BLOCK_REAL(sleep)(sec); AfterSleep(thr, pc); return res; } TSAN_INTERCEPTOR(int, usleep, long_t usec) { SCOPED_TSAN_INTERCEPTOR(usleep, usec); int res = BLOCK_REAL(usleep)(usec); AfterSleep(thr, pc); return res; } TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) { SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem); int res = BLOCK_REAL(nanosleep)(req, rem); AfterSleep(thr, pc); return res; } // The sole reason tsan wraps atexit callbacks is to establish synchronization // between callback setup and callback execution. struct AtExitCtx { void (*f)(); void *arg; }; static void at_exit_wrapper(void *arg) { ThreadState *thr = cur_thread(); uptr pc = 0; Acquire(thr, pc, (uptr)arg); AtExitCtx *ctx = (AtExitCtx*)arg; ((void(*)(void *arg))ctx->f)(ctx->arg); InternalFree(ctx); } static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), void *arg, void *dso); #if !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, atexit, void (*f)()) { if (cur_thread()->in_symbolizer) return 0; // We want to setup the atexit callback even if we are in ignored lib // or after fork. SCOPED_INTERCEPTOR_RAW(atexit, f); return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0); } #endif TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) { if (cur_thread()->in_symbolizer) return 0; SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso); return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso); } static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), void *arg, void *dso) { AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx)); ctx->f = f; ctx->arg = arg; Release(thr, pc, (uptr)ctx); // Memory allocation in __cxa_atexit will race with free during exit, // because we do not see synchronization around atexit callback list. ThreadIgnoreBegin(thr, pc); int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso); ThreadIgnoreEnd(thr, pc); return res; } #if !SANITIZER_MAC static void on_exit_wrapper(int status, void *arg) { ThreadState *thr = cur_thread(); uptr pc = 0; Acquire(thr, pc, (uptr)arg); AtExitCtx *ctx = (AtExitCtx*)arg; ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg); InternalFree(ctx); } TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) { if (cur_thread()->in_symbolizer) return 0; SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg); AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx)); ctx->f = (void(*)())f; ctx->arg = arg; Release(thr, pc, (uptr)ctx); // Memory allocation in __cxa_atexit will race with free during exit, // because we do not see synchronization around atexit callback list. ThreadIgnoreBegin(thr, pc); int res = REAL(on_exit)(on_exit_wrapper, ctx); ThreadIgnoreEnd(thr, pc); return res; } #endif // Cleanup old bufs. static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) { for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) { JmpBuf *buf = &thr->jmp_bufs[i]; if (buf->sp <= sp) { uptr sz = thr->jmp_bufs.Size(); internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf)); thr->jmp_bufs.PopBack(); i--; } } } static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) { if (!thr->is_inited) // called from libc guts during bootstrap return; // Cleanup old bufs. JmpBufGarbageCollect(thr, sp); // Remember the buf. 
JmpBuf *buf = thr->jmp_bufs.PushBack(); buf->sp = sp; buf->mangled_sp = mangled_sp; buf->shadow_stack_pos = thr->shadow_stack_pos; ThreadSignalContext *sctx = SigCtx(thr); buf->int_signal_send = sctx ? sctx->int_signal_send : 0; buf->in_blocking_func = sctx ? atomic_load(&sctx->in_blocking_func, memory_order_relaxed) : false; buf->in_signal_handler = atomic_load(&thr->in_signal_handler, memory_order_relaxed); } static void LongJmp(ThreadState *thr, uptr *env) { #ifdef __powerpc__ uptr mangled_sp = env[0]; -#elif SANITIZER_FREEBSD || SANITIZER_MAC +#elif SANITIZER_FREEBSD uptr mangled_sp = env[2]; +#elif SANITIZER_MAC +# ifdef __aarch64__ + uptr mangled_sp = env[13]; +# else + uptr mangled_sp = env[2]; +# endif #elif defined(SANITIZER_LINUX) # ifdef __aarch64__ uptr mangled_sp = env[13]; # elif defined(__mips64) uptr mangled_sp = env[1]; # else uptr mangled_sp = env[6]; # endif #endif // Find the saved buf by mangled_sp. for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) { JmpBuf *buf = &thr->jmp_bufs[i]; if (buf->mangled_sp == mangled_sp) { CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos); // Unwind the stack. while (thr->shadow_stack_pos > buf->shadow_stack_pos) FuncExit(thr); ThreadSignalContext *sctx = SigCtx(thr); if (sctx) { sctx->int_signal_send = buf->int_signal_send; atomic_store(&sctx->in_blocking_func, buf->in_blocking_func, memory_order_relaxed); } atomic_store(&thr->in_signal_handler, buf->in_signal_handler, memory_order_relaxed); JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp return; } } Printf("ThreadSanitizer: can't find longjmp buf\n"); CHECK(0); } // FIXME: put everything below into a common extern "C" block? extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) { SetJmp(cur_thread(), sp, mangled_sp); } #if SANITIZER_MAC TSAN_INTERCEPTOR(int, setjmp, void *env); TSAN_INTERCEPTOR(int, _setjmp, void *env); TSAN_INTERCEPTOR(int, sigsetjmp, void *env); #else // SANITIZER_MAC // Not called. Merely to satisfy TSAN_INTERCEPT(). extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __interceptor_setjmp(void *env); extern "C" int __interceptor_setjmp(void *env) { CHECK(0); return 0; } // FIXME: any reason to have a separate declaration? extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __interceptor__setjmp(void *env); extern "C" int __interceptor__setjmp(void *env) { CHECK(0); return 0; } extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __interceptor_sigsetjmp(void *env); extern "C" int __interceptor_sigsetjmp(void *env) { CHECK(0); return 0; } extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __interceptor___sigsetjmp(void *env); extern "C" int __interceptor___sigsetjmp(void *env) { CHECK(0); return 0; } extern "C" int setjmp(void *env); extern "C" int _setjmp(void *env); extern "C" int sigsetjmp(void *env); extern "C" int __sigsetjmp(void *env); DEFINE_REAL(int, setjmp, void *env) DEFINE_REAL(int, _setjmp, void *env) DEFINE_REAL(int, sigsetjmp, void *env) DEFINE_REAL(int, __sigsetjmp, void *env) #endif // SANITIZER_MAC TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) { // Note: if we call REAL(longjmp) in the context of ScopedInterceptor, // bad things will happen. We will jump over ScopedInterceptor dtor and can // leave thr->in_ignored_lib set. 
{ SCOPED_INTERCEPTOR_RAW(longjmp, env, val); } LongJmp(cur_thread(), env); REAL(longjmp)(env, val); } TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) { { SCOPED_INTERCEPTOR_RAW(siglongjmp, env, val); } LongJmp(cur_thread(), env); REAL(siglongjmp)(env, val); } #if !SANITIZER_MAC TSAN_INTERCEPTOR(void*, malloc, uptr size) { if (cur_thread()->in_symbolizer) return InternalAlloc(size); void *p = 0; { SCOPED_INTERCEPTOR_RAW(malloc, size); p = user_alloc(thr, pc, size); } invoke_malloc_hook(p, size); return p; } TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) { SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz); return user_alloc(thr, pc, sz, align); } TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) { if (cur_thread()->in_symbolizer) return InternalCalloc(size, n); void *p = 0; { SCOPED_INTERCEPTOR_RAW(calloc, size, n); p = user_calloc(thr, pc, size, n); } invoke_malloc_hook(p, n * size); return p; } TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) { if (cur_thread()->in_symbolizer) return InternalRealloc(p, size); if (p) invoke_free_hook(p); { SCOPED_INTERCEPTOR_RAW(realloc, p, size); p = user_realloc(thr, pc, p, size); } invoke_malloc_hook(p, size); return p; } TSAN_INTERCEPTOR(void, free, void *p) { if (p == 0) return; if (cur_thread()->in_symbolizer) return InternalFree(p); invoke_free_hook(p); SCOPED_INTERCEPTOR_RAW(free, p); user_free(thr, pc, p); } TSAN_INTERCEPTOR(void, cfree, void *p) { if (p == 0) return; if (cur_thread()->in_symbolizer) return InternalFree(p); invoke_free_hook(p); SCOPED_INTERCEPTOR_RAW(cfree, p); user_free(thr, pc, p); } TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) { SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p); return user_alloc_usable_size(p); } #endif TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT uptr srclen = internal_strlen(src); MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true); MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false); return REAL(strcpy)(dst, src); // NOLINT } TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) { SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n); uptr srclen = internal_strnlen(src, n); MemoryAccessRange(thr, pc, (uptr)dst, n, true); MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false); return REAL(strncpy)(dst, src, n); } TSAN_INTERCEPTOR(char*, strdup, const char *str) { SCOPED_TSAN_INTERCEPTOR(strdup, str); // strdup will call malloc, so no instrumentation is required here. 
return REAL(strdup)(str); } static bool fix_mmap_addr(void **addr, long_t sz, int flags) { if (*addr) { if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) { if (flags & MAP_FIXED) { - errno = EINVAL; + errno = errno_EINVAL; return false; } else { *addr = 0; } } } return true; } TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd, OFF_T off) { SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off); if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED; void *res = REAL(mmap)(addr, sz, prot, flags, fd, off); if (res != MAP_FAILED) { if (fd > 0) FdAccess(thr, pc, fd); if (thr->ignore_reads_and_writes == 0) MemoryRangeImitateWrite(thr, pc, (uptr)res, sz); else MemoryResetRange(thr, pc, (uptr)res, sz); } return res; } #if SANITIZER_LINUX TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, int fd, OFF64_T off) { SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off); if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED; void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off); if (res != MAP_FAILED) { if (fd > 0) FdAccess(thr, pc, fd); if (thr->ignore_reads_and_writes == 0) MemoryRangeImitateWrite(thr, pc, (uptr)res, sz); else MemoryResetRange(thr, pc, (uptr)res, sz); } return res; } #define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64) #else #define TSAN_MAYBE_INTERCEPT_MMAP64 #endif TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) { SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz); if (sz != 0) { // If sz == 0, munmap will return EINVAL and won't unmap any memory. DontNeedShadowFor((uptr)addr, sz); ScopedGlobalProcessor sgp; ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz); } int res = REAL(munmap)(addr, sz); return res; } #if SANITIZER_LINUX TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) { SCOPED_INTERCEPTOR_RAW(memalign, align, sz); return user_alloc(thr, pc, sz, align); } #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign) #else #define TSAN_MAYBE_INTERCEPT_MEMALIGN #endif #if !SANITIZER_MAC TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) { SCOPED_INTERCEPTOR_RAW(memalign, align, sz); return user_alloc(thr, pc, sz, align); } TSAN_INTERCEPTOR(void*, valloc, uptr sz) { SCOPED_INTERCEPTOR_RAW(valloc, sz); return user_alloc(thr, pc, sz, GetPageSizeCached()); } #endif #if SANITIZER_LINUX TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) { SCOPED_INTERCEPTOR_RAW(pvalloc, sz); sz = RoundUp(sz, GetPageSizeCached()); return user_alloc(thr, pc, sz, GetPageSizeCached()); } #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc) #else #define TSAN_MAYBE_INTERCEPT_PVALLOC #endif #if !SANITIZER_MAC TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) { SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz); *memptr = user_alloc(thr, pc, sz, align); return 0; } #endif // __cxa_guard_acquire and friends need to be intercepted in a special way - // regular interceptors will break statically-linked libstdc++. Linux // interceptors are specially defined as weak functions (so that they don't // cause link errors when the user defines them as well). So they silently // auto-disable themselves when such a symbol is already present in the binary. If // we link libstdc++ statically, it will bring its own __cxa_guard_acquire which // will silently replace our interceptor. That's why on Linux we simply export // these interceptors with INTERFACE_ATTRIBUTE. // On OS X, we don't support static linking, so we just use a regular // interceptor.
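As background for the guard interceptors below: for a function-local static, compilers targeting the Itanium C++ ABI emit roughly the pattern sketched here. This is an illustrative sketch, not code from this file; the guard-word layout varies by ABI, and real codegen checks the guard byte inline before calling into the runtime.

#include <new>

extern "C" int __cxa_guard_acquire(void *guard);   // returns 1 if caller must initialize
extern "C" void __cxa_guard_release(void *guard);  // marks initialization done

struct Widget { int x; };

// Roughly what the compiler generates for:
//   Widget &instance() { static Widget w{42}; return w; }
Widget &instance() {
  static unsigned long long guard;  // zero-initialized guard word
  alignas(Widget) static unsigned char storage[sizeof(Widget)];
  if (__cxa_guard_acquire(&guard)) {  // exactly one thread gets 1 here
    new (storage) Widget{42};         // run the initializer once
    __cxa_guard_release(&guard);      // publish; blocked threads proceed
  }
  return *reinterpret_cast<Widget *>(storage);
}

A third entry point, __cxa_guard_abort, is called instead of release when the initializer throws; the interceptor below models it by storing 0 back into the guard.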
#if SANITIZER_MAC #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR #else #define STDCXX_INTERCEPTOR(rettype, name, ...) \ extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__) #endif // Used in thread-safe function static initialization. STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) { SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g); for (;;) { u32 cmp = atomic_load(g, memory_order_acquire); if (cmp == 0) { if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed)) return 1; } else if (cmp == 1) { Acquire(thr, pc, (uptr)g); return 0; } else { internal_sched_yield(); } } } STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) { SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g); Release(thr, pc, (uptr)g); atomic_store(g, 1, memory_order_release); } STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) { SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g); atomic_store(g, 0, memory_order_relaxed); } namespace __tsan { void DestroyThreadState() { ThreadState *thr = cur_thread(); Processor *proc = thr->proc(); ThreadFinish(thr); ProcUnwire(proc, thr); ProcDestroy(proc); ThreadSignalContext *sctx = thr->signal_ctx; if (sctx) { thr->signal_ctx = 0; UnmapOrDie(sctx, sizeof(*sctx)); } DTLS_Destroy(); cur_thread_finalize(); } } // namespace __tsan #if !SANITIZER_MAC static void thread_finalize(void *v) { uptr iter = (uptr)v; if (iter > 1) { if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) { Printf("ThreadSanitizer: failed to set thread key\n"); Die(); } return; } DestroyThreadState(); } #endif struct ThreadParam { void* (*callback)(void *arg); void *param; atomic_uintptr_t tid; }; extern "C" void *__tsan_thread_start_func(void *arg) { ThreadParam *p = (ThreadParam*)arg; void* (*callback)(void *arg) = p->callback; void *param = p->param; int tid = 0; { ThreadState *thr = cur_thread(); // Thread-local state is not initialized yet. ScopedIgnoreInterceptors ignore; #if !SANITIZER_MAC ThreadIgnoreBegin(thr, 0); if (pthread_setspecific(g_thread_finalize_key, (void *)GetPthreadDestructorIterations())) { Printf("ThreadSanitizer: failed to set thread key\n"); Die(); } ThreadIgnoreEnd(thr, 0); #endif while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) internal_sched_yield(); Processor *proc = ProcCreate(); ProcWire(proc, thr); ThreadStart(thr, tid, GetTid(), /*workerthread*/ false); atomic_store(&p->tid, 0, memory_order_release); } void *res = callback(param); // Prevent the callback from being tail called, // it mixes up stack traces. volatile int foo = 42; foo++; return res; } TSAN_INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*), void * param) { SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param); if (ctx->after_multithreaded_fork) { if (flags()->die_after_fork) { Report("ThreadSanitizer: starting new threads after multi-threaded " "fork is not supported. Dying (set die_after_fork=0 to override)\n"); Die(); } else { VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded " "fork is not supported (pid %d). Continuing because of " "die_after_fork=0, but you are on your own\n", internal_getpid()); } } __sanitizer_pthread_attr_t myattr; if (attr == 0) { pthread_attr_init(&myattr); attr = &myattr; } int detached = 0; REAL(pthread_attr_getdetachstate)(attr, &detached); AdjustStackSize(attr); ThreadParam p; p.callback = callback; p.param = param; atomic_store(&p.tid, 0, memory_order_relaxed); int res = -1; { // Otherwise we see false positives in pthread stack manipulation. 
ScopedIgnoreInterceptors ignore; ThreadIgnoreBegin(thr, pc); res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p); ThreadIgnoreEnd(thr, pc); } if (res == 0) { int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached)); CHECK_NE(tid, 0); // Synchronization on p.tid serves two purposes: // 1. ThreadCreate must finish before the new thread starts. // Otherwise the new thread can call pthread_detach, but the pthread_t // identifier is not yet registered in ThreadRegistry by ThreadCreate. // 2. ThreadStart must finish before this thread continues. // Otherwise, this thread can call pthread_detach and reset thr->sync // before the new thread got a chance to acquire from it in ThreadStart. atomic_store(&p.tid, tid, memory_order_release); while (atomic_load(&p.tid, memory_order_acquire) != 0) internal_sched_yield(); } if (attr == &myattr) pthread_attr_destroy(&myattr); return res; } TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) { SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret); int tid = ThreadTid(thr, pc, (uptr)th); ThreadIgnoreBegin(thr, pc); int res = BLOCK_REAL(pthread_join)(th, ret); ThreadIgnoreEnd(thr, pc); if (res == 0) { ThreadJoin(thr, pc, tid); } return res; } DEFINE_REAL_PTHREAD_FUNCTIONS TSAN_INTERCEPTOR(int, pthread_detach, void *th) { SCOPED_TSAN_INTERCEPTOR(pthread_detach, th); int tid = ThreadTid(thr, pc, (uptr)th); int res = REAL(pthread_detach)(th); if (res == 0) { ThreadDetach(thr, pc, tid); } return res; } // Problem: // The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2). // pthread_cond_t has a different size in the different versions. // If we call the new REAL functions for an old pthread_cond_t, they will corrupt memory // after pthread_cond_t (the old cond is smaller). // If we call the old REAL functions for a new pthread_cond_t, we will lose some // functionality (e.g. the old functions do not support waiting against // CLOCK_REALTIME). // Proper handling would require having 2 versions of the interceptors as well. // But this is messy, in particular it requires linker scripts when the sanitizer // runtime is linked into a shared library. // Instead we assume we don't have dynamic libraries built against old // pthread (2.2.5 dates back to 2002). And we provide the legacy_pthread_cond flag // that allows working with old libraries (but this mode does not support // some features, e.g. pthread_condattr_getpshared). static void *init_cond(void *c, bool force = false) { // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions. // So we allocate additional memory on the side large enough to hold // any pthread_cond_t object. Always call the new REAL functions, but pass // the aux object to them. // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes // the first word of pthread_cond_t to zero. // It's all relevant only for Linux. if (!common_flags()->legacy_pthread_cond) return c; atomic_uintptr_t *p = (atomic_uintptr_t*)c; uptr cond = atomic_load(p, memory_order_acquire); if (!force && cond != 0) return (void*)cond; void *newcond = WRAP(malloc)(pthread_cond_t_sz); internal_memset(newcond, 0, pthread_cond_t_sz); if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond, memory_order_acq_rel)) return newcond; WRAP(free)(newcond); return (void*)cond; } struct CondMutexUnlockCtx { ScopedInterceptor *si; ThreadState *thr; uptr pc; void *m; }; static void cond_mutex_unlock(CondMutexUnlockCtx *arg) { // The pthread_cond_wait interceptor has enabled async signal delivery // (see BlockingCall below). Disable async signals since we are running // tsan code.
Also ScopedInterceptor and BlockingCall destructors won't run // since the thread is cancelled, so we have to manually execute them // (the thread still can run some user code due to pthread_cleanup_push). ThreadSignalContext *ctx = SigCtx(arg->thr); CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1); atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed); MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock); // Undo BlockingCall ctor effects. arg->thr->ignore_interceptors--; arg->si->~ScopedInterceptor(); } INTERCEPTOR(int, pthread_cond_init, void *c, void *a) { void *cond = init_cond(c, true); SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a); MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true); return REAL(pthread_cond_init)(cond, a); } static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, int (*fn)(void *c, void *m, void *abstime), void *c, void *m, void *t) { MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); MutexUnlock(thr, pc, (uptr)m); CondMutexUnlockCtx arg = {si, thr, pc, m}; int res = 0; // This ensures that we handle mutex lock even in case of pthread_cancel. // See test/tsan/cond_cancel.cc. { // Enable signal delivery while the thread is blocked. BlockingCall bc(thr); res = call_pthread_cancel_with_cleanup( fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg); } if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m); MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock); return res; } INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) { void *cond = init_cond(c); SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m); return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL( pthread_cond_wait), cond, m, 0); } INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) { void *cond = init_cond(c); SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime); return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m, abstime); } #if SANITIZER_MAC INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m, void *reltime) { void *cond = init_cond(c); SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime); return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond, m, reltime); } #endif INTERCEPTOR(int, pthread_cond_signal, void *c) { void *cond = init_cond(c); SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond); MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); return REAL(pthread_cond_signal)(cond); } INTERCEPTOR(int, pthread_cond_broadcast, void *c) { void *cond = init_cond(c); SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond); MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); return REAL(pthread_cond_broadcast)(cond); } INTERCEPTOR(int, pthread_cond_destroy, void *c) { void *cond = init_cond(c); SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond); MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true); int res = REAL(pthread_cond_destroy)(cond); if (common_flags()->legacy_pthread_cond) { // Free our aux cond and zero the pointer to not leave dangling pointers. 
WRAP(free)(cond); atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed); } return res; } TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a); int res = REAL(pthread_mutex_init)(m, a); if (res == 0) { u32 flagz = 0; if (a) { int type = 0; if (REAL(pthread_mutexattr_gettype)(a, &type) == 0) if (type == PTHREAD_MUTEX_RECURSIVE || type == PTHREAD_MUTEX_RECURSIVE_NP) flagz |= MutexFlagWriteReentrant; } MutexCreate(thr, pc, (uptr)m, flagz); } return res; } TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m); int res = REAL(pthread_mutex_destroy)(m); - if (res == 0 || res == EBUSY) { + if (res == 0 || res == errno_EBUSY) { MutexDestroy(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m); int res = REAL(pthread_mutex_trylock)(m); - if (res == EOWNERDEAD) + if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m); - if (res == 0 || res == EOWNERDEAD) + if (res == 0 || res == errno_EOWNERDEAD) MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); return res; } #if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime); int res = REAL(pthread_mutex_timedlock)(m, abstime); if (res == 0) { MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } #endif #if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared); int res = REAL(pthread_spin_init)(m, pshared); if (res == 0) { MutexCreate(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m); int res = REAL(pthread_spin_destroy)(m); if (res == 0) { MutexDestroy(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m); MutexPreLock(thr, pc, (uptr)m); int res = REAL(pthread_spin_lock)(m); if (res == 0) { MutexPostLock(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m); int res = REAL(pthread_spin_trylock)(m); if (res == 0) { MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m); MutexUnlock(thr, pc, (uptr)m); int res = REAL(pthread_spin_unlock)(m); return res; } #endif TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a); int res = REAL(pthread_rwlock_init)(m, a); if (res == 0) { MutexCreate(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m); int res = REAL(pthread_rwlock_destroy)(m); if (res == 0) { MutexDestroy(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m); MutexPreReadLock(thr, pc, (uptr)m); int res = REAL(pthread_rwlock_rdlock)(m); if (res == 0) { MutexPostReadLock(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m); int res = REAL(pthread_rwlock_tryrdlock)(m); if (res == 0) { MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } #if !SANITIZER_MAC 
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime); int res = REAL(pthread_rwlock_timedrdlock)(m, abstime); if (res == 0) { MutexPostReadLock(thr, pc, (uptr)m); } return res; } #endif TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m); MutexPreLock(thr, pc, (uptr)m); int res = REAL(pthread_rwlock_wrlock)(m); if (res == 0) { MutexPostLock(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m); int res = REAL(pthread_rwlock_trywrlock)(m); if (res == 0) { MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } #if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime); int res = REAL(pthread_rwlock_timedwrlock)(m, abstime); if (res == 0) { MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } #endif TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m); MutexReadOrWriteUnlock(thr, pc, (uptr)m); int res = REAL(pthread_rwlock_unlock)(m); return res; } #if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) { SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count); MemoryWrite(thr, pc, (uptr)b, kSizeLog1); int res = REAL(pthread_barrier_init)(b, a, count); return res; } TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) { SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b); MemoryWrite(thr, pc, (uptr)b, kSizeLog1); int res = REAL(pthread_barrier_destroy)(b); return res; } TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) { SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b); Release(thr, pc, (uptr)b); MemoryRead(thr, pc, (uptr)b, kSizeLog1); int res = REAL(pthread_barrier_wait)(b); MemoryRead(thr, pc, (uptr)b, kSizeLog1); if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) { Acquire(thr, pc, (uptr)b); } return res; } #endif TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { SCOPED_INTERCEPTOR_RAW(pthread_once, o, f); if (o == 0 || f == 0) - return EINVAL; + return errno_EINVAL; atomic_uint32_t *a; if (!SANITIZER_MAC) a = static_cast<atomic_uint32_t*>(o); else // On OS X, pthread_once_t has a header with a long-sized signature.
a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t))); u32 v = atomic_load(a, memory_order_acquire); if (v == 0 && atomic_compare_exchange_strong(a, &v, 1, memory_order_relaxed)) { (*f)(); if (!thr->in_ignored_lib) Release(thr, pc, (uptr)o); atomic_store(a, 2, memory_order_release); } else { while (v != 2) { internal_sched_yield(); v = atomic_load(a, memory_order_acquire); } if (!thr->in_ignored_lib) Acquire(thr, pc, (uptr)o); } return 0; } #if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf); if (fd > 0) FdAccess(thr, pc, fd); return REAL(__fxstat)(version, fd, buf); } #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat) #else #define TSAN_MAYBE_INTERCEPT___FXSTAT #endif TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) { #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf); if (fd > 0) FdAccess(thr, pc, fd); return REAL(fstat)(fd, buf); #else SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf); if (fd > 0) FdAccess(thr, pc, fd); return REAL(__fxstat)(0, fd, buf); #endif } #if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf); if (fd > 0) FdAccess(thr, pc, fd); return REAL(__fxstat64)(version, fd, buf); } #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64) #else #define TSAN_MAYBE_INTERCEPT___FXSTAT64 #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf); if (fd > 0) FdAccess(thr, pc, fd); return REAL(__fxstat64)(0, fd, buf); } #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64) #else #define TSAN_MAYBE_INTERCEPT_FSTAT64 #endif TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) { SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode); READ_STRING(thr, pc, name, 0); int fd = REAL(open)(name, flags, mode); if (fd >= 0) FdFileCreate(thr, pc, fd); return fd; } #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) { SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode); READ_STRING(thr, pc, name, 0); int fd = REAL(open64)(name, flags, mode); if (fd >= 0) FdFileCreate(thr, pc, fd); return fd; } #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64) #else #define TSAN_MAYBE_INTERCEPT_OPEN64 #endif TSAN_INTERCEPTOR(int, creat, const char *name, int mode) { SCOPED_TSAN_INTERCEPTOR(creat, name, mode); READ_STRING(thr, pc, name, 0); int fd = REAL(creat)(name, mode); if (fd >= 0) FdFileCreate(thr, pc, fd); return fd; } #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) { SCOPED_TSAN_INTERCEPTOR(creat64, name, mode); READ_STRING(thr, pc, name, 0); int fd = REAL(creat64)(name, mode); if (fd >= 0) FdFileCreate(thr, pc, fd); return fd; } #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64) #else #define TSAN_MAYBE_INTERCEPT_CREAT64 #endif TSAN_INTERCEPTOR(int, dup, int oldfd) { SCOPED_TSAN_INTERCEPTOR(dup, oldfd); int newfd = REAL(dup)(oldfd); if (oldfd >= 0 && newfd >= 0 && newfd != oldfd) FdDup(thr, pc, oldfd, newfd, true); return newfd; } TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) { SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd); int newfd2 = REAL(dup2)(oldfd, newfd); if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd) FdDup(thr, pc, oldfd, newfd2, false); return newfd2; } #if !SANITIZER_MAC TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd,
int flags) { SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags); int newfd2 = REAL(dup3)(oldfd, newfd, flags); if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd) FdDup(thr, pc, oldfd, newfd2, false); return newfd2; } #endif #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) { SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags); int fd = REAL(eventfd)(initval, flags); if (fd >= 0) FdEventCreate(thr, pc, fd); return fd; } #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd) #else #define TSAN_MAYBE_INTERCEPT_EVENTFD #endif #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) { SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags); if (fd >= 0) FdClose(thr, pc, fd); fd = REAL(signalfd)(fd, mask, flags); if (fd >= 0) FdSignalCreate(thr, pc, fd); return fd; } #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd) #else #define TSAN_MAYBE_INTERCEPT_SIGNALFD #endif #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, inotify_init, int fake) { SCOPED_TSAN_INTERCEPTOR(inotify_init, fake); int fd = REAL(inotify_init)(fake); if (fd >= 0) FdInotifyCreate(thr, pc, fd); return fd; } #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init) #else #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT #endif #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, inotify_init1, int flags) { SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags); int fd = REAL(inotify_init1)(flags); if (fd >= 0) FdInotifyCreate(thr, pc, fd); return fd; } #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1) #else #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 #endif TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) { SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol); int fd = REAL(socket)(domain, type, protocol); if (fd >= 0) FdSocketCreate(thr, pc, fd); return fd; } TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) { SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd); int res = REAL(socketpair)(domain, type, protocol, fd); if (res == 0 && fd[0] >= 0 && fd[1] >= 0) FdPipeCreate(thr, pc, fd[0], fd[1]); return res; } TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) { SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen); FdSocketConnecting(thr, pc, fd); int res = REAL(connect)(fd, addr, addrlen); if (res == 0 && fd >= 0) FdSocketConnect(thr, pc, fd); return res; } TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) { SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen); int res = REAL(bind)(fd, addr, addrlen); if (fd > 0 && res == 0) FdAccess(thr, pc, fd); return res; } TSAN_INTERCEPTOR(int, listen, int fd, int backlog) { SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog); int res = REAL(listen)(fd, backlog); if (fd > 0 && res == 0) FdAccess(thr, pc, fd); return res; } TSAN_INTERCEPTOR(int, close, int fd) { SCOPED_TSAN_INTERCEPTOR(close, fd); if (fd >= 0) FdClose(thr, pc, fd); return REAL(close)(fd); } #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, __close, int fd) { SCOPED_TSAN_INTERCEPTOR(__close, fd); if (fd >= 0) FdClose(thr, pc, fd); return REAL(__close)(fd); } #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close) #else #define TSAN_MAYBE_INTERCEPT___CLOSE #endif // glibc guts #if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) { SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr); int fds[64]; int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds)); for (int i = 0; i < cnt; i++) { if (fds[i] > 0) FdClose(thr, pc, fds[i]); } 
REAL(__res_iclose)(state, free_addr); } #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose) #else #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE #endif TSAN_INTERCEPTOR(int, pipe, int *pipefd) { SCOPED_TSAN_INTERCEPTOR(pipe, pipefd); int res = REAL(pipe)(pipefd); if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0) FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); return res; } #if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) { SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags); int res = REAL(pipe2)(pipefd, flags); if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0) FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); return res; } #endif TSAN_INTERCEPTOR(int, unlink, char *path) { SCOPED_TSAN_INTERCEPTOR(unlink, path); Release(thr, pc, File2addr(path)); int res = REAL(unlink)(path); return res; } TSAN_INTERCEPTOR(void*, tmpfile, int fake) { SCOPED_TSAN_INTERCEPTOR(tmpfile, fake); void *res = REAL(tmpfile)(fake); if (res) { int fd = fileno_unlocked(res); if (fd >= 0) FdFileCreate(thr, pc, fd); } return res; } #if SANITIZER_LINUX TSAN_INTERCEPTOR(void*, tmpfile64, int fake) { SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake); void *res = REAL(tmpfile64)(fake); if (res) { int fd = fileno_unlocked(res); if (fd >= 0) FdFileCreate(thr, pc, fd); } return res; } #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64) #else #define TSAN_MAYBE_INTERCEPT_TMPFILE64 #endif static void FlushStreams() { // Flushing all the streams here may freeze the process if a child thread is // performing file stream operations at the same time. REAL(fflush)(stdout); REAL(fflush)(stderr); } TSAN_INTERCEPTOR(void, abort, int fake) { SCOPED_TSAN_INTERCEPTOR(abort, fake); FlushStreams(); REAL(abort)(fake); } TSAN_INTERCEPTOR(int, puts, const char *s) { SCOPED_TSAN_INTERCEPTOR(puts, s); MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false); return REAL(puts)(s); } TSAN_INTERCEPTOR(int, rmdir, char *path) { SCOPED_TSAN_INTERCEPTOR(rmdir, path); Release(thr, pc, Dir2addr(path)); int res = REAL(rmdir)(path); return res; } TSAN_INTERCEPTOR(int, closedir, void *dirp) { SCOPED_TSAN_INTERCEPTOR(closedir, dirp); if (dirp) { int fd = dirfd(dirp); FdClose(thr, pc, fd); } return REAL(closedir)(dirp); } #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, epoll_create, int size) { SCOPED_TSAN_INTERCEPTOR(epoll_create, size); int fd = REAL(epoll_create)(size); if (fd >= 0) FdPollCreate(thr, pc, fd); return fd; } TSAN_INTERCEPTOR(int, epoll_create1, int flags) { SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags); int fd = REAL(epoll_create1)(flags); if (fd >= 0) FdPollCreate(thr, pc, fd); return fd; } TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) { SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev); if (epfd >= 0) FdAccess(thr, pc, epfd); if (epfd >= 0 && fd >= 0) FdAccess(thr, pc, fd); if (op == EPOLL_CTL_ADD && epfd >= 0) FdRelease(thr, pc, epfd); int res = REAL(epoll_ctl)(epfd, op, fd, ev); return res; } TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) { SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout); if (epfd >= 0) FdAccess(thr, pc, epfd); int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout); if (res > 0 && epfd >= 0) FdAcquire(thr, pc, epfd); return res; } TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout, void *sigmask) { SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask); if (epfd >= 0) FdAccess(thr, pc, epfd); int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask); if (res > 0 && epfd >= 0) 
FdAcquire(thr, pc, epfd); return res; } #define TSAN_MAYBE_INTERCEPT_EPOLL \ TSAN_INTERCEPT(epoll_create); \ TSAN_INTERCEPT(epoll_create1); \ TSAN_INTERCEPT(epoll_ctl); \ TSAN_INTERCEPT(epoll_wait); \ TSAN_INTERCEPT(epoll_pwait) #else #define TSAN_MAYBE_INTERCEPT_EPOLL #endif // The following functions are intercepted merely to process pending signals. // If the program blocks signal X, we must deliver the signal before the function // returns. Similarly, if the program unblocks a signal (or returns from sigsuspend) // it's better to deliver the signal straight away. TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) { SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask); return REAL(sigsuspend)(mask); } TSAN_INTERCEPTOR(int, sigblock, int mask) { SCOPED_TSAN_INTERCEPTOR(sigblock, mask); return REAL(sigblock)(mask); } TSAN_INTERCEPTOR(int, sigsetmask, int mask) { SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask); return REAL(sigsetmask)(mask); } TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) { SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset); return REAL(pthread_sigmask)(how, set, oldset); } namespace __tsan { static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, bool sigact, int sig, my_siginfo_t *info, void *uctx) { if (acquire) Acquire(thr, 0, (uptr)&sigactions[sig]); // Signals are generally asynchronous, so if we receive a signal when // ignores are enabled we should disable ignores. This is critical for sync // and interceptors, because otherwise we can miss synchronization and report // false races. int ignore_reads_and_writes = thr->ignore_reads_and_writes; int ignore_interceptors = thr->ignore_interceptors; int ignore_sync = thr->ignore_sync; if (!ctx->after_multithreaded_fork) { thr->ignore_reads_and_writes = 0; thr->fast_state.ClearIgnoreBit(); thr->ignore_interceptors = 0; thr->ignore_sync = 0; } // Ensure that the handler does not spoil errno. const int saved_errno = errno; errno = 99; // This code races with sigaction. Be careful to not read sa_sigaction twice. // Also need to remember pc for reporting before the call, // because the handler can reset it. volatile uptr pc = sigact ? (uptr)sigactions[sig].sa_sigaction : (uptr)sigactions[sig].sa_handler; if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) { if (sigact) ((sigactionhandler_t)pc)(sig, info, uctx); else ((sighandler_t)pc)(sig); } if (!ctx->after_multithreaded_fork) { thr->ignore_reads_and_writes = ignore_reads_and_writes; if (ignore_reads_and_writes) thr->fast_state.SetIgnoreBit(); thr->ignore_interceptors = ignore_interceptors; thr->ignore_sync = ignore_sync; } // We do not detect errno spoiling for SIGTERM, // because some SIGTERM handlers do spoil errno but reraise SIGTERM, // and tsan would report a false positive in such a case. // It's difficult to properly detect this situation (reraise), // because in the async signal processing case (when the handler is called directly // from rtl_generic_sighandler) we have not yet received the reraised // signal; and it looks too fragile to intercept all ways to reraise a signal. if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) { VarSizeStackTrace stack; // StackTrace::GetNextInstructionPc(pc) is used because the return address is // expected, OutputReport() will undo this.
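// Editor's note (illustrative sketch, not part of the original change): the
// errno = 99 sentinel above catches handlers that clobber errno, e.g.:
//   void handler(int sig) {
//     char buf[64];  // hypothetical logging work
//     int len = snprintf(buf, sizeof(buf), "got signal %d\n", sig);
//     write(2, buf, len);  // may overwrite errno and never restores it
//   }
// A conforming handler saves errno on entry and restores it before returning;
// if errno != 99 after the handler runs, the report below is produced.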
ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack); ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeErrnoInSignal); if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { rep.AddStack(stack, true); OutputReport(thr, rep); } } errno = saved_errno; } void ProcessPendingSignals(ThreadState *thr) { ThreadSignalContext *sctx = SigCtx(thr); if (sctx == 0 || atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0) return; atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed); atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); internal_sigfillset(&sctx->emptyset); int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset); CHECK_EQ(res, 0); for (int sig = 0; sig < kSigCount; sig++) { SignalDesc *signal = &sctx->pending_signals[sig]; if (signal->armed) { signal->armed = false; CallUserSignalHandler(thr, false, true, signal->sigaction, sig, &signal->siginfo, &signal->ctx); } } res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0); CHECK_EQ(res, 0); atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); } } // namespace __tsan static bool is_sync_signal(ThreadSignalContext *sctx, int sig) { return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS || // If we are sending a signal to ourselves, we must process it now. (sctx && sig == sctx->int_signal_send); } void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig, my_siginfo_t *info, void *ctx) { ThreadState *thr = cur_thread(); ThreadSignalContext *sctx = SigCtx(thr); if (sig < 0 || sig >= kSigCount) { VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig); return; } // Don't mess with synchronous signals. const bool sync = is_sync_signal(sctx, sig); if (sync || // If we are in a blocking function, we can safely process it now // (but check if we are in a recursive interceptor, // i.e. pthread_join()->munmap()). (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) { atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) { atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed); CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx); atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed); } else { // Be very conservative with when we do acquire in this case. // It's unsafe to do acquire in async handlers, because ThreadState // can be in an inconsistent state. // SIGSYS looks relatively safe -- it's synchronous and can actually // need some global state.
bool acq = (sig == SIGSYS); CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx); } atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); return; } if (sctx == 0) return; SignalDesc *signal = &sctx->pending_signals[sig]; if (signal->armed == false) { signal->armed = true; signal->sigaction = sigact; if (info) internal_memcpy(&signal->siginfo, info, sizeof(*info)); if (ctx) internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx)); atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed); } } static void rtl_sighandler(int sig) { rtl_generic_sighandler(false, sig, 0, 0); } static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) { rtl_generic_sighandler(true, sig, info, ctx); } TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) { // Note: if we call REAL(sigaction) directly for any reason without proxying // the signal handler through rtl_sigaction, very bad things will happen. // The handler will run synchronously and corrupt tsan per-thread state. SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old); if (old) internal_memcpy(old, &sigactions[sig], sizeof(*old)); if (act == 0) return 0; // Copy act into sigactions[sig]. // Can't use struct copy, because the compiler can emit a call to memcpy. // Can't use internal_memcpy, because it copies byte-by-byte, // and the signal handler reads the sa_handler concurrently. So it could read // some bytes from the old value and some from the new value. // Use volatile to prevent insertion of memcpy. sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler; sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags; internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask, sizeof(sigactions[sig].sa_mask)); #if !SANITIZER_FREEBSD && !SANITIZER_MAC sigactions[sig].sa_restorer = act->sa_restorer; #endif sigaction_t newact; internal_memcpy(&newact, act, sizeof(newact)); internal_sigfillset(&newact.sa_mask); if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) { if (newact.sa_flags & SA_SIGINFO) newact.sa_sigaction = rtl_sigaction; else newact.sa_handler = rtl_sighandler; } ReleaseStore(thr, pc, (uptr)&sigactions[sig]); int res = REAL(sigaction)(sig, &newact, 0); return res; } TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) { sigaction_t act; act.sa_handler = h; internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask)); act.sa_flags = 0; sigaction_t old; int res = sigaction(sig, &act, &old); if (res) return SIG_ERR; return old.sa_handler; } TSAN_INTERCEPTOR(int, raise, int sig) { SCOPED_TSAN_INTERCEPTOR(raise, sig); ThreadSignalContext *sctx = SigCtx(thr); CHECK_NE(sctx, 0); int prev = sctx->int_signal_send; sctx->int_signal_send = sig; int res = REAL(raise)(sig); CHECK_EQ(sctx->int_signal_send, sig); sctx->int_signal_send = prev; return res; } TSAN_INTERCEPTOR(int, kill, int pid, int sig) { SCOPED_TSAN_INTERCEPTOR(kill, pid, sig); ThreadSignalContext *sctx = SigCtx(thr); CHECK_NE(sctx, 0); int prev = sctx->int_signal_send; if (pid == (int)internal_getpid()) { sctx->int_signal_send = sig; } int res = REAL(kill)(pid, sig); if (pid == (int)internal_getpid()) { CHECK_EQ(sctx->int_signal_send, sig); sctx->int_signal_send = prev; } return res; } TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) { SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig); ThreadSignalContext *sctx = SigCtx(thr); CHECK_NE(sctx, 0); int prev = sctx->int_signal_send; if (tid == pthread_self()) { sctx->int_signal_send = sig; } int res = REAL(pthread_kill)(tid, sig); if (tid == pthread_self()) {
CHECK_EQ(sctx->int_signal_send, sig); sctx->int_signal_send = prev; } return res; } TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) { SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz); // It's intercepted merely to process pending signals. return REAL(gettimeofday)(tv, tz); } TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service, void *hints, void *rv) { SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv); // We miss atomic synchronization in getaddrinfo, // and can report a false race between malloc and free // inside of getaddrinfo. So ignore memory accesses. ThreadIgnoreBegin(thr, pc); int res = REAL(getaddrinfo)(node, service, hints, rv); ThreadIgnoreEnd(thr, pc); return res; } TSAN_INTERCEPTOR(int, fork, int fake) { if (cur_thread()->in_symbolizer) return REAL(fork)(fake); SCOPED_INTERCEPTOR_RAW(fork, fake); ForkBefore(thr, pc); int pid; { // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and // we'll assert in CheckNoLocks() unless we ignore interceptors. ScopedIgnoreInterceptors ignore; pid = REAL(fork)(fake); } if (pid == 0) { // child ForkChildAfter(thr, pc); FdOnFork(thr, pc); } else if (pid > 0) { // parent ForkParentAfter(thr, pc); } else { // error ForkParentAfter(thr, pc); } return pid; } TSAN_INTERCEPTOR(int, vfork, int fake) { // Some programs (e.g. openjdk) call close for all file descriptors // in the child process. Under tsan this leads to false positives, because // the address space is shared, so the parent process also thinks that // the descriptors are closed (while they are actually not), and // synchronization on them is missed. // Strictly speaking, this is undefined behavior, because the vfork child is // not allowed to call any functions other than exec/exit. But this is what // openjdk does, so we want to handle it. // We could disable interceptors in the child process. But it's not possible // to simply intercept and wrap vfork, because the vfork child is not allowed // to return from the function that calls vfork, and that's exactly what // we would do. So this would require some assembly trickery as well. // Instead we simply turn vfork into fork. return WRAP(fork)(fake); } #if !SANITIZER_MAC && !SANITIZER_ANDROID typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size, void *data); struct dl_iterate_phdr_data { ThreadState *thr; uptr pc; dl_iterate_phdr_cb_t cb; void *data; }; static bool IsAppNotRodata(uptr addr) { return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata; } static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size, void *data) { dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data; // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later // accessible in the dl_iterate_phdr callback. But we don't see synchronization // inside of the dynamic linker, so we "unpoison" it here in order to not // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough // because some libc functions call __libc_dlopen. if (info && IsAppNotRodata((uptr)info->dlpi_name)) MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name, internal_strlen(info->dlpi_name)); int res = cbdata->cb(info, size, cbdata->data); // Perform the check one more time in case info->dlpi_name was overwritten // by the user callback.
if (info && IsAppNotRodata((uptr)info->dlpi_name)) MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name, internal_strlen(info->dlpi_name)); return res; } TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) { SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data); dl_iterate_phdr_data cbdata; cbdata.thr = thr; cbdata.pc = pc; cbdata.cb = cb; cbdata.data = data; int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata); return res; } #endif static int OnExit(ThreadState *thr) { int status = Finalize(thr); FlushStreams(); return status; } struct TsanInterceptorContext { ThreadState *thr; const uptr caller_pc; const uptr pc; }; #if !SANITIZER_MAC static void HandleRecvmsg(ThreadState *thr, uptr pc, __sanitizer_msghdr *msg) { int fds[64]; int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds)); for (int i = 0; i < cnt; i++) FdEventCreate(thr, pc, fds[i]); } #endif #include "sanitizer_common/sanitizer_platform_interceptors.h" // Causes interceptor recursion (getaddrinfo() and fopen()) #undef SANITIZER_INTERCEPT_GETADDRINFO // These interceptors do not seem to be strictly necessary for tsan. // But we see cases where the interceptors consume 70% of execution time. // Memory blocks passed to fgetgrent_r are "written to" by tsan several times. // First, there is some recursion (getgrnam_r calls fgetgrent_r), and each // function "writes to" the buffer. Then, the same memory is "written to" // twice, first as buf and then as pwbufp (both of them refer to the same // addresses). #undef SANITIZER_INTERCEPT_GETPWENT #undef SANITIZER_INTERCEPT_GETPWENT_R #undef SANITIZER_INTERCEPT_FGETPWENT #undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS #undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS // We define our own. #if SANITIZER_INTERCEPT_TLS_GET_ADDR #define NEED_TLS_GET_ADDR #endif #undef SANITIZER_INTERCEPT_TLS_GET_ADDR #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \ INTERCEPT_FUNCTION_VER(name, ver) #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \ true) #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \ ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \ false) #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \ ctx = (void *)&_ctx; \ (void) ctx; #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...)
\ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \ ctx = (void *)&_ctx; \ (void) ctx; #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \ Acquire(thr, pc, File2addr(path)); \ if (file) { \ int fd = fileno_unlocked(file); \ if (fd >= 0) FdFileCreate(thr, pc, fd); \ } #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \ if (file) { \ int fd = fileno_unlocked(file); \ if (fd >= 0) FdClose(thr, pc, fd); \ } #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ libignore()->OnLibraryLoaded(filename) #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \ libignore()->OnLibraryUnloaded() #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u) #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \ Release(((TsanInterceptorContext *) ctx)->thr, pc, u) #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path)) #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd) #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd) #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \ FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd) #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd) #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name) #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name) #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name) #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \ OnExit(((TsanInterceptorContext *) ctx)->thr) #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \ MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)m) #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \ MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)m) #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \ MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)m) #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \ MutexRepair(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)m) #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \ MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)m) #if !SANITIZER_MAC #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \ HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, msg) #endif #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ if (TsanThread *t = GetCurrentThread()) { \ *begin = t->tls_begin(); \ *end = t->tls_end(); \ } else { \ *begin = *end = 0; \ } #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() #include "sanitizer_common/sanitizer_common_interceptors.inc" #define TSAN_SYSCALL() \ ThreadState *thr = cur_thread(); \ if (thr->ignore_interceptors) \ return; \ ScopedSyscall scoped_syscall(thr) \ /**/ struct ScopedSyscall { ThreadState *thr; explicit ScopedSyscall(ThreadState *thr) : thr(thr) { Initialize(thr); } ~ScopedSyscall() { ProcessPendingSignals(thr); } }; #if !SANITIZER_FREEBSD && !SANITIZER_MAC static void syscall_access_range(uptr 
pc, uptr p, uptr s, bool write) { TSAN_SYSCALL(); MemoryAccessRange(thr, pc, p, s, write); } static void syscall_acquire(uptr pc, uptr addr) { TSAN_SYSCALL(); Acquire(thr, pc, addr); DPrintf("syscall_acquire(%p)\n", addr); } static void syscall_release(uptr pc, uptr addr) { TSAN_SYSCALL(); DPrintf("syscall_release(%p)\n", addr); Release(thr, pc, addr); } static void syscall_fd_close(uptr pc, int fd) { TSAN_SYSCALL(); FdClose(thr, pc, fd); } static USED void syscall_fd_acquire(uptr pc, int fd) { TSAN_SYSCALL(); FdAcquire(thr, pc, fd); DPrintf("syscall_fd_acquire(%p)\n", fd); } static USED void syscall_fd_release(uptr pc, int fd) { TSAN_SYSCALL(); DPrintf("syscall_fd_release(%p)\n", fd); FdRelease(thr, pc, fd); } static void syscall_pre_fork(uptr pc) { TSAN_SYSCALL(); ForkBefore(thr, pc); } static void syscall_post_fork(uptr pc, int pid) { TSAN_SYSCALL(); if (pid == 0) { // child ForkChildAfter(thr, pc); FdOnFork(thr, pc); } else if (pid > 0) { // parent ForkParentAfter(thr, pc); } else { // error ForkParentAfter(thr, pc); } } #endif #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false) #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true) #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ do { \ (void)(p); \ (void)(s); \ } while (false) #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \ do { \ (void)(p); \ (void)(s); \ } while (false) #define COMMON_SYSCALL_ACQUIRE(addr) \ syscall_acquire(GET_CALLER_PC(), (uptr)(addr)) #define COMMON_SYSCALL_RELEASE(addr) \ syscall_release(GET_CALLER_PC(), (uptr)(addr)) #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd) #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd) #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd) #define COMMON_SYSCALL_PRE_FORK() \ syscall_pre_fork(GET_CALLER_PC()) #define COMMON_SYSCALL_POST_FORK(res) \ syscall_post_fork(GET_CALLER_PC(), res) #include "sanitizer_common/sanitizer_common_syscalls.inc" #ifdef NEED_TLS_GET_ADDR // Define our own interceptor instead of sanitizer_common's for three reasons: // 1. It must not process pending signals. // Signal handlers may contain MOVDQA instruction (see below). // 2. It must be as simple as possible to not contain MOVDQA. // 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which // is empty for tsan (meant only for msan). // Note: __tls_get_addr can be called with mis-aligned stack due to: // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066 // So the interceptor must work with a mis-aligned stack; in particular, it must // not execute MOVDQA with stack addresses. TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) { void *res = REAL(__tls_get_addr)(arg); ThreadState *thr = cur_thread(); if (!thr) return res; DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, thr->tls_size); if (!dtv) return res; // New DTLS block has been allocated. MemoryResetRange(thr, 0, dtv->beg, dtv->size); return res; } #endif namespace __tsan { static void finalize(void *arg) { ThreadState *thr = cur_thread(); int status = Finalize(thr); // Make sure the output is not lost. FlushStreams(); if (status) Die(); } #if !SANITIZER_MAC && !SANITIZER_ANDROID static void unreachable() { Report("FATAL: ThreadSanitizer: unreachable called\n"); Die(); } #endif void InitializeInterceptors() { #if !SANITIZER_MAC // We need to set it up early, because functions like dlsym() can call it.
REAL(memset) = internal_memset; REAL(memcpy) = internal_memcpy; #endif // Instruct libc malloc to consume less memory. #if SANITIZER_LINUX mallopt(1, 0); // M_MXFAST mallopt(-3, 32*1024); // M_MMAP_THRESHOLD #endif InitializeCommonInterceptors(); #if !SANITIZER_MAC // We can not use TSAN_INTERCEPT to get setjmp addr, // because it does &setjmp and setjmp is not present in some versions of libc. using __interception::GetRealFunctionAddress; GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0); GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0); GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0); GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0); #endif TSAN_INTERCEPT(longjmp); TSAN_INTERCEPT(siglongjmp); TSAN_INTERCEPT(malloc); TSAN_INTERCEPT(__libc_memalign); TSAN_INTERCEPT(calloc); TSAN_INTERCEPT(realloc); TSAN_INTERCEPT(free); TSAN_INTERCEPT(cfree); TSAN_INTERCEPT(mmap); TSAN_MAYBE_INTERCEPT_MMAP64; TSAN_INTERCEPT(munmap); TSAN_MAYBE_INTERCEPT_MEMALIGN; TSAN_INTERCEPT(valloc); TSAN_MAYBE_INTERCEPT_PVALLOC; TSAN_INTERCEPT(posix_memalign); TSAN_INTERCEPT(strcpy); // NOLINT TSAN_INTERCEPT(strncpy); TSAN_INTERCEPT(strdup); TSAN_INTERCEPT(pthread_create); TSAN_INTERCEPT(pthread_join); TSAN_INTERCEPT(pthread_detach); TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE); TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE); TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE); TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE); TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE); TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE); TSAN_INTERCEPT(pthread_mutex_init); TSAN_INTERCEPT(pthread_mutex_destroy); TSAN_INTERCEPT(pthread_mutex_trylock); TSAN_INTERCEPT(pthread_mutex_timedlock); TSAN_INTERCEPT(pthread_spin_init); TSAN_INTERCEPT(pthread_spin_destroy); TSAN_INTERCEPT(pthread_spin_lock); TSAN_INTERCEPT(pthread_spin_trylock); TSAN_INTERCEPT(pthread_spin_unlock); TSAN_INTERCEPT(pthread_rwlock_init); TSAN_INTERCEPT(pthread_rwlock_destroy); TSAN_INTERCEPT(pthread_rwlock_rdlock); TSAN_INTERCEPT(pthread_rwlock_tryrdlock); TSAN_INTERCEPT(pthread_rwlock_timedrdlock); TSAN_INTERCEPT(pthread_rwlock_wrlock); TSAN_INTERCEPT(pthread_rwlock_trywrlock); TSAN_INTERCEPT(pthread_rwlock_timedwrlock); TSAN_INTERCEPT(pthread_rwlock_unlock); TSAN_INTERCEPT(pthread_barrier_init); TSAN_INTERCEPT(pthread_barrier_destroy); TSAN_INTERCEPT(pthread_barrier_wait); TSAN_INTERCEPT(pthread_once); TSAN_INTERCEPT(fstat); TSAN_MAYBE_INTERCEPT___FXSTAT; TSAN_MAYBE_INTERCEPT_FSTAT64; TSAN_MAYBE_INTERCEPT___FXSTAT64; TSAN_INTERCEPT(open); TSAN_MAYBE_INTERCEPT_OPEN64; TSAN_INTERCEPT(creat); TSAN_MAYBE_INTERCEPT_CREAT64; TSAN_INTERCEPT(dup); TSAN_INTERCEPT(dup2); TSAN_INTERCEPT(dup3); TSAN_MAYBE_INTERCEPT_EVENTFD; TSAN_MAYBE_INTERCEPT_SIGNALFD; TSAN_MAYBE_INTERCEPT_INOTIFY_INIT; TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1; TSAN_INTERCEPT(socket); TSAN_INTERCEPT(socketpair); TSAN_INTERCEPT(connect); TSAN_INTERCEPT(bind); TSAN_INTERCEPT(listen); TSAN_MAYBE_INTERCEPT_EPOLL; TSAN_INTERCEPT(close); TSAN_MAYBE_INTERCEPT___CLOSE; TSAN_MAYBE_INTERCEPT___RES_ICLOSE; TSAN_INTERCEPT(pipe); TSAN_INTERCEPT(pipe2); TSAN_INTERCEPT(unlink); TSAN_INTERCEPT(tmpfile); TSAN_MAYBE_INTERCEPT_TMPFILE64; TSAN_INTERCEPT(fread); TSAN_INTERCEPT(fwrite); TSAN_INTERCEPT(abort); TSAN_INTERCEPT(puts); TSAN_INTERCEPT(rmdir); TSAN_INTERCEPT(closedir); TSAN_INTERCEPT(sigaction); TSAN_INTERCEPT(signal); TSAN_INTERCEPT(sigsuspend); TSAN_INTERCEPT(sigblock); TSAN_INTERCEPT(sigsetmask); 
TSAN_INTERCEPT(pthread_sigmask); TSAN_INTERCEPT(raise); TSAN_INTERCEPT(kill); TSAN_INTERCEPT(pthread_kill); TSAN_INTERCEPT(sleep); TSAN_INTERCEPT(usleep); TSAN_INTERCEPT(nanosleep); TSAN_INTERCEPT(gettimeofday); TSAN_INTERCEPT(getaddrinfo); TSAN_INTERCEPT(fork); TSAN_INTERCEPT(vfork); #if !SANITIZER_ANDROID TSAN_INTERCEPT(dl_iterate_phdr); #endif TSAN_INTERCEPT(on_exit); TSAN_INTERCEPT(__cxa_atexit); TSAN_INTERCEPT(_exit); #ifdef NEED_TLS_GET_ADDR TSAN_INTERCEPT(__tls_get_addr); #endif #if !SANITIZER_MAC && !SANITIZER_ANDROID // Need to set it up, because interceptors check that the function is resolved. // But atexit is emitted directly into the module, so it can't be resolved. REAL(atexit) = (int(*)(void(*)()))unreachable; #endif if (REAL(__cxa_atexit)(&finalize, 0, 0)) { Printf("ThreadSanitizer: failed to setup atexit callback\n"); Die(); } #if !SANITIZER_MAC if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { Printf("ThreadSanitizer: failed to create thread key\n"); Die(); } #endif FdInit(); } } // namespace __tsan // Invisible barrier for tests. // There were several unsuccessful iterations for this functionality: // 1. Initially it was implemented in user code using // REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on // MacOS. Futexes are linux-specific for this matter. // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic // "as-if synchronized via sleep" messages in reports which failed some // output tests. // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan- // visible events, which led to "failed to restore stack trace" failures. // Note that the no_sanitize_thread attribute does not turn off atomic interception, // so attaching it to the function defined in user code does not help. // That's why we now have what we have. extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(u64 *barrier, u32 count) { if (count >= (1 << 8)) { Printf("barrier_init: count is too large (%d)\n", count); Die(); } // The 8 LSBs are the thread count, the remaining bits are the count of entered threads. *barrier = count; } extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(u64 *barrier) { unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED); unsigned old_epoch = (old >> 8) / (old & 0xff); for (;;) { unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED); unsigned cur_epoch = (cur >> 8) / (cur & 0xff); if (cur_epoch != old_epoch) return; internal_sched_yield(); } } Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors_mac.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors_mac.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors_mac.cc (revision 320961) @@ -1,381 +1,388 @@ //===-- tsan_interceptors_mac.cc ------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Mac-specific interceptors.
//===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_MAC #include "interception/interception.h" #include "tsan_interceptors.h" #include "tsan_interface.h" #include "tsan_interface_ann.h" #include <libkern/OSAtomic.h> + +#if defined(__has_include) && __has_include(<xpc/xpc.h>) #include <xpc/xpc.h> +#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>) typedef long long_t; // NOLINT namespace __tsan { // The non-barrier versions of OSAtomic* functions are semantically mo_relaxed, // but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are // actually aliases of each other, and we cannot have different interceptors for // them, because they're actually the same function. Thus, we have to stay // conservative and treat the non-barrier versions as mo_acq_rel. static const morder kMacOrderBarrier = mo_acq_rel; static const morder kMacOrderNonBarrier = mo_acq_rel; #define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \ } #define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \ } #define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \ SCOPED_TSAN_INTERCEPTOR(f, ptr); \ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \ } #define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \ mo) \ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \ SCOPED_TSAN_INTERCEPTOR(f, ptr); \ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \ } #define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \ m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \ kMacOrderNonBarrier) \ m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \ kMacOrderBarrier) \ m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \ kMacOrderNonBarrier) \ m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \ kMacOrderBarrier) #define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \ m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \ kMacOrderNonBarrier) \ m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \ kMacOrderBarrier) \ m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \ kMacOrderNonBarrier) \ m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \ __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier) OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add, OSATOMIC_INTERCEPTOR_PLUS_X) OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add, OSATOMIC_INTERCEPTOR_PLUS_1) OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub, OSATOMIC_INTERCEPTOR_MINUS_1) OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR) OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and, OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR) OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor, OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR) #define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \ TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \ SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \ return
tsan_atomic_f##_compare_exchange_strong( \ (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \ kMacOrderNonBarrier, kMacOrderNonBarrier); \ } \ \ TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \ t volatile *ptr) { \ SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \ return tsan_atomic_f##_compare_exchange_strong( \ (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \ kMacOrderBarrier, kMacOrderNonBarrier); \ } OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int) OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64, long_t) OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64, void *) OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32, int32_t) OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64, int64_t) #define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \ TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \ SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \ char *byte_ptr = ((char *)ptr) + (n >> 3); \ char bit = 0x80u >> (n & 7); \ char mask = clear ? ~bit : bit; \ char orig_byte = op((a8 *)byte_ptr, mask, mo); \ return orig_byte & bit; \ } #define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \ OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \ OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier) OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false) OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and, true) TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item, size_t offset) { SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset); __tsan_release(item); REAL(OSAtomicEnqueue)(list, item, offset); } TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) { SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset); void *item = REAL(OSAtomicDequeue)(list, offset); if (item) __tsan_acquire(item); return item; } // OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X. 
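// Editor's note (illustrative sketch, not part of the original change): the
// interceptors below pair a Release on enqueue with an Acquire on dequeue,
// the usual message-passing discipline. Roughly, for a queue head q and an
// item with a linkage field at byte offset off:
//   // producer:
//   item->payload = 42;                  // plain store
//   OSAtomicFifoEnqueue(&q, item, off);  // release: publishes payload
//   // consumer:
//   void *it = OSAtomicFifoDequeue(&q, off);  // acquire: sees payload
//   if (it) consume(((Item *)it)->payload);
// Without the Release/Acquire pair, tsan would report the payload accesses
// as a race.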
#if !SANITIZER_IOS TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item, size_t offset) { SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset); __tsan_release(item); REAL(OSAtomicFifoEnqueue)(list, item, offset); } TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list, size_t offset) { SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset); void *item = REAL(OSAtomicFifoDequeue)(list, offset); if (item) __tsan_acquire(item); return item; } #endif TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) { CHECK(!cur_thread()->is_dead); if (!cur_thread()->is_inited) { return REAL(OSSpinLockLock)(lock); } SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock); REAL(OSSpinLockLock)(lock); Acquire(thr, pc, (uptr)lock); } TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) { CHECK(!cur_thread()->is_dead); if (!cur_thread()->is_inited) { return REAL(OSSpinLockTry)(lock); } SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock); bool result = REAL(OSSpinLockTry)(lock); if (result) Acquire(thr, pc, (uptr)lock); return result; } TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) { CHECK(!cur_thread()->is_dead); if (!cur_thread()->is_inited) { return REAL(OSSpinLockUnlock)(lock); } SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock); Release(thr, pc, (uptr)lock); REAL(OSSpinLockUnlock)(lock); } TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) { CHECK(!cur_thread()->is_dead); if (!cur_thread()->is_inited) { return REAL(os_lock_lock)(lock); } SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock); REAL(os_lock_lock)(lock); Acquire(thr, pc, (uptr)lock); } TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) { CHECK(!cur_thread()->is_dead); if (!cur_thread()->is_inited) { return REAL(os_lock_trylock)(lock); } SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock); bool result = REAL(os_lock_trylock)(lock); if (result) Acquire(thr, pc, (uptr)lock); return result; } TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) { CHECK(!cur_thread()->is_dead); if (!cur_thread()->is_inited) { return REAL(os_lock_unlock)(lock); } SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock); Release(thr, pc, (uptr)lock); REAL(os_lock_unlock)(lock); } +#if defined(__has_include) && __has_include(<xpc/xpc.h>) + TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler, xpc_connection_t connection, xpc_handler_t handler) { SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection, handler); Release(thr, pc, (uptr)connection); xpc_handler_t new_handler = ^(xpc_object_t object) { { SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler); Acquire(thr, pc, (uptr)connection); } handler(object); }; REAL(xpc_connection_set_event_handler)(connection, new_handler); } TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection, dispatch_block_t barrier) { SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier); Release(thr, pc, (uptr)connection); dispatch_block_t new_barrier = ^() { { SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier); Acquire(thr, pc, (uptr)connection); } barrier(); }; REAL(xpc_connection_send_barrier)(connection, new_barrier); } TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply, xpc_connection_t connection, xpc_object_t message, dispatch_queue_t replyq, xpc_handler_t handler) { SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection, message, replyq, handler); Release(thr, pc, (uptr)connection); xpc_handler_t new_handler = ^(xpc_object_t object) { { SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
Acquire(thr, pc, (uptr)connection); } handler(object); }; REAL(xpc_connection_send_message_with_reply) (connection, message, replyq, new_handler); } TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) { SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection); Release(thr, pc, (uptr)connection); REAL(xpc_connection_cancel)(connection); } + +#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>) // On macOS, libc++ is always linked dynamically, so intercepting works the // usual way. #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR namespace { struct fake_shared_weak_count { volatile a64 shared_owners; volatile a64 shared_weak_owners; virtual void _unused_0x0() = 0; virtual void _unused_0x8() = 0; virtual void on_zero_shared() = 0; virtual void _unused_0x18() = 0; virtual void on_zero_shared_weak() = 0; }; } // namespace // The following code adds libc++ interceptors for: // void __shared_weak_count::__release_shared() _NOEXCEPT; // bool __shared_count::__release_shared() _NOEXCEPT; // Shared and weak pointers in C++ maintain reference counts via atomics in // libc++.dylib, which are TSan-invisible, and this leads to false positives in // destructor code. These interceptors re-implement the whole functions so that // the mo_acq_rel semantics of the atomic decrement are visible. // // Unfortunately, the interceptors cannot simply Acquire/Release some sync // object and call the original function, because there would be a race between // the sync and the destruction of the object. Calling both under a lock will // not work because the destructor can invoke this interceptor again (and even // in a different thread, so recursive locks don't help). STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv, fake_shared_weak_count *o) { if (!flags()->shared_ptr_interceptor) return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o); SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv, o); if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) { Acquire(thr, pc, (uptr)&o->shared_owners); o->on_zero_shared(); if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) == 0) { Acquire(thr, pc, (uptr)&o->shared_weak_owners); o->on_zero_shared_weak(); } } } STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv, fake_shared_weak_count *o) { if (!flags()->shared_ptr_interceptor) return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o); SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o); if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) { Acquire(thr, pc, (uptr)&o->shared_owners); o->on_zero_shared(); return true; } return false; } namespace { struct call_once_callback_args { void (*orig_func)(void *arg); void *orig_arg; void *flag; }; void call_once_callback_wrapper(void *arg) { call_once_callback_args *new_args = (call_once_callback_args *)arg; new_args->orig_func(new_args->orig_arg); __tsan_release(new_args->flag); } } // namespace // This adds a libc++ interceptor for: // void __call_once(volatile unsigned long&, void*, void(*)(void*)); // C++11 call_once is implemented via an internal function __call_once which is // inside libc++.dylib, and the atomic release store inside it is thus // TSan-invisible. To avoid false positives, this interceptor wraps the callback // function and performs an explicit Release after the user code has run.
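// Editor's note (illustrative sketch, not part of the original change): the
// pattern this interceptor makes visible to tsan, roughly:
//   static std::once_flag flag;
//   static int *data;
//   // threads A and B both execute:
//   std::call_once(flag, [] { data = new int(42); });
//   use(*data);
// The release store that orders the initialization happens inside
// libc++.dylib and is invisible to tsan; wrapping the callback restores the
// release edge, so thread B's read of *data is seen as synchronized.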
STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag, void *arg, void (*func)(void *arg)) { call_once_callback_args new_args = {func, arg, flag}; REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args, call_once_callback_wrapper); } } // namespace __tsan #endif // SANITIZER_MAC Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_ann.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_ann.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_ann.cc (revision 320961) @@ -1,553 +1,553 @@ //===-- tsan_interface_ann.cc ---------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "tsan_interface_ann.h" #include "tsan_mutex.h" #include "tsan_report.h" #include "tsan_rtl.h" #include "tsan_mman.h" #include "tsan_flags.h" #include "tsan_platform.h" #include "tsan_vector.h" #define CALLERPC ((uptr)__builtin_return_address(0)) using namespace __tsan; // NOLINT namespace __tsan { class ScopedAnnotation { public: ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc) : thr_(thr) { FuncEntry(thr_, pc); DPrintf("#%d: annotation %s()\n", thr_->tid, aname); } ~ScopedAnnotation() { FuncExit(thr_); CheckNoLocks(thr_); } private: ThreadState *const thr_; }; #define SCOPED_ANNOTATION_RET(typ, ret) \ if (!flags()->enable_annotations) \ return ret; \ ThreadState *thr = cur_thread(); \ const uptr caller_pc = (uptr)__builtin_return_address(0); \ StatInc(thr, StatAnnotation); \ StatInc(thr, Stat##typ); \ ScopedAnnotation sa(thr, __func__, caller_pc); \ const uptr pc = StackTrace::GetCurrentPc(); \ (void)pc; \ /**/ #define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, ) static const int kMaxDescLen = 128; struct ExpectRace { ExpectRace *next; ExpectRace *prev; atomic_uintptr_t hitcount; atomic_uintptr_t addcount; uptr addr; uptr size; char *file; int line; char desc[kMaxDescLen]; }; struct DynamicAnnContext { Mutex mtx; ExpectRace expect; ExpectRace benign; DynamicAnnContext() : mtx(MutexTypeAnnotations, StatMtxAnnotations) { } }; static DynamicAnnContext *dyn_ann_ctx; static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64); static void AddExpectRace(ExpectRace *list, char *f, int l, uptr addr, uptr size, char *desc) { ExpectRace *race = list->next; for (; race != list; race = race->next) { if (race->addr == addr && race->size == size) { atomic_store_relaxed(&race->addcount, atomic_load_relaxed(&race->addcount) + 1); return; } } race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace)); race->addr = addr; race->size = size; race->file = f; race->line = l; race->desc[0] = 0; atomic_store_relaxed(&race->hitcount, 0); atomic_store_relaxed(&race->addcount, 1); if (desc) { int i = 0; for (; i < kMaxDescLen - 1 && desc[i]; i++) race->desc[i] = desc[i]; race->desc[i] = 0; } race->prev = list; race->next = list->next; race->next->prev = race; list->next = race; } 
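// Editor's note (illustrative sketch, not part of the original change): the
// expect/benign lists are populated from the dynamic annotations interface,
// e.g.:
//   int stats_counter;  // racy on purpose; reads are approximate
//   ANNOTATE_BENIGN_RACE_SIZED(&stats_counter, sizeof(stats_counter),
//                              "approximate stats counter");
// Each annotated range becomes an ExpectRace node; its hitcount/addcount
// feed the summary printed by PrintMatchedBenignRaces() below.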
static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    uptr maxbegin = max(race->addr, addr);
    uptr minend = min(race->addr + race->size, addr + size);
    if (maxbegin < minend)
      return race;
  }
  return 0;
}

static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
  ExpectRace *race = FindRace(list, addr, size);
  if (race == 0)
    return false;
  DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
      race->desc, race->addr, (int)race->size, race->file, race->line);
  atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
  return true;
}

static void InitList(ExpectRace *list) {
  list->next = list;
  list->prev = list;
}

void InitializeDynamicAnnotations() {
  dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
  InitList(&dyn_ann_ctx->expect);
  InitList(&dyn_ann_ctx->benign);
}

bool IsExpectedReport(uptr addr, uptr size) {
  ReadLock lock(&dyn_ann_ctx->mtx);
  if (CheckContains(&dyn_ann_ctx->expect, addr, size))
    return true;
  if (CheckContains(&dyn_ann_ctx->benign, addr, size))
    return true;
  return false;
}

static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
    int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
  ExpectRace *list = &dyn_ann_ctx->benign;
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    (*unique_count)++;
    const uptr cnt = atomic_load_relaxed(&(race->*counter));
    if (cnt == 0)
      continue;
    *hit_count += cnt;
    uptr i = 0;
    for (; i < matched->Size(); i++) {
      ExpectRace *race0 = &(*matched)[i];
      if (race->line == race0->line
          && internal_strcmp(race->file, race0->file) == 0
          && internal_strcmp(race->desc, race0->desc) == 0) {
        atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
        break;
      }
    }
    if (i == matched->Size())
      matched->PushBack(*race);
  }
}

void PrintMatchedBenignRaces() {
  Lock lock(&dyn_ann_ctx->mtx);
  int unique_count = 0;
  int hit_count = 0;
  int add_count = 0;
  Vector<ExpectRace> hit_matched(MBlockScopedBuf);
  CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
      &ExpectRace::hitcount);
  Vector<ExpectRace> add_matched(MBlockScopedBuf);
  CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
      &ExpectRace::addcount);
  if (hit_matched.Size()) {
    Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
        hit_count, (int)internal_getpid());
    for (uptr i = 0; i < hit_matched.Size(); i++) {
      Printf("%d %s:%d %s\n",
          atomic_load_relaxed(&hit_matched[i].hitcount),
          hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
    }
  }
  if (add_matched.Size()) {
    Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
           " (pid=%d):\n",
        add_count, unique_count, (int)internal_getpid());
    for (uptr i = 0; i < add_matched.Size(); i++) {
      Printf("%d %s:%d %s\n",
          atomic_load_relaxed(&add_matched[i].addcount),
          add_matched[i].file, add_matched[i].line, add_matched[i].desc);
    }
  }
}

static void ReportMissedExpectedRace(ExpectRace *race) {
  Printf("==================\n");
  Printf("WARNING: ThreadSanitizer: missed expected data race\n");
  Printf("  %s addr=%zx %s:%d\n",
      race->desc, race->addr, race->file, race->line);
  Printf("==================\n");
}
}  // namespace __tsan

using namespace __tsan;  // NOLINT

extern "C" {
void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
  Release(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
  Acquire(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
SCOPED_ANNOTATION(AnnotateCondVarSignal); } void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) { SCOPED_ANNOTATION(AnnotateCondVarSignalAll); } void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) { SCOPED_ANNOTATION(AnnotateMutexIsNotPHB); } void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv, uptr lock) { SCOPED_ANNOTATION(AnnotateCondVarWait); } void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockCreate); MutexCreate(thr, pc, m, MutexFlagWriteReentrant); } void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockCreateStatic); MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit); } void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockDestroy); MutexDestroy(thr, pc, m); } void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) { SCOPED_ANNOTATION(AnnotateRWLockAcquired); if (is_w) MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock); else MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock); } void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m, uptr is_w) { SCOPED_ANNOTATION(AnnotateRWLockReleased); if (is_w) MutexUnlock(thr, pc, m); else MutexReadUnlock(thr, pc, m); } void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) { SCOPED_ANNOTATION(AnnotateTraceMemory); } void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) { SCOPED_ANNOTATION(AnnotateFlushState); } void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem, uptr size) { SCOPED_ANNOTATION(AnnotateNewMemory); } void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) { SCOPED_ANNOTATION(AnnotateNoOp); } void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) { SCOPED_ANNOTATION(AnnotateFlushExpectedRaces); Lock lock(&dyn_ann_ctx->mtx); while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) { ExpectRace *race = dyn_ann_ctx->expect.next; if (atomic_load_relaxed(&race->hitcount) == 0) { ctx->nmissed_expected++; ReportMissedExpectedRace(race); } race->prev->next = race->next; race->next->prev = race->prev; internal_free(race); } } void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection( char *f, int l, int enable) { SCOPED_ANNOTATION(AnnotateEnableRaceDetection); // FIXME: Reconsider this functionality later. It may be irrelevant. 
} void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar( char *f, int l, uptr mu) { SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar); } void INTERFACE_ATTRIBUTE AnnotatePCQGet( char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQGet); } void INTERFACE_ATTRIBUTE AnnotatePCQPut( char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQPut); } void INTERFACE_ATTRIBUTE AnnotatePCQDestroy( char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQDestroy); } void INTERFACE_ATTRIBUTE AnnotatePCQCreate( char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQCreate); } void INTERFACE_ATTRIBUTE AnnotateExpectRace( char *f, int l, uptr mem, char *desc) { SCOPED_ANNOTATION(AnnotateExpectRace); Lock lock(&dyn_ann_ctx->mtx); AddExpectRace(&dyn_ann_ctx->expect, f, l, mem, 1, desc); DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l); } static void BenignRaceImpl( char *f, int l, uptr mem, uptr size, char *desc) { Lock lock(&dyn_ann_ctx->mtx); AddExpectRace(&dyn_ann_ctx->benign, f, l, mem, size, desc); DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l); } // FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm. void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized( char *f, int l, uptr mem, uptr size, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRaceSized); BenignRaceImpl(f, l, mem, size, desc); } void INTERFACE_ATTRIBUTE AnnotateBenignRace( char *f, int l, uptr mem, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRace); BenignRaceImpl(f, l, mem, 1, desc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin); ThreadIgnoreBegin(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd); ThreadIgnoreEnd(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin); ThreadIgnoreBegin(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd); ThreadIgnoreEnd(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin); ThreadIgnoreSyncBegin(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd); ThreadIgnoreSyncEnd(thr, pc); } void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange( char *f, int l, uptr addr, uptr size) { SCOPED_ANNOTATION(AnnotatePublishMemoryRange); } void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange( char *f, int l, uptr addr, uptr size) { SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange); } void INTERFACE_ATTRIBUTE AnnotateThreadName( char *f, int l, char *name) { SCOPED_ANNOTATION(AnnotateThreadName); ThreadSetName(thr, name); } // We deliberately omit the implementation of WTFAnnotateHappensBefore() and // WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate // atomic operations, which should be handled by ThreadSanitizer correctly. 
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensBefore); } void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensAfter); } void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized( char *f, int l, uptr mem, uptr sz, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRaceSized); BenignRaceImpl(f, l, mem, sz, desc); } int INTERFACE_ATTRIBUTE RunningOnValgrind() { return flags()->running_on_valgrind; } double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) { return 10.0; } const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) { if (internal_strcmp(query, "pure_happens_before") == 0) return "1"; else return "0"; } void INTERFACE_ATTRIBUTE AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {} void INTERFACE_ATTRIBUTE AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {} // Note: the parameter is called flagz, because flags is already taken // by the global function that returns flags. INTERFACE_ATTRIBUTE void __tsan_mutex_create(void *m, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_create); MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask); } INTERFACE_ATTRIBUTE void __tsan_mutex_destroy(void *m, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_destroy); MutexDestroy(thr, pc, (uptr)m, flagz); } INTERFACE_ATTRIBUTE void __tsan_mutex_pre_lock(void *m, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_pre_lock); if (!(flagz & MutexFlagTryLock)) { if (flagz & MutexFlagReadLock) MutexPreReadLock(thr, pc, (uptr)m); else MutexPreLock(thr, pc, (uptr)m); } - ThreadIgnoreBegin(thr, pc, false); - ThreadIgnoreSyncBegin(thr, pc, false); + ThreadIgnoreBegin(thr, pc, /*save_stack=*/false); + ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false); } INTERFACE_ATTRIBUTE void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) { SCOPED_ANNOTATION(__tsan_mutex_post_lock); ThreadIgnoreSyncEnd(thr, pc); ThreadIgnoreEnd(thr, pc); if (!(flagz & MutexFlagTryLockFailed)) { if (flagz & MutexFlagReadLock) MutexPostReadLock(thr, pc, (uptr)m, flagz); else MutexPostLock(thr, pc, (uptr)m, flagz, rec); } } INTERFACE_ATTRIBUTE int __tsan_mutex_pre_unlock(void *m, unsigned flagz) { SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0); int ret = 0; if (flagz & MutexFlagReadLock) { CHECK(!(flagz & MutexFlagRecursiveUnlock)); MutexReadUnlock(thr, pc, (uptr)m); } else { ret = MutexUnlock(thr, pc, (uptr)m, flagz); } - ThreadIgnoreBegin(thr, pc, false); - ThreadIgnoreSyncBegin(thr, pc, false); + ThreadIgnoreBegin(thr, pc, /*save_stack=*/false); + ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false); return ret; } INTERFACE_ATTRIBUTE void __tsan_mutex_post_unlock(void *m, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_post_unlock); ThreadIgnoreSyncEnd(thr, pc); ThreadIgnoreEnd(thr, pc); } INTERFACE_ATTRIBUTE void __tsan_mutex_pre_signal(void *addr, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_pre_signal); - ThreadIgnoreBegin(thr, pc, false); - ThreadIgnoreSyncBegin(thr, pc, false); + ThreadIgnoreBegin(thr, pc, /*save_stack=*/false); + ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false); } INTERFACE_ATTRIBUTE void __tsan_mutex_post_signal(void *addr, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_post_signal); ThreadIgnoreSyncEnd(thr, pc); ThreadIgnoreEnd(thr, pc); } INTERFACE_ATTRIBUTE void __tsan_mutex_pre_divert(void *addr, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_pre_divert); // Exit from ignore region started in 
// __tsan_mutex_pre_lock/unlock/signal.
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_divert);
-  ThreadIgnoreBegin(thr, pc, false);
-  ThreadIgnoreSyncBegin(thr, pc, false);
+  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}
}  // extern "C"
Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_atomic.cc
===================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_atomic.cc	(revision 320960)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_atomic.cc	(revision 320961)
@@ -1,950 +1,956 @@
//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume
      || mo == mo_acquire || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
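  // Editor's note: the emulation below is a classic CAS retry loop. Take a
  // snapshot cmp, compute ~(cmp & op), and try to install it; on contention
  // __sync_val_compare_and_swap returns the fresher value, which becomes the
  // next snapshot. The pre-update value is returned, matching the semantics
  // of __sync_fetch_and_nand.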
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
-static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
-    morder mo) {
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
-  AcquireImpl(thr, pc, &s->clock);
+  // Don't create sync object if it does not exist yet. For example, an atomic
+  // pointer is initialized to nullptr and then periodically acquire-loaded.
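  // Editor's illustration of that pattern (not part of this change):
  //
  //   std::atomic<Config *> g_cfg{nullptr};
  //   ...
  //   Config *c = g_cfg.load(std::memory_order_acquire);  // hot path
  //   if (c) Use(*c);
  //
  // Previously every such acquire-load took GetOrCreateAndLock and
  // materialized a SyncVar for &g_cfg even while the load only ever returned
  // nullptr; with GetIfExistsAndLock the null-reading fast path allocates
  // nothing.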
T v = NoTsanAtomicLoad(a, mo); - s->mtx.ReadUnlock(); + SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false); + if (s) { + AcquireImpl(thr, pc, &s->clock); + // Re-read under sync mutex because we need a consistent snapshot + // of the value and the clock we acquire. + v = NoTsanAtomicLoad(a, mo); + s->mtx.ReadUnlock(); + } MemoryReadAtomic(thr, pc, (uptr)a, SizeLog()); return v; } template static void NoTsanAtomicStore(volatile T *a, T v, morder mo) { atomic_store(to_atomic(a), v, to_mo(mo)); } #if __TSAN_HAS_INT128 && !SANITIZER_GO static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) { SpinMutexLock lock(&mutex128); *a = v; } #endif template static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { CHECK(IsStoreOrder(mo)); MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog()); // This fast-path is critical for performance. // Assume the access is atomic. // Strictly saying even relaxed store cuts off release sequence, // so must reset the clock. if (!IsReleaseOrder(mo)) { NoTsanAtomicStore(a, v, mo); return; } __sync_synchronize(); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); ReleaseStoreImpl(thr, pc, &s->clock); NoTsanAtomicStore(a, v, mo); s->mtx.Unlock(); } template static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog()); SyncVar *s = 0; if (mo != mo_relaxed) { s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); if (IsAcqRelOrder(mo)) AcquireReleaseImpl(thr, pc, &s->clock); else if (IsReleaseOrder(mo)) ReleaseImpl(thr, pc, &s->clock); else if (IsAcquireOrder(mo)) AcquireImpl(thr, pc, &s->clock); } v = F(a, v); if (s) s->mtx.Unlock(); return v; } template static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) { return func_xchg(a, v); } template static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) { return func_add(a, v); } template static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) { return func_sub(a, v); } template static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) { return func_and(a, v); } template static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) { return func_or(a, v); } template static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) { return func_xor(a, v); } template static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) { return func_nand(a, v); } template static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { return AtomicRMW(thr, pc, a, v, mo); } template static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { return AtomicRMW(thr, pc, a, v, mo); } template static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { return AtomicRMW(thr, pc, a, v, mo); } template static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { return AtomicRMW(thr, pc, a, v, mo); } template static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { return AtomicRMW(thr, pc, a, v, mo); } template static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { return AtomicRMW(thr, pc, a, v, mo); } template static T AtomicFetchNand(ThreadState *thr, uptr pc, 
volatile T *a, T v, morder mo) { return AtomicRMW(thr, pc, a, v, mo); } template static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) { return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo)); } #if __TSAN_HAS_INT128 static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) { a128 old = *c; a128 cur = func_cas(a, old, v); if (cur == old) return true; *c = cur; return false; } #endif template static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) { NoTsanAtomicCAS(a, &c, v, mo, fmo); return c; } template static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo, morder fmo) { (void)fmo; // Unused because llvm does not pass it yet. MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog()); SyncVar *s = 0; bool write_lock = mo != mo_acquire && mo != mo_consume; if (mo != mo_relaxed) { s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); if (IsAcqRelOrder(mo)) AcquireReleaseImpl(thr, pc, &s->clock); else if (IsReleaseOrder(mo)) ReleaseImpl(thr, pc, &s->clock); else if (IsAcquireOrder(mo)) AcquireImpl(thr, pc, &s->clock); } T cc = *c; T pr = func_cas(a, cc, v); if (s) { if (write_lock) s->mtx.Unlock(); else s->mtx.ReadUnlock(); } if (pr == cc) return true; *c = pr; return false; } template static T AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T c, T v, morder mo, morder fmo) { AtomicCAS(thr, pc, a, &c, v, mo, fmo); return c; } #if !SANITIZER_GO static void NoTsanAtomicFence(morder mo) { __sync_synchronize(); } static void AtomicFence(ThreadState *thr, uptr pc, morder mo) { // FIXME(dvyukov): not implemented. __sync_synchronize(); } #endif // Interface functions follow. #if !SANITIZER_GO // C/C++ static morder convert_morder(morder mo) { if (flags()->force_seq_cst_atomics) return (morder)mo_seq_cst; // Filter out additional memory order flags: // MEMMODEL_SYNC = 1 << 15 // __ATOMIC_HLE_ACQUIRE = 1 << 16 // __ATOMIC_HLE_RELEASE = 1 << 17 // // HLE is an optimization, and we pretend that elision always fails. // MEMMODEL_SYNC is used when lowering __sync_ atomics, // since we use __sync_ atomics for actual atomic operations, // we can safely ignore it as well. It also subtly affects semantics, // but we don't model the difference. return (morder)(mo & 0x7fff); } #define SCOPED_ATOMIC(func, ...) \ ThreadState *const thr = cur_thread(); \ if (thr->ignore_sync || thr->ignore_interceptors) { \ ProcessPendingSignals(thr); \ return NoTsanAtomic##func(__VA_ARGS__); \ } \ const uptr callpc = (uptr)__builtin_return_address(0); \ uptr pc = StackTrace::GetCurrentPc(); \ mo = convert_morder(mo); \ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \ ScopedAtomic sa(thr, callpc, a, mo, __func__); \ return Atomic##func(thr, pc, __VA_ARGS__); \ /**/ class ScopedAtomic { public: ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a, morder mo, const char *func) : thr_(thr) { FuncEntry(thr_, pc); DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo); } ~ScopedAtomic() { ProcessPendingSignals(thr_); FuncExit(thr_); } private: ThreadState *thr_; }; static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) { StatInc(thr, StatAtomic); StatInc(thr, t); StatInc(thr, size == 1 ? StatAtomic1 : size == 2 ? StatAtomic2 : size == 4 ? StatAtomic4 : size == 8 ? StatAtomic8 : StatAtomic16); StatInc(thr, mo == mo_relaxed ? 
StatAtomicRelaxed : mo == mo_consume ? StatAtomicConsume : mo == mo_acquire ? StatAtomicAcquire : mo == mo_release ? StatAtomicRelease : mo == mo_acq_rel ? StatAtomicAcq_Rel : StatAtomicSeq_Cst); } extern "C" { SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) { SCOPED_ATOMIC(Load, a, mo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) { SCOPED_ATOMIC(Load, a, mo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) { SCOPED_ATOMIC(Load, a, mo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) { SCOPED_ATOMIC(Load, a, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) { SCOPED_ATOMIC(Load, a, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(Store, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) { SCOPED_ATOMIC(Store, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) { SCOPED_ATOMIC(Store, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(Store, a, v, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) { SCOPED_ATOMIC(Store, a, v, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(Exchange, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) { SCOPED_ATOMIC(Exchange, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) { SCOPED_ATOMIC(Exchange, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(Exchange, a, v, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) { SCOPED_ATOMIC(Exchange, a, v, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchAdd, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) { SCOPED_ATOMIC(FetchAdd, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) { SCOPED_ATOMIC(FetchAdd, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchAdd, a, v, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) { SCOPED_ATOMIC(FetchAdd, a, v, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchSub, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) { SCOPED_ATOMIC(FetchSub, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) { SCOPED_ATOMIC(FetchSub, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchSub, a, v, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, 
morder mo) { SCOPED_ATOMIC(FetchSub, a, v, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchAnd, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) { SCOPED_ATOMIC(FetchAnd, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) { SCOPED_ATOMIC(FetchAnd, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchAnd, a, v, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) { SCOPED_ATOMIC(FetchAnd, a, v, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchOr, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) { SCOPED_ATOMIC(FetchOr, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) { SCOPED_ATOMIC(FetchOr, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchOr, a, v, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) { SCOPED_ATOMIC(FetchOr, a, v, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchXor, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) { SCOPED_ATOMIC(FetchXor, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) { SCOPED_ATOMIC(FetchXor, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchXor, a, v, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) { SCOPED_ATOMIC(FetchXor, a, v, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchNand, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) { SCOPED_ATOMIC(FetchNand, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) { SCOPED_ATOMIC(FetchNand, a, v, mo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchNand, a, v, mo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) { SCOPED_ATOMIC(FetchNand, a, v, mo); } #endif SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } #if __TSAN_HAS_INT128 
SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } #endif SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } #endif SANITIZER_INTERFACE_ATTRIBUTE a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } SANITIZER_INTERFACE_ATTRIBUTE a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, morder mo, morder fmo) { SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } #endif SANITIZER_INTERFACE_ATTRIBUTE void __tsan_atomic_thread_fence(morder mo) { char* a = 0; SCOPED_ATOMIC(Fence, mo); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_atomic_signal_fence(morder mo) { } } // extern "C" #else // #if !SANITIZER_GO // Go #define ATOMIC(func, ...) \ if (thr->ignore_sync) { \ NoTsanAtomic##func(__VA_ARGS__); \ } else { \ FuncEntry(thr, cpc); \ Atomic##func(thr, pc, __VA_ARGS__); \ FuncExit(thr); \ } \ /**/ #define ATOMIC_RET(func, ret, ...) 
\ if (thr->ignore_sync) { \ (ret) = NoTsanAtomic##func(__VA_ARGS__); \ } else { \ FuncEntry(thr, cpc); \ (ret) = Atomic##func(thr, pc, __VA_ARGS__); \ FuncExit(thr); \ } \ /**/ extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_compare_exchange( ThreadState *thr, uptr cpc, uptr pc, u8 *a) { a32 cur = 0; a32 cmp = *(a32*)(a+8); ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire); *(bool*)(a+16) = (cur == cmp); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_compare_exchange( ThreadState *thr, uptr cpc, uptr pc, u8 *a) { a64 cur = 0; a64 cmp = *(a64*)(a+8); ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire); *(bool*)(a+24) = (cur == cmp); } } // extern "C" #endif // #if !SANITIZER_GO Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_mman.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_mman.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_mman.cc (revision 320961) @@ -1,301 +1,303 @@ //===-- tsan_mman.cc ------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_mman.h" #include "tsan_rtl.h" #include "tsan_report.h" #include "tsan_flags.h" // May be overriden by front-end. 
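// Editor's illustration (not part of this change): a front-end observes heap
// traffic by supplying strong definitions that replace the weak defaults
// below, e.g.
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // invoked for each user-level allocation of `size` bytes at `ptr`
//   }
//   extern "C" void __sanitizer_free_hook(void *ptr) {
//     // invoked for each user-level deallocation
//   }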
SANITIZER_WEAK_DEFAULT_IMPL void __sanitizer_malloc_hook(void *ptr, uptr size) { (void)ptr; (void)size; } SANITIZER_WEAK_DEFAULT_IMPL void __sanitizer_free_hook(void *ptr) { (void)ptr; } namespace __tsan { struct MapUnmapCallback { void OnMap(uptr p, uptr size) const { } void OnUnmap(uptr p, uptr size) const { // We are about to unmap a chunk of user memory. // Mark the corresponding shadow memory as not needed. DontNeedShadowFor(p, size); // Mark the corresponding meta shadow memory as not needed. // Note the block does not contain any meta info at this point // (this happens after free). const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize; const uptr kPageSize = GetPageSizeCached() * kMetaRatio; // Block came from LargeMmapAllocator, so must be large. // We rely on this in the calculations below. CHECK_GE(size, 2 * kPageSize); uptr diff = RoundUp(p, kPageSize) - p; if (diff != 0) { p += diff; size -= diff; } diff = p + size - RoundDown(p + size, kPageSize); if (diff != 0) size -= diff; uptr p_meta = (uptr)MemToMeta(p); ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio); } }; static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64); Allocator *allocator() { return reinterpret_cast(&allocator_placeholder); } struct GlobalProc { Mutex mtx; Processor *proc; GlobalProc() : mtx(MutexTypeGlobalProc, StatMtxGlobalProc) , proc(ProcCreate()) { } }; static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64); GlobalProc *global_proc() { return reinterpret_cast(&global_proc_placeholder); } ScopedGlobalProcessor::ScopedGlobalProcessor() { GlobalProc *gp = global_proc(); ThreadState *thr = cur_thread(); if (thr->proc()) return; // If we don't have a proc, use the global one. // There are currently only two known case where this path is triggered: // __interceptor_free // __nptl_deallocate_tsd // start_thread // clone // and: // ResetRange // __interceptor_munmap // __deallocate_stack // start_thread // clone // Ideally, we destroy thread state (and unwire proc) when a thread actually // exits (i.e. when we join/wait it). 
  // Then we would not need the global proc
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return Allocator::FailureHandler::OnBadRequest();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CheckForCallocOverflow(size, n))
    return Allocator::FailureHandler::OnBadRequest();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  void *p2 = user_alloc(thr, pc, sz);
  if (p2 == 0)
    return 0;
  if (p) {
    uptr oldsz = user_alloc_usable_size(p);
    internal_memcpy(p2, p, min(oldsz, sz));
    user_free(thr, pc, p);
  }
  return p2;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
return b->siz; } void invoke_malloc_hook(void *ptr, uptr size) { ThreadState *thr = cur_thread(); if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors) return; __sanitizer_malloc_hook(ptr, size); RunMallocHooks(ptr, size); } void invoke_free_hook(void *ptr) { ThreadState *thr = cur_thread(); if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors) return; __sanitizer_free_hook(ptr); RunFreeHooks(ptr); } void *internal_alloc(MBlockType typ, uptr sz) { ThreadState *thr = cur_thread(); if (thr->nomalloc) { thr->nomalloc = 0; // CHECK calls internal_malloc(). CHECK(0); } return InternalAlloc(sz, &thr->proc()->internal_alloc_cache); } void internal_free(void *p) { ThreadState *thr = cur_thread(); if (thr->nomalloc) { thr->nomalloc = 0; // CHECK calls internal_malloc(). CHECK(0); } InternalFree(p, &thr->proc()->internal_alloc_cache); } } // namespace __tsan using namespace __tsan; extern "C" { uptr __sanitizer_get_current_allocated_bytes() { uptr stats[AllocatorStatCount]; allocator()->GetStats(stats); return stats[AllocatorStatAllocated]; } uptr __sanitizer_get_heap_size() { uptr stats[AllocatorStatCount]; allocator()->GetStats(stats); return stats[AllocatorStatMapped]; } uptr __sanitizer_get_free_bytes() { return 1; } uptr __sanitizer_get_unmapped_bytes() { return 1; } uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } int __sanitizer_get_ownership(const void *p) { return allocator()->GetBlockBegin(p) != 0; } uptr __sanitizer_get_allocated_size(const void *p) { return user_alloc_usable_size(p); } void __tsan_on_thread_idle() { ThreadState *thr = cur_thread(); + thr->clock.ResetCached(&thr->proc()->clock_cache); + thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache); allocator()->SwallowCache(&thr->proc()->alloc_cache); internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache); ctx->metamap.OnProcIdle(thr->proc()); } } // extern "C" Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform.h =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform.h (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform.h (revision 320961) @@ -1,829 +1,860 @@ //===-- tsan_platform.h -----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Platform-specific code. 
//===----------------------------------------------------------------------===// #ifndef TSAN_PLATFORM_H #define TSAN_PLATFORM_H #if !defined(__LP64__) && !defined(_WIN64) # error "Only 64-bit is supported" #endif #include "tsan_defs.h" #include "tsan_trace.h" namespace __tsan { #if !SANITIZER_GO #if defined(__x86_64__) /* C/C++ on linux/x86_64 and freebsd/x86_64 0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB) 0040 0000 0000 - 0100 0000 0000: - 0100 0000 0000 - 2000 0000 0000: shadow 2000 0000 0000 - 3000 0000 0000: - 3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) 4000 0000 0000 - 5500 0000 0000: - 5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels 5680 0000 0000 - 6000 0000 0000: - 6000 0000 0000 - 6200 0000 0000: traces 6200 0000 0000 - 7d00 0000 0000: - 7b00 0000 0000 - 7c00 0000 0000: heap 7c00 0000 0000 - 7e80 0000 0000: - 7e80 0000 0000 - 8000 0000 0000: modules and main thread stack */ struct Mapping { static const uptr kMetaShadowBeg = 0x300000000000ull; static const uptr kMetaShadowEnd = 0x340000000000ull; static const uptr kTraceMemBeg = 0x600000000000ull; static const uptr kTraceMemEnd = 0x620000000000ull; static const uptr kShadowBeg = 0x010000000000ull; static const uptr kShadowEnd = 0x200000000000ull; static const uptr kHeapMemBeg = 0x7b0000000000ull; static const uptr kHeapMemEnd = 0x7c0000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; static const uptr kLoAppMemEnd = 0x008000000000ull; static const uptr kMidAppMemBeg = 0x550000000000ull; static const uptr kMidAppMemEnd = 0x568000000000ull; static const uptr kHiAppMemBeg = 0x7e8000000000ull; static const uptr kHiAppMemEnd = 0x800000000000ull; static const uptr kAppMemMsk = 0x780000000000ull; static const uptr kAppMemXor = 0x040000000000ull; static const uptr kVdsoBeg = 0xf000000000000000ull; }; #define TSAN_MID_APP_RANGE 1 #elif defined(__mips64) /* C/C++ on linux/mips64 0100 0000 00 - 0200 0000 00: main binary 0200 0000 00 - 1400 0000 00: - 1400 0000 00 - 2400 0000 00: shadow 2400 0000 00 - 3000 0000 00: - 3000 0000 00 - 4000 0000 00: metainfo (memory blocks and sync objects) 4000 0000 00 - 6000 0000 00: - 6000 0000 00 - 6200 0000 00: traces 6200 0000 00 - fe00 0000 00: - fe00 0000 00 - ff00 0000 00: heap ff00 0000 00 - ff80 0000 00: - ff80 0000 00 - ffff ffff ff: modules and main thread stack */ struct Mapping { static const uptr kMetaShadowBeg = 0x4000000000ull; static const uptr kMetaShadowEnd = 0x5000000000ull; static const uptr kTraceMemBeg = 0xb000000000ull; static const uptr kTraceMemEnd = 0xb200000000ull; static const uptr kShadowBeg = 0x2400000000ull; static const uptr kShadowEnd = 0x4000000000ull; static const uptr kHeapMemBeg = 0xfe00000000ull; static const uptr kHeapMemEnd = 0xff00000000ull; static const uptr kLoAppMemBeg = 0x0100000000ull; static const uptr kLoAppMemEnd = 0x0200000000ull; static const uptr kMidAppMemBeg = 0xaa00000000ull; static const uptr kMidAppMemEnd = 0xab00000000ull; static const uptr kHiAppMemBeg = 0xff80000000ull; static const uptr kHiAppMemEnd = 0xffffffffffull; static const uptr kAppMemMsk = 0xf800000000ull; static const uptr kAppMemXor = 0x0800000000ull; static const uptr kVdsoBeg = 0xfffff00000ull; }; #define TSAN_MID_APP_RANGE 1 +#elif defined(__aarch64__) && defined(__APPLE__) +/* +C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM) +0000 0000 00 - 0100 0000 00: - (4 GB) +0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB) +0200 0000 00 - 0300 0000 00: heap (4 GB) 
+0300 0000 00 - 0400 0000 00: - (4 GB)
+0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
+0c00 0000 00 - 0d00 0000 00: - (4 GB)
+0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
+0e00 0000 00 - 0f00 0000 00: - (4 GB)
+0f00 0000 00 - 1000 0000 00: traces (4 GB)
+*/
+struct Mapping {
+  static const uptr kLoAppMemBeg   = 0x0100000000ull;
+  static const uptr kLoAppMemEnd   = 0x0200000000ull;
+  static const uptr kHeapMemBeg    = 0x0200000000ull;
+  static const uptr kHeapMemEnd    = 0x0300000000ull;
+  static const uptr kShadowBeg     = 0x0400000000ull;
+  static const uptr kShadowEnd     = 0x0c00000000ull;
+  static const uptr kMetaShadowBeg = 0x0d00000000ull;
+  static const uptr kMetaShadowEnd = 0x0e00000000ull;
+  static const uptr kTraceMemBeg   = 0x0f00000000ull;
+  static const uptr kTraceMemEnd   = 0x1000000000ull;
+  static const uptr kHiAppMemBeg   = 0x1000000000ull;
+  static const uptr kHiAppMemEnd   = 0x1000000000ull;
+  static const uptr kAppMemMsk     = 0x0ull;
+  static const uptr kAppMemXor     = 0x0ull;
+  static const uptr kVdsoBeg       = 0x7000000000000000ull;
+};
+
#elif defined(__aarch64__)
// AArch64 supports multiple VMAs, which leads to multiple address
// transformation functions. To support these multiple VMA transformations
// and mappings, the TSAN runtime for AArch64 uses an external memory read
// (vmaSize) to select which mapping to use. Although slower, it makes the
// same instrumented binary run on multiple kernels.

/*
C/C++ on linux/aarch64 (39-bit VMA)
0000 0010 00 - 0100 0000 00: main binary
0100 0000 00 - 0800 0000 00: -
0800 0000 00 - 2000 0000 00: shadow memory
2000 0000 00 - 3100 0000 00: -
3100 0000 00 - 3400 0000 00: metainfo
3400 0000 00 - 5500 0000 00: -
5500 0000 00 - 5600 0000 00: main binary (PIE)
5600 0000 00 - 6000 0000 00: -
6000 0000 00 - 6200 0000 00: traces
6200 0000 00 - 7d00 0000 00: -
7c00 0000 00 - 7d00 0000 00: heap
7d00 0000 00 - 7fff ffff ff: modules and main thread stack
*/
struct Mapping39 {
  static const uptr kLoAppMemBeg   = 0x0000001000ull;
  static const uptr kLoAppMemEnd   = 0x0100000000ull;
  static const uptr kShadowBeg     = 0x0800000000ull;
  static const uptr kShadowEnd     = 0x2000000000ull;
  static const uptr kMetaShadowBeg = 0x3100000000ull;
  static const uptr kMetaShadowEnd = 0x3400000000ull;
  static const uptr kMidAppMemBeg  = 0x5500000000ull;
  static const uptr kMidAppMemEnd  = 0x5600000000ull;
  static const uptr kTraceMemBeg   = 0x6000000000ull;
  static const uptr kTraceMemEnd   = 0x6200000000ull;
  static const uptr kHeapMemBeg    = 0x7c00000000ull;
  static const uptr kHeapMemEnd    = 0x7d00000000ull;
  static const uptr kHiAppMemBeg   = 0x7e00000000ull;
  static const uptr kHiAppMemEnd   = 0x7fffffffffull;
  static const uptr kAppMemMsk     = 0x7800000000ull;
  static const uptr kAppMemXor     = 0x0200000000ull;
  static const uptr kVdsoBeg       = 0x7f00000000ull;
};

/*
C/C++ on linux/aarch64 (42-bit VMA)
00000 0010 00 - 01000 0000 00: main binary
01000 0000 00 - 10000 0000 00: -
10000 0000 00 - 20000 0000 00: shadow memory
20000 0000 00 - 26000 0000 00: -
26000 0000 00 - 28000 0000 00: metainfo
28000 0000 00 - 2aa00 0000 00: -
2aa00 0000 00 - 2ab00 0000 00: main binary (PIE)
2ab00 0000 00 - 36200 0000 00: -
36200 0000 00 - 36240 0000 00: traces
36240 0000 00 - 3e000 0000 00: -
3e000 0000 00 - 3f000 0000 00: heap
3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
*/
struct Mapping42 {
  static const uptr kLoAppMemBeg   = 0x00000001000ull;
  static const uptr kLoAppMemEnd   = 0x01000000000ull;
  static const uptr kShadowBeg     = 0x10000000000ull;
  static const uptr kShadowEnd     = 0x20000000000ull;
  static const uptr kMetaShadowBeg = 0x26000000000ull;
  static const uptr kMetaShadowEnd = 0x28000000000ull;
  static const uptr kMidAppMemBeg  = 0x2aa00000000ull;
  static const uptr kMidAppMemEnd  = 0x2ab00000000ull;
  static const uptr kTraceMemBeg   = 0x36200000000ull;
  static const uptr kTraceMemEnd   = 0x36400000000ull;
  static const uptr kHeapMemBeg    = 0x3e000000000ull;
  static const uptr kHeapMemEnd    = 0x3f000000000ull;
  static const uptr kHiAppMemBeg   = 0x3f000000000ull;
  static const uptr kHiAppMemEnd   = 0x3ffffffffffull;
  static const uptr kAppMemMsk     = 0x3c000000000ull;
  static const uptr kAppMemXor     = 0x04000000000ull;
  static const uptr kVdsoBeg       = 0x37f00000000ull;
};

struct Mapping48 {
  static const uptr kLoAppMemBeg   = 0x0000000001000ull;
  static const uptr kLoAppMemEnd   = 0x0000200000000ull;
  static const uptr kShadowBeg     = 0x0002000000000ull;
  static const uptr kShadowEnd     = 0x0004000000000ull;
  static const uptr kMetaShadowBeg = 0x0005000000000ull;
  static const uptr kMetaShadowEnd = 0x0006000000000ull;
  static const uptr kMidAppMemBeg  = 0x0aaaa00000000ull;
  static const uptr kMidAppMemEnd  = 0x0aaaf00000000ull;
  static const uptr kTraceMemBeg   = 0x0f06000000000ull;
  static const uptr kTraceMemEnd   = 0x0f06200000000ull;
  static const uptr kHeapMemBeg    = 0x0ffff00000000ull;
  static const uptr kHeapMemEnd    = 0x0ffff00000000ull;
  static const uptr kHiAppMemBeg   = 0x0ffff00000000ull;
  static const uptr kHiAppMemEnd   = 0x1000000000000ull;
  static const uptr kAppMemMsk     = 0x0fff800000000ull;
  static const uptr kAppMemXor     = 0x0000800000000ull;
  static const uptr kVdsoBeg       = 0xffff000000000ull;
};

// Indicates the runtime will define the memory regions at runtime.
#define TSAN_RUNTIME_VMA 1
// Indicates that mapping defines a mid range memory segment.
#define TSAN_MID_APP_RANGE 1
#elif defined(__powerpc64__)
// PPC64 supports multiple VMAs, which leads to multiple address
// transformation functions. To support these multiple VMA transformations
// and mappings, the TSAN runtime for PPC64 uses an external memory read
// (vmaSize) to select which mapping to use. Although slower, it makes the
// same instrumented binary run on multiple kernels.
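// Editor's note (not part of this change): as on AArch64, vmaSize is probed
// once at startup and each accessor then dispatches on it at runtime; for
// PPC64 the dispatch in MappingArchImpl() below is essentially
//
//   if (vmaSize == 44)
//     return MappingImpl<Mapping44, Type>();
//   else
//     return MappingImpl<Mapping46, Type>();
//
// selecting between the two structs defined next.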
/* C/C++ on linux/powerpc64 (44-bit VMA) 0000 0000 0100 - 0001 0000 0000: main binary 0001 0000 0000 - 0001 0000 0000: - 0001 0000 0000 - 0b00 0000 0000: shadow 0b00 0000 0000 - 0b00 0000 0000: - 0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects) 0d00 0000 0000 - 0d00 0000 0000: - 0d00 0000 0000 - 0f00 0000 0000: traces 0f00 0000 0000 - 0f00 0000 0000: - 0f00 0000 0000 - 0f50 0000 0000: heap 0f50 0000 0000 - 0f60 0000 0000: - 0f60 0000 0000 - 1000 0000 0000: modules and main thread stack */ struct Mapping44 { static const uptr kMetaShadowBeg = 0x0b0000000000ull; static const uptr kMetaShadowEnd = 0x0d0000000000ull; static const uptr kTraceMemBeg = 0x0d0000000000ull; static const uptr kTraceMemEnd = 0x0f0000000000ull; static const uptr kShadowBeg = 0x000100000000ull; static const uptr kShadowEnd = 0x0b0000000000ull; static const uptr kLoAppMemBeg = 0x000000000100ull; static const uptr kLoAppMemEnd = 0x000100000000ull; static const uptr kHeapMemBeg = 0x0f0000000000ull; static const uptr kHeapMemEnd = 0x0f5000000000ull; static const uptr kHiAppMemBeg = 0x0f6000000000ull; static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits static const uptr kAppMemMsk = 0x0f0000000000ull; static const uptr kAppMemXor = 0x002100000000ull; static const uptr kVdsoBeg = 0x3c0000000000000ull; }; /* C/C++ on linux/powerpc64 (46-bit VMA) 0000 0000 1000 - 0100 0000 0000: main binary 0100 0000 0000 - 0200 0000 0000: - 0100 0000 0000 - 1000 0000 0000: shadow 1000 0000 0000 - 1000 0000 0000: - 1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects) 2000 0000 0000 - 2000 0000 0000: - 2000 0000 0000 - 2200 0000 0000: traces 2200 0000 0000 - 3d00 0000 0000: - 3d00 0000 0000 - 3e00 0000 0000: heap 3e00 0000 0000 - 3e80 0000 0000: - 3e80 0000 0000 - 4000 0000 0000: modules and main thread stack */ struct Mapping46 { static const uptr kMetaShadowBeg = 0x100000000000ull; static const uptr kMetaShadowEnd = 0x200000000000ull; static const uptr kTraceMemBeg = 0x200000000000ull; static const uptr kTraceMemEnd = 0x220000000000ull; static const uptr kShadowBeg = 0x010000000000ull; static const uptr kShadowEnd = 0x100000000000ull; static const uptr kHeapMemBeg = 0x3d0000000000ull; static const uptr kHeapMemEnd = 0x3e0000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; static const uptr kLoAppMemEnd = 0x010000000000ull; static const uptr kHiAppMemBeg = 0x3e8000000000ull; static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits static const uptr kAppMemMsk = 0x3c0000000000ull; static const uptr kAppMemXor = 0x020000000000ull; static const uptr kVdsoBeg = 0x7800000000000000ull; }; // Indicates the runtime will define the memory regions at runtime. 
#define TSAN_RUNTIME_VMA 1 #endif #elif SANITIZER_GO && !SANITIZER_WINDOWS /* Go on linux, darwin and freebsd 0000 0000 1000 - 0000 1000 0000: executable 0000 1000 0000 - 00c0 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 2000 0000 0000: - 2000 0000 0000 - 2380 0000 0000: shadow 2380 0000 0000 - 3000 0000 0000: - 3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) 4000 0000 0000 - 6000 0000 0000: - 6000 0000 0000 - 6200 0000 0000: traces 6200 0000 0000 - 8000 0000 0000: - */ struct Mapping { static const uptr kMetaShadowBeg = 0x300000000000ull; static const uptr kMetaShadowEnd = 0x400000000000ull; static const uptr kTraceMemBeg = 0x600000000000ull; static const uptr kTraceMemEnd = 0x620000000000ull; static const uptr kShadowBeg = 0x200000000000ull; static const uptr kShadowEnd = 0x238000000000ull; static const uptr kAppMemBeg = 0x000000001000ull; static const uptr kAppMemEnd = 0x00e000000000ull; }; #elif SANITIZER_GO && SANITIZER_WINDOWS /* Go on windows 0000 0000 1000 - 0000 1000 0000: executable 0000 1000 0000 - 00f8 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 0100 0000 0000: - 0100 0000 0000 - 0500 0000 0000: shadow 0500 0000 0000 - 0560 0000 0000: - 0560 0000 0000 - 0760 0000 0000: traces 0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects) 07d0 0000 0000 - 8000 0000 0000: - */ struct Mapping { static const uptr kMetaShadowBeg = 0x076000000000ull; static const uptr kMetaShadowEnd = 0x07d000000000ull; static const uptr kTraceMemBeg = 0x056000000000ull; static const uptr kTraceMemEnd = 0x076000000000ull; static const uptr kShadowBeg = 0x010000000000ull; static const uptr kShadowEnd = 0x050000000000ull; static const uptr kAppMemBeg = 0x000000001000ull; static const uptr kAppMemEnd = 0x00e000000000ull; }; #else # error "Unknown platform" #endif #ifdef TSAN_RUNTIME_VMA extern uptr vmaSize; #endif enum MappingType { MAPPING_LO_APP_BEG, MAPPING_LO_APP_END, MAPPING_HI_APP_BEG, MAPPING_HI_APP_END, #ifdef TSAN_MID_APP_RANGE MAPPING_MID_APP_BEG, MAPPING_MID_APP_END, #endif MAPPING_HEAP_BEG, MAPPING_HEAP_END, MAPPING_APP_BEG, MAPPING_APP_END, MAPPING_SHADOW_BEG, MAPPING_SHADOW_END, MAPPING_META_SHADOW_BEG, MAPPING_META_SHADOW_END, MAPPING_TRACE_BEG, MAPPING_TRACE_END, MAPPING_VDSO_BEG, }; template <typename Mapping, int Type> uptr MappingImpl(void) { switch (Type) { #if !SANITIZER_GO case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg; case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd; # ifdef TSAN_MID_APP_RANGE case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg; case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd; # endif case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg; case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd; case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg; case MAPPING_HEAP_END: return Mapping::kHeapMemEnd; case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg; #else case MAPPING_APP_BEG: return Mapping::kAppMemBeg; case MAPPING_APP_END: return Mapping::kAppMemEnd; #endif case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg; case MAPPING_SHADOW_END: return Mapping::kShadowEnd; case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg; case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd; case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg; case MAPPING_TRACE_END: return Mapping::kTraceMemEnd; } } template <int Type> uptr MappingArchImpl(void) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return MappingImpl<Mapping39, Type>(); case 42: return MappingImpl<Mapping42, Type>(); case 48:
return MappingImpl<Mapping48, Type>(); } DCHECK(0); return 0; #elif defined(__powerpc64__) if (vmaSize == 44) return MappingImpl<Mapping44, Type>(); else return MappingImpl<Mapping46, Type>(); DCHECK(0); #else return MappingImpl<Mapping, Type>(); #endif } #if !SANITIZER_GO ALWAYS_INLINE uptr LoAppMemBeg(void) { return MappingArchImpl<MAPPING_LO_APP_BEG>(); } ALWAYS_INLINE uptr LoAppMemEnd(void) { return MappingArchImpl<MAPPING_LO_APP_END>(); } #ifdef TSAN_MID_APP_RANGE ALWAYS_INLINE uptr MidAppMemBeg(void) { return MappingArchImpl<MAPPING_MID_APP_BEG>(); } ALWAYS_INLINE uptr MidAppMemEnd(void) { return MappingArchImpl<MAPPING_MID_APP_END>(); } #endif ALWAYS_INLINE uptr HeapMemBeg(void) { return MappingArchImpl<MAPPING_HEAP_BEG>(); } ALWAYS_INLINE uptr HeapMemEnd(void) { return MappingArchImpl<MAPPING_HEAP_END>(); } ALWAYS_INLINE uptr HiAppMemBeg(void) { return MappingArchImpl<MAPPING_HI_APP_BEG>(); } ALWAYS_INLINE uptr HiAppMemEnd(void) { return MappingArchImpl<MAPPING_HI_APP_END>(); } ALWAYS_INLINE uptr VdsoBeg(void) { return MappingArchImpl<MAPPING_VDSO_BEG>(); } #else ALWAYS_INLINE uptr AppMemBeg(void) { return MappingArchImpl<MAPPING_APP_BEG>(); } ALWAYS_INLINE uptr AppMemEnd(void) { return MappingArchImpl<MAPPING_APP_END>(); } #endif static inline bool GetUserRegion(int i, uptr *start, uptr *end) { switch (i) { default: return false; #if !SANITIZER_GO case 0: *start = LoAppMemBeg(); *end = LoAppMemEnd(); return true; case 1: *start = HiAppMemBeg(); *end = HiAppMemEnd(); return true; case 2: *start = HeapMemBeg(); *end = HeapMemEnd(); return true; # ifdef TSAN_MID_APP_RANGE case 3: *start = MidAppMemBeg(); *end = MidAppMemEnd(); return true; # endif #else case 0: *start = AppMemBeg(); *end = AppMemEnd(); return true; #endif } } ALWAYS_INLINE uptr ShadowBeg(void) { return MappingArchImpl<MAPPING_SHADOW_BEG>(); } ALWAYS_INLINE uptr ShadowEnd(void) { return MappingArchImpl<MAPPING_SHADOW_END>(); } ALWAYS_INLINE uptr MetaShadowBeg(void) { return MappingArchImpl<MAPPING_META_SHADOW_BEG>(); } ALWAYS_INLINE uptr MetaShadowEnd(void) { return MappingArchImpl<MAPPING_META_SHADOW_END>(); } ALWAYS_INLINE uptr TraceMemBeg(void) { return MappingArchImpl<MAPPING_TRACE_BEG>(); } ALWAYS_INLINE uptr TraceMemEnd(void) { return MappingArchImpl<MAPPING_TRACE_END>(); } template <typename Mapping> bool IsAppMemImpl(uptr mem) { #if !SANITIZER_GO return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) || # ifdef TSAN_MID_APP_RANGE (mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) || # endif (mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) || (mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd); #else return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd; #endif } ALWAYS_INLINE bool IsAppMem(uptr mem) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return IsAppMemImpl<Mapping39>(mem); case 42: return IsAppMemImpl<Mapping42>(mem); case 48: return IsAppMemImpl<Mapping48>(mem); } DCHECK(0); return false; #elif defined(__powerpc64__) if (vmaSize == 44) return IsAppMemImpl<Mapping44>(mem); else return IsAppMemImpl<Mapping46>(mem); DCHECK(0); #else return IsAppMemImpl<Mapping>(mem); #endif } template <typename Mapping> bool IsShadowMemImpl(uptr mem) { return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd; } ALWAYS_INLINE bool IsShadowMem(uptr mem) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return IsShadowMemImpl<Mapping39>(mem); case 42: return IsShadowMemImpl<Mapping42>(mem); case 48: return IsShadowMemImpl<Mapping48>(mem); } DCHECK(0); return false; #elif defined(__powerpc64__) if (vmaSize == 44) return IsShadowMemImpl<Mapping44>(mem); else return IsShadowMemImpl<Mapping46>(mem); DCHECK(0); #else return IsShadowMemImpl<Mapping>(mem); #endif } template <typename Mapping> bool IsMetaMemImpl(uptr mem) { return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd; } ALWAYS_INLINE bool IsMetaMem(uptr mem) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return
IsMetaMemImpl<Mapping39>(mem); case 42: return IsMetaMemImpl<Mapping42>(mem); case 48: return IsMetaMemImpl<Mapping48>(mem); } DCHECK(0); return false; #elif defined(__powerpc64__) if (vmaSize == 44) return IsMetaMemImpl<Mapping44>(mem); else return IsMetaMemImpl<Mapping46>(mem); DCHECK(0); #else return IsMetaMemImpl<Mapping>(mem); #endif } template <typename Mapping> uptr MemToShadowImpl(uptr x) { DCHECK(IsAppMem(x)); #if !SANITIZER_GO return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1))) ^ Mapping::kAppMemXor) * kShadowCnt; #else # ifndef SANITIZER_WINDOWS return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg; # else return ((x & ~(kShadowCell - 1)) * kShadowCnt) + Mapping::kShadowBeg; # endif #endif } ALWAYS_INLINE uptr MemToShadow(uptr x) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return MemToShadowImpl<Mapping39>(x); case 42: return MemToShadowImpl<Mapping42>(x); case 48: return MemToShadowImpl<Mapping48>(x); } DCHECK(0); return 0; #elif defined(__powerpc64__) if (vmaSize == 44) return MemToShadowImpl<Mapping44>(x); else return MemToShadowImpl<Mapping46>(x); DCHECK(0); #else return MemToShadowImpl<Mapping>(x); #endif } template <typename Mapping> u32 *MemToMetaImpl(uptr x) { DCHECK(IsAppMem(x)); #if !SANITIZER_GO return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) / kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg); #else # ifndef SANITIZER_WINDOWS return (u32*)(((x & ~(kMetaShadowCell - 1)) / \ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg); # else return (u32*)(((x & ~(kMetaShadowCell - 1)) / \ kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg); # endif #endif } ALWAYS_INLINE u32 *MemToMeta(uptr x) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return MemToMetaImpl<Mapping39>(x); case 42: return MemToMetaImpl<Mapping42>(x); case 48: return MemToMetaImpl<Mapping48>(x); } DCHECK(0); return 0; #elif defined(__powerpc64__) if (vmaSize == 44) return MemToMetaImpl<Mapping44>(x); else return MemToMetaImpl<Mapping46>(x); DCHECK(0); #else return MemToMetaImpl<Mapping>(x); #endif } template <typename Mapping> uptr ShadowToMemImpl(uptr s) { DCHECK(IsShadowMem(s)); #if !SANITIZER_GO // The shadow mapping is non-linear and we've lost some bits, so we don't have // an easy way to restore the original app address. But the mapping is a // bijection, so we try to restore the address as belonging to low/mid/high // range consecutively and see if shadow->app->shadow mapping gives us the // same address. uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor; if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd && MemToShadow(p) == s) return p; # ifdef TSAN_MID_APP_RANGE p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) + (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk); if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd && MemToShadow(p) == s) return p; # endif return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk; #else // #if !SANITIZER_GO # ifndef SANITIZER_WINDOWS return (s & ~Mapping::kShadowBeg) / kShadowCnt; # else return (s - Mapping::kShadowBeg) / kShadowCnt; # endif // SANITIZER_WINDOWS #endif } ALWAYS_INLINE uptr ShadowToMem(uptr s) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return ShadowToMemImpl<Mapping39>(s); case 42: return ShadowToMemImpl<Mapping42>(s); case 48: return ShadowToMemImpl<Mapping48>(s); } DCHECK(0); return 0; #elif defined(__powerpc64__) if (vmaSize == 44) return ShadowToMemImpl<Mapping44>(s); else return ShadowToMemImpl<Mapping46>(s); DCHECK(0); #else return ShadowToMemImpl<Mapping>(s); #endif } // The additional page is to catch shadow stack overflow as paging fault. // Windows wants 64K alignment for mmaps.
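As a concrete check of the Go-mode formulas above, the following self-contained sketch (not part of the vendor sources) round-trips an address through the linear non-Windows Go mapping; kShadowCell = 8 and kShadowCnt = 4 are assumed here to match tsan_defs.h, and kGoShadowBeg mirrors the Go/Linux Mapping::kShadowBeg.

#include <cassert>
#include <cstdint>

using uptr = uint64_t;

const uptr kShadowCell = 8;                   // app bytes per shadow cell (assumed)
const uptr kShadowCnt = 4;                    // shadow values per cell (assumed)
const uptr kGoShadowBeg = 0x200000000000ull;  // Go/Linux Mapping::kShadowBeg

// MemToShadowImpl, Go non-Windows branch: round down to the cell, scale by
// the shadow multiplier, then move into the shadow range. OR acts like +
// because the scaled app offset never reaches the kGoShadowBeg bit.
uptr MemToShadowGo(uptr x) {
  return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kGoShadowBeg;
}

// ShadowToMemImpl, Go non-Windows branch: the exact inverse.
uptr ShadowToMemGo(uptr s) {
  return (s & ~kGoShadowBeg) / kShadowCnt;
}

int main() {
  uptr app = 0x000000012340ull;  // a cell-aligned app address
  assert(ShadowToMemGo(MemToShadowGo(app)) == app);
  return 0;
}

Unlike the C/C++ mapping, nothing is XOR-folded here, which is why the Go branch can invert the mapping directly instead of probing candidate app ranges.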
const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1); template <typename Mapping> uptr GetThreadTraceImpl(int tid) { uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize; DCHECK_LT(p, Mapping::kTraceMemEnd); return p; } ALWAYS_INLINE uptr GetThreadTrace(int tid) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return GetThreadTraceImpl<Mapping39>(tid); case 42: return GetThreadTraceImpl<Mapping42>(tid); case 48: return GetThreadTraceImpl<Mapping48>(tid); } DCHECK(0); return 0; #elif defined(__powerpc64__) if (vmaSize == 44) return GetThreadTraceImpl<Mapping44>(tid); else return GetThreadTraceImpl<Mapping46>(tid); DCHECK(0); #else return GetThreadTraceImpl<Mapping>(tid); #endif } template <typename Mapping> uptr GetThreadTraceHeaderImpl(int tid) { uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize + kTraceSize * sizeof(Event); DCHECK_LT(p, Mapping::kTraceMemEnd); return p; } ALWAYS_INLINE uptr GetThreadTraceHeader(int tid) { -#ifdef __aarch64__ +#if defined(__aarch64__) && !defined(__APPLE__) switch (vmaSize) { case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid); case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid); case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid); } DCHECK(0); return 0; #elif defined(__powerpc64__) if (vmaSize == 44) return GetThreadTraceHeaderImpl<Mapping44>(tid); else return GetThreadTraceHeaderImpl<Mapping46>(tid); DCHECK(0); #else return GetThreadTraceHeaderImpl<Mapping>(tid); #endif } void InitializePlatform(); void InitializePlatformEarly(); void CheckAndProtect(); void InitializeShadowMemoryPlatform(); void FlushShadowMemory(); void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive); int ExtractResolvFDs(void *state, int *fds, int nfd); int ExtractRecvmsgFDs(void *msg, int *fds, int nfd); void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size); int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg); void DestroyThreadState(); } // namespace __tsan #endif // TSAN_PLATFORM_H Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_linux.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_linux.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_linux.cc (revision 320961) @@ -1,407 +1,404 @@ //===-- tsan_platform_linux.cc --------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_LINUX || SANITIZER_FREEBSD #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_flags.h" #include #include #include #include #include #include #include #include #if SANITIZER_LINUX #include #include #endif #include #include #include #include #include #include #include -#include #include #include #if SANITIZER_LINUX #define __need_res_state #include #endif #ifdef sa_handler # undef sa_handler #endif #ifdef sa_sigaction # undef sa_sigaction #endif #if SANITIZER_FREEBSD extern "C" void *__libc_stack_end; void *__libc_stack_end = 0; #endif #if SANITIZER_LINUX && defined(__aarch64__) void InitializeGuardPtr() __attribute__((visibility("hidden"))); #endif namespace __tsan { #ifdef TSAN_RUNTIME_VMA // Runtime detected VMA size. uptr vmaSize; #endif enum { MemTotal = 0, MemShadow = 1, MemMeta = 2, MemFile = 3, MemMmap = 4, MemTrace = 5, MemHeap = 6, MemOther = 7, MemCount = 8, }; void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem, uptr stats_size) { mem[MemTotal] += rss; if (p >= ShadowBeg() && p < ShadowEnd()) mem[MemShadow] += rss; else if (p >= MetaShadowBeg() && p < MetaShadowEnd()) mem[MemMeta] += rss; #if !SANITIZER_GO else if (p >= HeapMemBeg() && p < HeapMemEnd()) mem[MemHeap] += rss; else if (p >= LoAppMemBeg() && p < LoAppMemEnd()) mem[file ? MemFile : MemMmap] += rss; else if (p >= HiAppMemBeg() && p < HiAppMemEnd()) mem[file ? MemFile : MemMmap] += rss; #else else if (p >= AppMemBeg() && p < AppMemEnd()) mem[file ? MemFile : MemMmap] += rss; #endif else if (p >= TraceMemBeg() && p < TraceMemEnd()) mem[MemTrace] += rss; else mem[MemOther] += rss; } void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { uptr mem[MemCount]; internal_memset(mem, 0, sizeof(mem[0]) * MemCount); __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7); StackDepotStats *stacks = StackDepotGetStats(); internal_snprintf(buf, buf_size, "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd" " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n", mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20, mem[MemOther] >> 20, stacks->allocated >> 20, stacks->n_uniq_ids, nlive, nthread); } #if SANITIZER_LINUX void FlushShadowMemoryCallback( const SuspendedThreadsList &suspended_threads_list, void *argument) { ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd()); } #endif void FlushShadowMemory() { #if SANITIZER_LINUX StopTheWorld(FlushShadowMemoryCallback, 0); #endif } #if !SANITIZER_GO // Mark shadow for .rodata sections with the special kShadowRodata marker. // Accesses to .rodata can't race, so this saves time, memory and trace space. static void MapRodata() { // First create temp file. 
const char *tmpdir = GetEnv("TMPDIR"); if (tmpdir == 0) tmpdir = GetEnv("TEST_TMPDIR"); #ifdef P_tmpdir if (tmpdir == 0) tmpdir = P_tmpdir; #endif if (tmpdir == 0) return; char name[256]; internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d", tmpdir, (int)internal_getpid()); uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600); if (internal_iserror(openrv)) return; internal_unlink(name); // Unlink it now, so that we can reuse the buffer. fd_t fd = openrv; // Fill the file with kShadowRodata. const uptr kMarkerSize = 512 * 1024 / sizeof(u64); InternalScopedBuffer<u64> marker(kMarkerSize); // volatile to prevent insertion of memset for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++) *p = kShadowRodata; internal_write(fd, marker.data(), marker.size()); // Map the file into memory. uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, fd, 0); if (internal_iserror(page)) { internal_close(fd); return; } // Map the file into shadow of .rodata sections. MemoryMappingLayout proc_maps(/*cache_enabled*/true); - uptr start, end, offset, prot; // Reusing the buffer 'name'. - while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) { - if (name[0] != 0 && name[0] != '[' - && (prot & MemoryMappingLayout::kProtectionRead) - && (prot & MemoryMappingLayout::kProtectionExecute) - && !(prot & MemoryMappingLayout::kProtectionWrite) - && IsAppMem(start)) { + MemoryMappedSegment segment(name, ARRAY_SIZE(name)); + while (proc_maps.Next(&segment)) { + if (segment.filename[0] != 0 && segment.filename[0] != '[' && + segment.IsReadable() && segment.IsExecutable() && + !segment.IsWritable() && IsAppMem(segment.start)) { // Assume it's .rodata - char *shadow_start = (char*)MemToShadow(start); - char *shadow_end = (char*)MemToShadow(end); + char *shadow_start = (char *)MemToShadow(segment.start); + char *shadow_end = (char *)MemToShadow(segment.end); for (char *p = shadow_start; p < shadow_end; p += marker.size()) { internal_mmap(p, Min(marker.size(), shadow_end - p), PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0); } } } internal_close(fd); } void InitializeShadowMemoryPlatform() { MapRodata(); } #endif // #if !SANITIZER_GO void InitializePlatformEarly() { #ifdef TSAN_RUNTIME_VMA vmaSize = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); #if defined(__aarch64__) if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize); Die(); } #elif defined(__powerpc64__) if (vmaSize != 44 && vmaSize != 46) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); Printf("FATAL: Found %d - Supported 44 and 46\n", vmaSize); Die(); } #endif #endif } void InitializePlatform() { DisableCoreDumperIfNecessary(); // Go maps shadow memory lazily and works fine with limited address space. // Unlimited stack is not a problem either, because the executable // is not compiled with -pie. if (!SANITIZER_GO) { bool reexec = false; // TSan doesn't play well with unlimited stack size (as stack // overlaps with shadow memory). If we detect unlimited stack size, // we re-exec the program with limited stack size as a best effort.
if (StackSizeIsUnlimited()) { const uptr kMaxStackSize = 32 * 1024 * 1024; VReport(1, "Program is run with unlimited stack size, which wouldn't " "work with ThreadSanitizer.\n" "Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize); SetStackSizeLimitInBytes(kMaxStackSize); reexec = true; } if (!AddressSpaceIsUnlimited()) { Report("WARNING: Program is run with limited virtual address space," " which wouldn't work with ThreadSanitizer.\n"); Report("Re-execing with unlimited virtual address space.\n"); SetAddressSpaceUnlimited(); reexec = true; } #if SANITIZER_LINUX && defined(__aarch64__) // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in // linux kernel, the random gap between stack and mapped area is increased // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover // this big range, we should disable randomized virtual space on aarch64. int old_personality = personality(0xffffffff); if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) { VReport(1, "WARNING: Program is run with randomized virtual address " "space, which wouldn't work with ThreadSanitizer.\n" "Re-execing with fixed virtual address space.\n"); CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); reexec = true; } // Initialize the guard pointer used in {sig}{set,long}jump. InitializeGuardPtr(); #endif if (reexec) ReExec(); } #if !SANITIZER_GO CheckAndProtect(); InitTlsSize(); #endif } #if !SANITIZER_GO // Extract file descriptors passed to glibc internal __res_iclose function. // This is required to properly "close" the fds, because we do not see internal // closes within glibc. The code is a pure hack. int ExtractResolvFDs(void *state, int *fds, int nfd) { #if SANITIZER_LINUX && !SANITIZER_ANDROID int cnt = 0; __res_state *statp = (__res_state*)state; for (int i = 0; i < MAXNS && cnt < nfd; i++) { if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1) fds[cnt++] = statp->_u._ext.nssocks[i]; } return cnt; #else return 0; #endif } // Extract file descriptors passed via UNIX domain sockets. // This is required to properly handle "open" of these fds. // see 'man recvmsg' and 'man 3 cmsg'. int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) { int res = 0; msghdr *msg = (msghdr*)msgp; struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) continue; int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]); for (int i = 0; i < n; i++) { fds[res++] = ((int*)CMSG_DATA(cmsg))[i]; if (res == nfd) return res; } } return res; } void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) { // Check that the thr object is in tls; const uptr thr_beg = (uptr)thr; const uptr thr_end = (uptr)thr + sizeof(*thr); CHECK_GE(thr_beg, tls_addr); CHECK_LE(thr_beg, tls_addr + tls_size); CHECK_GE(thr_end, tls_addr); CHECK_LE(thr_end, tls_addr + tls_size); // Since the thr object is huge, skip it. MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr); MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end, tls_addr + tls_size - thr_end); } // Note: this function runs with async signals enabled, // so it must not touch any tsan state. int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg) { // pthread_cleanup_push/pop are hardcore macros mess. // We can't intercept nor call them w/o including pthread.h.
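// Illustrative note (hedged, not in the original source): in glibc's
// <pthread.h> the pair below expands roughly to
//   pthread_cleanup_push(cleanup, arg)
//     => { struct _pthread_cleanup_buffer _b;
//          _pthread_cleanup_push(&_b, cleanup, arg);
//   pthread_cleanup_pop(0)
//     =>   _pthread_cleanup_pop(&_b, 0); }
// i.e. push opens a brace that pop closes, so the two must sit in the same
// lexical scope and cannot be hidden behind an intercepted function call.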
int res; pthread_cleanup_push(cleanup, arg); res = fn(c, m, abstime); pthread_cleanup_pop(0); return res; } #endif #if !SANITIZER_GO void ReplaceSystemMalloc() { } #endif #if !SANITIZER_GO #if SANITIZER_ANDROID // On Android, one thread can call intercepted functions after // DestroyThreadState(), so add a fake thread state for "dead" threads. static ThreadState *dead_thread_state = nullptr; ThreadState *cur_thread() { ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr()); if (thr == nullptr) { __sanitizer_sigset_t emptyset; internal_sigfillset(&emptyset); __sanitizer_sigset_t oldset; CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset)); thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr()); if (thr == nullptr) { thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState), "ThreadState")); *get_android_tls_ptr() = reinterpret_cast<uptr>(thr); if (dead_thread_state == nullptr) { dead_thread_state = reinterpret_cast<ThreadState*>( MmapOrDie(sizeof(ThreadState), "ThreadState")); dead_thread_state->fast_state.SetIgnoreBit(); dead_thread_state->ignore_interceptors = 1; dead_thread_state->is_dead = true; *const_cast<int*>(&dead_thread_state->tid) = -1; CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState), PROT_READ)); } } CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr)); } return thr; } void cur_thread_finalize() { __sanitizer_sigset_t emptyset; internal_sigfillset(&emptyset); __sanitizer_sigset_t oldset; CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset)); ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr()); if (thr != dead_thread_state) { *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state); UnmapOrDie(thr, sizeof(ThreadState)); } CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr)); } #endif // SANITIZER_ANDROID #endif // if !SANITIZER_GO } // namespace __tsan #endif // SANITIZER_LINUX || SANITIZER_FREEBSD Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_mac.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_mac.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_mac.cc (revision 320961) @@ -1,289 +1,297 @@ //===-- tsan_platform_mac.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Mac-specific code.
//===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_MAC #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_flags.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace __tsan { #if !SANITIZER_GO static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) { atomic_uintptr_t *a = (atomic_uintptr_t *)dst; void *val = (void *)atomic_load_relaxed(a); atomic_signal_fence(memory_order_acquire); // Turns the previous load into // acquire wrt signals. if (UNLIKELY(val == nullptr)) { val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); CHECK(val); void *cmp = nullptr; if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val, memory_order_acq_rel)) { internal_munmap(val, size); val = cmp; } } return val; } // On OS X, accessing TLVs via __thread or manually by using pthread_key_* is // problematic, because there are several places where interceptors are called // when TLVs are not accessible (early process startup, thread cleanup, ...). // The following provides a "poor man's TLV" implementation, where we use the // shadow memory of the pointer returned by pthread_self() to store a pointer to // the ThreadState object. The main thread's ThreadState is stored separately // in a static variable, because we need to access it even before the // shadow memory is set up. static uptr main_thread_identity = 0; ALIGNED(64) static char main_thread_state[sizeof(ThreadState)]; ThreadState **cur_thread_location() { ThreadState **thread_identity = (ThreadState **)pthread_self(); return ((uptr)thread_identity == main_thread_identity) ? nullptr : thread_identity; } ThreadState *cur_thread() { ThreadState **thr_state_loc = cur_thread_location(); if (thr_state_loc == nullptr || main_thread_identity == 0) { return (ThreadState *)&main_thread_state; } ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc); ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate( (uptr *)fake_tls, sizeof(ThreadState)); return thr; } // TODO(kuba.brecka): This is not async-signal-safe. In particular, we call // munmap first and then clear `fake_tls`; if we receive a signal in between, // handler will try to access the unmapped ThreadState. void cur_thread_finalize() { ThreadState **thr_state_loc = cur_thread_location(); if (thr_state_loc == nullptr) { // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to // exit the main thread. Let's keep the main thread's ThreadState. 
return; } ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc); internal_munmap(*fake_tls, sizeof(ThreadState)); *fake_tls = nullptr; } #endif void FlushShadowMemory() { } static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) { vm_address_t address = start; vm_address_t end_address = end; uptr resident_pages = 0; uptr dirty_pages = 0; while (address < end_address) { vm_size_t vm_region_size; mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT; vm_region_extended_info_data_t vm_region_info; mach_port_t object_name; kern_return_t ret = vm_region_64( mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&vm_region_info, &count, &object_name); if (ret != KERN_SUCCESS) break; resident_pages += vm_region_info.pages_resident; dirty_pages += vm_region_info.pages_dirtied; address += vm_region_size; } *res = resident_pages * GetPageSizeCached(); *dirty = dirty_pages * GetPageSizeCached(); } void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { uptr shadow_res, shadow_dirty; uptr meta_res, meta_dirty; uptr trace_res, trace_dirty; RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty); RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty); RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty); #if !SANITIZER_GO uptr low_res, low_dirty; uptr high_res, high_dirty; uptr heap_res, heap_dirty; RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty); RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty); RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty); #else // !SANITIZER_GO uptr app_res, app_dirty; RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty); #endif StackDepotStats *stacks = StackDepotGetStats(); internal_snprintf(buf, buf_size, "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" #if !SANITIZER_GO "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" #else // !SANITIZER_GO "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" #endif "stacks: %ld unique IDs, %ld kB allocated\n" "threads: %ld total, %ld live\n" "------------------------------\n", ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024, MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024, TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024, #if !SANITIZER_GO LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024, HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024, HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024, #else // !SANITIZER_GO AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024, #endif stacks->n_uniq_ids, stacks->allocated / 1024, nthread, nlive); } #if !SANITIZER_GO void InitializeShadowMemoryPlatform() { } // On OS X, GCD worker threads are created without a call to pthread_create. We // need to properly register these threads with ThreadCreate and ThreadStart. // These threads don't have a parent thread, as they are created "spuriously". // We're using a libpthread API that notifies us about a newly created thread. // The `thread == pthread_self()` check indicates this is actually a worker // thread. 
If it's just a regular thread, this hook is called on the parent // thread. typedef void (*pthread_introspection_hook_t)(unsigned int event, pthread_t thread, void *addr, size_t size); extern "C" pthread_introspection_hook_t pthread_introspection_hook_install( pthread_introspection_hook_t hook); static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1; static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3; static pthread_introspection_hook_t prev_pthread_introspection_hook; static void my_pthread_introspection_hook(unsigned int event, pthread_t thread, void *addr, size_t size) { if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) { if (thread == pthread_self()) { // The current thread is a newly created GCD worker thread. ThreadState *thr = cur_thread(); Processor *proc = ProcCreate(); ProcWire(proc, thr); ThreadState *parent_thread_state = nullptr; // No parent. int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true); CHECK_NE(tid, 0); ThreadStart(thr, tid, GetTid(), /*workerthread*/ true); } } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) { if (thread == pthread_self()) { ThreadState *thr = cur_thread(); if (thr->tctx) { DestroyThreadState(); } } } if (prev_pthread_introspection_hook != nullptr) prev_pthread_introspection_hook(event, thread, addr, size); } #endif void InitializePlatformEarly() { +#if defined(__aarch64__) + uptr max_vm = GetMaxVirtualAddress() + 1; + if (max_vm != Mapping::kHiAppMemEnd) { + Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n", + max_vm, Mapping::kHiAppMemEnd); + Die(); + } +#endif } void InitializePlatform() { DisableCoreDumperIfNecessary(); #if !SANITIZER_GO CheckAndProtect(); CHECK_EQ(main_thread_identity, 0); main_thread_identity = (uptr)pthread_self(); prev_pthread_introspection_hook = pthread_introspection_hook_install(&my_pthread_introspection_hook); #endif } #if !SANITIZER_GO void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) { // The pointer to the ThreadState object is stored in the shadow memory // of the tls. uptr tls_end = tls_addr + tls_size; ThreadState **thr_state_loc = cur_thread_location(); if (thr_state_loc == nullptr) { MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size); } else { uptr thr_state_start = (uptr)thr_state_loc; uptr thr_state_end = thr_state_start + sizeof(uptr); CHECK_GE(thr_state_start, tls_addr); CHECK_LE(thr_state_start, tls_addr + tls_size); CHECK_GE(thr_state_end, tls_addr); CHECK_LE(thr_state_end, tls_addr + tls_size); MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_state_start - tls_addr); MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end, tls_end - thr_state_end); } } #endif #if !SANITIZER_GO // Note: this function runs with async signals enabled, // so it must not touch any tsan state. int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg) { // pthread_cleanup_push/pop are hardcore macros mess. // We can't intercept nor call them w/o including pthread.h. 
int res; pthread_cleanup_push(cleanup, arg); res = fn(c, m, abstime); pthread_cleanup_pop(0); return res; } #endif } // namespace __tsan #endif // SANITIZER_MAC Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_posix.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_posix.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_posix.cc (revision 320961) @@ -1,151 +1,158 @@ //===-- tsan_platform_posix.cc --------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // POSIX-specific code. //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_POSIX #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "tsan_platform.h" #include "tsan_rtl.h" namespace __tsan { #if !SANITIZER_GO void InitializeShadowMemory() { // Map memory shadow. uptr shadow = (uptr)MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow"); if (shadow != ShadowBeg()) { Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); Printf("FATAL: Make sure to compile with -fPIE and " "to link with -pie (%p, %p).\n", shadow, ShadowBeg()); Die(); } // This memory range is used for thread stacks and large user mmaps. // Frequently a thread uses only a small part of stack and similarly // a program uses a small part of large mmap. On some programs // we see 20% memory usage reduction without huge pages for this range. // FIXME: don't use constants here. #if defined(__x86_64__) const uptr kMadviseRangeBeg = 0x7f0000000000ull; const uptr kMadviseRangeSize = 0x010000000000ull; #elif defined(__mips64) const uptr kMadviseRangeBeg = 0xff00000000ull; const uptr kMadviseRangeSize = 0x0100000000ull; +#elif defined(__aarch64__) && defined(__APPLE__) + uptr kMadviseRangeBeg = LoAppMemBeg(); + uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg(); #elif defined(__aarch64__) uptr kMadviseRangeBeg = 0; uptr kMadviseRangeSize = 0; if (vmaSize == 39) { kMadviseRangeBeg = 0x7d00000000ull; kMadviseRangeSize = 0x0300000000ull; } else if (vmaSize == 42) { kMadviseRangeBeg = 0x3f000000000ull; kMadviseRangeSize = 0x01000000000ull; } else { DCHECK(0); } #elif defined(__powerpc64__) uptr kMadviseRangeBeg = 0; uptr kMadviseRangeSize = 0; if (vmaSize == 44) { kMadviseRangeBeg = 0x0f60000000ull; kMadviseRangeSize = 0x0010000000ull; } else if (vmaSize == 46) { kMadviseRangeBeg = 0x3f0000000000ull; kMadviseRangeSize = 0x010000000000ull; } else { DCHECK(0); } #endif NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg), kMadviseRangeSize * kShadowMultiplier); // Meta shadow is compressing and we don't flush it, // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory. // On one program it reduces memory consumption from 5GB to 2.5GB. NoHugePagesInRegion(MetaShadowBeg(), MetaShadowEnd() - MetaShadowBeg()); if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg()); DPrintf("memory shadow: %zx-%zx (%zuGB)\n", ShadowBeg(), ShadowEnd(), (ShadowEnd() - ShadowBeg()) >> 30); // Map meta shadow. 
uptr meta_size = MetaShadowEnd() - MetaShadowBeg(); uptr meta = (uptr)MmapFixedNoReserve(MetaShadowBeg(), meta_size, "meta shadow"); if (meta != MetaShadowBeg()) { Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); Printf("FATAL: Make sure to compile with -fPIE and " "to link with -pie (%p, %p).\n", meta, MetaShadowBeg()); Die(); } if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(meta, meta_size); DPrintf("meta shadow: %zx-%zx (%zuGB)\n", meta, meta + meta_size, meta_size >> 30); InitializeShadowMemoryPlatform(); } static void ProtectRange(uptr beg, uptr end) { CHECK_LE(beg, end); if (beg == end) return; if (beg != (uptr)MmapFixedNoAccess(beg, end - beg)) { Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end); Printf("FATAL: Make sure you are not using unlimited stack\n"); Die(); } } void CheckAndProtect() { // Ensure that the binary is indeed compiled with -pie. MemoryMappingLayout proc_maps(true); - uptr p, end, prot; - while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) { - if (IsAppMem(p)) + MemoryMappedSegment segment; + while (proc_maps.Next(&segment)) { + if (IsAppMem(segment.start)) continue; + if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue; + if (segment.protection == 0) // Zero page or mprotected. continue; - if (p >= HeapMemEnd() && - p < HeapEnd()) - continue; - if (prot == 0) // Zero page or mprotected. - continue; - if (p >= VdsoBeg()) // vdso + if (segment.start >= VdsoBeg()) // vdso break; - Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end); + Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", + segment.start, segment.end); Die(); } +#if defined(__aarch64__) && defined(__APPLE__) + ProtectRange(HeapMemEnd(), ShadowBeg()); + ProtectRange(ShadowEnd(), MetaShadowBeg()); + ProtectRange(MetaShadowEnd(), TraceMemBeg()); +#else ProtectRange(LoAppMemEnd(), ShadowBeg()); ProtectRange(ShadowEnd(), MetaShadowBeg()); #ifdef TSAN_MID_APP_RANGE ProtectRange(MetaShadowEnd(), MidAppMemBeg()); ProtectRange(MidAppMemEnd(), TraceMemBeg()); #else ProtectRange(MetaShadowEnd(), TraceMemBeg()); #endif // Memory for traces is mapped lazily in MapThreadTrace. // Protect the whole range for now, so that user does not map something here. ProtectRange(TraceMemBeg(), TraceMemEnd()); ProtectRange(TraceMemEnd(), HeapMemBeg()); ProtectRange(HeapEnd(), HiAppMemBeg()); +#endif } #endif } // namespace __tsan #endif // SANITIZER_POSIX Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.cc (revision 320961) @@ -1,1054 +1,1055 @@ //===-- tsan_rtl.cc -------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Main file (entry points) for the TSan run-time. 
//===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include "tsan_defs.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_mman.h" #include "tsan_suppressions.h" #include "tsan_symbolize.h" #include "ubsan/ubsan_init.h" #ifdef __SSE3__ // <emmintrin.h> transitively includes <stdlib.h>, // and it's prohibited to include std headers into tsan runtime. // So we do this dirty trick. #define _MM_MALLOC_H_INCLUDED #define __MM_MALLOC_H #include <emmintrin.h> typedef __m128i m128; #endif volatile int __tsan_resumed = 0; extern "C" void __tsan_resume() { __tsan_resumed = 1; } namespace __tsan { #if !SANITIZER_GO && !SANITIZER_MAC __attribute__((tls_model("initial-exec"))) THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64); #endif static char ctx_placeholder[sizeof(Context)] ALIGNED(64); Context *ctx; // Can be overridden by a front-end. #ifdef TSAN_EXTERNAL_HOOKS bool OnFinalize(bool failed); void OnInitialize(); #else SANITIZER_WEAK_CXX_DEFAULT_IMPL bool OnFinalize(bool failed) { return failed; } SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnInitialize() {} #endif static char thread_registry_placeholder[sizeof(ThreadRegistry)]; static ThreadContextBase *CreateThreadContext(u32 tid) { // Map thread trace when context is created. char name[50]; internal_snprintf(name, sizeof(name), "trace %u", tid); MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name); const uptr hdr = GetThreadTraceHeader(tid); internal_snprintf(name, sizeof(name), "trace header %u", tid); MapThreadTrace(hdr, sizeof(Trace), name); new((void*)hdr) Trace(); // We are going to use only a small part of the trace with the default // value of history_size. However, the constructor writes to the whole trace. // Unmap the unused part. uptr hdr_end = hdr + sizeof(Trace); hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts()); hdr_end = RoundUp(hdr_end, GetPageSizeCached()); if (hdr_end < hdr + sizeof(Trace)) UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end); void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); return new(mem) ThreadContext(tid); } #if !SANITIZER_GO static const u32 kThreadQuarantineSize = 16; #else static const u32 kThreadQuarantineSize = 64; #endif Context::Context() : initialized() , report_mtx(MutexTypeReport, StatMtxReport) , nreported() , nmissed_expected() , thread_registry(new(thread_registry_placeholder) ThreadRegistry( CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)) , racy_mtx(MutexTypeRacy, StatMtxRacy) , racy_stacks(MBlockRacyStacks) , racy_addresses(MBlockRacyAddresses) , fired_suppressions_mtx(MutexTypeFired, StatMtxFired) - , fired_suppressions(8) { + , fired_suppressions(8) + , clock_alloc("clock allocator") { } // The objects are allocated in TLS, so one may rely on zero-initialization. ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, unsigned reuse_count, uptr stk_addr, uptr stk_size, uptr tls_addr, uptr tls_size) : fast_state(tid, epoch) // Do not touch these, rely on zero initialization, // they may be accessed before the ctor.
// , ignore_reads_and_writes() // , ignore_interceptors() , clock(tid, reuse_count) #if !SANITIZER_GO , jmp_bufs(MBlockJmpBuf) #endif , tid(tid) , unique_id(unique_id) , stk_addr(stk_addr) , stk_size(stk_size) , tls_addr(tls_addr) , tls_size(tls_size) #if !SANITIZER_GO , last_sleep_clock(tid) #endif { } #if !SANITIZER_GO static void MemoryProfiler(Context *ctx, fd_t fd, int i) { uptr n_threads; uptr n_running_threads; ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads); InternalScopedBuffer<char> buf(4096); WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads); WriteToFile(fd, buf.data(), internal_strlen(buf.data())); } static void BackgroundThread(void *arg) { // This is a non-initialized non-user thread, nothing to see here. // We don't use ScopedIgnoreInterceptors, because we want ignores to be // enabled even when the thread function exits (e.g. during pthread thread // shutdown code). cur_thread()->ignore_interceptors++; const u64 kMs2Ns = 1000 * 1000; fd_t mprof_fd = kInvalidFd; if (flags()->profile_memory && flags()->profile_memory[0]) { if (internal_strcmp(flags()->profile_memory, "stdout") == 0) { mprof_fd = 1; } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) { mprof_fd = 2; } else { InternalScopedString filename(kMaxPathLength); filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid()); fd_t fd = OpenFile(filename.data(), WrOnly); if (fd == kInvalidFd) { Printf("ThreadSanitizer: failed to open memory profile file '%s'\n", &filename[0]); } else { mprof_fd = fd; } } } u64 last_flush = NanoTime(); uptr last_rss = 0; for (int i = 0; atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0; i++) { SleepForMillis(100); u64 now = NanoTime(); // Flush memory if requested. if (flags()->flush_memory_ms > 0) { if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) { VPrintf(1, "ThreadSanitizer: periodic memory flush\n"); FlushShadowMemory(); last_flush = NanoTime(); } } // GetRSS can be expensive on huge programs, so don't do it every 100ms. if (flags()->memory_limit_mb > 0) { uptr rss = GetRSS(); uptr limit = uptr(flags()->memory_limit_mb) << 20; VPrintf(1, "ThreadSanitizer: memory flush check" " RSS=%llu LAST=%llu LIMIT=%llu\n", (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20); if (2 * rss > limit + last_rss) { VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n"); FlushShadowMemory(); rss = GetRSS(); VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20); } last_rss = rss; } // Write memory profile if requested. if (mprof_fd != kInvalidFd) MemoryProfiler(ctx, mprof_fd, i); // Flush symbolizer cache if requested.
if (flags()->flush_symbolizer_ms > 0) { u64 last = atomic_load(&ctx->last_symbolize_time_ns, memory_order_relaxed); if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) { Lock l(&ctx->report_mtx); SpinMutexLock l2(&CommonSanitizerReportMutex); SymbolizeFlush(); atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed); } } } } static void StartBackgroundThread() { ctx->background_thread = internal_start_thread(&BackgroundThread, 0); } #ifndef __mips__ static void StopBackgroundThread() { atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed); internal_join_thread(ctx->background_thread); ctx->background_thread = 0; } #endif #endif void DontNeedShadowFor(uptr addr, uptr size) { ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size)); } void MapShadow(uptr addr, uptr size) { // Global data is not 64K aligned, but there are no adjacent mappings, // so we can get away with unaligned mapping. // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment const uptr kPageSize = GetPageSizeCached(); uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize); uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize); MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"); // Meta shadow is 2:1, so tread carefully. static bool data_mapped = false; static uptr mapped_meta_end = 0; uptr meta_begin = (uptr)MemToMeta(addr); uptr meta_end = (uptr)MemToMeta(addr + size); meta_begin = RoundDownTo(meta_begin, 64 << 10); meta_end = RoundUpTo(meta_end, 64 << 10); if (!data_mapped) { // First call maps data+bss. data_mapped = true; MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); } else { // Mapping continuous heap. // Windows wants 64K alignment. meta_begin = RoundDownTo(meta_begin, 64 << 10); meta_end = RoundUpTo(meta_end, 64 << 10); if (meta_end <= mapped_meta_end) return; if (meta_begin < mapped_meta_end) meta_begin = mapped_meta_end; MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); mapped_meta_end = meta_end; } VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n", addr, addr+size, meta_begin, meta_end); } void MapThreadTrace(uptr addr, uptr size, const char *name) { DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size); CHECK_GE(addr, TraceMemBeg()); CHECK_LE(addr + size, TraceMemEnd()); CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name); if (addr1 != addr) { Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n", addr, size, addr1); Die(); } } static void CheckShadowMapping() { uptr beg, end; for (int i = 0; GetUserRegion(i, &beg, &end); i++) { // Skip cases for empty regions (heap definition for architectures that // do not use 64-bit allocator). if (beg == end) continue; VPrintf(3, "checking shadow region %p-%p\n", beg, end); uptr prev = 0; for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) { for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) { const uptr p = RoundDown(p0 + x, kShadowCell); if (p < beg || p >= end) continue; const uptr s = MemToShadow(p); const uptr m = (uptr)MemToMeta(p); VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m); CHECK(IsAppMem(p)); CHECK(IsShadowMem(s)); CHECK_EQ(p, ShadowToMem(s)); CHECK(IsMetaMem(m)); if (prev) { // Ensure that shadow and meta mappings are linear within a single // user range. Lots of code that processes memory ranges assumes it.
const uptr prev_s = MemToShadow(prev); const uptr prev_m = (uptr)MemToMeta(prev); CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier); CHECK_EQ((m - prev_m) / kMetaShadowSize, (p - prev) / kMetaShadowCell); } prev = p; } } } } void Initialize(ThreadState *thr) { // Thread safe because done before all threads exist. static bool is_initialized = false; if (is_initialized) return; is_initialized = true; // We are not ready to handle interceptors yet. ScopedIgnoreInterceptors ignore; SanitizerToolName = "ThreadSanitizer"; // Install tool-specific callbacks in sanitizer_common. SetCheckFailedCallback(TsanCheckFailed); ctx = new(ctx_placeholder) Context; const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS"); CacheBinaryName(); InitializeFlags(&ctx->flags, options); AvoidCVE_2016_2143(); InitializePlatformEarly(); #if !SANITIZER_GO // Re-exec ourselves if we need to set additional env or command line args. MaybeReexec(); InitializeAllocator(); ReplaceSystemMalloc(); #endif if (common_flags()->detect_deadlocks) ctx->dd = DDetector::Create(flags()); Processor *proc = ProcCreate(); ProcWire(proc, thr); InitializeInterceptors(); CheckShadowMapping(); InitializePlatform(); InitializeMutex(); InitializeDynamicAnnotations(); #if !SANITIZER_GO InitializeShadowMemory(); InitializeAllocatorLate(); #endif // Setup correct file descriptor for error reports. __sanitizer_set_report_path(common_flags()->log_path); InitializeSuppressions(); #if !SANITIZER_GO InitializeLibIgnore(); Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); // On MIPS, TSan initialization is run before // __pthread_initialize_minimal_internal() is finished, so we can not spawn // new threads. #ifndef __mips__ StartBackgroundThread(); SetSandboxingCallback(StopBackgroundThread); #endif #endif VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n", (int)internal_getpid()); // Initialize thread 0. int tid = ThreadCreate(thr, 0, 0, true); CHECK_EQ(tid, 0); ThreadStart(thr, tid, GetTid(), /*workerthread*/ false); #if TSAN_CONTAINS_UBSAN __ubsan::InitAsPlugin(); #endif ctx->initialized = true; #if !SANITIZER_GO Symbolizer::LateInitialize(); #endif if (flags()->stop_on_start) { Printf("ThreadSanitizer is suspended at startup (pid %d)." " Call __tsan_resume().\n", (int)internal_getpid()); while (__tsan_resumed == 0) {} } OnInitialize(); } int Finalize(ThreadState *thr) { bool failed = false; if (common_flags()->print_module_map == 1) PrintModuleMap(); if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1) SleepForMillis(flags()->atexit_sleep_ms); // Wait for pending reports. ctx->report_mtx.Lock(); CommonSanitizerReportMutex.Lock(); CommonSanitizerReportMutex.Unlock(); ctx->report_mtx.Unlock(); #if !SANITIZER_GO if (Verbosity()) AllocatorPrintStats(); #endif ThreadFinalize(thr); if (ctx->nreported) { failed = true; #if !SANITIZER_GO Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported); #else Printf("Found %d data race(s)\n", ctx->nreported); #endif } if (ctx->nmissed_expected) { failed = true; Printf("ThreadSanitizer: missed %d expected races\n", ctx->nmissed_expected); } if (common_flags()->print_suppressions) PrintMatchedSuppressions(); #if !SANITIZER_GO if (flags()->print_benign) PrintMatchedBenignRaces(); #endif failed = OnFinalize(failed); #if TSAN_COLLECT_STATS StatAggregate(ctx->stat, thr->stat); StatOutput(ctx->stat); #endif return failed ? 
common_flags()->exitcode : 0; } #if !SANITIZER_GO void ForkBefore(ThreadState *thr, uptr pc) { ctx->thread_registry->Lock(); ctx->report_mtx.Lock(); } void ForkParentAfter(ThreadState *thr, uptr pc) { ctx->report_mtx.Unlock(); ctx->thread_registry->Unlock(); } void ForkChildAfter(ThreadState *thr, uptr pc) { ctx->report_mtx.Unlock(); ctx->thread_registry->Unlock(); uptr nthread = 0; ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */); VPrintf(1, "ThreadSanitizer: forked new process with pid %d," " parent had %d threads\n", (int)internal_getpid(), (int)nthread); if (nthread == 1) { StartBackgroundThread(); } else { // We've just forked a multi-threaded process. We cannot reasonably function // after that (some mutexes may be locked before fork). So just enable // ignores for everything in the hope that we will exec soon. ctx->after_multithreaded_fork = true; thr->ignore_interceptors++; ThreadIgnoreBegin(thr, pc); ThreadIgnoreSyncBegin(thr, pc); } } #endif #if SANITIZER_GO NOINLINE void GrowShadowStack(ThreadState *thr) { const int sz = thr->shadow_stack_end - thr->shadow_stack; const int newsz = 2 * sz; uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack, newsz * sizeof(uptr)); internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr)); internal_free(thr->shadow_stack); thr->shadow_stack = newstack; thr->shadow_stack_pos = newstack + sz; thr->shadow_stack_end = newstack + newsz; } #endif u32 CurrentStackId(ThreadState *thr, uptr pc) { if (!thr->is_inited) // May happen during bootstrap. return 0; if (pc != 0) { #if !SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #else if (thr->shadow_stack_pos == thr->shadow_stack_end) GrowShadowStack(thr); #endif thr->shadow_stack_pos[0] = pc; thr->shadow_stack_pos++; } u32 id = StackDepotPut( StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack)); if (pc != 0) thr->shadow_stack_pos--; return id; } void TraceSwitch(ThreadState *thr) { thr->nomalloc++; Trace *thr_trace = ThreadTrace(thr->tid); Lock l(&thr_trace->mtx); unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); TraceHeader *hdr = &thr_trace->headers[trace]; hdr->epoch0 = thr->fast_state.epoch(); ObtainCurrentStack(thr, 0, &hdr->stack0); hdr->mset0 = thr->mset; thr->nomalloc--; } Trace *ThreadTrace(int tid) { return (Trace*)GetThreadTraceHeader(tid); } uptr TraceTopPC(ThreadState *thr) { Event *events = (Event*)GetThreadTrace(thr->tid); uptr pc = events[thr->fast_state.GetTracePos()]; return pc; } uptr TraceSize() { return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1)); } uptr TraceParts() { return TraceSize() / kTracePartSize; } #if !SANITIZER_GO extern "C" void __tsan_trace_switch() { TraceSwitch(cur_thread()); } extern "C" void __tsan_report_race() { ReportRace(cur_thread()); } #endif ALWAYS_INLINE Shadow LoadShadow(u64 *p) { u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed); return Shadow(raw); } ALWAYS_INLINE void StoreShadow(u64 *sp, u64 s) { atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed); } ALWAYS_INLINE void StoreIfNotYetStored(u64 *sp, u64 *s) { StoreShadow(sp, *s); *s = 0; } ALWAYS_INLINE void HandleRace(ThreadState *thr, u64 *shadow_mem, Shadow cur, Shadow old) { thr->racy_state[0] = cur.raw(); thr->racy_state[1] = old.raw(); thr->racy_shadow_addr = shadow_mem; #if !SANITIZER_GO HACKY_CALL(__tsan_report_race); #else ReportRace(thr); #endif } static inline bool HappensBefore(Shadow old, ThreadState *thr) { return 
thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).
  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
#include "tsan_update_shadow_word_inl.h"
  idx = 2;
#include "tsan_update_shadow_word_inl.h"
  idx = 3;
#include "tsan_update_shadow_word_inl.h"
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
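  // Editor's sketch (not part of the original source): the scalar predicate
  // the SIMD below evaluates for each of the kShadowCnt cells, using the
  // same split the shuffles implement (high 32 bits: addr0/size/tid/rw,
  // low 32 bits: epoch); the 1<<15 mask on the high half is the read bit:
  //   u32 want = (u32)(a >> 32);        // current access, high half
  //   u32 have = (u32)(cell >> 32);
  //   if (!is_write) have |= 1u << 15;  // a read matches old reads and writes
  //   bool same  = have == want;
  //   bool fresh = (s32)(u32)cell > (s32)(u32)sync_epoch;  // cmpgt is signed
  //   if (same && fresh) return true;   // redundant access, skip the update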
// load current access into access[0:63] const m128 access = _mm_cvtsi64_si128(a); // duplicate high part of access in addr0: // addr0[0:31] = access[32:63] // addr0[32:63] = access[32:63] // addr0[64:95] = access[32:63] // addr0[96:127] = access[32:63] const m128 addr0 = SHUF(access, access, 1, 1, 1, 1); // load 4 shadow slots const m128 shadow0 = _mm_load_si128((__m128i*)s); const m128 shadow1 = _mm_load_si128((__m128i*)s + 1); // load high parts of 4 shadow slots into addr_vect: // addr_vect[0:31] = shadow0[32:63] // addr_vect[32:63] = shadow0[96:127] // addr_vect[64:95] = shadow1[32:63] // addr_vect[96:127] = shadow1[96:127] m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3); if (!is_write) { // set IsRead bit in addr_vect const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15); const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0); addr_vect = _mm_or_si128(addr_vect, rw_mask); } // addr0 == addr_vect? const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect); // epoch1[0:63] = sync_epoch const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch); // epoch[0:31] = sync_epoch[0:31] // epoch[32:63] = sync_epoch[0:31] // epoch[64:95] = sync_epoch[0:31] // epoch[96:127] = sync_epoch[0:31] const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0); // load low parts of shadow cell epochs into epoch_vect: // epoch_vect[0:31] = shadow0[0:31] // epoch_vect[32:63] = shadow0[64:95] // epoch_vect[64:95] = shadow1[0:31] // epoch_vect[96:127] = shadow1[64:95] const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2); // epoch_vect >= sync_epoch? const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch); // addr_res & epoch_res const m128 res = _mm_and_si128(addr_res, epoch_res); // mask[0] = res[7] // mask[1] = res[15] // ... // mask[15] = res[127] const int mask = _mm_movemask_epi8(res); return mask != 0; } #endif ALWAYS_INLINE bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) { #if defined(__SSE3__) bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write); // NOTE: this check can fail if the shadow is concurrently mutated // by other threads. But it still can be useful if you modify // ContainsSameAccessFast and want to ensure that it's not completely broken. // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write)); return res; #else return ContainsSameAccessSlow(s, a, sync_epoch, is_write); #endif } ALWAYS_INLINE USED void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) { u64 *shadow_mem = (u64*)MemToShadow(addr); DPrintf2("#%d: MemoryAccess: @%p %p size=%d" " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n", (int)thr->fast_state.tid(), (void*)pc, (void*)addr, (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem, (uptr)shadow_mem[0], (uptr)shadow_mem[1], (uptr)shadow_mem[2], (uptr)shadow_mem[3]); #if SANITIZER_DEBUG if (!IsAppMem(addr)) { Printf("Access to non app mem %zx\n", addr); DCHECK(IsAppMem(addr)); } if (!IsShadowMem((uptr)shadow_mem)) { Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); DCHECK(IsShadowMem((uptr)shadow_mem)); } #endif if (!SANITIZER_GO && *shadow_mem == kShadowRodata) { // Access to .rodata section, no races here. // Measurements show that it can be 10-20% of all memory accesses. StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); StatInc(thr, StatMopRodata); return; } FastState fast_state = thr->fast_state; if (fast_state.GetIgnoreBit()) { StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? 
StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cc
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
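  // Editor's note (illustrative): the 1k/4k figures follow from the shadow
  // layout used in this file, assuming the default parameters (kShadowCell =
  // 8 app bytes mapped to kShadowCnt = 4 shadow words of kShadowSize = 8
  // bytes each):
  //   shadow_bytes = size / kShadowCell * kShadowCnt * kShadowSize;
  //   // 1024 app bytes -> 1024 / 8 * 4 * 8 = 4096 shadow bytes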
if (size > 1024) size = 1024; CHECK_EQ(thr->is_freeing, false); thr->is_freeing = true; MemoryAccessRange(thr, pc, addr, size, true); thr->is_freeing = false; if (kCollectHistory) { thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); } Shadow s(thr->fast_state); s.ClearIgnoreBit(); s.MarkAsFreed(); s.SetWrite(true); s.SetAddr0AndSizeLog(0, 3); MemoryRangeSet(thr, pc, addr, size, s.raw()); } void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) { if (kCollectHistory) { thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); } Shadow s(thr->fast_state); s.ClearIgnoreBit(); s.SetWrite(true); s.SetAddr0AndSizeLog(0, 3); MemoryRangeSet(thr, pc, addr, size, s.raw()); } ALWAYS_INLINE USED void FuncEntry(ThreadState *thr, uptr pc) { StatInc(thr, StatFuncEnter); DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc); if (kCollectHistory) { thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc); } // Shadow stack maintenance can be replaced with // stack unwinding during trace switch (which presumably must be faster). DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack); #if !SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #else if (thr->shadow_stack_pos == thr->shadow_stack_end) GrowShadowStack(thr); #endif thr->shadow_stack_pos[0] = pc; thr->shadow_stack_pos++; } ALWAYS_INLINE USED void FuncExit(ThreadState *thr) { StatInc(thr, StatFuncExit); DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid()); if (kCollectHistory) { thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0); } DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack); #if !SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #endif thr->shadow_stack_pos--; } void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) { DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid); thr->ignore_reads_and_writes++; CHECK_GT(thr->ignore_reads_and_writes, 0); thr->fast_state.SetIgnoreBit(); #if !SANITIZER_GO if (save_stack && !ctx->after_multithreaded_fork) thr->mop_ignore_set.Add(CurrentStackId(thr, pc)); #endif } void ThreadIgnoreEnd(ThreadState *thr, uptr pc) { DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid); CHECK_GT(thr->ignore_reads_and_writes, 0); thr->ignore_reads_and_writes--; if (thr->ignore_reads_and_writes == 0) { thr->fast_state.ClearIgnoreBit(); #if !SANITIZER_GO thr->mop_ignore_set.Reset(); #endif } } #if !SANITIZER_GO extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __tsan_testonly_shadow_stack_current_size() { ThreadState *thr = cur_thread(); return thr->shadow_stack_pos - thr->shadow_stack; } #endif void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) { DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid); thr->ignore_sync++; CHECK_GT(thr->ignore_sync, 0); #if !SANITIZER_GO if (save_stack && !ctx->after_multithreaded_fork) thr->sync_ignore_set.Add(CurrentStackId(thr, pc)); #endif } void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) { DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid); CHECK_GT(thr->ignore_sync, 0); thr->ignore_sync--; #if !SANITIZER_GO if (thr->ignore_sync == 0) thr->sync_ignore_set.Reset(); #endif } bool MD5Hash::operator==(const MD5Hash &other) const { return hash[0] == other.hash[0] && hash[1] == other.hash[1]; } #if SANITIZER_DEBUG void build_consistency_debug() {} #else void build_consistency_release() {} #endif #if TSAN_COLLECT_STATS void build_consistency_stats() {} #else void 
build_consistency_nostats() {} #endif } // namespace __tsan #if !SANITIZER_GO // Must be included in this file to make sure everything is inlined. #include "tsan_interface_inl.h" #endif Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_aarch64.S =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_aarch64.S (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_aarch64.S (revision 320961) @@ -1,258 +1,337 @@ +// The content of this file is AArch64-only: +#if defined(__aarch64__) + #include "sanitizer_common/sanitizer_asm.h" +#if !defined(__APPLE__) .section .bss .type __tsan_pointer_chk_guard, %object -.size __tsan_pointer_chk_guard, 8 +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__tsan_pointer_chk_guard)) __tsan_pointer_chk_guard: .zero 8 +#endif +#if defined(__APPLE__) +.align 2 + +.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers +.long _setjmp$non_lazy_ptr +_setjmp$non_lazy_ptr: +.indirect_symbol _setjmp +.long 0 + +.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers +.long __setjmp$non_lazy_ptr +__setjmp$non_lazy_ptr: +.indirect_symbol __setjmp +.long 0 + +.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers +.long _sigsetjmp$non_lazy_ptr +_sigsetjmp$non_lazy_ptr: +.indirect_symbol _sigsetjmp +.long 0 +#endif + +#if !defined(__APPLE__) .section .text +#else +.section __TEXT,__text +.align 3 +#endif +#if !defined(__APPLE__) // GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp // functions) by XORing them with a random guard pointer. For AArch64 it is a // global variable rather than a TCB one (as for x86_64/powerpc) and althought // its value is exported by the loader, it lies within a private GLIBC // namespace (meaning it should be only used by GLIBC itself and the ABI is // not stable). So InitializeGuardPtr obtains the pointer guard value by // issuing a setjmp and checking the resulting pointers values against the // original ones. -.hidden _Z18InitializeGuardPtrv +ASM_HIDDEN(_Z18InitializeGuardPtrv) .global _Z18InitializeGuardPtrv -.type _Z18InitializeGuardPtrv, @function +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv)) _Z18InitializeGuardPtrv: CFI_STARTPROC // Allocates a jmp_buf for the setjmp call. stp x29, x30, [sp, -336]! CFI_DEF_CFA_OFFSET (336) CFI_OFFSET (29, -336) CFI_OFFSET (30, -328) add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) add x0, x29, 24 // Call libc setjmp that mangle the stack pointer value adrp x1, :got:_ZN14__interception12real__setjmpE ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE] ldr x1, [x1] blr x1 // glibc setjmp mangles both the frame pointer (FP, pc+4 on blr) and the // stack pointer (SP). FP will be placed on ((uintptr*)jmp_buf)[11] and // SP at ((uintptr*)jmp_buf)[13]. // The mangle operation is just 'value' xor 'pointer guard value' and // if we know the original value (SP) and the expected one, we can derive // the guard pointer value. mov x0, sp // Loads the mangled SP pointer. 
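  // Editor's sketch (not in the original): the recovery below is plain XOR
  // algebra. glibc stores mangled = sp ^ guard in the jmp_buf, so with sp
  // known the guard follows as:
  //   guard = mangled ^ sp    // since (sp ^ guard) ^ sp == guard
  // x0 currently holds sp; x1 is loaded with the mangled slot next.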
ldr x1, [x29, 128] eor x0, x0, x1 adrp x2, __tsan_pointer_chk_guard str x0, [x2, #:lo12:__tsan_pointer_chk_guard] ldp x29, x30, [sp], 336 CFI_RESTORE (30) CFI_RESTORE (19) CFI_DEF_CFA (31, 0) ret CFI_ENDPROC -.size _Z18InitializeGuardPtrv, .-_Z18InitializeGuardPtrv +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv)) +#endif -.hidden __tsan_setjmp +ASM_HIDDEN(__tsan_setjmp) .comm _ZN14__interception11real_setjmpE,8,8 -.type setjmp, @function -setjmp: +.globl ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp) +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)) +ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp): CFI_STARTPROC // save env parameters for function call stp x29, x30, [sp, -32]! CFI_DEF_CFA_OFFSET (32) CFI_OFFSET (29, -32) CFI_OFFSET (30, -24) // Adjust the SP for previous frame add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) // Save jmp_buf str x19, [sp, 16] CFI_OFFSET (19, -16) mov x19, x0 +#if !defined(__APPLE__) // SP pointer mangling (see glibc setjmp) adrp x2, __tsan_pointer_chk_guard ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard] add x0, x29, 32 eor x1, x2, x0 +#else + add x0, x29, 32 + mov x1, x0 +#endif // call tsan interceptor - bl __tsan_setjmp + bl ASM_TSAN_SYMBOL(__tsan_setjmp) // restore env parameter mov x0, x19 ldr x19, [sp, 16] ldp x29, x30, [sp], 32 CFI_RESTORE (30) CFI_RESTORE (19) CFI_DEF_CFA (31, 0) // tail jump to libc setjmp +#if !defined(__APPLE__) adrp x1, :got:_ZN14__interception11real_setjmpE ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE] ldr x1, [x1] +#else + adrp x1, _setjmp$non_lazy_ptr@page + add x1, x1, _setjmp$non_lazy_ptr@pageoff + ldr x1, [x1] +#endif br x1 CFI_ENDPROC -.size setjmp, .-setjmp +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)) .comm _ZN14__interception12real__setjmpE,8,8 -.globl _setjmp -.type _setjmp, @function -_setjmp: +.globl ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp) +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)) +ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp): CFI_STARTPROC // save env parameters for function call stp x29, x30, [sp, -32]! CFI_DEF_CFA_OFFSET (32) CFI_OFFSET (29, -32) CFI_OFFSET (30, -24) // Adjust the SP for previous frame add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) // Save jmp_buf str x19, [sp, 16] CFI_OFFSET (19, -16) mov x19, x0 +#if !defined(__APPLE__) // SP pointer mangling (see glibc setjmp) adrp x2, __tsan_pointer_chk_guard ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard] add x0, x29, 32 eor x1, x2, x0 +#else + add x0, x29, 32 + mov x1, x0 +#endif // call tsan interceptor - bl __tsan_setjmp + bl ASM_TSAN_SYMBOL(__tsan_setjmp) // Restore jmp_buf parameter mov x0, x19 ldr x19, [sp, 16] ldp x29, x30, [sp], 32 CFI_RESTORE (30) CFI_RESTORE (19) CFI_DEF_CFA (31, 0) // tail jump to libc setjmp +#if !defined(__APPLE__) adrp x1, :got:_ZN14__interception12real__setjmpE ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE] ldr x1, [x1] +#else + adrp x1, __setjmp$non_lazy_ptr@page + add x1, x1, __setjmp$non_lazy_ptr@pageoff + ldr x1, [x1] +#endif br x1 CFI_ENDPROC -.size _setjmp, .-_setjmp +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)) .comm _ZN14__interception14real_sigsetjmpE,8,8 -.globl sigsetjmp -.type sigsetjmp, @function -sigsetjmp: +.globl ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp) +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)) +ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp): CFI_STARTPROC // save env parameters for function call stp x29, x30, [sp, -32]! 
CFI_DEF_CFA_OFFSET (32) CFI_OFFSET (29, -32) CFI_OFFSET (30, -24) // Adjust the SP for previous frame add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) // Save jmp_buf and savesigs stp x19, x20, [sp, 16] CFI_OFFSET (19, -16) CFI_OFFSET (20, -8) mov w20, w1 mov x19, x0 +#if !defined(__APPLE__) // SP pointer mangling (see glibc setjmp) adrp x2, __tsan_pointer_chk_guard ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard] add x0, x29, 32 eor x1, x2, x0 +#else + add x0, x29, 32 + mov x1, x0 +#endif // call tsan interceptor - bl __tsan_setjmp + bl ASM_TSAN_SYMBOL(__tsan_setjmp) // restore env parameter mov w1, w20 mov x0, x19 ldp x19, x20, [sp, 16] ldp x29, x30, [sp], 32 CFI_RESTORE (30) CFI_RESTORE (29) CFI_RESTORE (19) CFI_RESTORE (20) CFI_DEF_CFA (31, 0) // tail jump to libc sigsetjmp +#if !defined(__APPLE__) adrp x2, :got:_ZN14__interception14real_sigsetjmpE ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE] ldr x2, [x2] +#else + adrp x2, _sigsetjmp$non_lazy_ptr@page + add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff + ldr x2, [x2] +#endif br x2 CFI_ENDPROC -.size sigsetjmp, .-sigsetjmp +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)) +#if !defined(__APPLE__) .comm _ZN14__interception16real___sigsetjmpE,8,8 -.globl __sigsetjmp -.type __sigsetjmp, @function -__sigsetjmp: +.globl ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp) +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)) +ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp): CFI_STARTPROC // save env parameters for function call stp x29, x30, [sp, -32]! CFI_DEF_CFA_OFFSET (32) CFI_OFFSET (29, -32) CFI_OFFSET (30, -24) // Adjust the SP for previous frame add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) // Save jmp_buf and savesigs stp x19, x20, [sp, 16] CFI_OFFSET (19, -16) CFI_OFFSET (20, -8) mov w20, w1 mov x19, x0 +#if !defined(__APPLE__) // SP pointer mangling (see glibc setjmp) adrp x2, __tsan_pointer_chk_guard ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard] add x0, x29, 32 eor x1, x2, x0 +#endif // call tsan interceptor - bl __tsan_setjmp + bl ASM_TSAN_SYMBOL(__tsan_setjmp) mov w1, w20 mov x0, x19 ldp x19, x20, [sp, 16] ldp x29, x30, [sp], 32 CFI_RESTORE (30) CFI_RESTORE (29) CFI_RESTORE (19) CFI_RESTORE (20) CFI_DEF_CFA (31, 0) // tail jump to libc __sigsetjmp +#if !defined(__APPLE__) adrp x2, :got:_ZN14__interception16real___sigsetjmpE ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE] ldr x2, [x2] +#else + adrp x2, ASM_TSAN_SYMBOL(__sigsetjmp)@page + add x2, x2, ASM_TSAN_SYMBOL(__sigsetjmp)@pageoff +#endif br x2 CFI_ENDPROC -.size __sigsetjmp, .-__sigsetjmp +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)) +#endif #if defined(__linux__) /* We do not need executable stack. */ .section .note.GNU-stack,"",@progbits +#endif + #endif Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_amd64.S =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_amd64.S (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_amd64.S (revision 320961) @@ -1,359 +1,365 @@ +// The content of this file is x86_64-only: +#if defined(__x86_64__) + #include "sanitizer_common/sanitizer_asm.h" + #if !defined(__APPLE__) .section .text #else .section __TEXT,__text #endif ASM_HIDDEN(__tsan_trace_switch) .globl ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk) ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk): CFI_STARTPROC # Save scratch registers. 
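  # Editor's note (assumes the SysV AMD64 ABI): this thunk is reached from
  # instrumented code at arbitrary points, so it must itself preserve every
  # caller-saved (scratch) register the C++ callee may clobber:
  #   rax, rcx, rdx, rsi, rdi, r8, r9, r10, r11
  # which is exactly the set pushed below; rbx is callee-saved and is pushed
  # separately only to back up rsp across the stack realignment.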
push %rax CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rax, 0) push %rcx CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rcx, 0) push %rdx CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rdx, 0) push %rsi CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rsi, 0) push %rdi CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rdi, 0) push %r8 CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%r8, 0) push %r9 CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%r9, 0) push %r10 CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%r10, 0) push %r11 CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%r11, 0) # Align stack frame. push %rbx # non-scratch CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rbx, 0) mov %rsp, %rbx # save current rsp CFI_DEF_CFA_REGISTER(%rbx) shr $4, %rsp # clear 4 lsb, align to 16 shl $4, %rsp call ASM_TSAN_SYMBOL(__tsan_trace_switch) # Unalign stack frame back. mov %rbx, %rsp # restore the original rsp CFI_DEF_CFA_REGISTER(%rsp) pop %rbx CFI_ADJUST_CFA_OFFSET(-8) # Restore scratch registers. pop %r11 CFI_ADJUST_CFA_OFFSET(-8) pop %r10 CFI_ADJUST_CFA_OFFSET(-8) pop %r9 CFI_ADJUST_CFA_OFFSET(-8) pop %r8 CFI_ADJUST_CFA_OFFSET(-8) pop %rdi CFI_ADJUST_CFA_OFFSET(-8) pop %rsi CFI_ADJUST_CFA_OFFSET(-8) pop %rdx CFI_ADJUST_CFA_OFFSET(-8) pop %rcx CFI_ADJUST_CFA_OFFSET(-8) pop %rax CFI_ADJUST_CFA_OFFSET(-8) CFI_RESTORE(%rax) CFI_RESTORE(%rbx) CFI_RESTORE(%rcx) CFI_RESTORE(%rdx) CFI_RESTORE(%rsi) CFI_RESTORE(%rdi) CFI_RESTORE(%r8) CFI_RESTORE(%r9) CFI_RESTORE(%r10) CFI_RESTORE(%r11) ret CFI_ENDPROC ASM_HIDDEN(__tsan_report_race) .globl ASM_TSAN_SYMBOL(__tsan_report_race_thunk) ASM_TSAN_SYMBOL(__tsan_report_race_thunk): CFI_STARTPROC # Save scratch registers. push %rax CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rax, 0) push %rcx CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rcx, 0) push %rdx CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rdx, 0) push %rsi CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rsi, 0) push %rdi CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rdi, 0) push %r8 CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%r8, 0) push %r9 CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%r9, 0) push %r10 CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%r10, 0) push %r11 CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%r11, 0) # Align stack frame. push %rbx # non-scratch CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rbx, 0) mov %rsp, %rbx # save current rsp CFI_DEF_CFA_REGISTER(%rbx) shr $4, %rsp # clear 4 lsb, align to 16 shl $4, %rsp call ASM_TSAN_SYMBOL(__tsan_report_race) # Unalign stack frame back. mov %rbx, %rsp # restore the original rsp CFI_DEF_CFA_REGISTER(%rsp) pop %rbx CFI_ADJUST_CFA_OFFSET(-8) # Restore scratch registers. 
pop %r11 CFI_ADJUST_CFA_OFFSET(-8) pop %r10 CFI_ADJUST_CFA_OFFSET(-8) pop %r9 CFI_ADJUST_CFA_OFFSET(-8) pop %r8 CFI_ADJUST_CFA_OFFSET(-8) pop %rdi CFI_ADJUST_CFA_OFFSET(-8) pop %rsi CFI_ADJUST_CFA_OFFSET(-8) pop %rdx CFI_ADJUST_CFA_OFFSET(-8) pop %rcx CFI_ADJUST_CFA_OFFSET(-8) pop %rax CFI_ADJUST_CFA_OFFSET(-8) CFI_RESTORE(%rax) CFI_RESTORE(%rbx) CFI_RESTORE(%rcx) CFI_RESTORE(%rdx) CFI_RESTORE(%rsi) CFI_RESTORE(%rdi) CFI_RESTORE(%r8) CFI_RESTORE(%r9) CFI_RESTORE(%r10) CFI_RESTORE(%r11) ret CFI_ENDPROC ASM_HIDDEN(__tsan_setjmp) #if !defined(__APPLE__) .comm _ZN14__interception11real_setjmpE,8,8 #endif .globl ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp) ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)) ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp): CFI_STARTPROC // save env parameter push %rdi CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rdi, 0) // obtain %rsp #if defined(__FreeBSD__) lea 8(%rsp), %rdi mov %rdi, %rsi #elif defined(__APPLE__) lea 16(%rsp), %rdi mov %rdi, %rsi #elif defined(__linux__) lea 16(%rsp), %rdi mov %rdi, %rsi xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) rol $0x11, %rsi #else # error "Unknown platform" #endif // call tsan interceptor call ASM_TSAN_SYMBOL(__tsan_setjmp) // restore env parameter pop %rdi CFI_ADJUST_CFA_OFFSET(-8) CFI_RESTORE(%rdi) // tail jump to libc setjmp movl $0, %eax #if !defined(__APPLE__) movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx jmp *(%rdx) #else jmp ASM_TSAN_SYMBOL(setjmp) #endif CFI_ENDPROC ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)) .comm _ZN14__interception12real__setjmpE,8,8 .globl ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp) ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)) ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp): CFI_STARTPROC // save env parameter push %rdi CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rdi, 0) // obtain %rsp #if defined(__FreeBSD__) lea 8(%rsp), %rdi mov %rdi, %rsi #elif defined(__APPLE__) lea 16(%rsp), %rdi mov %rdi, %rsi #elif defined(__linux__) lea 16(%rsp), %rdi mov %rdi, %rsi xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) rol $0x11, %rsi #else # error "Unknown platform" #endif // call tsan interceptor call ASM_TSAN_SYMBOL(__tsan_setjmp) // restore env parameter pop %rdi CFI_ADJUST_CFA_OFFSET(-8) CFI_RESTORE(%rdi) // tail jump to libc setjmp movl $0, %eax #if !defined(__APPLE__) movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx jmp *(%rdx) #else jmp ASM_TSAN_SYMBOL(_setjmp) #endif CFI_ENDPROC ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)) .comm _ZN14__interception14real_sigsetjmpE,8,8 .globl ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp) ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)) ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp): CFI_STARTPROC // save env parameter push %rdi CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rdi, 0) // save savesigs parameter push %rsi CFI_ADJUST_CFA_OFFSET(8) CFI_REL_OFFSET(%rsi, 0) // align stack frame sub $8, %rsp CFI_ADJUST_CFA_OFFSET(8) // obtain %rsp #if defined(__FreeBSD__) lea 24(%rsp), %rdi mov %rdi, %rsi #elif defined(__APPLE__) lea 32(%rsp), %rdi mov %rdi, %rsi #elif defined(__linux__) lea 32(%rsp), %rdi mov %rdi, %rsi xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) rol $0x11, %rsi #else # error "Unknown platform" #endif // call tsan interceptor call ASM_TSAN_SYMBOL(__tsan_setjmp) // unalign stack frame add $8, %rsp CFI_ADJUST_CFA_OFFSET(-8) // restore savesigs parameter pop %rsi CFI_ADJUST_CFA_OFFSET(-8) CFI_RESTORE(%rsi) // restore env parameter pop %rdi CFI_ADJUST_CFA_OFFSET(-8) CFI_RESTORE(%rdi) // tail jump to libc sigsetjmp movl $0, 
%eax
#if !defined(__APPLE__)
  movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
  jmp *(%rdx)
#else
  jmp ASM_TSAN_SYMBOL(sigsetjmp)
#endif
  CFI_ENDPROC
ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))

#if !defined(__APPLE__)
.comm _ZN14__interception16real___sigsetjmpE,8,8
.globl ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)
ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
  CFI_STARTPROC
  // save env parameter
  push %rdi
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rdi, 0)
  // save savesigs parameter
  push %rsi
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rsi, 0)
  // align stack frame
  sub $8, %rsp
  CFI_ADJUST_CFA_OFFSET(8)
  // obtain %rsp
#if defined(__FreeBSD__)
  lea 24(%rsp), %rdi
  mov %rdi, %rsi
#else
  lea 32(%rsp), %rdi
  mov %rdi, %rsi
  xor %fs:0x30, %rsi  // magic mangling of rsp (see libc setjmp)
  rol $0x11, %rsi
#endif
  // call tsan interceptor
  call ASM_TSAN_SYMBOL(__tsan_setjmp)
  // unalign stack frame
  add $8, %rsp
  CFI_ADJUST_CFA_OFFSET(-8)
  // restore savesigs parameter
  pop %rsi
  CFI_ADJUST_CFA_OFFSET(-8)
  CFI_RESTORE(%rsi)
  // restore env parameter
  pop %rdi
  CFI_ADJUST_CFA_OFFSET(-8)
  CFI_RESTORE(%rdi)
  // tail jump to libc sigsetjmp
  movl $0, %eax
  movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
  jmp *(%rdx)
  CFI_ENDPROC
ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
#endif  // !defined(__APPLE__)

#if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
+#endif
+
#endif

Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_mutex.cc
===================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_mutex.cc (revision 320960)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_mutex.cc (revision 320961)
@@ -1,538 +1,538 @@
//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
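  // Editor's example (hypothetical user code, not part of the runtime): the
  // kind of misuse this helper reports, here a ReportTypeMutexDoubleLock:
  //   pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  //   pthread_mutex_lock(&m);
  //   pthread_mutex_lock(&m);  // same thread, non-recursive mutex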
if (SANITIZER_GO) return; ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(typ); rep.AddMutex(mid); VarSizeStackTrace trace; ObtainCurrentStack(thr, pc, &trace); rep.AddStack(trace, true); rep.AddLocation(addr, 1); OutputReport(thr, rep); } void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz); StatInc(thr, StatMutexCreate); if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) { CHECK(!thr->is_freeing); thr->is_freeing = true; MemoryWrite(thr, pc, addr, kSizeLog1); thr->is_freeing = false; } SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); s->SetFlags(flagz & MutexCreationFlagMask); if (!SANITIZER_GO && s->creation_stack_id == 0) s->creation_stack_id = CurrentStackId(thr, pc); s->mtx.Unlock(); } void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr); StatInc(thr, StatMutexDestroy); SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true); if (s == 0) return; if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit)) { // Destroy is no-op for linker-initialized mutexes. s->mtx.Unlock(); return; } if (common_flags()->detect_deadlocks) { Callback cb(thr, pc); ctx->dd->MutexDestroy(&cb, &s->dd); ctx->dd->MutexInit(&cb, &s->dd); } bool unlock_locked = false; if (flags()->report_destroy_locked && s->owner_tid != SyncVar::kInvalidTid && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); unlock_locked = true; } u64 mid = s->GetId(); u32 last_lock = s->last_lock; if (!unlock_locked) s->Reset(thr->proc()); // must not reset it before the report is printed s->mtx.Unlock(); if (unlock_locked) { ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeMutexDestroyLocked); rep.AddMutex(mid); VarSizeStackTrace trace; ObtainCurrentStack(thr, pc, &trace); rep.AddStack(trace); FastState last(last_lock); RestoreStack(last.tid(), last.epoch(), &trace, 0); rep.AddStack(trace, true); rep.AddLocation(addr, 1); OutputReport(thr, rep); SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true); if (s != 0) { s->Reset(thr->proc()); s->mtx.Unlock(); } } thr->mset.Remove(mid); // Imitate a memory write to catch unlock-destroy races. // Do this outside of sync mutex, because it can report a race which locks // sync mutexes. if (IsAppMem(addr)) { CHECK(!thr->is_freeing); thr->is_freeing = true; MemoryWrite(thr, pc, addr, kSizeLog1); thr->is_freeing = false; } // s will be destroyed and freed in MetaMap::FreeBlock. 
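  // Editor's example (hypothetical user code): the unlock-destroy race the
  // imitated write above is designed to catch:
  //   // thread A:                    // thread B:
  //   pthread_mutex_unlock(&m);       pthread_mutex_destroy(&m);
  // If B's destroy overlaps A's unlock, the two writes to the mutex memory
  // are reported as an ordinary data race on &m.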
} void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); s->UpdateFlags(flagz); if (s->owner_tid != thr->tid) { Callback cb(thr, pc); ctx->dd->MutexBeforeLock(&cb, &s->dd, true); s->mtx.ReadUnlock(); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } else { s->mtx.ReadUnlock(); } } } void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) { DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n", thr->tid, addr, flagz, rec); if (flagz & MutexFlagRecursiveLock) CHECK_GT(rec, 0); else rec = 1; if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); s->UpdateFlags(flagz); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId()); bool report_double_lock = false; if (s->owner_tid == SyncVar::kInvalidTid) { CHECK_EQ(s->recursion, 0); s->owner_tid = thr->tid; s->last_lock = thr->fast_state.raw(); } else if (s->owner_tid == thr->tid) { CHECK_GT(s->recursion, 0); } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_double_lock = true; } const bool first = s->recursion == 0; s->recursion += rec; if (first) { StatInc(thr, StatMutexLock); AcquireImpl(thr, pc, &s->clock); AcquireImpl(thr, pc, &s->read_clock); } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) { StatInc(thr, StatMutexRecLock); } thr->mset.Add(s->GetId(), true, thr->fast_state.epoch()); bool pre_lock = false; if (first && common_flags()->detect_deadlocks) { pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock); Callback cb(thr, pc); if (pre_lock) ctx->dd->MutexBeforeLock(&cb, &s->dd, true); ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock); } u64 mid = s->GetId(); s->mtx.Unlock(); // Can't touch s after this point. s = 0; if (report_double_lock) ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid); if (first && pre_lock && common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); int rec = 0; bool report_bad_unlock = false; if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) { if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_bad_unlock = true; } } else { rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1; s->recursion -= rec; if (s->recursion == 0) { StatInc(thr, StatMutexUnlock); s->owner_tid = SyncVar::kInvalidTid; ReleaseStoreImpl(thr, pc, &s->clock); } else { StatInc(thr, StatMutexRecUnlock); } } thr->mset.Del(s->GetId(), true); if (common_flags()->detect_deadlocks && s->recursion == 0 && !report_bad_unlock) { Callback cb(thr, pc); ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true); } u64 mid = s->GetId(); s->mtx.Unlock(); // Can't touch s after this point. 
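// Editor's note (illustrative): mid stays usable after s->mtx.Unlock()
// because GetId() packs the sync address and uid into one u64; report code
// later splits it back, roughly:
//   u64 uid;
//   uptr addr = SyncVar::SplitId(mid, &uid);  // cf. ScopedReport::AddMutex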
if (report_bad_unlock) ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid); if (common_flags()->detect_deadlocks && !report_bad_unlock) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } return rec; } void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); s->UpdateFlags(flagz); Callback cb(thr, pc); ctx->dd->MutexBeforeLock(&cb, &s->dd, false); s->mtx.ReadUnlock(); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); StatInc(thr, StatMutexReadLock); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); s->UpdateFlags(flagz); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId()); bool report_bad_lock = false; if (s->owner_tid != SyncVar::kInvalidTid) { if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_bad_lock = true; } } AcquireImpl(thr, pc, &s->clock); s->last_lock = thr->fast_state.raw(); thr->mset.Add(s->GetId(), false, thr->fast_state.epoch()); bool pre_lock = false; if (common_flags()->detect_deadlocks) { pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock); Callback cb(thr, pc); if (pre_lock) ctx->dd->MutexBeforeLock(&cb, &s->dd, false); ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock); } u64 mid = s->GetId(); s->mtx.ReadUnlock(); // Can't touch s after this point. s = 0; if (report_bad_lock) ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid); if (pre_lock && common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr); StatInc(thr, StatMutexReadUnlock); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); bool report_bad_unlock = false; if (s->owner_tid != SyncVar::kInvalidTid) { if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_bad_unlock = true; } } ReleaseImpl(thr, pc, &s->read_clock); if (common_flags()->detect_deadlocks && s->recursion == 0) { Callback cb(thr, pc); ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false); } u64 mid = s->GetId(); s->mtx.Unlock(); // Can't touch s after this point. thr->mset.Del(mid, false); if (report_bad_unlock) ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid); if (common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); bool write = true; bool report_bad_unlock = false; if (s->owner_tid == SyncVar::kInvalidTid) { // Seems to be read unlock. 
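// Editor's note (illustrative): the branches here disambiguate purely on
// owner_tid, since the caller did not say which kind of unlock this is:
//   owner_tid == SyncVar::kInvalidTid -> no writer, treat as read unlock
//   owner_tid == thr->tid             -> we own it, treat as write unlock
//   anything else                     -> broken usage, report bad unlock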
write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
-    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
-  else
-    thr->clock.set(tctx->tid, tctx->epoch1);
+    epoch = tctx->thr->fast_state.epoch();
+  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
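  // Editor's sketch (not in the original): how Release/ReleaseStore pair
  // with Acquire above to order two plain memory accesses:
  //   Release(thr1, pc, (uptr)&x);  // thr1's vector clock -> s->clock
  //   Acquire(thr2, pc, (uptr)&x);  // thr2->clock.acquire(..., &s->clock)
  // After the acquire, thr2's clock dominates thr1's epoch at the release,
  // so thr1's earlier writes to x no longer race with thr2's accesses.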
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
-    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
-  else
-    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
+    epoch = tctx->thr->fast_state.epoch();
+  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep %zx\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

} // namespace __tsan

Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc
===================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc (revision 320960)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc (revision 320961)
@@ -1,736 +1,736 @@
//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
// //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_suppressions.h" #include "tsan_symbolize.h" #include "tsan_report.h" #include "tsan_sync.h" #include "tsan_mman.h" #include "tsan_flags.h" #include "tsan_fd.h" namespace __tsan { using namespace __sanitizer; // NOLINT static ReportStack *SymbolizeStack(StackTrace trace); void TsanCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) { // There is high probability that interceptors will check-fail as well, // on the other hand there is no sense in processing interceptors // since we are going to die soon. ScopedIgnoreInterceptors ignore; #if !SANITIZER_GO cur_thread()->ignore_sync++; cur_thread()->ignore_reads_and_writes++; #endif Printf("FATAL: ThreadSanitizer CHECK failed: " "%s:%d \"%s\" (0x%zx, 0x%zx)\n", file, line, cond, (uptr)v1, (uptr)v2); PrintCurrentStackSlow(StackTrace::GetCurrentPc()); Die(); } // Can be overriden by an application/test to intercept reports. #ifdef TSAN_EXTERNAL_HOOKS bool OnReport(const ReportDesc *rep, bool suppressed); #else SANITIZER_WEAK_CXX_DEFAULT_IMPL bool OnReport(const ReportDesc *rep, bool suppressed) { (void)rep; return suppressed; } #endif SANITIZER_WEAK_DEFAULT_IMPL void __tsan_on_report(const ReportDesc *rep) { (void)rep; } static void StackStripMain(SymbolizedStack *frames) { SymbolizedStack *last_frame = nullptr; SymbolizedStack *last_frame2 = nullptr; for (SymbolizedStack *cur = frames; cur; cur = cur->next) { last_frame2 = last_frame; last_frame = cur; } if (last_frame2 == 0) return; #if !SANITIZER_GO const char *last = last_frame->info.function; const char *last2 = last_frame2->info.function; // Strip frame above 'main' if (last2 && 0 == internal_strcmp(last2, "main")) { last_frame->ClearAll(); last_frame2->next = nullptr; // Strip our internal thread start routine. } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) { last_frame->ClearAll(); last_frame2->next = nullptr; // Strip global ctors init. } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) { last_frame->ClearAll(); last_frame2->next = nullptr; // If both are 0, then we probably just failed to symbolize. } else if (last || last2) { // Ensure that we recovered stack completely. Trimmed stack // can actually happen if we do not instrument some code, // so it's only a debug print. However we must try hard to not miss it // due to our fault. DPrintf("Bottom stack frame is missed\n"); } #else // The last frame always point into runtime (gosched0, goexit0, runtime.main). last_frame->ClearAll(); last_frame2->next = nullptr; #endif } ReportStack *SymbolizeStackId(u32 stack_id) { if (stack_id == 0) return 0; StackTrace stack = StackDepotGet(stack_id); if (stack.trace == nullptr) return nullptr; return SymbolizeStack(stack); } static ReportStack *SymbolizeStack(StackTrace trace) { if (trace.size == 0) return 0; SymbolizedStack *top = nullptr; for (uptr si = 0; si < trace.size; si++) { const uptr pc = trace.trace[si]; uptr pc1 = pc; // We obtain the return address, but we're interested in the previous // instruction. 
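// Editor's note (illustrative): frames store return addresses, which point
// just past the call instruction; symbolizing them unadjusted can blame the
// following source line. Hence, conceptually:
//   uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);  // e.g. pc - 1 on x86
// External PCs (kExternalPCBit set) are opaque tags and are left as-is.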
if ((pc & kExternalPCBit) == 0) pc1 = StackTrace::GetPreviousInstructionPc(pc); SymbolizedStack *ent = SymbolizeCode(pc1); CHECK_NE(ent, 0); SymbolizedStack *last = ent; while (last->next) { last->info.address = pc; // restore original pc for report last = last->next; } last->info.address = pc; // restore original pc for report last->next = top; top = ent; } StackStripMain(top); ReportStack *stack = ReportStack::New(); stack->frames = top; return stack; } ScopedReport::ScopedReport(ReportType typ, uptr tag) { ctx->thread_registry->CheckLocked(); void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc)); rep_ = new(mem) ReportDesc; rep_->typ = typ; rep_->tag = tag; ctx->report_mtx.Lock(); CommonSanitizerReportMutex.Lock(); } ScopedReport::~ScopedReport() { CommonSanitizerReportMutex.Unlock(); ctx->report_mtx.Unlock(); DestroyAndFree(rep_); } void ScopedReport::AddStack(StackTrace stack, bool suppressable) { ReportStack **rs = rep_->stacks.PushBack(); *rs = SymbolizeStack(stack); (*rs)->suppressable = suppressable; } void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack, const MutexSet *mset) { void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop)); ReportMop *mop = new(mem) ReportMop; rep_->mops.PushBack(mop); mop->tid = s.tid(); mop->addr = addr + s.addr0(); mop->size = s.size(); mop->write = s.IsWrite(); mop->atomic = s.IsAtomic(); mop->stack = SymbolizeStack(stack); mop->external_tag = external_tag; if (mop->stack) mop->stack->suppressable = true; for (uptr i = 0; i < mset->Size(); i++) { MutexSet::Desc d = mset->Get(i); u64 mid = this->AddMutex(d.id); ReportMopMutex mtx = {mid, d.write}; mop->mset.PushBack(mtx); } } void ScopedReport::AddUniqueTid(int unique_tid) { rep_->unique_tids.PushBack(unique_tid); } void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) { for (uptr i = 0; i < rep_->threads.Size(); i++) { if ((u32)rep_->threads[i]->id == tctx->tid) return; } void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); ReportThread *rt = new(mem) ReportThread; rep_->threads.PushBack(rt); rt->id = tctx->tid; rt->os_id = tctx->os_id; rt->running = (tctx->status == ThreadStatusRunning); rt->name = internal_strdup(tctx->name); rt->parent_tid = tctx->parent_tid; rt->workerthread = tctx->workerthread; rt->stack = 0; rt->stack = SymbolizeStackId(tctx->creation_stack_id); if (rt->stack) rt->stack->suppressable = suppressable; } #if !SANITIZER_GO static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) { int unique_id = *(int *)arg; return tctx->unique_id == (u32)unique_id; } static ThreadContext *FindThreadByUidLocked(int unique_id) { ctx->thread_registry->CheckLocked(); return static_cast( ctx->thread_registry->FindThreadContextLocked( FindThreadByUidLockedCallback, &unique_id)); } static ThreadContext *FindThreadByTidLocked(int tid) { ctx->thread_registry->CheckLocked(); return static_cast( ctx->thread_registry->GetThreadLocked(tid)); } static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) { uptr addr = (uptr)arg; ThreadContext *tctx = static_cast(tctx_base); if (tctx->status != ThreadStatusRunning) return false; ThreadState *thr = tctx->thr; CHECK(thr); return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) || (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size)); } ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) { ctx->thread_registry->CheckLocked(); ThreadContext *tctx = static_cast( 
ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls, (void*)addr)); if (!tctx) return 0; ThreadState *thr = tctx->thr; CHECK(thr); *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size); return tctx; } #endif void ScopedReport::AddThread(int unique_tid, bool suppressable) { #if !SANITIZER_GO if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid)) AddThread(tctx, suppressable); #endif } void ScopedReport::AddMutex(const SyncVar *s) { for (uptr i = 0; i < rep_->mutexes.Size(); i++) { if (rep_->mutexes[i]->id == s->uid) return; } void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); ReportMutex *rm = new(mem) ReportMutex; rep_->mutexes.PushBack(rm); rm->id = s->uid; rm->addr = s->addr; rm->destroyed = false; rm->stack = SymbolizeStackId(s->creation_stack_id); } u64 ScopedReport::AddMutex(u64 id) { u64 uid = 0; u64 mid = id; uptr addr = SyncVar::SplitId(id, &uid); SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true); // Check that the mutex is still alive. // Another mutex can be created at the same address, // so check uid as well. if (s && s->CheckId(uid)) { mid = s->uid; AddMutex(s); } else { AddDeadMutex(id); } if (s) s->mtx.Unlock(); return mid; } void ScopedReport::AddDeadMutex(u64 id) { for (uptr i = 0; i < rep_->mutexes.Size(); i++) { if (rep_->mutexes[i]->id == id) return; } void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); ReportMutex *rm = new(mem) ReportMutex; rep_->mutexes.PushBack(rm); rm->id = id; rm->addr = 0; rm->destroyed = true; rm->stack = 0; } void ScopedReport::AddLocation(uptr addr, uptr size) { if (addr == 0) return; #if !SANITIZER_GO int fd = -1; - int creat_tid = -1; + int creat_tid = kInvalidTid; u32 creat_stack = 0; if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) { ReportLocation *loc = ReportLocation::New(ReportLocationFD); loc->fd = fd; loc->tid = creat_tid; loc->stack = SymbolizeStackId(creat_stack); rep_->locs.PushBack(loc); ThreadContext *tctx = FindThreadByUidLocked(creat_tid); if (tctx) AddThread(tctx); return; } MBlock *b = 0; Allocator *a = allocator(); if (a->PointerIsMine((void*)addr)) { void *block_begin = a->GetBlockBegin((void*)addr); if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin); } if (b != 0) { ThreadContext *tctx = FindThreadByTidLocked(b->tid); ReportLocation *loc = ReportLocation::New(ReportLocationHeap); loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr); loc->heap_chunk_size = b->siz; loc->external_tag = b->tag; loc->tid = tctx ? tctx->tid : b->tid; loc->stack = SymbolizeStackId(b->stk); rep_->locs.PushBack(loc); if (tctx) AddThread(tctx); return; } bool is_stack = false; if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) { ReportLocation *loc = ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS); loc->tid = tctx->tid; rep_->locs.PushBack(loc); AddThread(tctx); } #endif if (ReportLocation *loc = SymbolizeData(addr)) { loc->suppressable = true; rep_->locs.PushBack(loc); return; } } #if !SANITIZER_GO void ScopedReport::AddSleep(u32 stack_id) { rep_->sleep = SymbolizeStackId(stack_id); } #endif void ScopedReport::SetCount(int count) { rep_->count = count; } const ReportDesc *ScopedReport::GetReport() const { return rep_; } void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, MutexSet *mset, uptr *tag) { // This function restores stack trace and mutex set for the thread/epoch. 
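// Editor's sketch (not in the original): the replay loop below maintains the
// stack incrementally from trace events, roughly:
//   for (uptr i = ebegin; i <= eend; i++) {
//     if (typ == EventTypeFuncEnter) stack[pos++] = pc;          // push frame
//     else if (typ == EventTypeFuncExit) { if (pos > 0) pos--; } // pop frame
//     else if (typ == EventTypeMop) stack[pos] = pc;  // leaf pc of the access
//   }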
// It does so by getting stack trace and mutex set at the beginning of // trace part, and then replaying the trace till the given epoch. Trace* trace = ThreadTrace(tid); ReadLock l(&trace->mtx); const int partidx = (epoch / kTracePartSize) % TraceParts(); TraceHeader* hdr = &trace->headers[partidx]; if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize) return; CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0); const u64 epoch0 = RoundDown(epoch, TraceSize()); const u64 eend = epoch % TraceSize(); const u64 ebegin = RoundDown(eend, kTracePartSize); DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx); Vector<uptr> stack(MBlockReportStack); stack.Resize(hdr->stack0.size + 64); for (uptr i = 0; i < hdr->stack0.size; i++) { stack[i] = hdr->stack0.trace[i]; DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]); } if (mset) *mset = hdr->mset0; uptr pos = hdr->stack0.size; Event *events = (Event*)GetThreadTrace(tid); for (uptr i = ebegin; i <= eend; i++) { Event ev = events[i]; EventType typ = (EventType)(ev >> 61); uptr pc = (uptr)(ev & ((1ull << 61) - 1)); DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc); if (typ == EventTypeMop) { stack[pos] = pc; } else if (typ == EventTypeFuncEnter) { if (stack.Size() < pos + 2) stack.Resize(pos + 2); stack[pos++] = pc; } else if (typ == EventTypeFuncExit) { if (pos > 0) pos--; } if (mset) { if (typ == EventTypeLock) { mset->Add(pc, true, epoch0 + i); } else if (typ == EventTypeUnlock) { mset->Del(pc, true); } else if (typ == EventTypeRLock) { mset->Add(pc, false, epoch0 + i); } else if (typ == EventTypeRUnlock) { mset->Del(pc, false); } } for (uptr j = 0; j <= pos; j++) DPrintf2(" #%zu: %zx\n", j, stack[j]); } if (pos == 0 && stack[0] == 0) return; pos++; stk->Init(&stack[0], pos); ExtractTagFromStack(stk, tag); } static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], uptr addr_min, uptr addr_max) { bool equal_stack = false; RacyStacks hash; bool equal_address = false; RacyAddress ra0 = {addr_min, addr_max}; { ReadLock lock(&ctx->racy_mtx); if (flags()->suppress_equal_stacks) { hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { if (hash == ctx->racy_stacks[i]) { VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n"); equal_stack = true; break; } } } if (flags()->suppress_equal_addresses) { for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { RacyAddress ra2 = ctx->racy_addresses[i]; uptr maxbeg = max(ra0.addr_min, ra2.addr_min); uptr minend = min(ra0.addr_max, ra2.addr_max); if (maxbeg < minend) { VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n"); equal_address = true; break; } } } } if (!equal_stack && !equal_address) return false; if (!equal_stack) { Lock lock(&ctx->racy_mtx); ctx->racy_stacks.PushBack(hash); } if (!equal_address) { Lock lock(&ctx->racy_mtx); ctx->racy_addresses.PushBack(ra0); } return true; } static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], uptr addr_min, uptr addr_max) { Lock lock(&ctx->racy_mtx); if (flags()->suppress_equal_stacks) { RacyStacks hash; hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); ctx->racy_stacks.PushBack(hash); } if (flags()->suppress_equal_addresses) { RacyAddress ra0 = {addr_min, addr_max};
ctx->racy_addresses.PushBack(ra0); } } bool OutputReport(ThreadState *thr, const ScopedReport &srep) { if (!flags()->report_bugs || thr->suppress_reports) return false; atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime()); const ReportDesc *rep = srep.GetReport(); CHECK_EQ(thr->current_report, nullptr); thr->current_report = rep; Suppression *supp = 0; uptr pc_or_addr = 0; for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++) pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp); for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++) pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp); for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++) pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp); for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++) pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp); if (pc_or_addr != 0) { Lock lock(&ctx->fired_suppressions_mtx); FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp}; ctx->fired_suppressions.push_back(s); } { bool old_is_freeing = thr->is_freeing; thr->is_freeing = false; bool suppressed = OnReport(rep, pc_or_addr != 0); thr->is_freeing = old_is_freeing; if (suppressed) { thr->current_report = nullptr; return false; } } PrintReport(rep); __tsan_on_report(rep); ctx->nreported++; if (flags()->halt_on_error) Die(); thr->current_report = nullptr; return true; } bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) { ReadLock lock(&ctx->fired_suppressions_mtx); for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { if (ctx->fired_suppressions[k].type != type) continue; for (uptr j = 0; j < trace.size; j++) { FiredSuppression *s = &ctx->fired_suppressions[k]; if (trace.trace[j] == s->pc_or_addr) { if (s->supp) atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed); return true; } } } return false; } static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) { ReadLock lock(&ctx->fired_suppressions_mtx); for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { if (ctx->fired_suppressions[k].type != type) continue; FiredSuppression *s = &ctx->fired_suppressions[k]; if (addr == s->pc_or_addr) { if (s->supp) atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed); return true; } } return false; } static bool RaceBetweenAtomicAndFree(ThreadState *thr) { Shadow s0(thr->racy_state[0]); Shadow s1(thr->racy_state[1]); CHECK(!(s0.IsAtomic() && s1.IsAtomic())); if (!s0.IsAtomic() && !s1.IsAtomic()) return true; if (s0.IsAtomic() && s1.IsFreed()) return true; if (s1.IsAtomic() && thr->is_freeing) return true; return false; } void ReportRace(ThreadState *thr) { CheckNoLocks(thr); // Symbolizer makes lots of intercepted calls. If we try to process them, // at best it will cause deadlocks on internal mutexes. 
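// For illustration (a simplified sketch, assuming the definition in
// tsan_rtl.h): the guard created below is plain RAII around the
// interceptor-ignore counter, roughly
//   struct ScopedIgnoreInterceptors {
//     ScopedIgnoreInterceptors()  { cur_thread()->ignore_interceptors++; }
//     ~ScopedIgnoreInterceptors() { cur_thread()->ignore_interceptors--; }
//   };
// so libc calls made by the symbolizer bypass the interceptors for the
// duration of report generation instead of re-entering the runtime.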
ScopedIgnoreInterceptors ignore; if (!flags()->report_bugs) return; if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr)) return; bool freed = false; { Shadow s(thr->racy_state[1]); freed = s.GetFreedAndReset(); thr->racy_state[1] = s.raw(); } uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr); uptr addr_min = 0; uptr addr_max = 0; { uptr a0 = addr + Shadow(thr->racy_state[0]).addr0(); uptr a1 = addr + Shadow(thr->racy_state[1]).addr0(); uptr e0 = a0 + Shadow(thr->racy_state[0]).size(); uptr e1 = a1 + Shadow(thr->racy_state[1]).size(); addr_min = min(a0, a1); addr_max = max(e0, e1); if (IsExpectedReport(addr_min, addr_max - addr_min)) return; } ReportType typ = ReportTypeRace; if (thr->is_vptr_access && freed) typ = ReportTypeVptrUseAfterFree; else if (thr->is_vptr_access) typ = ReportTypeVptrRace; else if (freed) typ = ReportTypeUseAfterFree; if (IsFiredSuppression(ctx, typ, addr)) return; const uptr kMop = 2; VarSizeStackTrace traces[kMop]; uptr tags[kMop] = {kExternalTagNone}; const uptr toppc = TraceTopPC(thr); ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]); if (IsFiredSuppression(ctx, typ, traces[0])) return; // MutexSet is too large to live on stack. Vector<u64> mset_buffer(MBlockScopedBuf); mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1); MutexSet *mset2 = new(&mset_buffer[0]) MutexSet(); Shadow s2(thr->racy_state[1]); RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]); if (IsFiredSuppression(ctx, typ, traces[1])) return; if (HandleRacyStacks(thr, traces, addr_min, addr_max)) return; // If any of the accesses has a tag, treat this as an "external" race. uptr tag = kExternalTagNone; for (uptr i = 0; i < kMop; i++) { if (tags[i] != kExternalTagNone) { typ = ReportTypeExternalRace; tag = tags[i]; break; } } ThreadRegistryLock l0(ctx->thread_registry); ScopedReport rep(typ, tag); for (uptr i = 0; i < kMop; i++) { Shadow s(thr->racy_state[i]); rep.AddMemoryAccess(addr, tags[i], s, traces[i], i == 0 ? &thr->mset : mset2); } for (uptr i = 0; i < kMop; i++) { FastState s(thr->racy_state[i]); ThreadContext *tctx = static_cast<ThreadContext *>( ctx->thread_registry->GetThreadLocked(s.tid())); if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1) continue; rep.AddThread(tctx); } rep.AddLocation(addr_min, addr_max - addr_min); #if !SANITIZER_GO { // NOLINT Shadow s(thr->racy_state[1]); if (s.epoch() <= thr->last_sleep_clock.get(s.tid())) rep.AddSleep(thr->last_sleep_stack_id); } #endif if (!OutputReport(thr, rep)) return; AddRacyStacks(thr, traces, addr_min, addr_max); } void PrintCurrentStack(ThreadState *thr, uptr pc) { VarSizeStackTrace trace; ObtainCurrentStack(thr, pc, &trace); PrintStack(SymbolizeStack(trace)); } // Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes // __sanitizer_print_stack_trace exists in the actual unwinded stack, but // tail-call to PrintCurrentStackSlow breaks this assumption because // __sanitizer_print_stack_trace disappears after tail-call. // However, this solution is not reliable enough, please see dvyukov's comment // http://reviews.llvm.org/D19148#406208 // Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE void PrintCurrentStackSlow(uptr pc) { #if !SANITIZER_GO BufferedStackTrace *ptrace = new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace))) BufferedStackTrace(); ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false); for (uptr i = 0; i < ptrace->size / 2; i++) { uptr tmp = ptrace->trace_buffer[i]; ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1]; ptrace->trace_buffer[ptrace->size - i - 1] = tmp; } PrintStack(SymbolizeStack(*ptrace)); #endif } } // namespace __tsan using namespace __tsan; extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_print_stack_trace() { PrintCurrentStackSlow(StackTrace::GetCurrentPc()); } } // extern "C" Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_thread.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_thread.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_thread.cc (revision 320961) @@ -1,397 +1,401 @@ //===-- tsan_rtl_thread.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_rtl.h" #include "tsan_mman.h" #include "tsan_platform.h" #include "tsan_report.h" #include "tsan_sync.h" namespace __tsan { // ThreadContext implementation. ThreadContext::ThreadContext(int tid) : ThreadContextBase(tid) , thr() , sync() , epoch0() , epoch1() { } #if !SANITIZER_GO ThreadContext::~ThreadContext() { } #endif void ThreadContext::OnDead() { CHECK_EQ(sync.size(), 0); } void ThreadContext::OnJoined(void *arg) { ThreadState *caller_thr = static_cast<ThreadState *>(arg); AcquireImpl(caller_thr, 0, &sync); sync.Reset(&caller_thr->proc()->clock_cache); } struct OnCreatedArgs { ThreadState *thr; uptr pc; }; void ThreadContext::OnCreated(void *arg) { thr = 0; if (tid == 0) return; OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg); if (!args->thr) // GCD workers don't have a parent thread. return; args->thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0); ReleaseImpl(args->thr, 0, &sync); creation_stack_id = CurrentStackId(args->thr, args->pc); if (reuse_count == 0) StatInc(args->thr, StatThreadMaxTid); } void ThreadContext::OnReset() { CHECK_EQ(sync.size(), 0); uptr trace_p = GetThreadTrace(tid); ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event)); //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace)); } void ThreadContext::OnDetached(void *arg) { ThreadState *thr1 = static_cast<ThreadState *>(arg); sync.Reset(&thr1->proc()->clock_cache); } struct OnStartedArgs { ThreadState *thr; uptr stk_addr; uptr stk_size; uptr tls_addr; uptr tls_size; }; void ThreadContext::OnStarted(void *arg) { OnStartedArgs *args = static_cast<OnStartedArgs *>(arg); thr = args->thr; // RoundUp so that one trace part does not contain events // from different threads.
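// For illustration (a sketch, not part of this patch): tsan's RoundUp helper
// assumes a power-of-two alignment and computes (x + align - 1) & ~(align - 1),
// so the assignment below parks a reused tid at the first slot of a fresh
// trace part. E.g., assuming a part size of 1 << 13 events:
//   constexpr u64 kPart = 1ull << 13;  // stand-in for kTracePartSize
//   static_assert(((10000 + 1 + kPart - 1) & ~(kPart - 1)) == 16384,
//                 "old and new incarnations never share a trace part");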
epoch0 = RoundUp(epoch1 + 1, kTracePartSize); epoch1 = (u64)-1; new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size); #if !SANITIZER_GO thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0]; thr->shadow_stack_pos = thr->shadow_stack; thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize; #else // Setup dynamic shadow stack. const int kInitStackSize = 8; thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack, kInitStackSize * sizeof(uptr)); thr->shadow_stack_pos = thr->shadow_stack; thr->shadow_stack_end = thr->shadow_stack + kInitStackSize; #endif if (common_flags()->detect_deadlocks) thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id); thr->fast_state.SetHistorySize(flags()->history_size); // Commit switch to the new part of the trace. // TraceAddEvent will reset stack0/mset0 in the new part for us. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); thr->fast_synch_epoch = epoch0; AcquireImpl(thr, 0, &sync); StatInc(thr, StatSyncAcquire); sync.Reset(&thr->proc()->clock_cache); thr->is_inited = true; DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx " "tls_addr=%zx tls_size=%zx\n", tid, (uptr)epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size); } void ThreadContext::OnFinished() { #if SANITIZER_GO internal_free(thr->shadow_stack); thr->shadow_stack = nullptr; thr->shadow_stack_pos = nullptr; thr->shadow_stack_end = nullptr; #endif if (!detached) { thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); ReleaseImpl(thr, 0, &sync); } epoch1 = thr->fast_state.epoch(); if (common_flags()->detect_deadlocks) ctx->dd->DestroyLogicalThread(thr->dd_lt); + thr->clock.ResetCached(&thr->proc()->clock_cache); +#if !SANITIZER_GO + thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache); +#endif thr->~ThreadState(); #if TSAN_COLLECT_STATS StatAggregate(ctx->stat, thr->stat); #endif thr = 0; } #if !SANITIZER_GO struct ThreadLeak { ThreadContext *tctx; int count; }; static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) { Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg; ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base); if (tctx->detached || tctx->status != ThreadStatusFinished) return; for (uptr i = 0; i < leaks.Size(); i++) { if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) { leaks[i].count++; return; } } ThreadLeak leak = {tctx, 1}; leaks.PushBack(leak); } #endif #if !SANITIZER_GO static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) { if (tctx->tid == 0) { Printf("ThreadSanitizer: main thread finished with ignores enabled\n"); } else { Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled," " created at:\n", tctx->tid, tctx->name); PrintStack(SymbolizeStackId(tctx->creation_stack_id)); } Printf(" One of the following ignores was not ended" " (in order of probability)\n"); for (uptr i = 0; i < set->Size(); i++) { Printf(" Ignore was enabled at:\n"); PrintStack(SymbolizeStackId(set->At(i))); } Die(); } static void ThreadCheckIgnore(ThreadState *thr) { if (ctx->after_multithreaded_fork) return; if (thr->ignore_reads_and_writes) ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set); if (thr->ignore_sync) ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set); } #else static void ThreadCheckIgnore(ThreadState *thr) {} #endif void ThreadFinalize(ThreadState *thr) { ThreadCheckIgnore(thr); #if !SANITIZER_GO if (!flags()->report_thread_leaks)
return; ThreadRegistryLock l(ctx->thread_registry); Vector<ThreadLeak> leaks(MBlockScopedBuf); ctx->thread_registry->RunCallbackForEachThreadLocked( MaybeReportThreadLeak, &leaks); for (uptr i = 0; i < leaks.Size(); i++) { ScopedReport rep(ReportTypeThreadLeak); rep.AddThread(leaks[i].tctx, true); rep.SetCount(leaks[i].count); OutputReport(thr, rep); } #endif } int ThreadCount(ThreadState *thr) { uptr result; ctx->thread_registry->GetNumberOfThreads(0, 0, &result); return (int)result; } int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { StatInc(thr, StatThreadCreate); OnCreatedArgs args = { thr, pc }; u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers. int tid = ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args); DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid); StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads()); return tid; } void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread) { uptr stk_addr = 0; uptr stk_size = 0; uptr tls_addr = 0; uptr tls_size = 0; #if !SANITIZER_GO GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size); if (tid) { if (stk_addr && stk_size) MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size); if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size); } #endif ThreadRegistry *tr = ctx->thread_registry; OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size }; tr->StartThread(tid, os_id, workerthread, &args); tr->Lock(); thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid); tr->Unlock(); #if !SANITIZER_GO if (ctx->after_multithreaded_fork) { thr->ignore_interceptors++; ThreadIgnoreBegin(thr, 0); ThreadIgnoreSyncBegin(thr, 0); } #endif } void ThreadFinish(ThreadState *thr) { ThreadCheckIgnore(thr); StatInc(thr, StatThreadFinish); if (thr->stk_addr && thr->stk_size) DontNeedShadowFor(thr->stk_addr, thr->stk_size); if (thr->tls_addr && thr->tls_size) DontNeedShadowFor(thr->tls_addr, thr->tls_size); thr->is_dead = true; ctx->thread_registry->FinishThread(thr->tid); } static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { uptr uid = (uptr)arg; if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) { tctx->user_id = 0; return true; } return false; } int ThreadTid(ThreadState *thr, uptr pc, uptr uid) { int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid); DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res); return res; } void ThreadJoin(ThreadState *thr, uptr pc, int tid) { CHECK_GT(tid, 0); CHECK_LT(tid, kMaxTid); DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid); ctx->thread_registry->JoinThread(tid, thr); } void ThreadDetach(ThreadState *thr, uptr pc, int tid) { CHECK_GT(tid, 0); CHECK_LT(tid, kMaxTid); ctx->thread_registry->DetachThread(tid, thr); } void ThreadSetName(ThreadState *thr, const char *name) { ctx->thread_registry->SetThreadName(thr->tid, name); } void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size, bool is_write) { if (size == 0) return; u64 *shadow_mem = (u64*)MemToShadow(addr); DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n", thr->tid, (void*)pc, (void*)addr, (int)size, is_write); #if SANITIZER_DEBUG if (!IsAppMem(addr)) { Printf("Access to non app mem %zx\n", addr); DCHECK(IsAppMem(addr)); } if (!IsAppMem(addr + size - 1)) { Printf("Access to non app mem %zx\n", addr + size - 1); DCHECK(IsAppMem(addr + size - 1)); } if (!IsShadowMem((uptr)shadow_mem)) { Printf("Bad shadow addr %p (%zx)\n", shadow_mem,
addr); DCHECK(IsShadowMem((uptr)shadow_mem)); } if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) { Printf("Bad shadow addr %p (%zx)\n", shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1); DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))); } #endif StatInc(thr, StatMopRange); if (*shadow_mem == kShadowRodata) { DCHECK(!is_write); // Access to .rodata section, no races here. // Measurements show that it can be 10-20% of all memory accesses. StatInc(thr, StatMopRangeRodata); return; } FastState fast_state = thr->fast_state; if (fast_state.GetIgnoreBit()) return; fast_state.IncrementEpoch(); thr->fast_state = fast_state; TraceAddEvent(thr, fast_state, EventTypeMop, pc); bool unaligned = (addr % kShadowCell) != 0; // Handle unaligned beginning, if any. for (; addr % kShadowCell && size; addr++, size--) { int const kAccessSizeLog = 0; Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem, cur); } if (unaligned) shadow_mem += kShadowCnt; // Handle middle part, if any. for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) { int const kAccessSizeLog = 3; Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(0, kAccessSizeLog); MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem, cur); shadow_mem += kShadowCnt; } // Handle ending, if any. for (; size; addr++, size--) { int const kAccessSizeLog = 0; Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem, cur); } } } // namespace __tsan Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_stat.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_stat.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_stat.cc (revision 320961) @@ -1,190 +1,187 @@ //===-- tsan_stat.cc ------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
// //===----------------------------------------------------------------------===// #include "tsan_stat.h" #include "tsan_rtl.h" namespace __tsan { #if TSAN_COLLECT_STATS void StatAggregate(u64 *dst, u64 *src) { for (int i = 0; i < StatCnt; i++) dst[i] += src[i]; } void StatOutput(u64 *stat) { stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero]; static const char *name[StatCnt] = {}; name[StatMop] = "Memory accesses "; name[StatMopRead] = " Including reads "; name[StatMopWrite] = " writes "; name[StatMop1] = " Including size 1 "; name[StatMop2] = " size 2 "; name[StatMop4] = " size 4 "; name[StatMop8] = " size 8 "; name[StatMopSame] = " Including same "; name[StatMopIgnored] = " Including ignored "; name[StatMopRange] = " Including range "; name[StatMopRodata] = " Including .rodata "; name[StatMopRangeRodata] = " Including .rodata range "; name[StatShadowProcessed] = "Shadow processed "; name[StatShadowZero] = " Including empty "; name[StatShadowNonZero] = " Including non empty "; name[StatShadowSameSize] = " Including same size "; name[StatShadowIntersect] = " intersect "; name[StatShadowNotIntersect] = " not intersect "; name[StatShadowSameThread] = " Including same thread "; name[StatShadowAnotherThread] = " another thread "; name[StatShadowReplace] = " Including evicted "; name[StatFuncEnter] = "Function entries "; name[StatFuncExit] = "Function exits "; name[StatEvents] = "Events collected "; name[StatThreadCreate] = "Total threads created "; name[StatThreadFinish] = " threads finished "; name[StatThreadReuse] = " threads reused "; name[StatThreadMaxTid] = " max tid "; name[StatThreadMaxAlive] = " max alive threads "; name[StatMutexCreate] = "Mutexes created "; name[StatMutexDestroy] = " destroyed "; name[StatMutexLock] = " lock "; name[StatMutexUnlock] = " unlock "; name[StatMutexRecLock] = " recursive lock "; name[StatMutexRecUnlock] = " recursive unlock "; name[StatMutexReadLock] = " read lock "; name[StatMutexReadUnlock] = " read unlock "; name[StatSyncCreated] = "Sync objects created "; name[StatSyncDestroyed] = " destroyed "; name[StatSyncAcquire] = " acquired "; name[StatSyncRelease] = " released "; name[StatClockAcquire] = "Clock acquire "; name[StatClockAcquireEmpty] = " empty clock "; name[StatClockAcquireFastRelease] = " fast from release-store "; - name[StatClockAcquireLarge] = " contains my tid "; - name[StatClockAcquireRepeat] = " repeated (fast) "; name[StatClockAcquireFull] = " full (slow) "; name[StatClockAcquiredSomething] = " acquired something "; name[StatClockRelease] = "Clock release "; name[StatClockReleaseResize] = " resize "; - name[StatClockReleaseFast1] = " fast1 "; - name[StatClockReleaseFast2] = " fast2 "; + name[StatClockReleaseFast] = " fast "; name[StatClockReleaseSlow] = " dirty overflow (slow) "; name[StatClockReleaseFull] = " full (slow) "; name[StatClockReleaseAcquired] = " was acquired "; name[StatClockReleaseClearTail] = " clear tail "; name[StatClockStore] = "Clock release store "; name[StatClockStoreResize] = " resize "; name[StatClockStoreFast] = " fast "; name[StatClockStoreFull] = " slow "; name[StatClockStoreTail] = " clear tail "; name[StatClockAcquireRelease] = "Clock acquire-release "; name[StatAtomic] = "Atomic operations "; name[StatAtomicLoad] = " Including load "; name[StatAtomicStore] = " store "; name[StatAtomicExchange] = " exchange "; name[StatAtomicFetchAdd] = " fetch_add "; name[StatAtomicFetchSub] = " fetch_sub "; name[StatAtomicFetchAnd] = " fetch_and "; name[StatAtomicFetchOr] = " fetch_or "; 
name[StatAtomicFetchXor] = " fetch_xor "; name[StatAtomicFetchNand] = " fetch_nand "; name[StatAtomicCAS] = " compare_exchange "; name[StatAtomicFence] = " fence "; name[StatAtomicRelaxed] = " Including relaxed "; name[StatAtomicConsume] = " consume "; name[StatAtomicAcquire] = " acquire "; name[StatAtomicRelease] = " release "; name[StatAtomicAcq_Rel] = " acq_rel "; name[StatAtomicSeq_Cst] = " seq_cst "; name[StatAtomic1] = " Including size 1 "; name[StatAtomic2] = " size 2 "; name[StatAtomic4] = " size 4 "; name[StatAtomic8] = " size 8 "; name[StatAtomic16] = " size 16 "; name[StatAnnotation] = "Dynamic annotations "; name[StatAnnotateHappensBefore] = " HappensBefore "; name[StatAnnotateHappensAfter] = " HappensAfter "; name[StatAnnotateCondVarSignal] = " CondVarSignal "; name[StatAnnotateCondVarSignalAll] = " CondVarSignalAll "; name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB "; name[StatAnnotateCondVarWait] = " CondVarWait "; name[StatAnnotateRWLockCreate] = " RWLockCreate "; name[StatAnnotateRWLockCreateStatic] = " StatAnnotateRWLockCreateStatic "; name[StatAnnotateRWLockDestroy] = " RWLockDestroy "; name[StatAnnotateRWLockAcquired] = " RWLockAcquired "; name[StatAnnotateRWLockReleased] = " RWLockReleased "; name[StatAnnotateTraceMemory] = " TraceMemory "; name[StatAnnotateFlushState] = " FlushState "; name[StatAnnotateNewMemory] = " NewMemory "; name[StatAnnotateNoOp] = " NoOp "; name[StatAnnotateFlushExpectedRaces] = " FlushExpectedRaces "; name[StatAnnotateEnableRaceDetection] = " EnableRaceDetection "; name[StatAnnotateMutexIsUsedAsCondVar] = " MutexIsUsedAsCondVar "; name[StatAnnotatePCQGet] = " PCQGet "; name[StatAnnotatePCQPut] = " PCQPut "; name[StatAnnotatePCQDestroy] = " PCQDestroy "; name[StatAnnotatePCQCreate] = " PCQCreate "; name[StatAnnotateExpectRace] = " ExpectRace "; name[StatAnnotateBenignRaceSized] = " BenignRaceSized "; name[StatAnnotateBenignRace] = " BenignRace "; name[StatAnnotateIgnoreReadsBegin] = " IgnoreReadsBegin "; name[StatAnnotateIgnoreReadsEnd] = " IgnoreReadsEnd "; name[StatAnnotateIgnoreWritesBegin] = " IgnoreWritesBegin "; name[StatAnnotateIgnoreWritesEnd] = " IgnoreWritesEnd "; name[StatAnnotateIgnoreSyncBegin] = " IgnoreSyncBegin "; name[StatAnnotateIgnoreSyncEnd] = " IgnoreSyncEnd "; name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange "; name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange "; name[StatAnnotateThreadName] = " ThreadName "; name[Stat__tsan_mutex_create] = " __tsan_mutex_create "; name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy "; name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock "; name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock "; name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock "; name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock "; name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal "; name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal "; name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert "; name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert "; name[StatMtxTotal] = "Contentionz "; name[StatMtxTrace] = " Trace "; name[StatMtxThreads] = " Threads "; name[StatMtxReport] = " Report "; name[StatMtxSyncVar] = " SyncVar "; name[StatMtxSyncTab] = " SyncTab "; name[StatMtxSlab] = " Slab "; name[StatMtxAtExit] = " Atexit "; name[StatMtxAnnotations] = " Annotations "; name[StatMtxMBlock] = " MBlock "; name[StatMtxDeadlockDetector] = " DeadlockDetector "; name[StatMtxFired] = " FiredSuppressions "; name[StatMtxRacy] = " 
RacyStacks "; name[StatMtxFD] = " FD "; name[StatMtxGlobalProc] = " GlobalProc "; Printf("Statistics:\n"); for (int i = 0; i < StatCnt; i++) Printf("%s: %16zu\n", name[i], (uptr)stat[i]); } #endif } // namespace __tsan Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_stat.h =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_stat.h (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_stat.h (revision 320961) @@ -1,194 +1,191 @@ //===-- tsan_stat.h ---------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #ifndef TSAN_STAT_H #define TSAN_STAT_H namespace __tsan { enum StatType { // Memory access processing related stuff. StatMop, StatMopRead, StatMopWrite, StatMop1, // These must be consequtive. StatMop2, StatMop4, StatMop8, StatMopSame, StatMopIgnored, StatMopRange, StatMopRodata, StatMopRangeRodata, StatShadowProcessed, StatShadowZero, StatShadowNonZero, // Derived. StatShadowSameSize, StatShadowIntersect, StatShadowNotIntersect, StatShadowSameThread, StatShadowAnotherThread, StatShadowReplace, // Func processing. StatFuncEnter, StatFuncExit, // Trace processing. StatEvents, // Threads. StatThreadCreate, StatThreadFinish, StatThreadReuse, StatThreadMaxTid, StatThreadMaxAlive, // Mutexes. StatMutexCreate, StatMutexDestroy, StatMutexLock, StatMutexUnlock, StatMutexRecLock, StatMutexRecUnlock, StatMutexReadLock, StatMutexReadUnlock, // Synchronization. StatSyncCreated, StatSyncDestroyed, StatSyncAcquire, StatSyncRelease, // Clocks - acquire. StatClockAcquire, StatClockAcquireEmpty, StatClockAcquireFastRelease, - StatClockAcquireLarge, - StatClockAcquireRepeat, StatClockAcquireFull, StatClockAcquiredSomething, // Clocks - release. StatClockRelease, StatClockReleaseResize, - StatClockReleaseFast1, - StatClockReleaseFast2, + StatClockReleaseFast, StatClockReleaseSlow, StatClockReleaseFull, StatClockReleaseAcquired, StatClockReleaseClearTail, // Clocks - release store. StatClockStore, StatClockStoreResize, StatClockStoreFast, StatClockStoreFull, StatClockStoreTail, // Clocks - acquire-release. StatClockAcquireRelease, // Atomics. StatAtomic, StatAtomicLoad, StatAtomicStore, StatAtomicExchange, StatAtomicFetchAdd, StatAtomicFetchSub, StatAtomicFetchAnd, StatAtomicFetchOr, StatAtomicFetchXor, StatAtomicFetchNand, StatAtomicCAS, StatAtomicFence, StatAtomicRelaxed, StatAtomicConsume, StatAtomicAcquire, StatAtomicRelease, StatAtomicAcq_Rel, StatAtomicSeq_Cst, StatAtomic1, StatAtomic2, StatAtomic4, StatAtomic8, StatAtomic16, // Dynamic annotations. 
StatAnnotation, StatAnnotateHappensBefore, StatAnnotateHappensAfter, StatAnnotateCondVarSignal, StatAnnotateCondVarSignalAll, StatAnnotateMutexIsNotPHB, StatAnnotateCondVarWait, StatAnnotateRWLockCreate, StatAnnotateRWLockCreateStatic, StatAnnotateRWLockDestroy, StatAnnotateRWLockAcquired, StatAnnotateRWLockReleased, StatAnnotateTraceMemory, StatAnnotateFlushState, StatAnnotateNewMemory, StatAnnotateNoOp, StatAnnotateFlushExpectedRaces, StatAnnotateEnableRaceDetection, StatAnnotateMutexIsUsedAsCondVar, StatAnnotatePCQGet, StatAnnotatePCQPut, StatAnnotatePCQDestroy, StatAnnotatePCQCreate, StatAnnotateExpectRace, StatAnnotateBenignRaceSized, StatAnnotateBenignRace, StatAnnotateIgnoreReadsBegin, StatAnnotateIgnoreReadsEnd, StatAnnotateIgnoreWritesBegin, StatAnnotateIgnoreWritesEnd, StatAnnotateIgnoreSyncBegin, StatAnnotateIgnoreSyncEnd, StatAnnotatePublishMemoryRange, StatAnnotateUnpublishMemoryRange, StatAnnotateThreadName, Stat__tsan_mutex_create, Stat__tsan_mutex_destroy, Stat__tsan_mutex_pre_lock, Stat__tsan_mutex_post_lock, Stat__tsan_mutex_pre_unlock, Stat__tsan_mutex_post_unlock, Stat__tsan_mutex_pre_signal, Stat__tsan_mutex_post_signal, Stat__tsan_mutex_pre_divert, Stat__tsan_mutex_post_divert, // Internal mutex contentionz. StatMtxTotal, StatMtxTrace, StatMtxThreads, StatMtxReport, StatMtxSyncVar, StatMtxSyncTab, StatMtxSlab, StatMtxAnnotations, StatMtxAtExit, StatMtxMBlock, StatMtxDeadlockDetector, StatMtxFired, StatMtxRacy, StatMtxFD, StatMtxGlobalProc, // This must be the last. StatCnt }; } // namespace __tsan #endif // TSAN_STAT_H Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_sync.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_sync.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_sync.cc (revision 320961) @@ -1,294 +1,296 @@ //===-- tsan_sync.cc ------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
// //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_sync.h" #include "tsan_rtl.h" #include "tsan_mman.h" namespace __tsan { void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s); SyncVar::SyncVar() : mtx(MutexTypeSyncVar, StatMtxSyncVar) { Reset(0); } void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) { this->addr = addr; this->uid = uid; this->next = 0; creation_stack_id = 0; if (!SANITIZER_GO) // Go does not use them creation_stack_id = CurrentStackId(thr, pc); if (common_flags()->detect_deadlocks) DDMutexInit(thr, pc, this); } void SyncVar::Reset(Processor *proc) { uid = 0; creation_stack_id = 0; owner_tid = kInvalidTid; last_lock = 0; recursion = 0; atomic_store_relaxed(&flags, 0); if (proc == 0) { CHECK_EQ(clock.size(), 0); CHECK_EQ(read_clock.size(), 0); } else { clock.Reset(&proc->clock_cache); read_clock.Reset(&proc->clock_cache); } } -MetaMap::MetaMap() { +MetaMap::MetaMap() + : block_alloc_("heap block allocator") + , sync_alloc_("sync allocator") { atomic_store(&uid_gen_, 0, memory_order_relaxed); } void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) { u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache); MBlock *b = block_alloc_.Map(idx); b->siz = sz; b->tag = 0; b->tid = thr->tid; b->stk = CurrentStackId(thr, pc); u32 *meta = MemToMeta(p); DCHECK_EQ(*meta, 0); *meta = idx | kFlagBlock; } uptr MetaMap::FreeBlock(Processor *proc, uptr p) { MBlock* b = GetBlock(p); if (b == 0) return 0; uptr sz = RoundUpTo(b->siz, kMetaShadowCell); FreeRange(proc, p, sz); return sz; } bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) { bool has_something = false; u32 *meta = MemToMeta(p); u32 *end = MemToMeta(p + sz); if (end == meta) end++; for (; meta < end; meta++) { u32 idx = *meta; if (idx == 0) { // Note: don't write to meta in this case -- the block can be huge. continue; } *meta = 0; has_something = true; while (idx != 0) { if (idx & kFlagBlock) { block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask); break; } else if (idx & kFlagSync) { DCHECK(idx & kFlagSync); SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask); u32 next = s->next; s->Reset(proc); sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask); idx = next; } else { CHECK(0); } } } return has_something; } // ResetRange removes all meta objects from the range. // It is called for large mmap-ed regions. The function is best-effort wrt // freeing of meta objects, because we don't want to page in the whole range // which can be huge. The function probes pages one-by-one until it finds a page // without meta objects, at this point it stops freeing meta objects. Because // thread stacks grow top-down, we do the same starting from end as well. void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) { if (SANITIZER_GO) { // UnmapOrDie/MmapFixedNoReserve does not work on Windows, // so we do the optimization only for C/C++. FreeRange(proc, p, sz); return; } const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize; const uptr kPageSize = GetPageSizeCached() * kMetaRatio; if (sz <= 4 * kPageSize) { // If the range is small, just do the normal free procedure. FreeRange(proc, p, sz); return; } // First, round both ends of the range to page size. 
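// For illustration (a worked example, assuming 4K meta pages; kPageSize here
// is really GetPageSizeCached() * kMetaRatio): the trimming below frees the
// unaligned head and tail eagerly and leaves an aligned middle to probe page
// by page. With p = 0x1100 and sz = 0x3000:
//   head = RoundUp(p, kPageSize) - p             == 0xF00  (freed eagerly)
//   tail = p + sz - RoundDown(p + sz, kPageSize) == 0x100  (freed eagerly)
//   remaining range to probe: [0x2000, 0x4000), page-aligned on both ends.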
uptr diff = RoundUp(p, kPageSize) - p; if (diff != 0) { FreeRange(proc, p, diff); p += diff; sz -= diff; } diff = p + sz - RoundDown(p + sz, kPageSize); if (diff != 0) { FreeRange(proc, p + sz - diff, diff); sz -= diff; } // Now we must have a non-empty page-aligned range. CHECK_GT(sz, 0); CHECK_EQ(p, RoundUp(p, kPageSize)); CHECK_EQ(sz, RoundUp(sz, kPageSize)); const uptr p0 = p; const uptr sz0 = sz; // Probe start of the range. for (uptr checked = 0; sz > 0; checked += kPageSize) { bool has_something = FreeRange(proc, p, kPageSize); p += kPageSize; sz -= kPageSize; if (!has_something && checked > (128 << 10)) break; } // Probe end of the range. for (uptr checked = 0; sz > 0; checked += kPageSize) { bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize); sz -= kPageSize; // Stacks grow down, so sync object are most likely at the end of the region // (if it is a stack). The very end of the stack is TLS and tsan increases // TLS by at least 256K, so check at least 512K. if (!has_something && checked > (512 << 10)) break; } // Finally, page out the whole range (including the parts that we've just // freed). Note: we can't simply madvise, because we need to leave a zeroed // range (otherwise __tsan_java_move can crash if it encounters a left-over // meta objects in java heap). uptr metap = (uptr)MemToMeta(p0); uptr metasz = sz0 / kMetaRatio; UnmapOrDie((void*)metap, metasz); MmapFixedNoReserve(metap, metasz); } MBlock* MetaMap::GetBlock(uptr p) { u32 *meta = MemToMeta(p); u32 idx = *meta; for (;;) { if (idx == 0) return 0; if (idx & kFlagBlock) return block_alloc_.Map(idx & ~kFlagMask); DCHECK(idx & kFlagSync); SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); idx = s->next; } } SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock) { return GetAndLock(thr, pc, addr, write_lock, true); } SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) { return GetAndLock(0, 0, addr, write_lock, false); } SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock, bool create) { u32 *meta = MemToMeta(addr); u32 idx0 = *meta; u32 myidx = 0; SyncVar *mys = 0; for (;;) { u32 idx = idx0; for (;;) { if (idx == 0) break; if (idx & kFlagBlock) break; DCHECK(idx & kFlagSync); SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); if (s->addr == addr) { if (myidx != 0) { mys->Reset(thr->proc()); sync_alloc_.Free(&thr->proc()->sync_cache, myidx); } if (write_lock) s->mtx.Lock(); else s->mtx.ReadLock(); return s; } idx = s->next; } if (!create) return 0; if (*meta != idx0) { idx0 = *meta; continue; } if (myidx == 0) { const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed); myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache); mys = sync_alloc_.Map(myidx); mys->Init(thr, pc, addr, uid); } mys->next = idx0; if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0, myidx | kFlagSync, memory_order_release)) { if (write_lock) mys->mtx.Lock(); else mys->mtx.ReadLock(); return mys; } } } void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) { // src and dst can overlap, // there are no concurrent accesses to the regions (e.g. stop-the-world). 
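// For illustration: like memmove, the loop below must pick a copy direction.
// When dst > src the regions can overlap such that a forward copy would read
// meta cells already overwritten through dst_meta, so the pointers are
// repositioned at the last cell and walked backwards (inc = -1); otherwise a
// forward walk (inc = 1) is safe.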
CHECK_NE(src, dst); CHECK_NE(sz, 0); uptr diff = dst - src; u32 *src_meta = MemToMeta(src); u32 *dst_meta = MemToMeta(dst); u32 *src_meta_end = MemToMeta(src + sz); uptr inc = 1; if (dst > src) { src_meta = MemToMeta(src + sz) - 1; dst_meta = MemToMeta(dst + sz) - 1; src_meta_end = MemToMeta(src) - 1; inc = -1; } for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) { CHECK_EQ(*dst_meta, 0); u32 idx = *src_meta; *src_meta = 0; *dst_meta = idx; // Patch the addresses in sync objects. while (idx != 0) { if (idx & kFlagBlock) break; CHECK(idx & kFlagSync); SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask); s->addr += diff; idx = s->next; } } } void MetaMap::OnProcIdle(Processor *proc) { block_alloc_.FlushCache(&proc->block_cache); sync_alloc_.FlushCache(&proc->sync_cache); } } // namespace __tsan Index: vendor/compiler-rt/dist/lib/tsan/tests/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/lib/tsan/tests/CMakeLists.txt (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/tests/CMakeLists.txt (revision 320961) @@ -1,100 +1,101 @@ include_directories(../rtl) add_custom_target(TsanUnitTests) set_target_properties(TsanUnitTests PROPERTIES FOLDER "TSan unittests") set(TSAN_UNITTEST_CFLAGS ${TSAN_CFLAGS} ${COMPILER_RT_UNITTEST_CFLAGS} ${COMPILER_RT_GTEST_CFLAGS} -I${COMPILER_RT_SOURCE_DIR}/include -I${COMPILER_RT_SOURCE_DIR}/lib -I${COMPILER_RT_SOURCE_DIR}/lib/tsan/rtl -DGTEST_HAS_RTTI=0) if(APPLE) list(APPEND TSAN_UNITTEST_CFLAGS ${DARWIN_osx_CFLAGS}) + list(APPEND TSAN_UNITTEST_LINKFLAGS ${DARWIN_osx_LINKFLAGS}) endif() set(TSAN_RTL_HEADERS) foreach (header ${TSAN_HEADERS}) list(APPEND TSAN_RTL_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header}) endforeach() # tsan_compile(obj_list, source, arch, {headers}) macro(tsan_compile obj_list source arch) get_filename_component(basename ${source} NAME) set(output_obj "${basename}.${arch}.o") get_target_flags_for_arch(${arch} TARGET_CFLAGS) set(COMPILE_DEPS ${TSAN_RTL_HEADERS} ${ARGN}) if(NOT COMPILER_RT_STANDALONE_BUILD) list(APPEND COMPILE_DEPS gtest tsan) endif() clang_compile(${output_obj} ${source} CFLAGS ${TSAN_UNITTEST_CFLAGS} ${TARGET_CFLAGS} DEPS ${COMPILE_DEPS}) list(APPEND ${obj_list} ${output_obj}) endmacro() macro(add_tsan_unittest testname) set(TSAN_TEST_ARCH ${TSAN_SUPPORTED_ARCH}) if(APPLE) darwin_filter_host_archs(TSAN_SUPPORTED_ARCH TSAN_TEST_ARCH) endif() if(UNIX) foreach(arch ${TSAN_TEST_ARCH}) cmake_parse_arguments(TEST "" "" "SOURCES;HEADERS" ${ARGN}) set(TEST_OBJECTS) foreach(SOURCE ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}) tsan_compile(TEST_OBJECTS ${SOURCE} ${arch} ${TEST_HEADERS}) endforeach() get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS) set(TEST_DEPS ${TEST_OBJECTS}) if(NOT COMPILER_RT_STANDALONE_BUILD) list(APPEND TEST_DEPS tsan) endif() if(NOT APPLE) # FIXME: Looks like we should link TSan with just-built runtime, # and not rely on -fsanitize=thread, as these tests are essentially # unit tests. 
add_compiler_rt_test(TsanUnitTests ${testname} OBJECTS ${TEST_OBJECTS} DEPS ${TEST_DEPS} LINK_FLAGS ${TARGET_LINK_FLAGS} -fsanitize=thread -lstdc++ -lm) else() set(TSAN_TEST_RUNTIME_OBJECTS $<TARGET_OBJECTS:RTTsan_dynamic.osx.${arch}> $<TARGET_OBJECTS:RTInterception.osx.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.osx.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx.${arch}> $<TARGET_OBJECTS:RTUbsan.osx.${arch}>) set(TSAN_TEST_RUNTIME RTTsanTest.${testname}.${arch}) add_library(${TSAN_TEST_RUNTIME} STATIC ${TSAN_TEST_RUNTIME_OBJECTS}) set_target_properties(${TSAN_TEST_RUNTIME} PROPERTIES ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) list(APPEND TEST_OBJECTS lib${TSAN_TEST_RUNTIME}.a) list(APPEND TEST_DEPS ${TSAN_TEST_RUNTIME}) add_weak_symbols("ubsan" WEAK_SYMBOL_LINK_FLAGS) add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS) # Intentionally do *not* link with `-fsanitize=thread`. We already link # against a static version of the runtime, and we don't want the dynamic # one. add_compiler_rt_test(TsanUnitTests "${testname}-${arch}-Test" OBJECTS ${TEST_OBJECTS} DEPS ${TEST_DEPS} LINK_FLAGS ${TARGET_LINK_FLAGS} ${DARWIN_osx_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS} -lc++) endif() endforeach() endif() endmacro() if(COMPILER_RT_CAN_EXECUTE_TESTS) add_subdirectory(rtl) add_subdirectory(unit) endif() Index: vendor/compiler-rt/dist/lib/tsan/tests/unit/tsan_clock_test.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/tests/unit/tsan_clock_test.cc (revision 320960) +++ vendor/compiler-rt/dist/lib/tsan/tests/unit/tsan_clock_test.cc (revision 320961) @@ -1,433 +1,471 @@ //===-- tsan_clock_test.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector.
// //===----------------------------------------------------------------------===// #include "tsan_clock.h" #include "tsan_rtl.h" #include "gtest/gtest.h" #include <sys/time.h> #include <time.h> namespace __tsan { ClockCache cache; TEST(Clock, VectorBasic) { ThreadClock clk(0); ASSERT_EQ(clk.size(), 1U); clk.tick(); ASSERT_EQ(clk.size(), 1U); ASSERT_EQ(clk.get(0), 1U); - clk.set(3, clk.get(3) + 1); + clk.set(&cache, 3, clk.get(3) + 1); ASSERT_EQ(clk.size(), 4U); ASSERT_EQ(clk.get(0), 1U); ASSERT_EQ(clk.get(1), 0U); ASSERT_EQ(clk.get(2), 0U); ASSERT_EQ(clk.get(3), 1U); - clk.set(3, clk.get(3) + 1); + clk.set(&cache, 3, clk.get(3) + 1); ASSERT_EQ(clk.get(3), 2U); } TEST(Clock, ChunkedBasic) { ThreadClock vector(0); SyncClock chunked; ASSERT_EQ(vector.size(), 1U); ASSERT_EQ(chunked.size(), 0U); vector.acquire(&cache, &chunked); ASSERT_EQ(vector.size(), 1U); ASSERT_EQ(chunked.size(), 0U); vector.release(&cache, &chunked); ASSERT_EQ(vector.size(), 1U); ASSERT_EQ(chunked.size(), 1U); vector.acq_rel(&cache, &chunked); ASSERT_EQ(vector.size(), 1U); ASSERT_EQ(chunked.size(), 1U); chunked.Reset(&cache); } TEST(Clock, AcquireRelease) { ThreadClock vector1(100); vector1.tick(); SyncClock chunked; vector1.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 101U); ThreadClock vector2(0); vector2.acquire(&cache, &chunked); ASSERT_EQ(vector2.size(), 101U); ASSERT_EQ(vector2.get(0), 0U); ASSERT_EQ(vector2.get(1), 0U); ASSERT_EQ(vector2.get(99), 0U); ASSERT_EQ(vector2.get(100), 1U); chunked.Reset(&cache); } TEST(Clock, RepeatedAcquire) { ThreadClock thr1(1); thr1.tick(); ThreadClock thr2(2); thr2.tick(); SyncClock sync; thr1.ReleaseStore(&cache, &sync); thr2.acquire(&cache, &sync); thr2.acquire(&cache, &sync); sync.Reset(&cache); } TEST(Clock, ManyThreads) { SyncClock chunked; - for (unsigned i = 0; i < 100; i++) { + for (unsigned i = 0; i < 200; i++) { ThreadClock vector(0); vector.tick(); - vector.set(i, 1); + vector.set(&cache, i, i + 1); vector.release(&cache, &chunked); ASSERT_EQ(i + 1, chunked.size()); vector.acquire(&cache, &chunked); ASSERT_EQ(i + 1, vector.size()); } - for (unsigned i = 0; i < 100; i++) - ASSERT_EQ(1U, chunked.get(i)); + for (unsigned i = 0; i < 200; i++) { + printf("i=%d\n", i); + ASSERT_EQ(i + 1, chunked.get(i)); + } ThreadClock vector(1); vector.acquire(&cache, &chunked); - ASSERT_EQ(100U, vector.size()); - for (unsigned i = 0; i < 100; i++) - ASSERT_EQ(1U, vector.get(i)); + ASSERT_EQ(200U, vector.size()); + for (unsigned i = 0; i < 200; i++) + ASSERT_EQ(i + 1, vector.get(i)); chunked.Reset(&cache); } TEST(Clock, DifferentSizes) { { ThreadClock vector1(10); vector1.tick(); ThreadClock vector2(20); vector2.tick(); { SyncClock chunked; vector1.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 11U); vector2.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 21U); chunked.Reset(&cache); } { SyncClock chunked; vector2.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 21U); vector1.release(&cache, &chunked); ASSERT_EQ(chunked.size(), 21U); chunked.Reset(&cache); } { SyncClock chunked; vector1.release(&cache, &chunked); vector2.acquire(&cache, &chunked); ASSERT_EQ(vector2.size(), 21U); chunked.Reset(&cache); } { SyncClock chunked; vector2.release(&cache, &chunked); vector1.acquire(&cache, &chunked); ASSERT_EQ(vector1.size(), 21U); chunked.Reset(&cache); } } } TEST(Clock, Growth) { { ThreadClock vector(10); vector.tick(); - vector.set(5, 42); + vector.set(&cache, 5, 42); SyncClock sync; vector.release(&cache, &sync); ASSERT_EQ(sync.size(), 11U); ASSERT_EQ(sync.get(0), 0ULL); ASSERT_EQ(sync.get(1),
0ULL); ASSERT_EQ(sync.get(5), 42ULL); ASSERT_EQ(sync.get(9), 0ULL); ASSERT_EQ(sync.get(10), 1ULL); sync.Reset(&cache); } { ThreadClock vector1(10); vector1.tick(); ThreadClock vector2(20); vector2.tick(); SyncClock sync; vector1.release(&cache, &sync); vector2.release(&cache, &sync); ASSERT_EQ(sync.size(), 21U); ASSERT_EQ(sync.get(0), 0ULL); ASSERT_EQ(sync.get(10), 1ULL); ASSERT_EQ(sync.get(19), 0ULL); ASSERT_EQ(sync.get(20), 1ULL); sync.Reset(&cache); } { ThreadClock vector(100); vector.tick(); - vector.set(5, 42); - vector.set(90, 84); + vector.set(&cache, 5, 42); + vector.set(&cache, 90, 84); SyncClock sync; vector.release(&cache, &sync); ASSERT_EQ(sync.size(), 101U); ASSERT_EQ(sync.get(0), 0ULL); ASSERT_EQ(sync.get(1), 0ULL); ASSERT_EQ(sync.get(5), 42ULL); ASSERT_EQ(sync.get(60), 0ULL); ASSERT_EQ(sync.get(70), 0ULL); ASSERT_EQ(sync.get(90), 84ULL); ASSERT_EQ(sync.get(99), 0ULL); ASSERT_EQ(sync.get(100), 1ULL); sync.Reset(&cache); } { ThreadClock vector1(10); vector1.tick(); ThreadClock vector2(100); vector2.tick(); SyncClock sync; vector1.release(&cache, &sync); vector2.release(&cache, &sync); ASSERT_EQ(sync.size(), 101U); ASSERT_EQ(sync.get(0), 0ULL); ASSERT_EQ(sync.get(10), 1ULL); ASSERT_EQ(sync.get(99), 0ULL); ASSERT_EQ(sync.get(100), 1ULL); sync.Reset(&cache); + } +} + +TEST(Clock, Growth2) { + // Test clock growth for every pair of sizes: + const uptr sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66, 100, 124, 125, 126, + 127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254, 255}; + const uptr n = sizeof(sizes) / sizeof(sizes[0]); + for (uptr fi = 0; fi < n; fi++) { + for (uptr ti = fi + 1; ti < n; ti++) { + const uptr from = sizes[fi]; + const uptr to = sizes[ti]; + SyncClock sync; + ThreadClock vector(0); + for (uptr i = 0; i < from; i++) + vector.set(&cache, i, i + 1); + if (from != 0) + vector.release(&cache, &sync); + ASSERT_EQ(sync.size(), from); + for (uptr i = 0; i < from; i++) + ASSERT_EQ(sync.get(i), i + 1); + for (uptr i = 0; i < to; i++) + vector.set(&cache, i, i + 1); + vector.release(&cache, &sync); + ASSERT_EQ(sync.size(), to); + for (uptr i = 0; i < to; i++) + ASSERT_EQ(sync.get(i), i + 1); + vector.set(&cache, to + 1, to + 1); + vector.release(&cache, &sync); + ASSERT_EQ(sync.size(), to + 2); + for (uptr i = 0; i < to; i++) + ASSERT_EQ(sync.get(i), i + 1); + ASSERT_EQ(sync.get(to), 0U); + ASSERT_EQ(sync.get(to + 1), to + 1); + sync.Reset(&cache); + } } } const uptr kThreads = 4; const uptr kClocks = 4; // SimpleSyncClock and SimpleThreadClock implement the same thing as // SyncClock and ThreadClock, but in a very simple way. 
struct SimpleSyncClock { u64 clock[kThreads]; uptr size; SimpleSyncClock() { Reset(); } void Reset() { size = 0; for (uptr i = 0; i < kThreads; i++) clock[i] = 0; } bool verify(const SyncClock *other) const { for (uptr i = 0; i < min(size, other->size()); i++) { if (clock[i] != other->get(i)) return false; } for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) { if (i < size && clock[i] != 0) return false; if (i < other->size() && other->get(i) != 0) return false; } return true; } }; struct SimpleThreadClock { u64 clock[kThreads]; uptr size; unsigned tid; explicit SimpleThreadClock(unsigned tid) { this->tid = tid; size = tid + 1; for (uptr i = 0; i < kThreads; i++) clock[i] = 0; } void tick() { clock[tid]++; } void acquire(const SimpleSyncClock *src) { if (size < src->size) size = src->size; for (uptr i = 0; i < kThreads; i++) clock[i] = max(clock[i], src->clock[i]); } void release(SimpleSyncClock *dst) const { if (dst->size < size) dst->size = size; for (uptr i = 0; i < kThreads; i++) dst->clock[i] = max(dst->clock[i], clock[i]); } void acq_rel(SimpleSyncClock *dst) { acquire(dst); release(dst); } void ReleaseStore(SimpleSyncClock *dst) const { if (dst->size < size) dst->size = size; for (uptr i = 0; i < kThreads; i++) dst->clock[i] = clock[i]; } bool verify(const ThreadClock *other) const { for (uptr i = 0; i < min(size, other->size()); i++) { if (clock[i] != other->get(i)) return false; } for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) { if (i < size && clock[i] != 0) return false; if (i < other->size() && other->get(i) != 0) return false; } return true; } }; static bool ClockFuzzer(bool printing) { // Create kThreads thread clocks. SimpleThreadClock *thr0[kThreads]; ThreadClock *thr1[kThreads]; unsigned reused[kThreads]; for (unsigned i = 0; i < kThreads; i++) { reused[i] = 0; thr0[i] = new SimpleThreadClock(i); thr1[i] = new ThreadClock(i, reused[i]); } // Create kClocks sync clocks. SimpleSyncClock *sync0[kClocks]; SyncClock *sync1[kClocks]; for (unsigned i = 0; i < kClocks; i++) { sync0[i] = new SimpleSyncClock(); sync1[i] = new SyncClock(); } // Do N random operations (acquire, release, etc) and compare results // for SimpleThread/SyncClock and real Thread/SyncClock. 
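// For illustration: the loop below is differential testing. Each random
// operation is applied both to the reference model above (SimpleThreadClock /
// SimpleSyncClock, plain O(kThreads) arrays with textbook vector-clock
// semantics) and to the real ThreadClock/SyncClock, and verify() compares
// them element-wise; any divergence means a bug in the optimized clocks, and
// TEST(Clock, Fuzzer) replays the failing seed with logging enabled.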
for (int i = 0; i < 10000; i++) { unsigned tid = rand() % kThreads; unsigned cid = rand() % kClocks; thr0[tid]->tick(); thr1[tid]->tick(); switch (rand() % 6) { case 0: if (printing) printf("acquire thr%d <- clk%d\n", tid, cid); thr0[tid]->acquire(sync0[cid]); thr1[tid]->acquire(&cache, sync1[cid]); break; case 1: if (printing) printf("release thr%d -> clk%d\n", tid, cid); thr0[tid]->release(sync0[cid]); thr1[tid]->release(&cache, sync1[cid]); break; case 2: if (printing) printf("acq_rel thr%d <> clk%d\n", tid, cid); thr0[tid]->acq_rel(sync0[cid]); thr1[tid]->acq_rel(&cache, sync1[cid]); break; case 3: if (printing) printf("rel_str thr%d >> clk%d\n", tid, cid); thr0[tid]->ReleaseStore(sync0[cid]); thr1[tid]->ReleaseStore(&cache, sync1[cid]); break; case 4: if (printing) printf("reset clk%d\n", cid); sync0[cid]->Reset(); sync1[cid]->Reset(&cache); break; case 5: if (printing) printf("reset thr%d\n", tid); u64 epoch = thr0[tid]->clock[tid] + 1; reused[tid]++; delete thr0[tid]; thr0[tid] = new SimpleThreadClock(tid); thr0[tid]->clock[tid] = epoch; delete thr1[tid]; thr1[tid] = new ThreadClock(tid, reused[tid]); thr1[tid]->set(epoch); break; } if (printing) { for (unsigned i = 0; i < kThreads; i++) { printf("thr%d: ", i); thr1[i]->DebugDump(printf); printf("\n"); } for (unsigned i = 0; i < kClocks; i++) { printf("clk%d: ", i); sync1[i]->DebugDump(printf); printf("\n"); } printf("\n"); } if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) { if (!printing) return false; printf("differs with model:\n"); for (unsigned i = 0; i < kThreads; i++) { printf("thr%d: clock=[", i); for (uptr j = 0; j < thr0[i]->size; j++) printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]); printf("]\n"); } for (unsigned i = 0; i < kClocks; i++) { printf("clk%d: clock=[", i); for (uptr j = 0; j < sync0[i]->size; j++) printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]); printf("]\n"); } return false; } } for (unsigned i = 0; i < kClocks; i++) { sync1[i]->Reset(&cache); } return true; } TEST(Clock, Fuzzer) { struct timeval tv; gettimeofday(&tv, NULL); int seed = tv.tv_sec + tv.tv_usec; printf("seed=%d\n", seed); srand(seed); if (!ClockFuzzer(false)) { // Redo the test with the same seed, but logging operations. srand(seed); ClockFuzzer(true); ASSERT_TRUE(false); } } } // namespace __tsan Index: vendor/compiler-rt/dist/test/asan/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/test/asan/CMakeLists.txt (revision 320960) +++ vendor/compiler-rt/dist/test/asan/CMakeLists.txt (revision 320961) @@ -1,178 +1,187 @@ set(ASAN_LIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(ASAN_TESTSUITES) set(ASAN_DYNAMIC_TESTSUITES) # Before Windows 8 (CMAKE_SYSTEM_VERSION 6.2), reserving large regions of shadow # memory allocated physical memory for page tables, which made it very # unreliable. Remove the asan tests from check-all in this configuration. 
set(SHADOW_MAPPING_UNRELIABLE FALSE) if(OS_NAME MATCHES "Windows" AND CMAKE_SIZEOF_VOID_P EQUAL 8 AND ${CMAKE_SYSTEM_VERSION} LESS 6.2) set(SHADOW_MAPPING_UNRELIABLE TRUE) message(WARNING "Disabling ASan tests because they are unreliable on Windows 7 and earlier") endif() if (SHADOW_MAPPING_UNRELIABLE) set(EXCLUDE_FROM_ALL TRUE) endif() macro(get_bits_for_arch arch bits) if (${arch} MATCHES "i386|i686|arm|mips|mipsel") set(${bits} 32) elseif (${arch} MATCHES "x86_64|powerpc64|powerpc64le|aarch64|mips64|mips64el|s390x") set(${bits} 64) else() message(FATAL_ERROR "Unknown target architecture: ${arch}") endif() endmacro() set(ASAN_TEST_DEPS ${SANITIZER_COMMON_LIT_TEST_DEPS}) if(NOT COMPILER_RT_STANDALONE_BUILD) list(APPEND ASAN_TEST_DEPS asan) if(NOT APPLE AND COMPILER_RT_HAS_LLD) list(APPEND ASAN_TEST_DEPS lld ) endif() endif() set(ASAN_DYNAMIC_TEST_DEPS ${ASAN_TEST_DEPS}) set(ASAN_TEST_ARCH ${ASAN_SUPPORTED_ARCH}) if(APPLE) darwin_filter_host_archs(ASAN_SUPPORTED_ARCH ASAN_TEST_ARCH) endif() foreach(arch ${ASAN_TEST_ARCH}) if(ANDROID) set(ASAN_TEST_TARGET_ARCH ${arch}-android) else() set(ASAN_TEST_TARGET_ARCH ${arch}) endif() set(ASAN_TEST_IOS "0") pythonize_bool(ASAN_TEST_IOS) set(ASAN_TEST_IOSSIM "0") pythonize_bool(ASAN_TEST_IOSSIM) string(TOLOWER "-${arch}-${OS_NAME}" ASAN_TEST_CONFIG_SUFFIX) get_bits_for_arch(${arch} ASAN_TEST_BITS) get_test_cc_for_arch(${arch} ASAN_TEST_TARGET_CC ASAN_TEST_TARGET_CFLAGS) if(ANDROID) set(ASAN_TEST_DYNAMIC True) else() set(ASAN_TEST_DYNAMIC False) endif() string(TOUPPER ${arch} ARCH_UPPER_CASE) set(CONFIG_NAME ${ARCH_UPPER_CASE}${OS_NAME}Config) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg ) list(APPEND ASAN_TESTSUITES ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}) if(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME) string(TOLOWER "-${arch}-${OS_NAME}-dynamic" ASAN_TEST_CONFIG_SUFFIX) set(ASAN_TEST_DYNAMIC True) set(CONFIG_NAME ${ARCH_UPPER_CASE}${OS_NAME}DynamicConfig) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg) list(APPEND ASAN_DYNAMIC_TESTSUITES ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}) endif() endforeach() # iOS and iOS simulator test suites -# These are not added into "check-all", in order to run these tests, you have to -# manually call (from the build directory). They also require that an extra env +# These are not added into "check-all", in order to run these tests, use +# "check-asan-iossim-x86_64" and similar. 
They also require an extra env # variable to select which iOS device or simulator to use, e.g.: -# $ SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER=BBE44C1C-8AAA-4000-8D06-91C89ED58172 -# $ ./bin/llvm-lit ./projects/compiler-rt/test/asan/IOSSimI386Config +# SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER="iPhone 6" if(APPLE) + set(EXCLUDE_FROM_ALL ON) + set(ASAN_TEST_TARGET_CC ${COMPILER_RT_TEST_COMPILER}) set(ASAN_TEST_IOS "1") pythonize_bool(ASAN_TEST_IOS) set(ASAN_TEST_DYNAMIC True) foreach(arch ${DARWIN_iossim_ARCHS}) set(ASAN_TEST_IOSSIM "1") pythonize_bool(ASAN_TEST_IOSSIM) set(ASAN_TEST_TARGET_ARCH ${arch}) set(ASAN_TEST_TARGET_CFLAGS "-arch ${arch} -isysroot ${DARWIN_iossim_SYSROOT} ${COMPILER_RT_TEST_COMPILER_CFLAGS}") set(ASAN_TEST_CONFIG_SUFFIX "-${arch}-iossim") get_bits_for_arch(${arch} ASAN_TEST_BITS) string(TOUPPER ${arch} ARCH_UPPER_CASE) set(CONFIG_NAME "IOSSim${ARCH_UPPER_CASE}Config") configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg ) + add_lit_testsuite(check-asan-iossim-${arch} "AddressSanitizer iOS Simulator ${arch} tests" + ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/ + DEPENDS ${ASAN_TEST_DEPS}) endforeach() foreach (arch ${DARWIN_ios_ARCHS}) set(ASAN_TEST_IOSSIM "0") pythonize_bool(ASAN_TEST_IOSSIM) set(ASAN_TEST_TARGET_ARCH ${arch}) set(ASAN_TEST_TARGET_CFLAGS "-arch ${arch} -isysroot ${DARWIN_ios_SYSROOT} ${COMPILER_RT_TEST_COMPILER_CFLAGS}") set(ASAN_TEST_CONFIG_SUFFIX "-${arch}-ios") get_bits_for_arch(${arch} ASAN_TEST_BITS) string(TOUPPER ${arch} ARCH_UPPER_CASE) set(CONFIG_NAME "IOS${ARCH_UPPER_CASE}Config") configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg ) + add_lit_testsuite(check-asan-ios-${arch} "AddressSanitizer iOS ${arch} tests" + ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/ + DEPENDS ${ASAN_TEST_DEPS}) endforeach() + + set(EXCLUDE_FROM_ALL OFF) endif() # Add unit tests. if(COMPILER_RT_INCLUDE_TESTS) set(ASAN_TEST_DYNAMIC False) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg) if(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME) set(ASAN_TEST_DYNAMIC True) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/Unit/dynamic/lit.site.cfg) endif() # FIXME: support unit tests in the android test runner if (NOT ANDROID) list(APPEND ASAN_TEST_DEPS AsanUnitTests) list(APPEND ASAN_TESTSUITES ${CMAKE_CURRENT_BINARY_DIR}/Unit) if(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME) list(APPEND ASAN_DYNAMIC_TEST_DEPS AsanDynamicUnitTests) list(APPEND ASAN_DYNAMIC_TESTSUITES ${CMAKE_CURRENT_BINARY_DIR}/Unit/dynamic) endif() endif() endif() add_lit_testsuite(check-asan "Running the AddressSanitizer tests" ${ASAN_TESTSUITES} DEPENDS ${ASAN_TEST_DEPS}) set_target_properties(check-asan PROPERTIES FOLDER "Compiler-RT Misc") if(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME) # Add the check-asan-dynamic target. It is a part of check-all only on Windows, # where we want to always test both dynamic and static runtime. if(NOT OS_NAME MATCHES "Windows") set(EXCLUDE_FROM_ALL TRUE) endif() add_lit_testsuite(check-asan-dynamic "Running the AddressSanitizer tests with dynamic runtime" ${ASAN_DYNAMIC_TESTSUITES} DEPENDS ${ASAN_DYNAMIC_TEST_DEPS}) set_target_properties(check-asan-dynamic PROPERTIES FOLDER "Compiler-RT Misc") if(NOT OS_NAME MATCHES "Windows") set(EXCLUDE_FROM_ALL FALSE) endif() endif() # Reset EXCLUDE_FROM_ALL to its initial value.
if (SHADOW_MAPPING_UNRELIABLE) set(EXCLUDE_FROM_ALL FALSE) endif() Index: vendor/compiler-rt/dist/test/asan/TestCases/Posix/allow_user_segv.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Posix/allow_user_segv.cc (revision 320960) +++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/allow_user_segv.cc (revision 320961) @@ -1,88 +1,87 @@ // Regression test for // https://code.google.com/p/address-sanitizer/issues/detail?id=180 -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0 -// RUN: %clangxx_asan -O2 %s -o %t && %env_asan_opts=handle_segv=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0 +// clang-format off +// RUN: %clangxx_asan -O0 %s -o %t -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK1 -// RUN: %clangxx_asan -O2 %s -o %t && %env_asan_opts=handle_segv=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK1 +// RUN: %env_asan_opts=handle_segv=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0 +// RUN: %env_asan_opts=handle_segv=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK1 +// RUN: %env_asan_opts=handle_segv=2 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=2 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 -// RUN: %clangxx_asan -O2 %s -o %t && %env_asan_opts=handle_segv=2 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 +// RUN: %env_asan_opts=handle_segv=0:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0 +// RUN: %env_asan_opts=handle_segv=1:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 +// RUN: %env_asan_opts=handle_segv=2:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=0:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0 -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=1:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=2:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 - -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=0:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0 -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=1:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK1 -// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=2:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 +// RUN: %env_asan_opts=handle_segv=0:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0 +// RUN: %env_asan_opts=handle_segv=1:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK1 +// RUN: %env_asan_opts=handle_segv=2:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 +// clang-format on #include <signal.h> #include <stdio.h> #include <stdlib.h> struct sigaction original_sigaction_sigbus; struct sigaction original_sigaction_sigsegv; void User_OnSIGSEGV(int signum, siginfo_t *siginfo, void *context) { fprintf(stderr, "User sigaction called\n"); struct sigaction original_sigaction; if (signum == SIGBUS) original_sigaction = original_sigaction_sigbus; else if (signum == SIGSEGV)
original_sigaction = original_sigaction_sigsegv; else { printf("Invalid signum"); exit(1); } if (original_sigaction.sa_flags & SA_SIGINFO) { if (original_sigaction.sa_sigaction) original_sigaction.sa_sigaction(signum, siginfo, context); } else { if (original_sigaction.sa_handler) original_sigaction.sa_handler(signum); } exit(1); } int DoSEGV() { volatile int *x = 0; return *x; } bool InstallHandler(int signum, struct sigaction *original_sigaction) { struct sigaction user_sigaction; user_sigaction.sa_sigaction = User_OnSIGSEGV; user_sigaction.sa_flags = SA_SIGINFO; if (sigaction(signum, &user_sigaction, original_sigaction)) { perror("sigaction"); return false; } return true; } int main() { // Let's install handlers for both SIGSEGV and SIGBUS, since pre-Yosemite // 32-bit Darwin triggers SIGBUS instead. if (InstallHandler(SIGSEGV, &original_sigaction_sigsegv) && InstallHandler(SIGBUS, &original_sigaction_sigbus)) { fprintf(stderr, "User sigaction installed\n"); } return DoSEGV(); } // CHECK0-NOT: ASAN:DEADLYSIGNAL // CHECK0-NOT: AddressSanitizer: SEGV on unknown address // CHECK0: User sigaction installed // CHECK0-NEXT: User sigaction called // CHECK1: User sigaction installed // CHECK1-NEXT: User sigaction called // CHECK1-NEXT: ASAN:DEADLYSIGNAL // CHECK1: AddressSanitizer: SEGV on unknown address // CHECK2-NOT: User sigaction called // CHECK2: User sigaction installed // CHECK2-NEXT: ASAN:DEADLYSIGNAL // CHECK2: AddressSanitizer: SEGV on unknown address Index: vendor/compiler-rt/dist/test/profile/Linux/counter_promo_nest.c =================================================================== --- vendor/compiler-rt/dist/test/profile/Linux/counter_promo_nest.c (nonexistent) +++ vendor/compiler-rt/dist/test/profile/Linux/counter_promo_nest.c (revision 320961) @@ -0,0 +1,48 @@ +// RUN: rm -fr %t.promo.prof +// RUN: rm -fr %t.nopromo.prof +// RUN: %clang_pgogen=%t.promo.prof/ -o %t.promo.gen -O2 %s +// RUN: %clang_pgogen=%t.promo.prof/ -o %t.promo.gen.ll -emit-llvm -S -O2 %s +// RUN: cat %t.promo.gen.ll | FileCheck --check-prefix=PROMO %s +// RUN: %run %t.promo.gen +// RUN: llvm-profdata merge -o %t.promo.profdata %t.promo.prof/ +// RUN: llvm-profdata show --counts --all-functions %t.promo.profdata > %t.promo.dump +// RUN: %clang_pgogen=%t.nopromo.prof/ -mllvm -do-counter-promotion=false -o %t.nopromo.gen -O2 %s +// RUN: %run %t.nopromo.gen +// RUN: llvm-profdata merge -o %t.nopromo.profdata %t.nopromo.prof/ +// RUN: llvm-profdata show --counts --all-functions %t.nopromo.profdata > %t.nopromo.dump +// RUN: diff %t.promo.profdata %t.nopromo.profdata +int g; +__attribute__((noinline)) void bar() { + g++; +} + +extern int printf(const char*,...); + +int c = 10; + +int main() +// PROMO-LABEL: @main +// PROMO: load{{.*}}@__profc_main{{.*}} +// PROMO-NEXT: add +// PROMO-NEXT: store{{.*}}@__profc_main{{.*}} +// PROMO-NEXT: load{{.*}}@__profc_main{{.*}} +// PROMO-NEXT: add +// PROMO-NEXT: store{{.*}}@__profc_main{{.*}} +{ + int i, j, k; + + g = 0; + for (i = 0; i < c; i++) + for (j = 0; j < c; j++) + for (k = 0; k < c; k++) + bar(); + + for (i = 0; i < c; i++) + for (j = 0; j < 10*c; j++) + bar(); + + for (i = 0; i < 100*c; i++) + bar(); + + return 0; +} Property changes on: vendor/compiler-rt/dist/test/profile/Linux/counter_promo_nest.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \
No newline at end of property Index: vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_run.py =================================================================== --- vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_run.py (revision 320960) +++ vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_run.py (revision 320961) @@ -1,17 +1,18 @@ #!/usr/bin/python import os, sys, subprocess if not "SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER" in os.environ: raise EnvironmentError("Specify SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER to select which simulator to use.") device_id = os.environ["SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER"] -if "ASAN_OPTIONS" in os.environ: - os.environ["SIMCTL_CHILD_ASAN_OPTIONS"] = os.environ["ASAN_OPTIONS"] +for e in ["ASAN_OPTIONS", "TSAN_OPTIONS"]: + if e in os.environ: + os.environ["SIMCTL_CHILD_" + e] = os.environ[e] exitcode = subprocess.call(["xcrun", "simctl", "spawn", device_id] + sys.argv[1:]) if exitcode > 125: exitcode = 126 sys.exit(exitcode) Index: vendor/compiler-rt/dist/test/tsan/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/test/tsan/CMakeLists.txt (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/CMakeLists.txt (revision 320961) @@ -1,49 +1,103 @@ +set(TSAN_LIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) + set(TSAN_TEST_DEPS ${SANITIZER_COMMON_LIT_TEST_DEPS}) if(${COMPILER_RT_DEFAULT_TARGET_ARCH} MATCHES "x86_64") list(APPEND TSAN_TEST_DEPS GotsanRuntimeCheck) endif() if(NOT COMPILER_RT_STANDALONE_BUILD) list(APPEND TSAN_TEST_DEPS tsan) endif() if(COMPILER_RT_HAS_LIBCXX_SOURCES AND COMPILER_RT_TEST_COMPILER_ID STREQUAL "Clang" AND NOT APPLE) list(APPEND TSAN_TEST_DEPS libcxx_tsan) set(TSAN_HAS_LIBCXX True) else() set(TSAN_HAS_LIBCXX False) endif() set(TSAN_TESTSUITES) set(TSAN_TEST_ARCH ${TSAN_SUPPORTED_ARCH}) if(APPLE) darwin_filter_host_archs(TSAN_SUPPORTED_ARCH TSAN_TEST_ARCH) endif() foreach(arch ${TSAN_TEST_ARCH}) + set(TSAN_TEST_IOS "0") + pythonize_bool(TSAN_TEST_IOS) + set(TSAN_TEST_IOSSIM "0") + pythonize_bool(TSAN_TEST_IOSSIM) + set(TSAN_TEST_TARGET_ARCH ${arch}) string(TOLOWER "-${arch}" TSAN_TEST_CONFIG_SUFFIX) get_test_cc_for_arch(${arch} TSAN_TEST_TARGET_CC TSAN_TEST_TARGET_CFLAGS) string(TOUPPER ${arch} ARCH_UPPER_CASE) set(CONFIG_NAME ${ARCH_UPPER_CASE}Config) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg) list(APPEND TSAN_TESTSUITES ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}) endforeach() + +# iOS and iOS simulator test suites +# These are not added into "check-all", in order to run these tests, use +# "check-tsan-iossim-x86_64" and similar. 
They also require an extra environment +# variable to select which iOS device or simulator to use, e.g.: +# SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER="iPhone 6" +if(APPLE) + set(EXCLUDE_FROM_ALL ON) + + set(TSAN_TEST_TARGET_CC ${COMPILER_RT_TEST_COMPILER}) + set(TSAN_TEST_IOS "1") + pythonize_bool(TSAN_TEST_IOS) + + set(arch "x86_64") + set(TSAN_TEST_IOSSIM "1") + pythonize_bool(TSAN_TEST_IOSSIM) + set(TSAN_TEST_TARGET_ARCH ${arch}) + set(TSAN_TEST_TARGET_CFLAGS "-arch ${arch} -isysroot ${DARWIN_iossim_SYSROOT} ${COMPILER_RT_TEST_COMPILER_CFLAGS}") + set(TSAN_TEST_CONFIG_SUFFIX "-${arch}-iossim") + string(TOUPPER ${arch} ARCH_UPPER_CASE) + set(CONFIG_NAME "IOSSim${ARCH_UPPER_CASE}Config") + configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg + ) + add_lit_testsuite(check-tsan-iossim-${arch} "ThreadSanitizer iOS Simulator ${arch} tests" + ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/ + DEPENDS ${TSAN_TEST_DEPS}) + + set(arch "arm64") + set(TSAN_TEST_IOSSIM "0") + pythonize_bool(TSAN_TEST_IOSSIM) + set(TSAN_TEST_TARGET_ARCH ${arch}) + set(TSAN_TEST_TARGET_CFLAGS "-arch ${arch} -isysroot ${DARWIN_ios_SYSROOT} ${COMPILER_RT_TEST_COMPILER_CFLAGS}") + set(TSAN_TEST_CONFIG_SUFFIX "-${arch}-ios") + string(TOUPPER ${arch} ARCH_UPPER_CASE) + set(CONFIG_NAME "IOS${ARCH_UPPER_CASE}Config") + configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg + ) + add_lit_testsuite(check-tsan-ios-${arch} "ThreadSanitizer iOS ${arch} tests" + ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/ + DEPENDS ${TSAN_TEST_DEPS}) + + set(EXCLUDE_FROM_ALL OFF) +endif() if(COMPILER_RT_INCLUDE_TESTS) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg) list(APPEND TSAN_TEST_DEPS TsanUnitTests) list(APPEND TSAN_TESTSUITES ${CMAKE_CURRENT_BINARY_DIR}/Unit) endif() add_lit_testsuite(check-tsan "Running ThreadSanitizer tests" ${TSAN_TESTSUITES} DEPENDS ${TSAN_TEST_DEPS}) set_target_properties(check-tsan PROPERTIES FOLDER "Compiler-RT Tests") Index: vendor/compiler-rt/dist/test/tsan/Darwin/dlopen.cc =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/dlopen.cc (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/Darwin/dlopen.cc (revision 320961) @@ -1,41 +1,43 @@ // Checks that on OS X 10.11+ (where we do not re-exec anymore, because // interceptors work automatically), dlopen'ing a TSanified library from a // non-instrumented program exits with a user-friendly message. // REQUIRES: osx-autointerception +// XFAIL: ios + // RUN: %clangxx_tsan %s -o %t.so -shared -DSHARED_LIB // RUN: %clangxx_tsan -fno-sanitize=thread %s -o %t // RUN: TSAN_DYLIB_PATH=`%clangxx_tsan %s -### 2>&1 \ // RUN: | grep "libclang_rt.tsan_osx_dynamic.dylib" \ // RUN: | sed -e 's/.*"\(.*libclang_rt.tsan_osx_dynamic.dylib\)".*/\1/'` // Launching a non-instrumented binary that dlopen's an instrumented library should fail. // RUN: not %run %t %t.so 2>&1 | FileCheck %s --check-prefix=CHECK-FAIL // Launching a non-instrumented binary with an explicit DYLD_INSERT_LIBRARIES should work.
// RUN: DYLD_INSERT_LIBRARIES=$TSAN_DYLIB_PATH %run %t %t.so 2>&1 | FileCheck %s #include <dlfcn.h> #include <stdio.h> #include <stdlib.h> #if defined(SHARED_LIB) extern "C" void foo() { fprintf(stderr, "Hello world.\n"); } #else // defined(SHARED_LIB) int main(int argc, char *argv[]) { void *handle = dlopen(argv[1], RTLD_NOW); fprintf(stderr, "handle = %p\n", handle); void (*foo)() = (void (*)())dlsym(handle, "foo"); fprintf(stderr, "foo = %p\n", foo); foo(); } #endif // defined(SHARED_LIB) // CHECK: Hello world. // CHECK-NOT: ERROR: Interceptors are not working. // CHECK-FAIL-NOT: Hello world. // CHECK-FAIL: ERROR: Interceptors are not working. Index: vendor/compiler-rt/dist/test/tsan/Darwin/ignore-noninstrumented.mm =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/ignore-noninstrumented.mm (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/Darwin/ignore-noninstrumented.mm (revision 320961) @@ -1,53 +1,53 @@ -// Check that ignore_noninstrumented_modules=1 supresses races from system libraries on OS X. +// Check that ignore_noninstrumented_modules=1 suppresses races from system libraries on OS X. // RUN: %clang_tsan %s -o %t -framework Foundation // Check that without the flag, there are false positives. // RUN: %env_tsan_opts=ignore_noninstrumented_modules=0 %deflake %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-RACE // With ignore_noninstrumented_modules=1, no races are reported. // RUN: %env_tsan_opts=ignore_noninstrumented_modules=1 %run %t 2>&1 | FileCheck %s // With ignore_noninstrumented_modules=1, races in user's code are still reported. // RUN: %env_tsan_opts=ignore_noninstrumented_modules=1 %deflake %run %t race 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-RACE #import <Foundation/Foundation.h> #import "../test.h" char global_buf[64]; void *Thread1(void *x) { barrier_wait(&barrier); strcpy(global_buf, "hello world"); return NULL; } void *Thread2(void *x) { strcpy(global_buf, "world hello"); barrier_wait(&barrier); return NULL; } int main(int argc, char *argv[]) { fprintf(stderr, "Hello world.\n"); // NSUserDefaults uses XPC which triggers the false positive. NSDictionary *d = [[NSUserDefaults standardUserDefaults] dictionaryRepresentation]; fprintf(stderr, "d = %p\n", d); if (argc > 1 && strcmp(argv[1], "race") == 0) { barrier_init(&barrier, 2); pthread_t t[2]; pthread_create(&t[0], NULL, Thread1, NULL); pthread_create(&t[1], NULL, Thread2, NULL); pthread_join(t[0], NULL); pthread_join(t[1], NULL); } fprintf(stderr, "Done.\n"); } // CHECK: Hello world. // CHECK-RACE: SUMMARY: ThreadSanitizer: data race // CHECK: Done. Index: vendor/compiler-rt/dist/test/tsan/Darwin/ignored-interceptors.mm =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/ignored-interceptors.mm (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/Darwin/ignored-interceptors.mm (revision 320961) @@ -1,55 +1,55 @@ -// Check that ignore_interceptors_accesses=1 supresses reporting races from +// Check that ignore_interceptors_accesses=1 suppresses reporting races from // system libraries on OS X. There are currently false positives coming from // libxpc, libdispatch, CoreFoundation and others, because these libraries use // TSan-invisible atomics as synchronization. // RUN: %clang_tsan %s -o %t -framework Foundation // Check that without the flag, there are false positives.
// RUN: %env_tsan_opts=ignore_noninstrumented_modules=0 %deflake %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-RACE // With ignore_interceptors_accesses=1, no races are reported. // RUN: %env_tsan_opts=ignore_noninstrumented_modules=0:ignore_interceptors_accesses=1 %run %t 2>&1 | FileCheck %s // With ignore_interceptors_accesses=1, races in user's code are still reported. // RUN: %env_tsan_opts=ignore_noninstrumented_modules=0:ignore_interceptors_accesses=1 %deflake %run %t race 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-RACE #import <Foundation/Foundation.h> #import "../test.h" long global; void *Thread1(void *x) { barrier_wait(&barrier); global = 42; return NULL; } void *Thread2(void *x) { global = 43; barrier_wait(&barrier); return NULL; } int main(int argc, char *argv[]) { fprintf(stderr, "Hello world.\n"); // NSUserDefaults uses XPC which triggers the false positive. NSDictionary *d = [[NSUserDefaults standardUserDefaults] dictionaryRepresentation]; if (argc > 1 && strcmp(argv[1], "race") == 0) { barrier_init(&barrier, 2); pthread_t t[2]; pthread_create(&t[0], NULL, Thread1, NULL); pthread_create(&t[1], NULL, Thread2, NULL); pthread_join(t[0], NULL); pthread_join(t[1], NULL); } fprintf(stderr, "Done.\n"); } // CHECK: Hello world. // CHECK-RACE: SUMMARY: ThreadSanitizer: data race // CHECK: Done. Index: vendor/compiler-rt/dist/test/tsan/Darwin/osspinlock-norace.cc =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/osspinlock-norace.cc (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/Darwin/osspinlock-norace.cc (revision 320961) @@ -1,30 +1,34 @@ // RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s -#include <libkern/OSAtomic.h> #include <pthread.h> +#include <stdint.h> #include <stdio.h> + +typedef int32_t OSSpinLock; +extern "C" void OSSpinLockLock(OSSpinLock *); +extern "C" void OSSpinLockUnlock(OSSpinLock *); int Global; OSSpinLock lock; void *Thread(void *x) { OSSpinLockLock(&lock); Global++; OSSpinLockUnlock(&lock); return NULL; } int main() { fprintf(stderr, "Hello world.\n"); pthread_t t[2]; pthread_create(&t[0], NULL, Thread, NULL); pthread_create(&t[1], NULL, Thread, NULL); pthread_join(t[0], NULL); pthread_join(t[1], NULL); fprintf(stderr, "Done.\n"); } // CHECK: Hello world. // CHECK: Done.
// CHECK-NOT: WARNING: ThreadSanitizer Index: vendor/compiler-rt/dist/test/tsan/Darwin/signals-blocked.cc =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/signals-blocked.cc (nonexistent) +++ vendor/compiler-rt/dist/test/tsan/Darwin/signals-blocked.cc (revision 320961) @@ -0,0 +1,75 @@ +// RUN: %clangxx_tsan %s -o %t && %run %t 2>&1 | FileCheck %s + +#include <errno.h> +#include <pthread.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/time.h> +#include <unistd.h> + +volatile bool signal_delivered; + +static void handler(int sig) { + if (sig == SIGALRM) + signal_delivered = true; +} + +static void* thr(void *p) { + sigset_t sigset; + sigemptyset(&sigset); + sigaddset(&sigset, SIGALRM); + int ret = pthread_sigmask(SIG_UNBLOCK, &sigset, NULL); + if (ret) abort(); + + struct sigaction act = {}; + act.sa_handler = &handler; + if (sigaction(SIGALRM, &act, 0)) { + perror("sigaction"); + exit(1); + } + + itimerval t; + t.it_value.tv_sec = 0; + t.it_value.tv_usec = 10000; + t.it_interval = t.it_value; + if (setitimer(ITIMER_REAL, &t, 0)) { + perror("setitimer"); + exit(1); + } + + while (!signal_delivered) { + usleep(1000); + } + + t.it_value.tv_usec = 0; + if (setitimer(ITIMER_REAL, &t, 0)) { + perror("setitimer"); + exit(1); + } + + fprintf(stderr, "SIGNAL DELIVERED\n"); + + return 0; +} + +int main() { + sigset_t sigset; + sigemptyset(&sigset); + sigaddset(&sigset, SIGALRM); + int ret = pthread_sigmask(SIG_BLOCK, &sigset, NULL); + if (ret) abort(); + + pthread_t th; + pthread_create(&th, 0, thr, 0); + pthread_join(th, 0); + + fprintf(stderr, "DONE\n"); + return 0; +} + +// CHECK-NOT: WARNING: ThreadSanitizer: +// CHECK: SIGNAL DELIVERED +// CHECK: DONE +// CHECK-NOT: WARNING: ThreadSanitizer: Property changes on: vendor/compiler-rt/dist/test/tsan/Darwin/signals-blocked.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/tsan/Darwin/xpc-cancel.mm =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/xpc-cancel.mm (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/Darwin/xpc-cancel.mm (revision 320961) @@ -1,39 +1,39 @@ // RUN: %clang_tsan %s -o %t -framework Foundation // RUN: %run %t 2>&1 | FileCheck %s -// XFAIL: ios +// UNSUPPORTED: ios #import <Foundation/Foundation.h> #import <xpc/xpc.h> long global; int main(int argc, const char *argv[]) { fprintf(stderr, "Hello world.\n"); dispatch_queue_t server_q = dispatch_queue_create("server.queue", DISPATCH_QUEUE_CONCURRENT); xpc_connection_t server_conn = xpc_connection_create(NULL, server_q); xpc_connection_set_event_handler(server_conn, ^(xpc_object_t client) { if (client == XPC_ERROR_CONNECTION_INTERRUPTED || client == XPC_ERROR_CONNECTION_INVALID) { global = 43; dispatch_async(dispatch_get_main_queue(), ^{ CFRunLoopStop(CFRunLoopGetCurrent()); }); } }); xpc_connection_resume(server_conn); global = 42; xpc_connection_cancel(server_conn); CFRunLoopRun(); fprintf(stderr, "Done.\n"); } // CHECK: Hello world. // CHECK-NOT: WARNING: ThreadSanitizer // CHECK: Done.
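The signals-blocked.cc test added above hinges on per-thread signal masks: a mask installed in the main thread is inherited across pthread_create, so the spawned thread must unblock SIGALRM itself before it can take delivery. A minimal standalone sketch of that pattern (an editor's addition assuming only POSIX APIs; not part of the patch):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *worker(void *arg) {
  (void)arg;
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGALRM);
  // Undo the mask inherited from the creating thread; only after this can
  // the kernel pick this thread to run a SIGALRM handler.
  pthread_sigmask(SIG_UNBLOCK, &set, NULL);
  fprintf(stderr, "worker is now eligible for SIGALRM delivery\n");
  return NULL;
}

int main() {
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGALRM);
  pthread_sigmask(SIG_BLOCK, &set, NULL);  // main thread blocks SIGALRM
  pthread_t th;
  pthread_create(&th, NULL, worker, NULL);
  pthread_join(th, NULL);
  return 0;
}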
Index: vendor/compiler-rt/dist/test/tsan/Darwin/xpc-race.mm =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/xpc-race.mm (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/Darwin/xpc-race.mm (revision 320961) @@ -1,83 +1,83 @@ // RUN: %clang_tsan %s -o %t -framework Foundation // RUN: %deflake %run %t 2>&1 | FileCheck %s -// XFAIL: ios +// UNSUPPORTED: ios #import <Foundation/Foundation.h> #import <xpc/xpc.h> #import "../test.h" long global; long received_msgs; xpc_connection_t server_conn; xpc_connection_t client_conns[2]; int main(int argc, const char *argv[]) { @autoreleasepool { NSLog(@"Hello world."); barrier_init(&barrier, 2); dispatch_queue_t server_q = dispatch_queue_create("server.queue", DISPATCH_QUEUE_CONCURRENT); server_conn = xpc_connection_create(NULL, server_q); xpc_connection_set_event_handler(server_conn, ^(xpc_object_t client) { NSLog(@"server event handler, client = %@", client); if (client == XPC_ERROR_CONNECTION_INTERRUPTED || client == XPC_ERROR_CONNECTION_INVALID) { return; } xpc_connection_set_event_handler(client, ^(xpc_object_t object) { NSLog(@"received message: %@", object); barrier_wait(&barrier); global = 42; dispatch_sync(dispatch_get_main_queue(), ^{ received_msgs++; if (received_msgs >= 2) { xpc_connection_cancel(client_conns[0]); xpc_connection_cancel(client_conns[1]); xpc_connection_cancel(server_conn); CFRunLoopStop(CFRunLoopGetCurrent()); } }); }); xpc_connection_resume(client); }); xpc_connection_resume(server_conn); xpc_endpoint_t endpoint = xpc_endpoint_create(server_conn); for (int i = 0; i < 2; i++) { client_conns[i] = xpc_connection_create_from_endpoint(endpoint); xpc_connection_set_event_handler(client_conns[i], ^(xpc_object_t event) { NSLog(@"client event handler, event = %@", event); }); xpc_object_t msg = xpc_dictionary_create(NULL, NULL, 0); xpc_dictionary_set_string(msg, "hello", "world"); NSLog(@"sending message: %@", msg); xpc_connection_send_message(client_conns[i], msg); xpc_connection_resume(client_conns[i]); } CFRunLoopRun(); NSLog(@"Done."); } return 0; } // CHECK: Hello world. // CHECK: WARNING: ThreadSanitizer: data race // CHECK: Write of size 8 // CHECK: #0 {{.*}}xpc-race.mm:36 // CHECK: Previous write of size 8 // CHECK: #0 {{.*}}xpc-race.mm:36 // CHECK: Location is global 'global' // CHECK: Done.
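An editor's note on the test above: the race xpc-race.mm provokes is the classic unsynchronized-write pattern; stripped of XPC, it reduces to two threads storing to the same long with no happens-before edge between them, which is what the "Write of size 8" CHECK lines match. A minimal pthread reduction of that pattern (illustrative names, not from the patch):

#include <pthread.h>

long global; // racy: written by both threads without synchronization

static void *writer1(void *) { global = 42; return nullptr; }
static void *writer2(void *) { global = 43; return nullptr; }

int main() {
  pthread_t t1, t2;
  pthread_create(&t1, nullptr, writer1, nullptr);
  pthread_create(&t2, nullptr, writer2, nullptr);
  pthread_join(t1, nullptr);  // under TSan: "data race ... write of size 8"
  pthread_join(t2, nullptr);
  return 0;
}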
Index: vendor/compiler-rt/dist/test/tsan/Darwin/xpc.mm =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/xpc.mm (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/Darwin/xpc.mm (revision 320961) @@ -1,76 +1,76 @@ // RUN: %clang_tsan %s -o %t -framework Foundation // RUN: %run %t 2>&1 | FileCheck %s -// XFAIL: ios +// UNSUPPORTED: ios #import <Foundation/Foundation.h> #import <xpc/xpc.h> long global; int main(int argc, const char *argv[]) { @autoreleasepool { NSLog(@"Hello world."); dispatch_queue_t server_q = dispatch_queue_create("server.queue", DISPATCH_QUEUE_CONCURRENT); dispatch_queue_t client_q = dispatch_queue_create("client.queue", DISPATCH_QUEUE_CONCURRENT); xpc_connection_t server_conn = xpc_connection_create(NULL, server_q); global = 42; xpc_connection_set_event_handler(server_conn, ^(xpc_object_t client) { NSLog(@"global = %ld", global); NSLog(@"server event handler, client = %@", client); if (client == XPC_ERROR_CONNECTION_INTERRUPTED || client == XPC_ERROR_CONNECTION_INVALID) { return; } xpc_connection_set_event_handler(client, ^(xpc_object_t object) { NSLog(@"received message: %@", object); xpc_object_t reply = xpc_dictionary_create_reply(object); if (!reply) return; xpc_dictionary_set_string(reply, "reply", "value"); xpc_connection_t remote = xpc_dictionary_get_remote_connection(object); xpc_connection_send_message(remote, reply); }); xpc_connection_resume(client); }); xpc_connection_resume(server_conn); xpc_endpoint_t endpoint = xpc_endpoint_create(server_conn); xpc_connection_t client_conn = xpc_connection_create_from_endpoint(endpoint); xpc_connection_set_event_handler(client_conn, ^(xpc_object_t event) { NSLog(@"client event handler, event = %@", event); }); xpc_object_t msg = xpc_dictionary_create(NULL, NULL, 0); xpc_dictionary_set_string(msg, "hello", "world"); NSLog(@"sending message: %@", msg); xpc_connection_send_message_with_reply( client_conn, msg, client_q, ^(xpc_object_t object) { NSLog(@"received reply: %@", object); xpc_connection_cancel(client_conn); xpc_connection_cancel(server_conn); dispatch_sync(dispatch_get_main_queue(), ^{ CFRunLoopStop(CFRunLoopGetCurrent()); }); }); xpc_connection_resume(client_conn); CFRunLoopRun(); NSLog(@"Done."); } return 0; } // CHECK: Done.
// CHECK-NOT: WARNING: ThreadSanitizer Index: vendor/compiler-rt/dist/test/tsan/deep_stack1.cc =================================================================== --- vendor/compiler-rt/dist/test/tsan/deep_stack1.cc (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/deep_stack1.cc (revision 320961) @@ -1,50 +1,57 @@ // RUN: %clangxx_tsan -O1 %s -o %t -DORDER1 && %deflake %run %t | FileCheck %s // RUN: %clangxx_tsan -O1 %s -o %t -DORDER2 && %deflake %run %t | FileCheck %s #include "test.h" volatile int X; volatile int N; void (*volatile F)(); static void foo() { if (--N == 0) X = 42; else F(); } void *Thread(void *p) { #ifdef ORDER1 barrier_wait(&barrier); #endif F(); #ifdef ORDER2 barrier_wait(&barrier); #endif return 0; } +static size_t RoundUp(size_t n, size_t to) { + return ((n + to - 1) / to) * to; +} + int main() { barrier_init(&barrier, 2); N = 50000; F = foo; pthread_t t; pthread_attr_t a; pthread_attr_init(&a); - pthread_attr_setstacksize(&a, N * 256 + (1 << 20)); + size_t stack_size = N * 256 + (1 << 20); + stack_size = RoundUp(stack_size, 0x10000); // round the stack size to 64k + int ret = pthread_attr_setstacksize(&a, stack_size); + if (ret) abort(); pthread_create(&t, &a, Thread, 0); #ifdef ORDER2 barrier_wait(&barrier); #endif X = 43; #ifdef ORDER1 barrier_wait(&barrier); #endif pthread_join(t, 0); } // CHECK: WARNING: ThreadSanitizer: data race // CHECK: #100 foo // We must output a sufficiently large stack (at least 100 frames) Index: vendor/compiler-rt/dist/test/tsan/ignore_lib0.cc =================================================================== --- vendor/compiler-rt/dist/test/tsan/ignore_lib0.cc (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/ignore_lib0.cc (revision 320961) @@ -1,33 +1,35 @@ // RUN: %clangxx_tsan -O1 %s -DLIB -fPIC -fno-sanitize=thread -shared -o %T/libignore_lib0.so // RUN: %clangxx_tsan -O1 %s -L%T -lignore_lib0 -o %t // RUN: echo running w/o suppressions: // RUN: env LD_LIBRARY_PATH=%T${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} %deflake %run %t | FileCheck %s --check-prefix=CHECK-NOSUPP // RUN: echo running with suppressions: // RUN: env LD_LIBRARY_PATH=%T${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} %env_tsan_opts=suppressions='%s.supp' %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-WITHSUPP // Tests that interceptors coming from a library specified in called_from_lib // suppression are ignored.
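// Editor's note (illustrative addition, not part of the patch): the
// suppressions='%s.supp' file referenced by the RUN lines above ships next to
// this test and is not included in this diff. In the documented TSan
// suppressions format, a called_from_lib entry names the library whose
// interceptors should be ignored; its shape is along these lines (the exact
// pattern in the real file may differ):
//   called_from_lib:libignore_lib0.so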
// Some aarch64 kernels do not support non executable write pages // REQUIRES: stable-runtime +// UNSUPPORTED: ios + #ifndef LIB extern "C" void libfunc(); int main() { libfunc(); } #else // #ifdef LIB #include "ignore_lib_lib.h" #endif // #ifdef LIB // CHECK-NOSUPP: WARNING: ThreadSanitizer: data race // CHECK-NOSUPP: OK // CHECK-WITHSUPP-NOT: WARNING: ThreadSanitizer: data race // CHECK-WITHSUPP: OK Index: vendor/compiler-rt/dist/test/tsan/java_find.cc =================================================================== --- vendor/compiler-rt/dist/test/tsan/java_find.cc (nonexistent) +++ vendor/compiler-rt/dist/test/tsan/java_find.cc (revision 320961) @@ -0,0 +1,69 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s +#include "java.h" + +int const kHeapSize = 1024 * 1024; + +static void verify_find(jptr from, jptr to, jptr expected_addr, + jptr expected_size) { + jptr addr = from; + jptr size = __tsan_java_find(&addr, to); + if (expected_size) { + if (!size) { + fprintf(stderr, "FAILED: range: [%p..%p): found nothing\n", (void *)from, + (void *)to); + return; + } else if (expected_size != size) { + fprintf(stderr, "FAILED: range: [%p..%p): wrong size, %lu instead of %lu\n", + (void *)from, (void *)to, size, expected_size); + return; + } + } else if (size) { + fprintf(stderr, + "FAILED: range [%p..%p): did not expect to find anything here\n", + (void *)from, (void *)to); + return; + } else { + return; + } + if (expected_addr != addr) { + fprintf( + stderr, + "FAILED: range [%p..%p): expected to find object at %p, found at %p\n", + (void *)from, (void *)to, (void *)expected_addr, (void *)addr); + } +} + +int main() { + const jptr jheap = (jptr)malloc(kHeapSize + 8) + 8; + const jptr jheap_end = jheap + kHeapSize; + __tsan_java_init(jheap, kHeapSize); + const jptr addr1 = jheap; + const int size1 = 16; + __tsan_java_alloc(jheap, size1); + + const jptr addr2 = addr1 + size1; + const int size2 = 32; + __tsan_java_alloc(jheap + size1, size2); + + const jptr addr3 = addr2 + size2; + const int size3 = 1024; + __tsan_java_alloc(jheap + size1 + size2, size3); + + const jptr addr4 = addr3 + size3; + + verify_find(jheap, jheap_end, addr1, size1); + verify_find(jheap + 8, jheap_end, addr2, size2); + verify_find(addr2 + 8, jheap_end, addr3, size3); + verify_find(addr3 + 8, jheap_end, 0, 0); + + __tsan_java_move(addr2, addr4, size2); + verify_find(jheap + 8, jheap_end, addr3, size3); + verify_find(addr3 + 8, jheap_end, addr4, size2); + verify_find(addr4 + 8, jheap_end, 0, 0); + + fprintf(stderr, "DONE\n"); + return 0; +} + +// CHECK-NOT: FAILED +// CHECK: DONE Property changes on: vendor/compiler-rt/dist/test/tsan/java_find.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/tsan/lit.cfg =================================================================== --- vendor/compiler-rt/dist/test/tsan/lit.cfg (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/lit.cfg (revision 320961) @@ -1,92 +1,92 @@ # -*- Python -*- import os def get_required_attr(config, attr_name): attr_value = getattr(config, attr_name, None) if not attr_value: lit_config.fatal( "No attribute %r in test configuration! 
You may need to run " "tests from your build directory or add this attribute " "to lit.site.cfg " % attr_name) return attr_value # Setup config name. config.name = 'ThreadSanitizer' + config.name_suffix # Setup source root. config.test_source_root = os.path.dirname(__file__) # Setup environment variables for running ThreadSanitizer. default_tsan_opts = "atexit_sleep_ms=0" if config.host_os == 'Darwin': # On Darwin, we default to `abort_on_error=1`, which would make tests run # much slower. Let's override this and run lit tests with 'abort_on_error=0'. default_tsan_opts += ':abort_on_error=0' # On Darwin, we default to ignore_noninstrumented_modules=1, which also # suppresses some races the tests are supposed to find. Let's run without this # setting, but turn it back on for Darwin tests (see Darwin/lit.local.cfg). default_tsan_opts += ':ignore_noninstrumented_modules=0' # Platform-specific default TSAN_OPTIONS for lit tests. if default_tsan_opts: config.environment['TSAN_OPTIONS'] = default_tsan_opts default_tsan_opts += ':' config.substitutions.append(('%env_tsan_opts=', 'env TSAN_OPTIONS=' + default_tsan_opts)) # GCC driver doesn't add necessary compile/link flags with -fsanitize=thread. if config.compiler_id == 'GNU': extra_cflags = ["-fPIE", "-pthread", "-ldl", "-lstdc++", "-lrt", "-pie"] else: extra_cflags = [] tsan_incdir = config.test_source_root + "/../" # Setup default compiler flags used with -fsanitize=thread option. clang_tsan_cflags = (["-fsanitize=thread", "-Wall"] + [config.target_cflags] + config.debug_info_flags + extra_cflags + ["-I%s" % tsan_incdir]) clang_tsan_cxxflags = config.cxx_mode_flags + clang_tsan_cflags + ["-std=c++11"] + ["-I%s" % tsan_incdir] # Add additional flags if we're using instrumented libc++. # Instrumented libcxx currently not supported on Darwin. if config.has_libcxx and config.host_os != 'Darwin': # FIXME: Dehardcode this path somehow. libcxx_path = os.path.join(config.compiler_rt_obj_root, "lib", "tsan", "libcxx_tsan_" + config.target_arch) libcxx_incdir = os.path.join(libcxx_path, "include", "c++", "v1") libcxx_libdir = os.path.join(libcxx_path, "lib") libcxx_so = os.path.join(libcxx_libdir, "libc++.so") clang_tsan_cxxflags += ["-nostdinc++", "-I%s" % libcxx_incdir, libcxx_so, "-Wl,-rpath=%s" % libcxx_libdir] def build_invocation(compile_flags): - return " " + " ".join([config.clang] + compile_flags) + " " + return " " + " ".join([config.compile_wrapper, config.clang] + compile_flags) + " " config.substitutions.append( ("%clang_tsan ", build_invocation(clang_tsan_cflags)) ) config.substitutions.append( ("%clangxx_tsan ", build_invocation(clang_tsan_cxxflags)) ) # Define CHECK-%os to check for OS-dependent output. config.substitutions.append( ('CHECK-%os', ("CHECK-" + config.host_os))) config.substitutions.append( ("%deflake ", os.path.join(os.path.dirname(__file__), "deflake.bash") + " ")) # Default test suffixes. config.suffixes = ['.c', '.cc', '.cpp', '.m', '.mm'] # ThreadSanitizer tests are currently supported on FreeBSD, Linux and Darwin. if config.host_os not in ['FreeBSD', 'Linux', 'Darwin']: config.unsupported = True # Allow tests to use REQUIRES=stable-runtime. For use when you cannot use XFAIL # because the test hangs. 
if config.target_arch != 'aarch64': config.available_features.add('stable-runtime') if config.host_os == 'Darwin' and config.target_arch in ["x86_64", "x86_64h"]: config.parallelism_group = "darwin-64bit-sanitizer" Index: vendor/compiler-rt/dist/test/tsan/lit.site.cfg.in =================================================================== --- vendor/compiler-rt/dist/test/tsan/lit.site.cfg.in (revision 320960) +++ vendor/compiler-rt/dist/test/tsan/lit.site.cfg.in (revision 320961) @@ -1,12 +1,15 @@ @LIT_SITE_CFG_IN_HEADER@ config.name_suffix = "@TSAN_TEST_CONFIG_SUFFIX@" +config.tsan_lit_source_dir = "@TSAN_LIT_SOURCE_DIR@" config.has_libcxx = @TSAN_HAS_LIBCXX@ +config.ios = @TSAN_TEST_IOS_PYBOOL@ +config.iossim = @TSAN_TEST_IOSSIM_PYBOOL@ config.target_cflags = "@TSAN_TEST_TARGET_CFLAGS@" config.target_arch = "@TSAN_TEST_TARGET_ARCH@" # Load common config for all compiler-rt lit tests. lit_config.load_config(config, "@COMPILER_RT_BINARY_DIR@/test/lit.common.configured") # Load tool-specific config that would do the real work. lit_config.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg")