diff --git a/CMakeLists.txt b/CMakeLists.txt index 51769088a992..3152233587c6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,271 +1,271 @@ # CMake build for CompilerRT. # # This build assumes that CompilerRT is checked out into the # 'projects/compiler-rt' or 'runtimes/compiler-rt' inside of an LLVM tree. # Standalone build system for CompilerRT is not yet ready. # # An important constraint of the build is that it only produces libraries # based on the ability of the host toolchain to target various platforms. +cmake_minimum_required(VERSION 3.4.3) + # Check if compiler-rt is built as a standalone project. if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR OR COMPILER_RT_STANDALONE_BUILD) project(CompilerRT C CXX ASM) set(COMPILER_RT_STANDALONE_BUILD TRUE) endif() -cmake_minimum_required(VERSION 3.4.3) - # Add path for custom compiler-rt modules. list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_CURRENT_SOURCE_DIR}/cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules" ) include(base-config-ix) include(CompilerRTUtils) option(COMPILER_RT_BUILD_BUILTINS "Build builtins" ON) mark_as_advanced(COMPILER_RT_BUILD_BUILTINS) option(COMPILER_RT_BUILD_SANITIZERS "Build sanitizers" ON) mark_as_advanced(COMPILER_RT_BUILD_SANITIZERS) option(COMPILER_RT_BUILD_XRAY "Build xray" ON) mark_as_advanced(COMPILER_RT_BUILD_XRAY) set(COMPILER_RT_BAREMETAL_BUILD OFF CACHE BOOLEAN "Build for a bare-metal target.") if (COMPILER_RT_STANDALONE_BUILD) load_llvm_config() # Find Python interpreter. set(Python_ADDITIONAL_VERSIONS 2.7 2.6 2.5) include(FindPythonInterp) if(NOT PYTHONINTERP_FOUND) message(FATAL_ERROR " Unable to find Python interpreter required for testing. Please install Python or specify the PYTHON_EXECUTABLE CMake variable.") endif() # Define default arguments to lit. set(LIT_ARGS_DEFAULT "-sv") if (MSVC OR XCODE) set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar") endif() set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit") endif() construct_compiler_rt_default_triple() if ("${COMPILER_RT_DEFAULT_TARGET_ABI}" STREQUAL "androideabi") set(ANDROID 1) endif() set(COMPILER_RT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(COMPILER_RT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) # We support running instrumented tests when we're not cross-compiling # and target a UNIX-like system or Windows. # We can run tests on Android even when we are cross-compiling. if(("${CMAKE_HOST_SYSTEM}" STREQUAL "${CMAKE_SYSTEM}" AND (UNIX OR WIN32)) OR ANDROID OR COMPILER_RT_EMULATOR) option(COMPILER_RT_CAN_EXECUTE_TESTS "Can we execute instrumented tests" ON) else() option(COMPILER_RT_CAN_EXECUTE_TESTS "Can we execute instrumented tests" OFF) endif() option(COMPILER_RT_DEBUG "Build runtimes with full debug info" OFF) option(COMPILER_RT_EXTERNALIZE_DEBUGINFO "Generate dSYM files and strip executables and libraries (Darwin Only)" OFF) # COMPILER_RT_DEBUG_PYBOOL is used by lit.common.configured.in. pythonize_bool(COMPILER_RT_DEBUG) include(config-ix) if(APPLE AND SANITIZER_MIN_OSX_VERSION VERSION_LESS "10.9") # Mac OS X prior to 10.9 had problems with exporting symbols from # libc++/libc++abi. set(use_cxxabi_default OFF) elseif(MSVC) set(use_cxxabi_default OFF) else() set(use_cxxabi_default ON) endif() option(SANITIZER_CAN_USE_CXXABI "Sanitizers can use cxxabi" ${use_cxxabi_default}) pythonize_bool(SANITIZER_CAN_USE_CXXABI) #================================ # Setup Compiler Flags #================================ if(MSVC) # Override any existing /W flags with /W4. This is what LLVM does.
Failing to # remove other /W[0-4] flags will result in a warning about overriding a # previous flag. if (COMPILER_RT_HAS_W4_FLAG) string(REGEX REPLACE " /W[0-4]" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") string(REGEX REPLACE " /W[0-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") append_string_if(COMPILER_RT_HAS_W4_FLAG /W4 CMAKE_C_FLAGS CMAKE_CXX_FLAGS) endif() else() append_string_if(COMPILER_RT_HAS_WALL_FLAG -Wall CMAKE_C_FLAGS CMAKE_CXX_FLAGS) endif() if(COMPILER_RT_ENABLE_WERROR) append_string_if(COMPILER_RT_HAS_WERROR_FLAG -Werror CMAKE_C_FLAGS CMAKE_CXX_FLAGS) append_string_if(COMPILER_RT_HAS_WX_FLAG /WX CMAKE_C_FLAGS CMAKE_CXX_FLAGS) endif() append_string_if(COMPILER_RT_HAS_STD_CXX11_FLAG -std=c++11 CMAKE_CXX_FLAGS) # Emulate C99 and C++11's __func__ for MSVC prior to 2013 CTP. if(NOT COMPILER_RT_HAS_FUNC_SYMBOL) add_definitions(-D__func__=__FUNCTION__) endif() # Provide some common command-line flags for Sanitizer runtimes. if(NOT WIN32) append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC SANITIZER_COMMON_CFLAGS) endif() append_list_if(COMPILER_RT_HAS_FNO_BUILTIN_FLAG -fno-builtin SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG -fno-exceptions SANITIZER_COMMON_CFLAGS) if(NOT COMPILER_RT_DEBUG AND NOT APPLE) append_list_if(COMPILER_RT_HAS_FOMIT_FRAME_POINTER_FLAG -fomit-frame-pointer SANITIZER_COMMON_CFLAGS) endif() append_list_if(COMPILER_RT_HAS_FUNWIND_TABLES_FLAG -funwind-tables SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_FNO_STACK_PROTECTOR_FLAG -fno-stack-protector SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_FNO_SANITIZE_SAFE_STACK_FLAG -fno-sanitize=safe-stack SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG -fvisibility=hidden SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_FVISIBILITY_INLINES_HIDDEN_FLAG -fvisibility-inlines-hidden SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto SANITIZER_COMMON_CFLAGS) # The following is a workaround for powerpc64le. This is the only architecture # that requires -fno-function-sections to work properly. If lacking, the ASan # Linux test function-sections-are-bad.cc fails with the following error: # 'undefined symbol: __sanitizer_unaligned_load32'. if(DEFINED TARGET_powerpc64le_CFLAGS) append_list_if(COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG -fno-function-sections TARGET_powerpc64le_CFLAGS) endif() if(MSVC) # Replace the /M[DT][d] flags with /MT, and strip any definitions of _DEBUG, # which cause definition mismatches at link time. # FIXME: In fact, sanitizers should support both /MT and /MD, see PR20214. if(COMPILER_RT_HAS_MT_FLAG) foreach(flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) string(REGEX REPLACE "/M[DT]d" "/MT" ${flag_var} "${${flag_var}}") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") string(REGEX REPLACE "/D_DEBUG" "" ${flag_var} "${${flag_var}}") endforeach() endif() append_list_if(COMPILER_RT_HAS_Oy_FLAG /Oy- SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_GS_FLAG /GS- SANITIZER_COMMON_CFLAGS) # VS 2015 (version 1900) added support for thread safe static initialization. # However, ASan interceptors run before CRT initialization, which causes the # new thread safe code to crash. Disable this feature for now.
if (MSVC_VERSION GREATER 1899 OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") list(APPEND SANITIZER_COMMON_CFLAGS /Zc:threadSafeInit-) endif() endif() append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 SANITIZER_COMMON_CFLAGS) # Build with optimization, unless we're in debug mode. If we're using MSVC, # always respect the optimization flags set by CMAKE_BUILD_TYPE instead. if(NOT COMPILER_RT_DEBUG AND NOT MSVC) list(APPEND SANITIZER_COMMON_CFLAGS -O3) endif() # Determine if we should restrict stack frame sizes. # Stack frames on PowerPC and MIPS, and in debug builds, can be much larger than # anticipated. # FIXME: Fix all sanitizers and add -Wframe-larger-than to # SANITIZER_COMMON_FLAGS if(COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG AND NOT COMPILER_RT_DEBUG AND NOT ${COMPILER_RT_DEFAULT_TARGET_ARCH} MATCHES "powerpc|mips") set(SANITIZER_LIMIT_FRAME_SIZE TRUE) else() set(SANITIZER_LIMIT_FRAME_SIZE FALSE) endif() # Build sanitizer runtimes with debug info. if(COMPILER_RT_HAS_GLINE_TABLES_ONLY_FLAG AND NOT COMPILER_RT_DEBUG) list(APPEND SANITIZER_COMMON_CFLAGS -gline-tables-only) elseif(COMPILER_RT_HAS_G_FLAG) list(APPEND SANITIZER_COMMON_CFLAGS -g) elseif(MSVC) # Use /Z7 instead of /Zi for the asan runtime. This avoids the LNK4099 # warning from the MS linker complaining that it can't find the 'vc140.pdb' # file used by our object library compilations. list(APPEND SANITIZER_COMMON_CFLAGS /Z7) llvm_replace_compiler_option(CMAKE_CXX_FLAGS "/Z[i7I]" "/Z7") llvm_replace_compiler_option(CMAKE_CXX_FLAGS_DEBUG "/Z[i7I]" "/Z7") llvm_replace_compiler_option(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/Z[i7I]" "/Z7") endif() if(LLVM_ENABLE_MODULES) # Sanitizers cannot be built with -fmodules. The interceptors intentionally # don't include system headers, which is incompatible with modules. list(APPEND SANITIZER_COMMON_CFLAGS -fno-modules) endif() # Turn off several warnings. append_list_if(COMPILER_RT_HAS_WGNU_FLAG -Wno-gnu SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG -Wno-variadic-macros SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WC99_EXTENSIONS_FLAG -Wno-c99-extensions SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WNON_VIRTUAL_DTOR_FLAG -Wno-non-virtual-dtor SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WD4146_FLAG /wd4146 SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WD4291_FLAG /wd4291 SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WD4391_FLAG /wd4391 SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WD4722_FLAG /wd4722 SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WD4800_FLAG /wd4800 SANITIZER_COMMON_CFLAGS) # Warnings to turn off for all libraries, not just sanitizers. append_string_if(COMPILER_RT_HAS_WUNUSED_PARAMETER_FLAG -Wno-unused-parameter CMAKE_C_FLAGS CMAKE_CXX_FLAGS) if (CMAKE_LINKER MATCHES "link.exe$") # Silence MSVC linker warnings caused by empty object files. The # sanitizer libraries intentionally use ifdefs that result in empty # files, rather than skipping these files in the build system. # Ideally, we would pass this flag only for the libraries that need # it, but CMake doesn't seem to have a way to set linker flags for # individual static libraries, so we enable the suppression flag for # the whole compiler-rt project.
append("/IGNORE:4221" CMAKE_STATIC_LINKER_FLAGS) endif() add_subdirectory(include) set(COMPILER_RT_LIBCXX_PATH ${LLVM_MAIN_SRC_DIR}/projects/libcxx) if(EXISTS ${COMPILER_RT_LIBCXX_PATH}/) set(COMPILER_RT_HAS_LIBCXX_SOURCES TRUE) else() set(COMPILER_RT_LIBCXX_PATH ${LLVM_MAIN_SRC_DIR}/../libcxx) if(EXISTS ${COMPILER_RT_LIBCXX_PATH}/) set(COMPILER_RT_HAS_LIBCXX_SOURCES TRUE) else() set(COMPILER_RT_HAS_LIBCXX_SOURCES FALSE) endif() endif() set(COMPILER_RT_LLD_PATH ${LLVM_MAIN_SRC_DIR}/tools/lld) if(EXISTS ${COMPILER_RT_LLD_PATH}/ AND LLVM_TOOL_LLD_BUILD) set(COMPILER_RT_HAS_LLD TRUE) else() set(COMPILER_RT_LLD_PATH ${LLVM_MAIN_SRC_DIR}/../lld) if(EXISTS ${COMPILER_RT_LLD_PATH}/ AND LLVM_TOOL_LLD_BUILD) set(COMPILER_RT_HAS_LLD TRUE) else() set(COMPILER_RT_HAS_LLD FALSE) endif() endif() pythonize_bool(COMPILER_RT_HAS_LLD) add_subdirectory(lib) if(COMPILER_RT_INCLUDE_TESTS) add_subdirectory(unittests) add_subdirectory(test) endif() diff --git a/cmake/config-ix.cmake b/cmake/config-ix.cmake index ae2a262a14a9..0da98b9a489d 100644 --- a/cmake/config-ix.cmake +++ b/cmake/config-ix.cmake @@ -1,546 +1,546 @@ include(CMakePushCheckState) include(CheckCXXCompilerFlag) include(CheckLibraryExists) include(CheckSymbolExists) include(TestBigEndian) function(check_linker_flag flag out_var) cmake_push_check_state() set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${flag}") check_cxx_compiler_flag("" ${out_var}) cmake_pop_check_state() endfunction() # CodeGen options. check_cxx_compiler_flag(-fPIC COMPILER_RT_HAS_FPIC_FLAG) check_cxx_compiler_flag(-fPIE COMPILER_RT_HAS_FPIE_FLAG) check_cxx_compiler_flag(-fno-builtin COMPILER_RT_HAS_FNO_BUILTIN_FLAG) check_cxx_compiler_flag(-fno-exceptions COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG) check_cxx_compiler_flag(-fomit-frame-pointer COMPILER_RT_HAS_FOMIT_FRAME_POINTER_FLAG) check_cxx_compiler_flag(-funwind-tables COMPILER_RT_HAS_FUNWIND_TABLES_FLAG) check_cxx_compiler_flag(-fno-stack-protector COMPILER_RT_HAS_FNO_STACK_PROTECTOR_FLAG) check_cxx_compiler_flag(-fno-sanitize=safe-stack COMPILER_RT_HAS_FNO_SANITIZE_SAFE_STACK_FLAG) check_cxx_compiler_flag(-fvisibility=hidden COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG) check_cxx_compiler_flag(-frtti COMPILER_RT_HAS_FRTTI_FLAG) check_cxx_compiler_flag(-fno-rtti COMPILER_RT_HAS_FNO_RTTI_FLAG) check_cxx_compiler_flag(-ffreestanding COMPILER_RT_HAS_FFREESTANDING_FLAG) check_cxx_compiler_flag("-Werror -fno-function-sections" COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG) check_cxx_compiler_flag(-std=c++11 COMPILER_RT_HAS_STD_CXX11_FLAG) check_cxx_compiler_flag(-ftls-model=initial-exec COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC) check_cxx_compiler_flag(-fno-lto COMPILER_RT_HAS_FNO_LTO_FLAG) check_cxx_compiler_flag("-Werror -msse3" COMPILER_RT_HAS_MSSE3_FLAG) check_cxx_compiler_flag("-Werror -msse4.2" COMPILER_RT_HAS_MSSE4_2_FLAG) check_cxx_compiler_flag(--sysroot=. COMPILER_RT_HAS_SYSROOT_FLAG) check_cxx_compiler_flag("-Werror -mcrc" COMPILER_RT_HAS_MCRC_FLAG) if(NOT WIN32 AND NOT CYGWIN) # MinGW warns if -fvisibility-inlines-hidden is used. check_cxx_compiler_flag("-fvisibility-inlines-hidden" COMPILER_RT_HAS_FVISIBILITY_INLINES_HIDDEN_FLAG) endif() check_cxx_compiler_flag(/GR COMPILER_RT_HAS_GR_FLAG) check_cxx_compiler_flag(/GS COMPILER_RT_HAS_GS_FLAG) check_cxx_compiler_flag(/MT COMPILER_RT_HAS_MT_FLAG) check_cxx_compiler_flag(/Oy COMPILER_RT_HAS_Oy_FLAG) # Debug info flags. 
check_cxx_compiler_flag(-gline-tables-only COMPILER_RT_HAS_GLINE_TABLES_ONLY_FLAG) check_cxx_compiler_flag(-g COMPILER_RT_HAS_G_FLAG) check_cxx_compiler_flag(/Zi COMPILER_RT_HAS_Zi_FLAG) # Warnings. check_cxx_compiler_flag(-Wall COMPILER_RT_HAS_WALL_FLAG) check_cxx_compiler_flag(-Werror COMPILER_RT_HAS_WERROR_FLAG) check_cxx_compiler_flag("-Werror -Wframe-larger-than=512" COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG) check_cxx_compiler_flag("-Werror -Wglobal-constructors" COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG) check_cxx_compiler_flag("-Werror -Wc99-extensions" COMPILER_RT_HAS_WC99_EXTENSIONS_FLAG) check_cxx_compiler_flag("-Werror -Wgnu" COMPILER_RT_HAS_WGNU_FLAG) check_cxx_compiler_flag("-Werror -Wnon-virtual-dtor" COMPILER_RT_HAS_WNON_VIRTUAL_DTOR_FLAG) check_cxx_compiler_flag("-Werror -Wvariadic-macros" COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG) check_cxx_compiler_flag("-Werror -Wunused-parameter" COMPILER_RT_HAS_WUNUSED_PARAMETER_FLAG) check_cxx_compiler_flag("-Werror -Wcovered-switch-default" COMPILER_RT_HAS_WCOVERED_SWITCH_DEFAULT_FLAG) check_cxx_compiler_flag(/W4 COMPILER_RT_HAS_W4_FLAG) check_cxx_compiler_flag(/WX COMPILER_RT_HAS_WX_FLAG) check_cxx_compiler_flag(/wd4146 COMPILER_RT_HAS_WD4146_FLAG) check_cxx_compiler_flag(/wd4291 COMPILER_RT_HAS_WD4291_FLAG) check_cxx_compiler_flag(/wd4221 COMPILER_RT_HAS_WD4221_FLAG) check_cxx_compiler_flag(/wd4391 COMPILER_RT_HAS_WD4391_FLAG) check_cxx_compiler_flag(/wd4722 COMPILER_RT_HAS_WD4722_FLAG) check_cxx_compiler_flag(/wd4800 COMPILER_RT_HAS_WD4800_FLAG) # Symbols. check_symbol_exists(__func__ "" COMPILER_RT_HAS_FUNC_SYMBOL) # Libraries. check_library_exists(c fopen "" COMPILER_RT_HAS_LIBC) check_library_exists(dl dlopen "" COMPILER_RT_HAS_LIBDL) check_library_exists(rt shm_open "" COMPILER_RT_HAS_LIBRT) check_library_exists(m pow "" COMPILER_RT_HAS_LIBM) check_library_exists(pthread pthread_create "" COMPILER_RT_HAS_LIBPTHREAD) check_library_exists(stdc++ __cxa_throw "" COMPILER_RT_HAS_LIBSTDCXX) # Linker flags. if(ANDROID) check_linker_flag("-Wl,-z,global" COMPILER_RT_HAS_Z_GLOBAL) check_library_exists(log __android_log_write "" COMPILER_RT_HAS_LIBLOG) endif() # Architectures. # List of all architectures we can target. set(COMPILER_RT_SUPPORTED_ARCH) # Try to compile a very simple source file to ensure we can target the given # platform. We use the results of these tests to build only the various target # runtime libraries supported by our current compiler's cross-compiling # abilities. set(SIMPLE_SOURCE ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/simple.cc) file(WRITE ${SIMPLE_SOURCE} "#include <stdio.h>\n#include <stdlib.h>\nint main() { printf(\"hello, world\"); }\n") # Detect whether the current target platform is 32-bit or 64-bit, and setup # the correct commandline flags needed to attempt to target 32-bit and 64-bit. if (NOT CMAKE_SIZEOF_VOID_P EQUAL 4 AND NOT CMAKE_SIZEOF_VOID_P EQUAL 8) message(FATAL_ERROR "Please use an architecture with 4 or 8 byte pointers.") endif() test_targets() # Returns a list of architecture specific target cflags in @out_var list. function(get_target_flags_for_arch arch out_var) list(FIND COMPILER_RT_SUPPORTED_ARCH ${arch} ARCH_INDEX) if(ARCH_INDEX EQUAL -1) message(FATAL_ERROR "Unsupported architecture: ${arch}") else() if (NOT APPLE) set(${out_var} ${TARGET_${arch}_CFLAGS} PARENT_SCOPE) else() # This is only called in constructing cflags for tests executing on the # host. This will need to all be cleaned up to support building tests # for cross-targeted hardware (i.e. iOS).
set(${out_var} -arch ${arch} PARENT_SCOPE) endif() endif() endfunction() # Returns a compiler and CFLAGS that should be used to run tests for the # specific architecture. When cross-compiling, this is controlled via # COMPILER_RT_TEST_COMPILER and COMPILER_RT_TEST_COMPILER_CFLAGS. macro(get_test_cc_for_arch arch cc_out cflags_out) if(ANDROID OR ${arch} MATCHES "arm|aarch64") # This is only true if we are cross-compiling. # Build all tests with host compiler and use host tools. set(${cc_out} ${COMPILER_RT_TEST_COMPILER}) set(${cflags_out} ${COMPILER_RT_TEST_COMPILER_CFLAGS}) else() get_target_flags_for_arch(${arch} ${cflags_out}) if(APPLE) list(APPEND ${cflags_out} ${DARWIN_osx_CFLAGS}) endif() string(REPLACE ";" " " ${cflags_out} "${${cflags_out}}") endif() endmacro() set(ARM64 aarch64) set(ARM32 arm armhf) set(X86 i386 i686) set(X86_64 x86_64) set(MIPS32 mips mipsel) set(MIPS64 mips64 mips64el) set(PPC64 powerpc64 powerpc64le) set(S390X s390x) set(WASM32 wasm32) set(WASM64 wasm64) if(APPLE) set(ARM64 arm64) set(ARM32 armv7 armv7s armv7k) set(X86_64 x86_64 x86_64h) endif() set(ALL_SANITIZER_COMMON_SUPPORTED_ARCH ${X86} ${X86_64} ${PPC64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X}) set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${PPC64} ${S390X}) set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64}) if(APPLE) set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64}) else() set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32}) endif() set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64}) set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC64} ${MIPS32} ${MIPS64} ${S390X}) set(ALL_TSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64}) set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${PPC64} ${S390X}) set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64}) set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64}) set(ALL_ESAN_SUPPORTED_ARCH ${X86_64} ${MIPS64}) -set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64}) +set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64}) set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le) if(APPLE) include(CompilerRTDarwinUtils) find_darwin_sdk_dir(DARWIN_osx_SYSROOT macosx) find_darwin_sdk_dir(DARWIN_iossim_SYSROOT iphonesimulator) find_darwin_sdk_dir(DARWIN_ios_SYSROOT iphoneos) find_darwin_sdk_dir(DARWIN_watchossim_SYSROOT watchsimulator) find_darwin_sdk_dir(DARWIN_watchos_SYSROOT watchos) find_darwin_sdk_dir(DARWIN_tvossim_SYSROOT appletvsimulator) find_darwin_sdk_dir(DARWIN_tvos_SYSROOT appletvos) if(NOT DARWIN_osx_SYSROOT) if(EXISTS /usr/include) set(DARWIN_osx_SYSROOT /) else() message(ERROR "Could not detect OS X Sysroot.
Either install Xcode or the Apple Command Line Tools") endif() endif() if(COMPILER_RT_ENABLE_IOS) list(APPEND DARWIN_EMBEDDED_PLATFORMS ios) set(DARWIN_ios_MIN_VER_FLAG -miphoneos-version-min) set(DARWIN_ios_SANITIZER_MIN_VER_FLAG ${DARWIN_ios_MIN_VER_FLAG}=8.0) endif() if(COMPILER_RT_ENABLE_WATCHOS) list(APPEND DARWIN_EMBEDDED_PLATFORMS watchos) set(DARWIN_watchos_MIN_VER_FLAG -mwatchos-version-min) set(DARWIN_watchos_SANITIZER_MIN_VER_FLAG ${DARWIN_watchos_MIN_VER_FLAG}=2.0) endif() if(COMPILER_RT_ENABLE_TVOS) list(APPEND DARWIN_EMBEDDED_PLATFORMS tvos) set(DARWIN_tvos_MIN_VER_FLAG -mtvos-version-min) set(DARWIN_tvos_SANITIZER_MIN_VER_FLAG ${DARWIN_tvos_MIN_VER_FLAG}=9.0) endif() # Note: In order to target x86_64h on OS X the minimum deployment target must # be 10.8 or higher. set(SANITIZER_COMMON_SUPPORTED_OS osx) set(PROFILE_SUPPORTED_OS osx) set(TSAN_SUPPORTED_OS osx) if(NOT SANITIZER_MIN_OSX_VERSION) string(REGEX MATCH "-mmacosx-version-min=([.0-9]+)" MACOSX_VERSION_MIN_FLAG "${CMAKE_CXX_FLAGS}") if(MACOSX_VERSION_MIN_FLAG) set(SANITIZER_MIN_OSX_VERSION "${CMAKE_MATCH_1}") elseif(CMAKE_OSX_DEPLOYMENT_TARGET) set(SANITIZER_MIN_OSX_VERSION ${CMAKE_OSX_DEPLOYMENT_TARGET}) else() set(SANITIZER_MIN_OSX_VERSION 10.9) endif() if(SANITIZER_MIN_OSX_VERSION VERSION_LESS "10.7") message(FATAL_ERROR "macOS deployment target '${SANITIZER_MIN_OSX_VERSION}' is too old.") endif() if(SANITIZER_MIN_OSX_VERSION VERSION_GREATER "10.9") message(WARNING "macOS deployment target '${SANITIZER_MIN_OSX_VERSION}' is too new, setting to '10.9' instead.") set(SANITIZER_MIN_OSX_VERSION 10.9) endif() endif() # We're setting the flag manually for each target OS set(CMAKE_OSX_DEPLOYMENT_TARGET "") set(DARWIN_COMMON_CFLAGS -stdlib=libc++) set(DARWIN_COMMON_LINK_FLAGS -stdlib=libc++ -lc++ -lc++abi) check_linker_flag("-fapplication-extension" COMPILER_RT_HAS_APP_EXTENSION) if(COMPILER_RT_HAS_APP_EXTENSION) list(APPEND DARWIN_COMMON_LINK_FLAGS "-fapplication-extension") endif() set(DARWIN_osx_CFLAGS ${DARWIN_COMMON_CFLAGS} -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION}) set(DARWIN_osx_LINK_FLAGS ${DARWIN_COMMON_LINK_FLAGS} -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION}) if(DARWIN_osx_SYSROOT) list(APPEND DARWIN_osx_CFLAGS -isysroot ${DARWIN_osx_SYSROOT}) list(APPEND DARWIN_osx_LINK_FLAGS -isysroot ${DARWIN_osx_SYSROOT}) endif() # Figure out which arches to use for each OS darwin_get_toolchain_supported_archs(toolchain_arches) message(STATUS "Toolchain supported arches: ${toolchain_arches}") if(NOT MACOSX_VERSION_MIN_FLAG) darwin_test_archs(osx DARWIN_osx_ARCHS ${toolchain_arches}) message(STATUS "OSX supported arches: ${DARWIN_osx_ARCHS}") foreach(arch ${DARWIN_osx_ARCHS}) list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch}) set(CAN_TARGET_${arch} 1) endforeach() foreach(platform ${DARWIN_EMBEDDED_PLATFORMS}) if(DARWIN_${platform}sim_SYSROOT) set(DARWIN_${platform}sim_CFLAGS ${DARWIN_COMMON_CFLAGS} ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG} -isysroot ${DARWIN_${platform}sim_SYSROOT}) set(DARWIN_${platform}sim_LINK_FLAGS ${DARWIN_COMMON_LINK_FLAGS} ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG} -isysroot ${DARWIN_${platform}sim_SYSROOT}) set(DARWIN_${platform}sim_SKIP_CC_KEXT On) darwin_test_archs(${platform}sim DARWIN_${platform}sim_ARCHS ${toolchain_arches}) message(STATUS "${platform} Simulator supported arches: ${DARWIN_${platform}sim_ARCHS}") if(DARWIN_${platform}sim_ARCHS) list(APPEND SANITIZER_COMMON_SUPPORTED_OS ${platform}sim) list(APPEND PROFILE_SUPPORTED_OS ${platform}sim) 
if(DARWIN_${platform}_SYSROOT_INTERNAL) list(APPEND TSAN_SUPPORTED_OS ${platform}sim) endif() endif() foreach(arch ${DARWIN_${platform}sim_ARCHS}) list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch}) set(CAN_TARGET_${arch} 1) endforeach() endif() if(DARWIN_${platform}_SYSROOT) set(DARWIN_${platform}_CFLAGS ${DARWIN_COMMON_CFLAGS} ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG} -isysroot ${DARWIN_${platform}_SYSROOT}) set(DARWIN_${platform}_LINK_FLAGS ${DARWIN_COMMON_LINK_FLAGS} ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG} -isysroot ${DARWIN_${platform}_SYSROOT}) darwin_test_archs(${platform} DARWIN_${platform}_ARCHS ${toolchain_arches}) message(STATUS "${platform} supported arches: ${DARWIN_${platform}_ARCHS}") if(DARWIN_${platform}_ARCHS) list(APPEND SANITIZER_COMMON_SUPPORTED_OS ${platform}) list(APPEND PROFILE_SUPPORTED_OS ${platform}) endif() foreach(arch ${DARWIN_${platform}_ARCHS}) list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch}) set(CAN_TARGET_${arch} 1) endforeach() endif() endforeach() endif() # for list_intersect include(CompilerRTUtils) list_intersect(SANITIZER_COMMON_SUPPORTED_ARCH ALL_SANITIZER_COMMON_SUPPORTED_ARCH COMPILER_RT_SUPPORTED_ARCH ) set(LSAN_COMMON_SUPPORTED_ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}) set(UBSAN_COMMON_SUPPORTED_ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}) list_intersect(ASAN_SUPPORTED_ARCH ALL_ASAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(DFSAN_SUPPORTED_ARCH ALL_DFSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(LSAN_SUPPORTED_ARCH ALL_LSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(MSAN_SUPPORTED_ARCH ALL_MSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(PROFILE_SUPPORTED_ARCH ALL_PROFILE_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(TSAN_SUPPORTED_ARCH ALL_TSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(UBSAN_SUPPORTED_ARCH ALL_UBSAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(SAFESTACK_SUPPORTED_ARCH ALL_SAFESTACK_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(CFI_SUPPORTED_ARCH ALL_CFI_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(ESAN_SUPPORTED_ARCH ALL_ESAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(SCUDO_SUPPORTED_ARCH ALL_SCUDO_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) list_intersect(XRAY_SUPPORTED_ARCH ALL_XRAY_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) else() # Architectures supported by compiler-rt libraries. filter_available_targets(SANITIZER_COMMON_SUPPORTED_ARCH ${ALL_SANITIZER_COMMON_SUPPORTED_ARCH}) # LSan and UBSan common files should be available on all architectures # supported by other sanitizers (even if they build into dummy object files). 
filter_available_targets(LSAN_COMMON_SUPPORTED_ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}) filter_available_targets(UBSAN_COMMON_SUPPORTED_ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}) filter_available_targets(ASAN_SUPPORTED_ARCH ${ALL_ASAN_SUPPORTED_ARCH}) filter_available_targets(DFSAN_SUPPORTED_ARCH ${ALL_DFSAN_SUPPORTED_ARCH}) filter_available_targets(LSAN_SUPPORTED_ARCH ${ALL_LSAN_SUPPORTED_ARCH}) filter_available_targets(MSAN_SUPPORTED_ARCH ${ALL_MSAN_SUPPORTED_ARCH}) filter_available_targets(PROFILE_SUPPORTED_ARCH ${ALL_PROFILE_SUPPORTED_ARCH}) filter_available_targets(TSAN_SUPPORTED_ARCH ${ALL_TSAN_SUPPORTED_ARCH}) filter_available_targets(UBSAN_SUPPORTED_ARCH ${ALL_UBSAN_SUPPORTED_ARCH}) filter_available_targets(SAFESTACK_SUPPORTED_ARCH ${ALL_SAFESTACK_SUPPORTED_ARCH}) filter_available_targets(CFI_SUPPORTED_ARCH ${ALL_CFI_SUPPORTED_ARCH}) filter_available_targets(ESAN_SUPPORTED_ARCH ${ALL_ESAN_SUPPORTED_ARCH}) filter_available_targets(SCUDO_SUPPORTED_ARCH ${ALL_SCUDO_SUPPORTED_ARCH}) filter_available_targets(XRAY_SUPPORTED_ARCH ${ALL_XRAY_SUPPORTED_ARCH}) endif() if (MSVC) # See if the DIA SDK is available and usable. set(MSVC_DIA_SDK_DIR "$ENV{VSINSTALLDIR}DIA SDK") if (IS_DIRECTORY ${MSVC_DIA_SDK_DIR}) set(CAN_SYMBOLIZE 1) else() set(CAN_SYMBOLIZE 0) endif() else() set(CAN_SYMBOLIZE 1) endif() find_program(GOLD_EXECUTABLE NAMES ${LLVM_DEFAULT_TARGET_TRIPLE}-ld.gold ld.gold ${LLVM_DEFAULT_TARGET_TRIPLE}-ld ld DOC "The gold linker") if(COMPILER_RT_SUPPORTED_ARCH) list(REMOVE_DUPLICATES COMPILER_RT_SUPPORTED_ARCH) endif() message(STATUS "Compiler-RT supported architectures: ${COMPILER_RT_SUPPORTED_ARCH}") if(ANDROID) set(OS_NAME "Android") else() set(OS_NAME "${CMAKE_SYSTEM_NAME}") endif() set(ALL_SANITIZERS asan;dfsan;msan;tsan;safestack;cfi;esan;scudo) set(COMPILER_RT_SANITIZERS_TO_BUILD ${ALL_SANITIZERS} CACHE STRING "sanitizers to build if supported on the target (all;${ALL_SANITIZERS})") list_replace(COMPILER_RT_SANITIZERS_TO_BUILD all "${ALL_SANITIZERS}") if (SANITIZER_COMMON_SUPPORTED_ARCH AND NOT LLVM_USE_SANITIZER AND (OS_NAME MATCHES "Android|Darwin|Linux|FreeBSD" OR (OS_NAME MATCHES "Windows" AND (NOT MINGW AND NOT CYGWIN)))) set(COMPILER_RT_HAS_SANITIZER_COMMON TRUE) else() set(COMPILER_RT_HAS_SANITIZER_COMMON FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON) set(COMPILER_RT_HAS_INTERCEPTION TRUE) else() set(COMPILER_RT_HAS_INTERCEPTION FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND ASAN_SUPPORTED_ARCH) set(COMPILER_RT_HAS_ASAN TRUE) else() set(COMPILER_RT_HAS_ASAN FALSE) endif() if (OS_NAME MATCHES "Linux|FreeBSD|Windows") set(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME TRUE) else() set(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME FALSE) endif() # TODO: Add builtins support. 
if (COMPILER_RT_HAS_SANITIZER_COMMON AND DFSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_DFSAN TRUE) else() set(COMPILER_RT_HAS_DFSAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND LSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Darwin|Linux|FreeBSD") set(COMPILER_RT_HAS_LSAN TRUE) else() set(COMPILER_RT_HAS_LSAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND MSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_MSAN TRUE) else() set(COMPILER_RT_HAS_MSAN FALSE) endif() if (PROFILE_SUPPORTED_ARCH AND NOT LLVM_USE_SANITIZER AND OS_NAME MATCHES "Darwin|Linux|FreeBSD|Windows|Android") set(COMPILER_RT_HAS_PROFILE TRUE) else() set(COMPILER_RT_HAS_PROFILE FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND TSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Darwin|Linux|FreeBSD|Android") set(COMPILER_RT_HAS_TSAN TRUE) else() set(COMPILER_RT_HAS_TSAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND UBSAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Darwin|Linux|FreeBSD|Windows|Android") set(COMPILER_RT_HAS_UBSAN TRUE) else() set(COMPILER_RT_HAS_UBSAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND SAFESTACK_SUPPORTED_ARCH AND OS_NAME MATCHES "Darwin|Linux|FreeBSD") set(COMPILER_RT_HAS_SAFESTACK TRUE) else() set(COMPILER_RT_HAS_SAFESTACK FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND CFI_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_CFI TRUE) else() set(COMPILER_RT_HAS_CFI FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND ESAN_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_ESAN TRUE) else() set(COMPILER_RT_HAS_ESAN FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND SCUDO_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_SCUDO TRUE) else() set(COMPILER_RT_HAS_SCUDO FALSE) endif() if (COMPILER_RT_HAS_SANITIZER_COMMON AND XRAY_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux") set(COMPILER_RT_HAS_XRAY TRUE) else() set(COMPILER_RT_HAS_XRAY FALSE) endif() diff --git a/include/xray/xray_interface.h b/include/xray/xray_interface.h index dc0c277aa841..564613417069 100644 --- a/include/xray/xray_interface.h +++ b/include/xray/xray_interface.h @@ -1,110 +1,111 @@ //===- xray_interface.h -----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of XRay, a dynamic runtime instrumentation system. // // APIs for controlling XRay functionality explicitly. //===----------------------------------------------------------------------===// #ifndef XRAY_XRAY_INTERFACE_H #define XRAY_XRAY_INTERFACE_H #include <cstdint> #include <cstddef> extern "C" { /// Synchronize this with AsmPrinter::SledKind in LLVM. enum XRayEntryType { ENTRY = 0, EXIT = 1, TAIL = 2, LOG_ARGS_ENTRY = 3, CUSTOM_EVENT = 4, }; /// Provide a function to invoke when instrumentation points are hit. This /// is a user-visible control surface that overrides the default implementation. /// The function provided should take the following arguments: /// /// - function id: an identifier that indicates the id of a function; this id /// is generated by xray; the mapping between the function id /// and the actual function pointer is available through /// __xray_table. /// - entry type: identifies what kind of instrumentation point was /// encountered (function entry, function exit, etc.).
See the /// enum XRayEntryType for more details. /// /// The user handler must handle correctly spurious calls after this handler is /// removed or replaced with another handler, because it would be too costly for /// the XRay runtime to avoid spurious calls. /// To prevent circular calling, the handler function itself and all its /// direct and indirect callees must not be instrumented with XRay, which can be /// achieved by marking them all with: __attribute__((xray_never_instrument)) /// /// Returns 1 on success, 0 on error. extern int __xray_set_handler(void (*entry)(int32_t, XRayEntryType)); /// This removes whatever the currently provided handler is. Returns 1 on /// success, 0 on error. extern int __xray_remove_handler(); /// Use XRay to log the first argument of each (instrumented) function call. /// When this function exits, all threads will have observed the effect and /// start logging their subsequent affected function calls (if patched). /// /// Returns 1 on success, 0 on error. -extern int __xray_set_handler_arg1(void (*)(int32_t, XRayEntryType, uint64_t)); +extern int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, + uint64_t)); /// Disables the XRay handler used to log first arguments of function calls. /// Returns 1 on success, 0 on error. extern int __xray_remove_handler_arg1(); /// Provide a function to invoke when XRay encounters a custom event. extern int __xray_set_customevent_handler(void (*entry)(void*, std::size_t)); /// This removes whatever the currently provided custom event handler is. /// Returns 1 on success, 0 on error. extern int __xray_remove_customevent_handler(); enum XRayPatchingStatus { NOT_INITIALIZED = 0, SUCCESS = 1, ONGOING = 2, FAILED = 3, }; /// This tells XRay to patch the instrumentation points. See XRayPatchingStatus /// for possible result values. extern XRayPatchingStatus __xray_patch(); /// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible /// result values. extern XRayPatchingStatus __xray_unpatch(); /// This patches a specific function id. See XRayPatchingStatus for possible /// result values. extern XRayPatchingStatus __xray_patch_function(int32_t FuncId); /// This unpatches a specific function id. See XRayPatchingStatus for possible /// result values. extern XRayPatchingStatus __xray_unpatch_function(int32_t FuncId); /// This function returns the address of the function provided a valid function /// id. We return 0 if we encounter any error, even if 0 may be a valid function /// address. extern uintptr_t __xray_function_address(int32_t FuncId); /// This function returns the maximum valid function id. Returns 0 if we /// encounter errors (when there are no instrumented functions, etc.). extern size_t __xray_max_function_id(); } // end extern "C" #endif // XRAY_XRAY_INTERFACE_H
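The interface above is easiest to follow with a concrete use. A minimal sketch of a program driving it end to end, assuming the file is compiled with clang's -fxray-instrument and linked against the XRay runtime (only declarations from this header are used; the attribute spellings come from the comments above):

#include <cstdint>
#include <cstdio>
#include "xray/xray_interface.h"

// Per the __xray_set_handler comment, the handler must not itself be
// XRay-instrumented, or it would trigger itself recursively.
__attribute__((xray_never_instrument))
void MyHandler(int32_t FuncId, XRayEntryType Entry) {
  std::printf("sled hit: func=%d type=%d\n", FuncId, static_cast<int>(Entry));
}

__attribute__((xray_always_instrument)) int Work(int x) { return x * 2; }

int main() {
  if (!__xray_set_handler(MyHandler)) return 1;  // returns 1 on success
  if (__xray_patch() != SUCCESS) return 1;       // rewrite sleds in place
  int r = Work(21);                              // entry/exit now reported
  __xray_unpatch();
  __xray_remove_handler();
  return r == 42 ? 0 : 1;
}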
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc index 1ded7794c9f4..57651f49cdb9 100644 --- a/lib/asan/asan_allocator.cc +++ b/lib/asan/asan_allocator.cc @@ -1,1007 +1,1006 @@ //===-- asan_allocator.cc -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Implementation of ASan's memory allocator, second version. // This variant uses the allocator from sanitizer_common, i.e. the one shared // with ThreadSanitizer and MemorySanitizer. // //===----------------------------------------------------------------------===// #include "asan_allocator.h" #include "asan_mapping.h" #include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_list.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_quarantine.h" #include "lsan/lsan_common.h" namespace __asan { // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. // We use adaptive redzones: for larger allocations, larger redzones are used. static u32 RZLog2Size(u32 rz_log) { CHECK_LT(rz_log, 8); return 16 << rz_log; } static u32 RZSize2Log(u32 rz_size) { CHECK_GE(rz_size, 16); CHECK_LE(rz_size, 2048); CHECK(IsPowerOfTwo(rz_size)); u32 res = Log2(rz_size) - 4; CHECK_EQ(rz_size, RZLog2Size(res)); return res; } static AsanAllocator &get_allocator(); // The memory chunk allocated from the underlying allocator looks like this: // L L L L L L H H U U U U U U R R // L -- left redzone words (0 or more bytes) // H -- ChunkHeader (16 bytes), which is also a part of the left redzone. // U -- user memory. // R -- right redzone (0 or more bytes) // ChunkBase consists of ChunkHeader and other bytes that overlap with user // memory. // If the left redzone is greater than the ChunkHeader size we store a magic // value in the first uptr word of the memory block and store the address of // ChunkBase in the next uptr. // M B L L L L L L L L L H H U U U U U U // | ^ // ---------------------| // M -- magic value kAllocBegMagic // B -- address of ChunkHeader pointing to the first 'H' static const uptr kAllocBegMagic = 0xCC6E96B9; struct ChunkHeader { // First 8 bytes. u32 chunk_state : 8; // Must be first. u32 alloc_tid : 24; u32 free_tid : 24; u32 from_memalign : 1; u32 alloc_type : 2; u32 rz_log : 3; u32 lsan_tag : 2; // Second 8 bytes. // This field is used for small sizes. For large sizes it is equal to // SizeClassMap::kMaxSize and the actual size is stored in the // SecondaryAllocator's metadata. u32 user_requested_size; u32 alloc_context_id; }; struct ChunkBase : ChunkHeader { // Header2, intersects with user memory. u32 free_context_id; }; static const uptr kChunkHeaderSize = sizeof(ChunkHeader); static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; COMPILER_CHECK(kChunkHeaderSize == 16); COMPILER_CHECK(kChunkHeader2Size <= 16); // Every chunk of memory allocated by this allocator can be in one of 3 states: // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. // CHUNK_ALLOCATED: the chunk is allocated and not yet freed. // CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. enum { CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. CHUNK_ALLOCATED = 2, CHUNK_QUARANTINE = 3 };
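// The three states above form a small lock-free protocol: chunk_state is the
// first byte of the header, so it can be flipped with a single-byte atomic
// compare-and-swap, which is how double-frees are caught (see
// AtomicallySetQuarantineFlagIfAllocated below). A standalone sketch of that
// protocol, using std::atomic instead of the sanitizer's own atomics
// (illustration only, not the runtime's code):
#include <atomic>

namespace chunk_state_sketch {
enum : unsigned char { kAvailable = 0, kAllocated = 2, kQuarantine = 3 };
struct Chunk { std::atomic<unsigned char> state{kAvailable}; };

// Returns true if this caller won the race to free the chunk; a concurrent
// second free() sees kQuarantine left in 'expected' and reports double-free.
inline bool TryQuarantine(Chunk &c) {
  unsigned char expected = kAllocated;
  return c.state.compare_exchange_strong(expected, kQuarantine,
                                         std::memory_order_acquire);
}
}  // namespace chunk_state_sketch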
struct AsanChunk: ChunkBase { uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } uptr UsedSize(bool locked_version = false) { if (user_requested_size != SizeClassMap::kMaxSize) return user_requested_size; return *reinterpret_cast<uptr *>( get_allocator().GetMetaData(AllocBeg(locked_version))); } void *AllocBeg(bool locked_version = false) { if (from_memalign) { if (locked_version) return get_allocator().GetBlockBeginFastLocked( reinterpret_cast<void *>(this)); return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this)); } return reinterpret_cast<void *>(Beg() - RZLog2Size(rz_log)); } bool AddrIsInside(uptr addr, bool locked_version = false) { return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); } }; struct QuarantineCallback { explicit QuarantineCallback(AllocatorCache *cache) : cache_(cache) { } void Recycle(AsanChunk *m) { CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); CHECK_NE(m->alloc_tid, kInvalidTid); CHECK_NE(m->free_tid, kInvalidTid); PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), kAsanHeapLeftRedzoneMagic); void *p = reinterpret_cast<void *>(m->AllocBeg()); if (p != m) { uptr *alloc_magic = reinterpret_cast<uptr *>(p); CHECK_EQ(alloc_magic[0], kAllocBegMagic); // Clear the magic value, as allocator internals may overwrite the // contents of deallocated chunk, confusing GetAsanChunk lookup. alloc_magic[0] = 0; CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m)); } // Statistics. AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.real_frees++; thread_stats.really_freed += m->UsedSize(); get_allocator().Deallocate(cache_, p); } void *Allocate(uptr size) { - return get_allocator().Allocate(cache_, size, 1, false); + return get_allocator().Allocate(cache_, size, 1); } void Deallocate(void *p) { get_allocator().Deallocate(cache_, p); } AllocatorCache *cache_; }; typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; typedef AsanQuarantine::Cache QuarantineCache; void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); // Statistics. AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mmaps++; thread_stats.mmaped += size; } void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { PoisonShadow(p, size, 0); // We are about to unmap a chunk of user memory. // Mark the corresponding shadow memory as not needed. FlushUnneededASanShadowMemory(p, size); // Statistics. AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.munmaps++; thread_stats.munmaped += size; } // We cannot use THREADLOCAL because it is not supported on some of the // platforms we care about (OSX 10.6, Android). // static THREADLOCAL AllocatorCache cache;
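// Because of that, the cache lives in AsanThreadLocalMallocStorage, and every
// allocation path below follows "use the current thread's cache, else lock a
// global fallback cache" (see fallback_mutex in struct Allocator further
// down). The shape of that pattern as a self-contained sketch, with standard
// C++ types standing in for the runtime's own mutex and cache types
// (hypothetical names, illustration only):
#include <mutex>

namespace cache_pattern_sketch {
struct Cache { /* per-thread free lists, stats, ... */ };
inline Cache fallback_cache;
inline std::mutex fallback_mutex;

template <class F> void WithCache(Cache *thread_cache, F &&use) {
  if (thread_cache) {
    use(*thread_cache);                   // fast path: no locking
  } else {
    std::lock_guard<std::mutex> l(fallback_mutex);
    use(fallback_cache);                  // slow path: early startup, etc.
  }
}
}  // namespace cache_pattern_sketch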
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { CHECK(ms); return &ms->allocator_cache; } QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { CHECK(ms); CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache); } void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { quarantine_size_mb = f->quarantine_size_mb; thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; min_redzone = f->redzone; max_redzone = f->max_redzone; may_return_null = cf->allocator_may_return_null; alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; } void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { f->quarantine_size_mb = quarantine_size_mb; f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; f->redzone = min_redzone; f->max_redzone = max_redzone; cf->allocator_may_return_null = may_return_null; f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms; }
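// The PoisonShadow calls throughout the Allocator below all build on ASan's
// shadow encoding: each 8-byte granule of application memory is described by
// one shadow byte. A sketch of the classic direct mapping (the real scale,
// offset, and magic values come from asan_mapping.h and differ per platform;
// the offset below is illustrative only):
#include <cstdint>

namespace shadow_sketch {
constexpr unsigned kScale = 3;             // log2(SHADOW_GRANULARITY)
constexpr uintptr_t kOffset = 0x7fff8000;  // illustrative offset only
inline uint8_t *MemToShadowSketch(uintptr_t addr) {
  return reinterpret_cast<uint8_t *>((addr >> kScale) + kOffset);
}
// Shadow byte meaning: 0 => all 8 bytes addressable; 1..7 => only that many
// leading bytes addressable (the partial right redzone written in Allocate()
// below); high values such as kAsanHeapLeftRedzoneMagic => fully poisoned.
}  // namespace shadow_sketch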
struct Allocator { static const uptr kMaxAllowedMallocSize = FIRST_32_SECOND_64(3UL << 30, 1ULL << 40); AsanAllocator allocator; AsanQuarantine quarantine; StaticSpinMutex fallback_mutex; AllocatorCache fallback_allocator_cache; QuarantineCache fallback_quarantine_cache; atomic_uint8_t rss_limit_exceeded; // ------------------- Options -------------------------- atomic_uint16_t min_redzone; atomic_uint16_t max_redzone; atomic_uint8_t alloc_dealloc_mismatch; // ------------------- Initialization ------------------------ explicit Allocator(LinkerInitialized) : quarantine(LINKER_INITIALIZED), fallback_quarantine_cache(LINKER_INITIALIZED) {} void CheckOptions(const AllocatorOptions &options) const { CHECK_GE(options.min_redzone, 16); CHECK_GE(options.max_redzone, options.min_redzone); CHECK_LE(options.max_redzone, 2048); CHECK(IsPowerOfTwo(options.min_redzone)); CHECK(IsPowerOfTwo(options.max_redzone)); } void SharedInitCode(const AllocatorOptions &options) { CheckOptions(options); quarantine.Init((uptr)options.quarantine_size_mb << 20, (uptr)options.thread_local_quarantine_size_kb << 10); atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, memory_order_release); atomic_store(&min_redzone, options.min_redzone, memory_order_release); atomic_store(&max_redzone, options.max_redzone, memory_order_release); } void Initialize(const AllocatorOptions &options) { - allocator.Init(options.may_return_null, options.release_to_os_interval_ms); + SetAllocatorMayReturnNull(options.may_return_null); + allocator.Init(options.release_to_os_interval_ms); SharedInitCode(options); } bool RssLimitExceeded() { return atomic_load(&rss_limit_exceeded, memory_order_relaxed); } void SetRssLimitExceeded(bool limit_exceeded) { atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); } void RePoisonChunk(uptr chunk) { // This could be a user-facing chunk (with redzones), or some internal // housekeeping chunk, like TransferBatch. Start by assuming the former. AsanChunk *ac = GetAsanChunk((void *)chunk); uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac); uptr beg = ac->Beg(); uptr end = ac->Beg() + ac->UsedSize(true); uptr chunk_end = chunk + allocated_size; if (chunk < beg && beg < end && end <= chunk_end && ac->chunk_state == CHUNK_ALLOCATED) { // Looks like a valid AsanChunk in use, poison redzones only. PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY); FastPoisonShadowPartialRightRedzone( end_aligned_down, end - end_aligned_down, chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); } else { // This is either not an AsanChunk or freed or quarantined AsanChunk. // In either case, poison everything. PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); } } void ReInitialize(const AllocatorOptions &options) { - allocator.SetMayReturnNull(options.may_return_null); + SetAllocatorMayReturnNull(options.may_return_null); allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); SharedInitCode(options); // Poison all existing allocations' redzones. if (CanPoisonMemory()) { allocator.ForceLock(); allocator.ForEachChunk( [](uptr chunk, void *alloc) { ((Allocator *)alloc)->RePoisonChunk(chunk); }, this); allocator.ForceUnlock(); } } void GetOptions(AllocatorOptions *options) const { options->quarantine_size_mb = quarantine.GetSize() >> 20; options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10; options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); - options->may_return_null = allocator.MayReturnNull(); + options->may_return_null = AllocatorMayReturnNull(); options->alloc_dealloc_mismatch = atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs(); } // -------------------- Helper methods. ------------------------- uptr ComputeRZLog(uptr user_requested_size) { u32 rz_log = user_requested_size <= 64 - 16 ? 0 : user_requested_size <= 128 - 32 ? 1 : user_requested_size <= 512 - 64 ? 2 : user_requested_size <= 4096 - 128 ? 3 : user_requested_size <= (1 << 14) - 256 ? 4 : user_requested_size <= (1 << 15) - 512 ? 5 : user_requested_size <= (1 << 16) - 1024 ? 6 : 7; u32 min_rz = atomic_load(&min_redzone, memory_order_acquire); u32 max_rz = atomic_load(&max_redzone, memory_order_acquire); return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz)); } // We have an address between two chunks, and we want to report just one. AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, AsanChunk *right_chunk) { // Prefer an allocated chunk over a freed chunk, and a freed chunk // over an available chunk. if (left_chunk->chunk_state != right_chunk->chunk_state) { if (left_chunk->chunk_state == CHUNK_ALLOCATED) return left_chunk; if (right_chunk->chunk_state == CHUNK_ALLOCATED) return right_chunk; if (left_chunk->chunk_state == CHUNK_QUARANTINE) return left_chunk; if (right_chunk->chunk_state == CHUNK_QUARANTINE) return right_chunk; } // Same chunk_state: choose based on offset.
sptr l_offset = 0, r_offset = 0; CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); if (l_offset < r_offset) return left_chunk; return right_chunk; }
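// ComputeRZLog above is easiest to sanity-check with concrete numbers. A
// standalone restatement of the policy (sketch only; same thresholds as the
// ternary chain in ComputeRZLog and the same 16 << rz_log decoding as
// RZLog2Size):
#include <cassert>
#include <cstdint>

namespace redzone_sketch {
inline uint32_t RzSize(uint32_t rz_log) { return 16u << rz_log; }
inline uint32_t PickRzLog(uint64_t s) {
  return s <= 64 - 16 ? 0 : s <= 128 - 32 ? 1 : s <= 512 - 64 ? 2 :
         s <= 4096 - 128 ? 3 : s <= (1 << 14) - 256 ? 4 :
         s <= (1 << 15) - 512 ? 5 : s <= (1 << 16) - 1024 ? 6 : 7;
}
inline void Check() {
  assert(RzSize(PickRzLog(40)) == 16);         // small request: 16-byte redzone
  assert(RzSize(PickRzLog(80)) == 32);
  assert(RzSize(PickRzLog(1 << 20)) == 2048);  // large request: capped at 2048
}
}  // namespace redzone_sketch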
// -------------------- Allocation/Deallocation routines --------------- void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, AllocType alloc_type, bool can_fill) { if (UNLIKELY(!asan_inited)) AsanInitFromRtl(); if (RssLimitExceeded()) - return allocator.ReturnNullOrDieOnOOM(); + return AsanAllocator::FailureHandler::OnOOM(); Flags &fl = *flags(); CHECK(stack); const uptr min_alignment = SHADOW_GRANULARITY; if (alignment < min_alignment) alignment = min_alignment; if (size == 0) { // We'd be happy to avoid allocating memory for zero-size requests, but // some programs/tests depend on this behavior and assume that malloc // would not return NULL even for zero-size allocations. Moreover, it // looks like operator new should never return NULL, and results of // consecutive "new" calls must be different even if the allocated size // is zero. size = 1; } CHECK(IsPowerOfTwo(alignment)); uptr rz_log = ComputeRZLog(size); uptr rz_size = RZLog2Size(rz_log); uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); uptr needed_size = rounded_size + rz_size; if (alignment > min_alignment) needed_size += alignment; bool using_primary_allocator = true; // If we are allocating from the secondary allocator, there will be no // automatic right redzone, so add the right redzone manually. if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { needed_size += rz_size; using_primary_allocator = false; } CHECK(IsAligned(needed_size, min_alignment)); if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", (void*)size); - return allocator.ReturnNullOrDieOnBadRequest(); + return AsanAllocator::FailureHandler::OnBadRequest(); } AsanThread *t = GetCurrentThread(); void *allocated; if (t) { AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); - allocated = - allocator.Allocate(cache, needed_size, 8, false); + allocated = allocator.Allocate(cache, needed_size, 8); } else { SpinMutexLock l(&fallback_mutex); AllocatorCache *cache = &fallback_allocator_cache; - allocated = - allocator.Allocate(cache, needed_size, 8, false); + allocated = allocator.Allocate(cache, needed_size, 8); } - - if (!allocated) return allocator.ReturnNullOrDieOnOOM(); + if (!allocated) + return nullptr; if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) { // Heap poisoning is enabled, but the allocator provides an unpoisoned // chunk. This is possible if CanPoisonMemory() was false for some // time, for example, due to flags()->start_disabled. // Anyway, poison the block before using it for anything else. uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); } uptr alloc_beg = reinterpret_cast<uptr>(allocated); uptr alloc_end = alloc_beg + needed_size; uptr beg_plus_redzone = alloc_beg + rz_size; uptr user_beg = beg_plus_redzone; if (!IsAligned(user_beg, alignment)) user_beg = RoundUpTo(user_beg, alignment); uptr user_end = user_beg + size; CHECK_LE(user_end, alloc_end); uptr chunk_beg = user_beg - kChunkHeaderSize; AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); m->alloc_type = alloc_type; m->rz_log = rz_log; u32 alloc_tid = t ? t->tid() : 0; m->alloc_tid = alloc_tid; CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? m->free_tid = kInvalidTid; m->from_memalign = user_beg != beg_plus_redzone; if (alloc_beg != chunk_beg) { CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg); reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic; reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg; } if (using_primary_allocator) { CHECK(size); m->user_requested_size = size; CHECK(allocator.FromPrimary(allocated)); } else { CHECK(!allocator.FromPrimary(allocated)); m->user_requested_size = SizeClassMap::kMaxSize; uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)); meta[0] = size; meta[1] = chunk_beg; } m->alloc_context_id = StackDepotPut(*stack); uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY); // Unpoison the bulk of the memory region. if (size_rounded_down_to_granularity) PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); // Deal with the end of the region if size is not aligned to granularity. if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { u8 *shadow = (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; } AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mallocs++; thread_stats.malloced += size; thread_stats.malloced_redzones += needed_size - size; if (needed_size > SizeClassMap::kMaxSize) thread_stats.malloc_large++; else thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; void *res = reinterpret_cast<void *>(user_beg); if (can_fill && fl.max_malloc_fill_size) { uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); REAL(memset)(res, fl.malloc_fill_byte, fill_size); } #if CAN_SANITIZE_LEAKS m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored : __lsan::kDirectlyLeaked; #endif // Must be the last mutation of metadata in this function. atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); ASAN_MALLOC_HOOK(res, size); return res; } // Set quarantine flag if chunk is allocated, issue ASan error report on // available and quarantined chunks. Return true on success, false otherwise. bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { u8 old_chunk_state = CHUNK_ALLOCATED; // Flip the chunk_state atomically to avoid race on double-free. if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state, CHUNK_QUARANTINE, memory_order_acquire)) { ReportInvalidFree(ptr, old_chunk_state, stack); // It's not safe to push a chunk in quarantine on invalid free. return false; } CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); return true; } // Expects the chunk to already be marked as quarantined by using // AtomicallySetQuarantineFlagIfAllocated. void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); CHECK_GE(m->alloc_tid, 0); if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. CHECK_EQ(m->free_tid, kInvalidTid); AsanThread *t = GetCurrentThread(); m->free_tid = t ? t->tid() : 0; m->free_context_id = StackDepotPut(*stack); Flags &fl = *flags(); if (fl.max_free_fill_size > 0) { // We have to skip the chunk header, it contains free_context_id. uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size; if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area. uptr size_to_fill = m->UsedSize() - kChunkHeader2Size; size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size); REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill); } } // Poison the region. PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), kAsanHeapFreeMagic); AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.frees++; thread_stats.freed += m->UsedSize(); // Push into quarantine. if (t) { AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); AllocatorCache *ac = GetAllocatorCache(ms); quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m, m->UsedSize()); } else { SpinMutexLock l(&fallback_mutex); AllocatorCache *ac = &fallback_allocator_cache; quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m, m->UsedSize()); } } void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack, AllocType alloc_type) { uptr p = reinterpret_cast<uptr>(ptr); if (p == 0) return; uptr chunk_beg = p - kChunkHeaderSize; AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); // On Windows, uninstrumented DLLs may allocate memory before ASan hooks // malloc. Don't report an invalid free in this case. if (SANITIZER_WINDOWS && !get_allocator().PointerIsMine(ptr)) { if (!IsSystemHeapAddress(p)) ReportFreeNotMalloced(p, stack); return; } ASAN_FREE_HOOK(ptr); // Must mark the chunk as quarantined before any changes to its metadata. // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag. if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; if (m->alloc_type != alloc_type) { if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, (AllocType)alloc_type); } } if (delete_size && flags()->new_delete_type_mismatch && delete_size != m->UsedSize()) { ReportNewDeleteSizeMismatch(p, delete_size, stack); } QuarantineChunk(m, ptr, stack, alloc_type); } void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { CHECK(old_ptr && new_size); uptr p = reinterpret_cast<uptr>(old_ptr); uptr chunk_beg = p - kChunkHeaderSize; AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.reallocs++; thread_stats.realloced += new_size; void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); if (new_ptr) { u8 chunk_state = m->chunk_state; if (chunk_state != CHUNK_ALLOCATED) ReportInvalidFree(old_ptr, chunk_state, stack); CHECK_NE(REAL(memcpy), nullptr); uptr memcpy_size = Min(new_size, m->UsedSize()); // If realloc() races with free(), we may start copying freed memory. // However, we will report racy double-free later anyway. REAL(memcpy)(new_ptr, old_ptr, memcpy_size); Deallocate(old_ptr, 0, stack, FROM_MALLOC); } return new_ptr; }
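// The Calloc path below rejects nmemb * size products that would overflow,
// before doing the multiplication. The predicate it relies on,
// CallocShouldReturnNullDueToOverflow, lives in sanitizer_common; the check
// itself reduces to one division, sketched here standalone (illustration
// only, not the runtime's implementation):
#include <cstddef>

namespace calloc_sketch {
inline bool MulWouldOverflow(size_t nmemb, size_t size) {
  // 0 * anything == 0 never overflows; otherwise compare nmemb against the
  // largest factor for which nmemb * size still fits in size_t.
  return size != 0 && nmemb > static_cast<size_t>(-1) / size;
}
}  // namespace calloc_sketch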
if (ptr && allocator.FromPrimary(ptr)) REAL(memset)(ptr, 0, nmemb * size); return ptr; } void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { if (chunk_state == CHUNK_QUARANTINE) ReportDoubleFree((uptr)ptr, stack); else ReportFreeNotMalloced((uptr)ptr, stack); } void CommitBack(AsanThreadLocalMallocStorage *ms) { AllocatorCache *ac = GetAllocatorCache(ms); quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac)); allocator.SwallowCache(ac); } // -------------------------- Chunk lookup ---------------------- // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). AsanChunk *GetAsanChunk(void *alloc_beg) { if (!alloc_beg) return nullptr; if (!allocator.FromPrimary(alloc_beg)) { uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); return m; } uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); if (alloc_magic[0] == kAllocBegMagic) return reinterpret_cast<AsanChunk *>(alloc_magic[1]); return reinterpret_cast<AsanChunk *>(alloc_beg); } AsanChunk *GetAsanChunkByAddr(uptr p) { void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); return GetAsanChunk(alloc_beg); } // Allocator must be locked when this function is called. AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { void *alloc_beg = allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); return GetAsanChunk(alloc_beg); } uptr AllocationSize(uptr p) { AsanChunk *m = GetAsanChunkByAddr(p); if (!m) return 0; if (m->chunk_state != CHUNK_ALLOCATED) return 0; if (m->Beg() != p) return 0; return m->UsedSize(); } AsanChunkView FindHeapChunkByAddress(uptr addr) { AsanChunk *m1 = GetAsanChunkByAddr(addr); if (!m1) return AsanChunkView(m1); sptr offset = 0; if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { // The address is in the chunk's left redzone, so maybe it is actually // a right buffer overflow from the other chunk to the left. // Search a bit to the left to see if there is another chunk. AsanChunk *m2 = nullptr; for (uptr l = 1; l < GetPageSizeCached(); l++) { m2 = GetAsanChunkByAddr(addr - l); if (m2 == m1) continue; // Still the same chunk.
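// [Editor's aside] Calloc() above delegates the nmemb*size wrap-around test to
// CallocShouldReturnNullDueToOverflow(). The conventional form of such a check, as a
// sketch (hypothetical helper name, not the runtime's exact predicate):
#include <cstdint>
#include <cstdio>
// True if a * b overflows uint64_t; calloc must then fail instead of
// silently allocating a truncated block.
static bool MulWouldOverflow(uint64_t a, uint64_t b) {
  return b != 0 && a > UINT64_MAX / b;
}
int main() {
  printf("%d\n", MulWouldOverflow(1ull << 16, 1ull << 16));  // 0: fits
  printf("%d\n", MulWouldOverflow(1ull << 33, 1ull << 33));  // 1: wraps
}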
break; } if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) m1 = ChooseChunk(addr, m2, m1); } return AsanChunkView(m1); } void PrintStats() { allocator.PrintStats(); quarantine.PrintStats(); } void ForceLock() { allocator.ForceLock(); fallback_mutex.Lock(); } void ForceUnlock() { fallback_mutex.Unlock(); allocator.ForceUnlock(); } }; static Allocator instance(LINKER_INITIALIZED); static AsanAllocator &get_allocator() { return instance.allocator; } bool AsanChunkView::IsValid() const { return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE; } bool AsanChunkView::IsAllocated() const { return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED; } bool AsanChunkView::IsQuarantined() const { return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE; } uptr AsanChunkView::Beg() const { return chunk_->Beg(); } uptr AsanChunkView::End() const { return Beg() + UsedSize(); } uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); } uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; } uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; } AllocType AsanChunkView::GetAllocType() const { return (AllocType)chunk_->alloc_type; } static StackTrace GetStackTraceFromId(u32 id) { CHECK(id); StackTrace res = StackDepotGet(id); CHECK(res.trace); return res; } u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; } u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; } StackTrace AsanChunkView::GetAllocStack() const { return GetStackTraceFromId(GetAllocStackId()); } StackTrace AsanChunkView::GetFreeStack() const { return GetStackTraceFromId(GetFreeStackId()); } void InitializeAllocator(const AllocatorOptions &options) { instance.Initialize(options); } void ReInitializeAllocator(const AllocatorOptions &options) { instance.ReInitialize(options); } void GetAllocatorOptions(AllocatorOptions *options) { instance.GetOptions(options); } AsanChunkView FindHeapChunkByAddress(uptr addr) { return instance.FindHeapChunkByAddress(addr); } AsanChunkView FindHeapChunkByAllocBeg(uptr addr) { return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void *>(addr))); } void AsanThreadLocalMallocStorage::CommitBack() { instance.CommitBack(this); } void PrintInternalAllocatorStats() { instance.PrintStats(); } void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, AllocType alloc_type) { return instance.Allocate(size, alignment, stack, alloc_type, true); } void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { instance.Deallocate(ptr, 0, stack, alloc_type); } void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, AllocType alloc_type) { instance.Deallocate(ptr, size, stack, alloc_type); } void *asan_malloc(uptr size, BufferedStackTrace *stack) { return instance.Allocate(size, 8, stack, FROM_MALLOC, true); } void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { return instance.Calloc(nmemb, size, stack); } void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { if (!p) return instance.Allocate(size, 8, stack, FROM_MALLOC, true); if (size == 0) { if (flags()->allocator_frees_and_returns_null_on_realloc_zero) { instance.Deallocate(p, 0, stack, FROM_MALLOC); return nullptr; } // Allocate a size of 1 if we shouldn't free() on Realloc to 0 size = 1; } return instance.Reallocate(p, size, stack); } void *asan_valloc(uptr size, BufferedStackTrace *stack) { return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); } void *asan_pvalloc(uptr size,
BufferedStackTrace *stack) { uptr PageSize = GetPageSizeCached(); size = RoundUpTo(size, PageSize); if (size == 0) { // pvalloc(0) should allocate one page. size = PageSize; } return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true); } int asan_posix_memalign(void **memptr, uptr alignment, uptr size, BufferedStackTrace *stack) { void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); CHECK(IsAligned((uptr)ptr, alignment)); *memptr = ptr; return 0; } uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { if (!ptr) return 0; uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr)); if (flags()->check_malloc_usable_size && (usable_size == 0)) { GET_STACK_TRACE_FATAL(pc, bp); ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); } return usable_size; } uptr asan_mz_size(const void *ptr) { return instance.AllocationSize(reinterpret_cast<uptr>(ptr)); } void asan_mz_force_lock() { instance.ForceLock(); } void asan_mz_force_unlock() { instance.ForceUnlock(); } void AsanSoftRssLimitExceededCallback(bool limit_exceeded) { instance.SetRssLimitExceeded(limit_exceeded); } } // namespace __asan // --- Implementation of LSan-specific functions --- {{{1 namespace __lsan { void LockAllocator() { __asan::get_allocator().ForceLock(); } void UnlockAllocator() { __asan::get_allocator().ForceUnlock(); } void GetAllocatorGlobalRange(uptr *begin, uptr *end) { *begin = (uptr)&__asan::get_allocator(); *end = *begin + sizeof(__asan::get_allocator()); } uptr PointsIntoChunk(void* p) { uptr addr = reinterpret_cast<uptr>(p); __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); if (!m) return 0; uptr chunk = m->Beg(); if (m->chunk_state != __asan::CHUNK_ALLOCATED) return 0; if (m->AddrIsInside(addr, /*locked_version=*/true)) return chunk; if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), addr)) return chunk; return 0; } uptr GetUserBegin(uptr chunk) { __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); CHECK(m); return m->Beg(); } LsanMetadata::LsanMetadata(uptr chunk) { metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); } bool LsanMetadata::allocated() const { __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); return m->chunk_state == __asan::CHUNK_ALLOCATED; } ChunkTag LsanMetadata::tag() const { __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); return static_cast<ChunkTag>(m->lsan_tag); } void LsanMetadata::set_tag(ChunkTag value) { __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); m->lsan_tag = value; } uptr LsanMetadata::requested_size() const { __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); return m->UsedSize(/*locked_version=*/true); } u32 LsanMetadata::stack_trace_id() const { __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); return m->alloc_context_id; } void ForEachChunk(ForEachChunkCallback callback, void *arg) { __asan::get_allocator().ForEachChunk(callback, arg); } IgnoreObjectResult IgnoreObjectLocked(const void *p) { uptr addr = reinterpret_cast<uptr>(p); __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); if (!m) return kIgnoreObjectInvalid; if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { if (m->lsan_tag == kIgnored) return kIgnoreObjectAlreadyIgnored; m->lsan_tag = __lsan::kIgnored; return kIgnoreObjectSuccess; } else { return kIgnoreObjectInvalid; } } } // namespace __lsan // ---------------------- Interface ---------------- {{{1 using
namespace __asan; // NOLINT // ASan allocator doesn't reserve extra bytes, so normally we would // just return "size". We don't want to expose our redzone sizes, etc here. uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } int __sanitizer_get_ownership(const void *p) { uptr ptr = reinterpret_cast<uptr>(p); return instance.AllocationSize(ptr) > 0; } uptr __sanitizer_get_allocated_size(const void *p) { if (!p) return 0; uptr ptr = reinterpret_cast<uptr>(p); uptr allocated_size = instance.AllocationSize(ptr); // Die if p is not malloced or if it is already freed. if (allocated_size == 0) { GET_STACK_TRACE_FATAL_HERE; ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); } return allocated_size; } #if !SANITIZER_SUPPORTS_WEAK_HOOKS // Provide default (no-op) implementation of malloc hooks. SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr, uptr size) { (void)ptr; (void)size; } SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) { (void)ptr; } #endif diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc index f751b6184c6b..2e477f258b8d 100644 --- a/lib/asan/asan_report.cc +++ b/lib/asan/asan_report.cc @@ -1,501 +1,509 @@ //===-- asan_report.cc ----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // This file contains error reporting code. //===----------------------------------------------------------------------===// #include "asan_errors.h" #include "asan_flags.h" #include "asan_descriptions.h" #include "asan_internal.h" #include "asan_mapping.h" #include "asan_report.h" #include "asan_scariness_score.h" #include "asan_stack.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_symbolizer.h" namespace __asan { // -------------------- User-specified callbacks ----------------- {{{1 static void (*error_report_callback)(const char*); static char *error_message_buffer = nullptr; static uptr error_message_buffer_pos = 0; static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED); static const unsigned kAsanBuggyPcPoolSize = 25; static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize]; void AppendToErrorMessageBuffer(const char *buffer) { BlockingMutexLock l(&error_message_buf_mutex); if (!error_message_buffer) { error_message_buffer = (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__); error_message_buffer_pos = 0; } uptr length = internal_strlen(buffer); RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos); uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos; internal_strncpy(error_message_buffer + error_message_buffer_pos, buffer, remaining); error_message_buffer[kErrorMessageBufferSize - 1] = '\0'; // FIXME: reallocate the buffer instead of truncating the message.
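// [Editor's aside] AppendToErrorMessageBuffer() above appends into one fixed
// mmap-ed buffer and truncates rather than growing (see its FIXME). The core
// bookkeeping, as a standalone sketch with hypothetical names and a tiny buffer:
#include <algorithm>
#include <cstdio>
#include <cstring>
static const size_t kBufSize = 32;
static char g_buf[kBufSize];
static size_t g_pos = 0;
// Appends as much of |msg| as fits, keeping g_buf NUL-terminated throughout.
static void Append(const char *msg) {
  size_t remaining = kBufSize - g_pos;
  strncpy(g_buf + g_pos, msg, remaining);
  g_buf[kBufSize - 1] = '\0';
  g_pos += std::min(remaining, strlen(msg));
}
int main() {
  Append("AddressSanitizer: ");
  Append("heap-use-after-free ...");
  printf("%s\n", g_buf);  // overlong input is silently truncated to 31 chars
}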
error_message_buffer_pos += Min(remaining, length); } // ---------------------- Helper functions ----------------------- {{{1 void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte, bool in_shadow, const char *after) { Decorator d; str->append("%s%s%x%x%s%s", before, in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4, byte & 15, in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after); } static void PrintZoneForPointer(uptr ptr, uptr zone_ptr, const char *zone_name) { if (zone_ptr) { if (zone_name) { Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", ptr, zone_ptr, zone_name); } else { Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n", ptr, zone_ptr); } } else { Printf("malloc_zone_from_ptr(%p) = 0\n", ptr); } } // ---------------------- Address Descriptions ------------------- {{{1 bool ParseFrameDescription(const char *frame_descr, InternalMmapVector<StackVarDescr> *vars) { CHECK(frame_descr); char *p; // This string is created by the compiler and has the following form: // "n alloc_1 alloc_2 ... alloc_n" // where alloc_i looks like "offset size len ObjectName" // or "offset size len ObjectName:line". uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10); if (n_objects == 0) return false; for (uptr i = 0; i < n_objects; i++) { uptr beg = (uptr)internal_simple_strtoll(p, &p, 10); uptr size = (uptr)internal_simple_strtoll(p, &p, 10); uptr len = (uptr)internal_simple_strtoll(p, &p, 10); if (beg == 0 || size == 0 || *p != ' ') { return false; } p++; char *colon_pos = internal_strchr(p, ':'); uptr line = 0; uptr name_len = len; if (colon_pos != nullptr && colon_pos < p + len) { name_len = colon_pos - p; line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10); } StackVarDescr var = {beg, size, p, name_len, line}; vars->push_back(var); p += len; } return true; } // -------------------- Different kinds of reports ----------------- {{{1 // Use ScopedInErrorReport to run common actions just before and // immediately after printing an error report. class ScopedInErrorReport { public: explicit ScopedInErrorReport(bool fatal = false) { halt_on_error_ = fatal || flags()->halt_on_error; if (lock_.TryLock()) { StartReporting(); return; } // ASan found two bugs in different threads simultaneously. u32 current_tid = GetCurrentTidOrInvalid(); if (reporting_thread_tid_ == current_tid || reporting_thread_tid_ == kInvalidTid) { // This is either an async signal or a nested error during error reporting. // Fail simply to avoid deadlocks in Report(). // Can't use Report() here because of potential deadlocks // in nested signal handlers. const char msg[] = "AddressSanitizer: nested bug in the same thread, " "aborting.\n"; WriteToFile(kStderrFd, msg, sizeof(msg)); internal__exit(common_flags()->exitcode); } if (halt_on_error_) { // Do not print more than one report, otherwise they will mix up. // Error reporting functions shouldn't return in this situation, as // they are effectively no-returns. Report("AddressSanitizer: while reporting a bug found another one. " "Ignoring.\n"); // Sleep long enough to make sure that the thread which started // to print an error report will finish doing it. SleepForSeconds(Max(100, flags()->sleep_before_dying + 1)); // If we're still not dead for some reason, use raw _exit() instead of // Die() to bypass any additional checks.
internal__exit(common_flags()->exitcode); } else { // The other thread will eventually finish reporting, // so it's safe to wait. lock_.Lock(); } StartReporting(); } ~ScopedInErrorReport() { ASAN_ON_ERROR(); if (current_error_.IsValid()) current_error_.Print(); // Make sure the current thread is announced. DescribeThread(GetCurrentThread()); // We may want to grab this lock again when printing stats. asanThreadRegistry().Unlock(); // Print memory stats. if (flags()->print_stats) __asan_print_accumulated_stats(); if (common_flags()->print_cmdline) PrintCmdline(); if (common_flags()->print_module_map == 2) PrintModuleMap(); // Copy the message buffer so that we can start logging without holding a // lock that gets acquired during printing. InternalScopedBuffer<char> buffer_copy(kErrorMessageBufferSize); { BlockingMutexLock l(&error_message_buf_mutex); internal_memcpy(buffer_copy.data(), error_message_buffer, kErrorMessageBufferSize); } LogFullErrorReport(buffer_copy.data()); if (error_report_callback) { error_report_callback(buffer_copy.data()); } + if (halt_on_error_ && common_flags()->abort_on_error) { + // On Android the message is truncated to 512 characters. + // FIXME: implement "compact" error format, possibly without, or with + // highly compressed stack traces? + // FIXME: or just use the summary line as abort message? + SetAbortMessage(buffer_copy.data()); + } + // In halt_on_error = false mode, reset the current error object (before // unlocking). if (!halt_on_error_) internal_memset(&current_error_, 0, sizeof(current_error_)); CommonSanitizerReportMutex.Unlock(); reporting_thread_tid_ = kInvalidTid; lock_.Unlock(); if (halt_on_error_) { Report("ABORTING\n"); Die(); } } void ReportError(const ErrorDescription &description) { // Can only report one error per ScopedInErrorReport. CHECK_EQ(current_error_.kind, kErrorKindInvalid); current_error_ = description; } static ErrorDescription &CurrentError() { return current_error_; } private: void StartReporting() { // Make sure the registry and sanitizer report mutexes are locked while // we're printing an error report. // We can lock them only here to avoid self-deadlock in case of // recursive reports. asanThreadRegistry().Lock(); CommonSanitizerReportMutex.Lock(); reporting_thread_tid_ = GetCurrentTidOrInvalid(); Printf("====================================================" "=============\n"); } static StaticSpinMutex lock_; static u32 reporting_thread_tid_; // Error currently being reported. This enables the destructor to interact // with the debugger and point it to an error description.
static ErrorDescription current_error_; bool halt_on_error_; }; StaticSpinMutex ScopedInErrorReport::lock_; u32 ScopedInErrorReport::reporting_thread_tid_ = kInvalidTid; ErrorDescription ScopedInErrorReport::current_error_; void ReportStackOverflow(const SignalContext &sig) { ScopedInErrorReport in_report(/*fatal*/ true); ErrorStackOverflow error(GetCurrentTidOrInvalid(), sig); in_report.ReportError(error); } void ReportDeadlySignal(int signo, const SignalContext &sig) { ScopedInErrorReport in_report(/*fatal*/ true); ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig, signo); in_report.ReportError(error); } void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { ScopedInErrorReport in_report; ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr); in_report.ReportError(error); } void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, BufferedStackTrace *free_stack) { ScopedInErrorReport in_report; ErrorNewDeleteSizeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, delete_size); in_report.ReportError(error); } void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) { ScopedInErrorReport in_report; ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr); in_report.ReportError(error); } void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, AllocType alloc_type, AllocType dealloc_type) { ScopedInErrorReport in_report; ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, alloc_type, dealloc_type); in_report.ReportError(error); } void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) { ScopedInErrorReport in_report; ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr); in_report.ReportError(error); } void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, BufferedStackTrace *stack) { ScopedInErrorReport in_report; ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr); in_report.ReportError(error); } void ReportStringFunctionMemoryRangesOverlap(const char *function, const char *offset1, uptr length1, const char *offset2, uptr length2, BufferedStackTrace *stack) { ScopedInErrorReport in_report; ErrorStringFunctionMemoryRangesOverlap error( GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2, length2, function); in_report.ReportError(error); } void ReportStringFunctionSizeOverflow(uptr offset, uptr size, BufferedStackTrace *stack) { ScopedInErrorReport in_report; ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset, size); in_report.ReportError(error); } void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, uptr old_mid, uptr new_mid, BufferedStackTrace *stack) { ScopedInErrorReport in_report; ErrorBadParamsToAnnotateContiguousContainer error( GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid); in_report.ReportError(error); } void ReportODRViolation(const __asan_global *g1, u32 stack_id1, const __asan_global *g2, u32 stack_id2) { ScopedInErrorReport in_report; ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2, stack_id2); in_report.ReportError(error); } // ----------------------- CheckForInvalidPointerPair ----------- {{{1 static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) { ScopedInErrorReport in_report; ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2); in_report.ReportError(error); } static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) { if 
(!flags()->detect_invalid_pointer_pairs) return; uptr a1 = reinterpret_cast<uptr>(p1); uptr a2 = reinterpret_cast<uptr>(p2); AsanChunkView chunk1 = FindHeapChunkByAddress(a1); AsanChunkView chunk2 = FindHeapChunkByAddress(a2); bool valid1 = chunk1.IsAllocated(); bool valid2 = chunk2.IsAllocated(); if (!valid1 || !valid2 || !chunk1.Eq(chunk2)) { GET_CALLER_PC_BP_SP; return ReportInvalidPointerPair(pc, bp, sp, a1, a2); } } // ----------------------- Mac-specific reports ----------------- {{{1 void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name, BufferedStackTrace *stack) { ScopedInErrorReport in_report; Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n" "This is an unrecoverable problem, exiting now.\n", addr); PrintZoneForPointer(addr, zone_ptr, zone_name); stack->Print(); DescribeAddressIfHeap(addr); } // -------------- SuppressErrorReport -------------- {{{1 // Avoid duplicating error reports in ASan recovery mode. static bool SuppressErrorReport(uptr pc) { if (!common_flags()->suppress_equal_pcs) return false; for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) { uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]); if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp, pc, memory_order_relaxed)) return false; if (cmp == pc) return true; } Die(); } void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write, uptr access_size, u32 exp, bool fatal) { if (!fatal && SuppressErrorReport(pc)) return; ENABLE_FRAME_POINTER; // Optimization experiments. // The experiments can be used to evaluate potential optimizations that remove // instrumentation (assess false negatives). Instead of completely removing // some instrumentation, the compiler can emit special calls into the runtime // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass // a mask of experiments (exp). // The reaction to a non-zero value of exp is to be defined. (void)exp; ScopedInErrorReport in_report(fatal); ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write, access_size); in_report.ReportError(error); } } // namespace __asan // --------------------------- Interface --------------------- {{{1 using namespace __asan; // NOLINT void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, uptr access_size, u32 exp) { ENABLE_FRAME_POINTER; bool fatal = flags()->halt_on_error; ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal); } void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) { BlockingMutexLock l(&error_message_buf_mutex); error_report_callback = callback; } void __asan_describe_address(uptr addr) { // Thread registry must be locked while we're describing an address.
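// [Editor's aside] SuppressErrorReport() above dedupes recovered errors by PC using a
// small lock-free pool: each slot is claimed at most once via compare-and-swap, and an
// exhausted pool is treated as fatal. The same first-fit scheme as a sketch
// (hypothetical names; std::atomic instead of the sanitizer atomics):
#include <atomic>
#include <cstdint>
#include <cstdio>
static const unsigned kPoolSize = 4;
static std::atomic<uintptr_t> g_pc_pool[kPoolSize];
// True if |pc| was recorded earlier (suppress); false if we just recorded it.
static bool SeenBefore(uintptr_t pc) {
  for (unsigned i = 0; i < kPoolSize; i++) {
    uintptr_t cur = g_pc_pool[i].load(std::memory_order_relaxed);
    if (cur == 0 && g_pc_pool[i].compare_exchange_strong(
                        cur, pc, std::memory_order_relaxed))
      return false;  // claimed an empty slot: first sighting of this pc
    if (cur == pc)   // either loaded, or refreshed by the failed CAS above
      return true;
  }
  return true;  // pool exhausted; the real runtime calls Die() here instead
}
int main() {
  printf("%d %d %d\n", SeenBefore(0x1234), SeenBefore(0x1234), SeenBefore(0x5678));
  // prints: 0 1 0
}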
asanThreadRegistry().Lock(); PrintAddressDescription(addr, 1, ""); asanThreadRegistry().Unlock(); } int __asan_report_present() { return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid; } uptr __asan_get_report_pc() { if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) return ScopedInErrorReport::CurrentError().Generic.pc; return 0; } uptr __asan_get_report_bp() { if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) return ScopedInErrorReport::CurrentError().Generic.bp; return 0; } uptr __asan_get_report_sp() { if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) return ScopedInErrorReport::CurrentError().Generic.sp; return 0; } uptr __asan_get_report_address() { ErrorDescription &err = ScopedInErrorReport::CurrentError(); if (err.kind == kErrorKindGeneric) return err.Generic.addr_description.Address(); else if (err.kind == kErrorKindDoubleFree) return err.DoubleFree.addr_description.addr; return 0; } int __asan_get_report_access_type() { if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) return ScopedInErrorReport::CurrentError().Generic.is_write; return 0; } uptr __asan_get_report_access_size() { if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) return ScopedInErrorReport::CurrentError().Generic.access_size; return 0; } const char *__asan_get_report_description() { if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) return ScopedInErrorReport::CurrentError().Generic.bug_descr; return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription(); } extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_ptr_sub(void *a, void *b) { CheckForInvalidPointerPair(a, b); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_ptr_cmp(void *a, void *b) { CheckForInvalidPointerPair(a, b); } } // extern "C" // Provide a default implementation of __asan_on_error that does nothing // and may be overridden by the user. SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {} diff --git a/lib/asan/scripts/asan_device_setup b/lib/asan/scripts/asan_device_setup index c807df3cd252..79ac2f916569 100755 --- a/lib/asan/scripts/asan_device_setup +++ b/lib/asan/scripts/asan_device_setup @@ -1,443 +1,443 @@ #!/bin/bash #===- lib/asan/scripts/asan_device_setup -----------------------------------===# # # The LLVM Compiler Infrastructure # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # # Prepare Android device to run ASan applications. # #===------------------------------------------------------------------------===# set -e HERE="$(cd "$(dirname "$0")" && pwd)" revert=no extra_options= device= lib= use_su=0 function usage { echo "usage: $0 [--revert] [--device device-id] [--lib path] [--extra-options options]" echo " --revert: Uninstall ASan from the device." echo " --lib: Path to ASan runtime library." echo " --extra-options: Extra ASAN_OPTIONS." echo " --device: Install to the given device. Use 'adb devices' to find" echo " device-id." echo " --use-su: Use 'su -c' prefix for every adb command instead of using" echo " 'adb root' once."
echo exit 1 } function adb_push { if [ $use_su -eq 0 ]; then $ADB push "$1" "$2" else local FILENAME=$(basename $1) $ADB push "$1" "/data/local/tmp/$FILENAME" $ADB shell su -c "rm \\\"$2/$FILENAME\\\"" >&/dev/null $ADB shell su -c "cat \\\"/data/local/tmp/$FILENAME\\\" > \\\"$2/$FILENAME\\\"" $ADB shell su -c "rm \\\"/data/local/tmp/$FILENAME\\\"" fi } function adb_remount { if [ $use_su -eq 0 ]; then $ADB remount else local STORAGE=`$ADB shell mount | grep /system | cut -d ' ' -f1` if [ "$STORAGE" != "" ]; then echo Remounting $STORAGE at /system $ADB shell su -c "mount -o remount,rw $STORAGE /system" else echo Failed to get storage device name for "/system" mount point fi fi } function adb_shell { if [ $use_su -eq 0 ]; then $ADB shell $@ else $ADB shell su -c "$*" fi } function adb_root { if [ $use_su -eq 0 ]; then $ADB root fi } function adb_wait_for_device { $ADB wait-for-device } function adb_pull { if [ $use_su -eq 0 ]; then $ADB pull "$1" "$2" else local FILENAME=$(basename $1) $ADB shell rm "/data/local/tmp/$FILENAME" >&/dev/null $ADB shell su -c "[ -f \\\"$1\\\" ] && cat \\\"$1\\\" > \\\"/data/local/tmp/$FILENAME\\\" && chown root.shell \\\"/data/local/tmp/$FILENAME\\\" && chmod 755 \\\"/data/local/tmp/$FILENAME\\\"" && $ADB pull "/data/local/tmp/$FILENAME" "$2" >&/dev/null && $ADB shell "rm \"/data/local/tmp/$FILENAME\"" fi } function get_device_arch { # OUT OUT64 local _outvar=$1 local _outvar64=$2 local _ABI=$(adb_shell getprop ro.product.cpu.abi) local _ARCH= local _ARCH64= if [[ $_ABI == x86* ]]; then _ARCH=i686 elif [[ $_ABI == armeabi* ]]; then _ARCH=arm elif [[ $_ABI == arm64-v8a* ]]; then _ARCH=arm _ARCH64=aarch64 else echo "Unrecognized device ABI: $_ABI" exit 1 fi eval $_outvar=\$_ARCH eval $_outvar64=\$_ARCH64 } while [[ $# > 0 ]]; do case $1 in --revert) revert=yes ;; --extra-options) shift if [[ $# == 0 ]]; then echo "--extra-options requires an argument." exit 1 fi extra_options="$1" ;; --lib) shift if [[ $# == 0 ]]; then echo "--lib requires an argument." exit 1 fi lib="$1" ;; --device) shift if [[ $# == 0 ]]; then echo "--device requires an argument." exit 1 fi device="$1" ;; --use-su) use_su=1 ;; *) usage ;; esac shift done ADB=${ADB:-adb} if [[ x$device != x ]]; then ADB="$ADB -s $device" fi if [ $use_su -eq 1 ]; then # Test if 'su' is present on the device SU_TEST_OUT=`$ADB shell su -c "echo foo" 2>&1 | sed 's/\r$//'` if [ $? != 0 -o "$SU_TEST_OUT" != "foo" ]; then echo "ERROR: Cannot use 'su -c':" echo "$ adb shell su -c \"echo foo\"" echo $SU_TEST_OUT echo "Check that 'su' binary is correctly installed on the device or omit" echo " --use-su flag" exit 1 fi fi echo '>> Remounting /system rw' adb_wait_for_device adb_root adb_wait_for_device adb_remount adb_wait_for_device get_device_arch ARCH ARCH64 echo "Target architecture: $ARCH" ASAN_RT="libclang_rt.asan-$ARCH-android.so" if [[ -n $ARCH64 ]]; then echo "Target architecture: $ARCH64" ASAN_RT64="libclang_rt.asan-$ARCH64-android.so" fi if [[ x$revert == xyes ]]; then echo '>> Uninstalling ASan' if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then echo '>> Pre-L device detected.' adb_shell mv /system/bin/app_process.real /system/bin/app_process adb_shell rm /system/bin/asanwrapper elif ! adb_shell ls -l /system/bin/app_process64.real | grep -o 'No such file or directory' >&/dev/null; then # 64-bit installation. 
adb_shell mv /system/bin/app_process32.real /system/bin/app_process32 adb_shell mv /system/bin/app_process64.real /system/bin/app_process64 adb_shell rm /system/bin/asanwrapper adb_shell rm /system/bin/asanwrapper64 else # 32-bit installation. adb_shell rm /system/bin/app_process.wrap adb_shell rm /system/bin/asanwrapper adb_shell rm /system/bin/app_process adb_shell ln -s /system/bin/app_process32 /system/bin/app_process fi echo '>> Restarting shell' adb_shell stop adb_shell start # Remove the library on the last step to give a chance to the 'su' binary to # be executed without problem. adb_shell rm /system/lib/$ASAN_RT echo '>> Done' exit 0 fi if [[ -d "$lib" ]]; then ASAN_RT_PATH="$lib" elif [[ -f "$lib" && "$lib" == *"$ASAN_RT" ]]; then ASAN_RT_PATH=$(dirname "$lib") elif [[ -f "$HERE/$ASAN_RT" ]]; then ASAN_RT_PATH="$HERE" elif [[ $(basename "$HERE") == "bin" ]]; then # We could be in the toolchain's base directory. # Consider ../lib, ../lib/asan, ../lib/linux, # ../lib/clang/$VERSION/lib/linux, and ../lib64/clang/$VERSION/lib/linux. P=$(ls "$HERE"/../lib/"$ASAN_RT" \ "$HERE"/../lib/asan/"$ASAN_RT" \ "$HERE"/../lib/linux/"$ASAN_RT" \ "$HERE"/../lib/clang/*/lib/linux/"$ASAN_RT" \ "$HERE"/../lib64/clang/*/lib/linux/"$ASAN_RT" 2>/dev/null | sort | tail -1) if [[ -n "$P" ]]; then ASAN_RT_PATH="$(dirname "$P")" fi fi if [[ -z "$ASAN_RT_PATH" || ! -f "$ASAN_RT_PATH/$ASAN_RT" ]]; then echo ">> ASan runtime library not found" exit 1 fi if [[ -n "$ASAN_RT64" ]]; then if [[ -z "$ASAN_RT_PATH" || ! -f "$ASAN_RT_PATH/$ASAN_RT64" ]]; then echo ">> ASan runtime library not found" exit 1 fi fi TMPDIRBASE=$(mktemp -d) TMPDIROLD="$TMPDIRBASE/old" TMPDIR="$TMPDIRBASE/new" mkdir "$TMPDIROLD" RELEASE=$(adb_shell getprop ro.build.version.release) PRE_L=0 if echo "$RELEASE" | grep '^4\.' >&/dev/null; then PRE_L=1 fi if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then if adb_pull /system/bin/app_process.real /dev/null >&/dev/null; then echo '>> Old-style ASan installation detected. Reverting.' adb_shell mv /system/bin/app_process.real /system/bin/app_process fi echo '>> Pre-L device detected. Setting up app_process symlink.' adb_shell mv /system/bin/app_process /system/bin/app_process32 adb_shell ln -s /system/bin/app_process32 /system/bin/app_process fi echo '>> Copying files from the device' if [[ -n "$ASAN_RT64" ]]; then adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true adb_pull /system/lib64/"$ASAN_RT64" "$TMPDIROLD" || true adb_pull /system/bin/app_process32 "$TMPDIROLD" || true adb_pull /system/bin/app_process32.real "$TMPDIROLD" || true adb_pull /system/bin/app_process64 "$TMPDIROLD" || true adb_pull /system/bin/app_process64.real "$TMPDIROLD" || true adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true adb_pull /system/bin/asanwrapper64 "$TMPDIROLD" || true else adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true adb_pull /system/bin/app_process32 "$TMPDIROLD" || true adb_pull /system/bin/app_process.wrap "$TMPDIROLD" || true adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true fi cp -r "$TMPDIROLD" "$TMPDIR" if [[ -f "$TMPDIR/app_process.wrap" || -f "$TMPDIR/app_process64.real" ]]; then echo ">> Previous installation detected" else echo ">> New installation" fi echo '>> Generating wrappers' cp "$ASAN_RT_PATH/$ASAN_RT" "$TMPDIR/" if [[ -n "$ASAN_RT64" ]]; then cp "$ASAN_RT_PATH/$ASAN_RT64" "$TMPDIR/" fi ASAN_OPTIONS=start_deactivated=1,malloc_context_size=0 # The name of a symlink to libclang_rt.asan-$ARCH-android.so used in LD_PRELOAD. 
# The idea is to have the same name in lib and lib64 to keep it from falling # apart when a 64-bit process spawns a 32-bit one, inheriting the environment. ASAN_RT_SYMLINK=symlink-to-libclang_rt.asan function generate_zygote_wrapper { # from, to local _from=$1 local _to=$2 if [[ PRE_L -eq 0 ]]; then # LD_PRELOAD parsing is broken in N if it starts with ":". Luckily, it is # unset in the system environment since L. local _ld_preload=$ASAN_RT_SYMLINK else local _ld_preload=\$LD_PRELOAD:$ASAN_RT_SYMLINK fi cat <<EOF >"$TMPDIR/$_from" #!/system/bin/sh-from-zygote ASAN_OPTIONS=$ASAN_OPTIONS \\ ASAN_ACTIVATION_OPTIONS=include_if_exists=/data/local/tmp/asan.options.%b \\ LD_PRELOAD=$_ld_preload \\ exec $_to \$@ EOF } if [[ x$extra_options != x ]] ; then ASAN_OPTIONS="$ASAN_OPTIONS,$extra_options" fi # Zygote wrapper. if [[ -f "$TMPDIR/app_process64" ]]; then # A 64-bit device. if [[ ! -f "$TMPDIR/app_process64.real" ]]; then # New installation. mv "$TMPDIR/app_process32" "$TMPDIR/app_process32.real" mv "$TMPDIR/app_process64" "$TMPDIR/app_process64.real" fi generate_zygote_wrapper "app_process32" "/system/bin/app_process32.real" generate_zygote_wrapper "app_process64" "/system/bin/app_process64.real" else # A 32-bit device. generate_zygote_wrapper "app_process.wrap" "/system/bin/app_process32" fi # General command-line tool wrapper (use for anything that's not started as # zygote). cat <<EOF >"$TMPDIR/asanwrapper" #!/system/bin/sh LD_PRELOAD=$ASAN_RT_SYMLINK \\ exec \$@ EOF if [[ -n "$ASAN_RT64" ]]; then cat <<EOF >"$TMPDIR/asanwrapper64" #!/system/bin/sh LD_PRELOAD=$ASAN_RT_SYMLINK \\ exec \$@ EOF fi function install { # from, to, chmod, chcon local _from=$1 local _to=$2 local _mode=$3 local _context=$4 local _basename="$(basename "$_from")" echo "Installing $_to/$_basename $_mode $_context" adb_push "$_from" "$_to/$_basename" adb_shell chown root.shell "$_to/$_basename" if [[ -n "$_mode" ]]; then adb_shell chmod "$_mode" "$_to/$_basename" fi if [[ -n "$_context" ]]; then adb_shell chcon "$_context" "$_to/$_basename" fi } if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then # Make SELinux happy by keeping app_process wrapper and the shell # it runs on in zygote domain. ENFORCING=0 if adb_shell getenforce | grep Enforcing >/dev/null; then # Sometimes shell is not allowed to change file contexts. # Temporarily switch to permissive.
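# [Editor's aside] Why the wrappers above preload a bare, bitness-neutral symlink
# name instead of a full runtime path: LD_PRELOAD survives when a 64-bit zygote
# execs a 32-bit binary, so the same entry must resolve under both /system/lib and
# /system/lib64. A sketch of the idea with hypothetical paths (the actual links are
# created by the install steps below):
#
#   ln -sf libclang_rt.asan-arm-android.so     /system/lib/$ASAN_RT_SYMLINK
#   ln -sf libclang_rt.asan-aarch64-android.so /system/lib64/$ASAN_RT_SYMLINK
#   LD_PRELOAD=$ASAN_RT_SYMLINK exec /system/bin/app_process64.real "$@"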
ENFORCING=1 adb_shell setenforce 0 fi if [[ PRE_L -eq 1 ]]; then CTX=u:object_r:system_file:s0 else CTX=u:object_r:zygote_exec:s0 fi echo '>> Pushing files to the device' if [[ -n "$ASAN_RT64" ]]; then install "$TMPDIR/$ASAN_RT" /system/lib 644 install "$TMPDIR/$ASAN_RT64" /system/lib64 644 install "$TMPDIR/app_process32" /system/bin 755 $CTX install "$TMPDIR/app_process32.real" /system/bin 755 $CTX install "$TMPDIR/app_process64" /system/bin 755 $CTX install "$TMPDIR/app_process64.real" /system/bin 755 $CTX install "$TMPDIR/asanwrapper" /system/bin 755 install "$TMPDIR/asanwrapper64" /system/bin 755 - adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK - adb_shell ln -s $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK + adb_shell ln -sf $ASAN_RT /system/lib/$ASAN_RT_SYMLINK + adb_shell ln -sf $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK else install "$TMPDIR/$ASAN_RT" /system/lib 644 install "$TMPDIR/app_process32" /system/bin 755 $CTX install "$TMPDIR/app_process.wrap" /system/bin 755 $CTX install "$TMPDIR/asanwrapper" /system/bin 755 $CTX - adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK + adb_shell ln -sf $ASAN_RT /system/lib/$ASAN_RT_SYMLINK adb_shell rm /system/bin/app_process adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process fi adb_shell cp /system/bin/sh /system/bin/sh-from-zygote adb_shell chcon $CTX /system/bin/sh-from-zygote if [ $ENFORCING == 1 ]; then adb_shell setenforce 1 fi echo '>> Restarting shell (asynchronous)' adb_shell stop adb_shell start echo '>> Please wait until the device restarts' else echo '>> Device is up to date' fi rm -r "$TMPDIRBASE" diff --git a/lib/asan/weak_symbols.txt b/lib/asan/weak_symbols.txt index ba7b0272c2b1..fe680f8a9a4f 100644 --- a/lib/asan/weak_symbols.txt +++ b/lib/asan/weak_symbols.txt @@ -1,3 +1,12 @@ ___asan_default_options ___asan_default_suppressions ___asan_on_error +___asan_set_shadow_00 +___asan_set_shadow_f1 +___asan_set_shadow_f2 +___asan_set_shadow_f3 +___asan_set_shadow_f4 +___asan_set_shadow_f5 +___asan_set_shadow_f6 +___asan_set_shadow_f7 +___asan_set_shadow_f8 diff --git a/lib/interception/interception_win.cc b/lib/interception/interception_win.cc index e4f3d358f40c..b2902d57f542 100644 --- a/lib/interception/interception_win.cc +++ b/lib/interception/interception_win.cc @@ -1,1008 +1,1013 @@ //===-- interception_win.cc -------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Windows-specific interception methods. // // This file implements several hooking techniques used to intercept calls // to functions. The hooks are dynamically installed by modifying the assembly // code. // // The hooking techniques make assumptions about the way the code is // generated and are safe only under these assumptions. // // On 64-bit architectures, there is no direct 64-bit jump instruction. To allow // arbitrary branching over the whole memory space, the notion of a trampoline // region is used. A trampoline region is a memory region within a 2GB boundary // where it is safe to add custom assembly code to build 64-bit jumps.
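// [Editor's aside] The header comment above motivates trampoline regions: x86-64 has
// no direct 64-bit jump, and a 5-byte "jmp rel32" only reaches +/-2GB. A sketch of
// the reachability test such a trampoline allocator needs (hypothetical name):
#include <cstdint>
#include <cstdio>
// True if a "jmp rel32" placed at |from| can reach |target|: the displacement
// is a signed 32-bit offset measured from the end of the 5-byte instruction.
static bool FitsRel32(uint64_t from, uint64_t target) {
  int64_t disp = (int64_t)target - (int64_t)(from + 5);
  return disp >= INT32_MIN && disp <= INT32_MAX;
}
int main() {
  printf("%d\n", FitsRel32(0x7f0000000000, 0x7f0000001000));  // 1: nearby
  printf("%d\n", FitsRel32(0x7f0000000000, 0x000140000000));  // 0: trampoline needed
}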
// // Hooking techniques // ================== // // 1) Detour // // The Detour hooking technique assumes the presence of a header with // padding and an overridable 2-byte nop instruction (mov edi, edi). The // nop instruction can safely be replaced by a 2-byte jump without any need // to save the instruction. A jump to the target is encoded in the function // header and the nop instruction is replaced by a short jump to the header. // // head: 5 x nop head: jmp <hook> // func: mov edi, edi --> func: jmp short <head> // [...] real: [...] // // This technique is only implemented on 32-bit architectures. // Most of the time, Windows APIs are hookable with the detour technique. // // 2) Redirect Jump // // The redirect jump is applicable when the first instruction is a direct // jump. The instruction is replaced by a jump to the hook. // // func: jmp
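// [Editor's aside] The detour diagram above boils down to two byte patches: a rel32
// jump written into the 5 bytes of hot-patch padding, and the 2-byte "mov edi, edi"
// entry overwritten with "jmp short -7" (EB F9) back into that padding. A sketch of
// the encoding over an in-memory buffer standing in for the function header (real
// patching must also flip page protections; the addresses are illustrative, the
// byte order assumes a little-endian host as on x86, and the rel32 math is only
// meaningful in a 32-bit address space, where detours apply):
#include <cstdint>
#include <cstdio>
#include <cstring>
int main() {
  // 5 padding nops, then the hookable entry "mov edi, edi" (8B FF).
  uint8_t code[7] = {0x90, 0x90, 0x90, 0x90, 0x90, 0x8B, 0xFF};
  uint32_t head = 0x00401000;        // pretend address of code[0]
  uint32_t hook = 0x00500000;        // pretend address of the hook
  code[0] = 0xE9;                    // jmp rel32, placed in the padding
  uint32_t rel = hook - (head + 5);  // displacement from end of the jmp
  memcpy(&code[1], &rel, 4);
  code[5] = 0xEB;                    // jmp short ...
  code[6] = 0xF9;                    // ... -7: entry now branches to code[0]
  for (uint8_t b : code) printf("%02X ", b);
  printf("\n");  // prints: E9 FB EF 0F 00 EB F9
}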