Index: vendor/compiler-rt/dist/cmake/Modules/AddCompilerRT.cmake =================================================================== --- vendor/compiler-rt/dist/cmake/Modules/AddCompilerRT.cmake (revision 317686) +++ vendor/compiler-rt/dist/cmake/Modules/AddCompilerRT.cmake (revision 317687) @@ -1,434 +1,442 @@ include(ExternalProject) include(CompilerRTUtils) function(set_target_output_directories target output_dir) # For RUNTIME_OUTPUT_DIRECTORY variable, Multi-configuration generators # append a per-configuration subdirectory to the specified directory. # To avoid the appended folder, the configuration specific variable must be # set 'RUNTIME_OUTPUT_DIRECTORY_${CONF}': # RUNTIME_OUTPUT_DIRECTORY_DEBUG, RUNTIME_OUTPUT_DIRECTORY_RELEASE, ... if(CMAKE_CONFIGURATION_TYPES) foreach(build_mode ${CMAKE_CONFIGURATION_TYPES}) string(TOUPPER "${build_mode}" CONFIG_SUFFIX) set_target_properties("${target}" PROPERTIES "ARCHIVE_OUTPUT_DIRECTORY_${CONFIG_SUFFIX}" ${output_dir} "LIBRARY_OUTPUT_DIRECTORY_${CONFIG_SUFFIX}" ${output_dir} "RUNTIME_OUTPUT_DIRECTORY_${CONFIG_SUFFIX}" ${output_dir}) endforeach() else() set_target_properties("${target}" PROPERTIES ARCHIVE_OUTPUT_DIRECTORY ${output_dir} LIBRARY_OUTPUT_DIRECTORY ${output_dir} RUNTIME_OUTPUT_DIRECTORY ${output_dir}) endif() endfunction() # Tries to add an "object library" target for a given list of OSs and/or # architectures with name "." for non-Darwin platforms if # architecture can be targeted, and "." for Darwin platforms. # add_compiler_rt_object_libraries( # OS # ARCHS # SOURCES # CFLAGS # DEFS ) function(add_compiler_rt_object_libraries name) cmake_parse_arguments(LIB "" "" "OS;ARCHS;SOURCES;CFLAGS;DEFS" ${ARGN}) set(libnames) if(APPLE) foreach(os ${LIB_OS}) set(libname "${name}.${os}") set(libnames ${libnames} ${libname}) set(extra_cflags_${libname} ${DARWIN_${os}_CFLAGS}) list_intersect(LIB_ARCHS_${libname} DARWIN_${os}_ARCHS LIB_ARCHS) endforeach() else() foreach(arch ${LIB_ARCHS}) set(libname "${name}.${arch}") set(libnames ${libnames} ${libname}) set(extra_cflags_${libname} ${TARGET_${arch}_CFLAGS}) if(NOT CAN_TARGET_${arch}) message(FATAL_ERROR "Architecture ${arch} can't be targeted") return() endif() endforeach() endif() foreach(libname ${libnames}) add_library(${libname} OBJECT ${LIB_SOURCES}) set_target_compile_flags(${libname} ${CMAKE_CXX_FLAGS} ${extra_cflags_${libname}} ${LIB_CFLAGS}) set_property(TARGET ${libname} APPEND PROPERTY COMPILE_DEFINITIONS ${LIB_DEFS}) set_target_properties(${libname} PROPERTIES FOLDER "Compiler-RT Libraries") if(APPLE) set_target_properties(${libname} PROPERTIES OSX_ARCHITECTURES "${LIB_ARCHS_${libname}}") endif() endforeach() endfunction() # Takes a list of object library targets, and a suffix and appends the proper # TARGET_OBJECTS string to the output variable. # format_object_libs( ...) macro(format_object_libs output suffix) foreach(lib ${ARGN}) list(APPEND ${output} $) endforeach() endmacro() function(add_compiler_rt_component name) add_custom_target(${name}) set_target_properties(${name} PROPERTIES FOLDER "Compiler-RT Misc") if(COMMAND runtime_register_component) runtime_register_component(${name}) endif() add_dependencies(compiler-rt ${name}) endfunction() # Adds static or shared runtime for a list of architectures and operating # systems and puts it in the proper directory in the build and install trees. 
# add_compiler_rt_runtime( # {STATIC|SHARED} # ARCHS # OS # SOURCES # CFLAGS # LINK_FLAGS # DEFS # LINK_LIBS (only for shared library) # OBJECT_LIBS # PARENT_TARGET ) function(add_compiler_rt_runtime name type) if(NOT type MATCHES "^(STATIC|SHARED)$") message(FATAL_ERROR "type argument must be STATIC or SHARED") return() endif() cmake_parse_arguments(LIB "" "PARENT_TARGET" "OS;ARCHS;SOURCES;CFLAGS;LINK_FLAGS;DEFS;LINK_LIBS;OBJECT_LIBS" ${ARGN}) set(libnames) # Until we support this some other way, build compiler-rt runtime without LTO # to allow non-LTO projects to link with it. if(COMPILER_RT_HAS_FNO_LTO_FLAG) set(NO_LTO_FLAGS "-fno-lto") else() set(NO_LTO_FLAGS "") endif() if(APPLE) foreach(os ${LIB_OS}) if(type STREQUAL "STATIC") set(libname "${name}_${os}") else() set(libname "${name}_${os}_dynamic") set(extra_link_flags_${libname} ${DARWIN_${os}_LINK_FLAGS} ${LIB_LINK_FLAGS}) endif() list_intersect(LIB_ARCHS_${libname} DARWIN_${os}_ARCHS LIB_ARCHS) if(LIB_ARCHS_${libname}) list(APPEND libnames ${libname}) set(extra_cflags_${libname} ${DARWIN_${os}_CFLAGS} ${NO_LTO_FLAGS} ${LIB_CFLAGS}) set(output_name_${libname} ${libname}${COMPILER_RT_OS_SUFFIX}) set(sources_${libname} ${LIB_SOURCES}) format_object_libs(sources_${libname} ${os} ${LIB_OBJECT_LIBS}) endif() endforeach() else() foreach(arch ${LIB_ARCHS}) if(NOT CAN_TARGET_${arch}) message(FATAL_ERROR "Architecture ${arch} can't be targeted") return() endif() if(type STREQUAL "STATIC") set(libname "${name}-${arch}") set(output_name_${libname} ${libname}${COMPILER_RT_OS_SUFFIX}) else() set(libname "${name}-dynamic-${arch}") set(extra_cflags_${libname} ${TARGET_${arch}_CFLAGS} ${LIB_CFLAGS}) set(extra_link_flags_${libname} ${TARGET_${arch}_LINK_FLAGS} ${LIB_LINK_FLAGS}) if(WIN32) set(output_name_${libname} ${name}_dynamic-${arch}${COMPILER_RT_OS_SUFFIX}) else() set(output_name_${libname} ${name}-${arch}${COMPILER_RT_OS_SUFFIX}) endif() endif() set(sources_${libname} ${LIB_SOURCES}) format_object_libs(sources_${libname} ${arch} ${LIB_OBJECT_LIBS}) set(libnames ${libnames} ${libname}) set(extra_cflags_${libname} ${TARGET_${arch}_CFLAGS} ${NO_LTO_FLAGS} ${LIB_CFLAGS}) endforeach() endif() if(NOT libnames) return() endif() if(LIB_PARENT_TARGET) # If the parent targets aren't created we should create them if(NOT TARGET ${LIB_PARENT_TARGET}) add_custom_target(${LIB_PARENT_TARGET}) endif() if(NOT TARGET install-${LIB_PARENT_TARGET}) # The parent install target specifies the parent component to scrape up # anything not installed by the individual install targets, and to handle # installation when running the multi-configuration generators. 
add_custom_target(install-${LIB_PARENT_TARGET} DEPENDS ${LIB_PARENT_TARGET} COMMAND "${CMAKE_COMMAND}" -DCMAKE_INSTALL_COMPONENT=${LIB_PARENT_TARGET} -P "${CMAKE_BINARY_DIR}/cmake_install.cmake") set_target_properties(install-${LIB_PARENT_TARGET} PROPERTIES FOLDER "Compiler-RT Misc") add_dependencies(install-compiler-rt install-${LIB_PARENT_TARGET}) endif() endif() foreach(libname ${libnames}) # If you are using a multi-configuration generator we don't generate # per-library install rules, so we fall back to the parent target COMPONENT if(CMAKE_CONFIGURATION_TYPES AND LIB_PARENT_TARGET) set(COMPONENT_OPTION COMPONENT ${LIB_PARENT_TARGET}) else() set(COMPONENT_OPTION COMPONENT ${libname}) endif() add_library(${libname} ${type} ${sources_${libname}}) set_target_compile_flags(${libname} ${extra_cflags_${libname}}) set_target_link_flags(${libname} ${extra_link_flags_${libname}}) set_property(TARGET ${libname} APPEND PROPERTY COMPILE_DEFINITIONS ${LIB_DEFS}) set_target_output_directories(${libname} ${COMPILER_RT_LIBRARY_OUTPUT_DIR}) set_target_properties(${libname} PROPERTIES OUTPUT_NAME ${output_name_${libname}}) set_target_properties(${libname} PROPERTIES FOLDER "Compiler-RT Runtime") if(${type} STREQUAL "SHARED") if(LIB_LINK_LIBS) target_link_libraries(${libname} ${LIB_LINK_LIBS}) endif() if(WIN32 AND NOT CYGWIN AND NOT MINGW) set_target_properties(${libname} PROPERTIES IMPORT_PREFIX "") set_target_properties(${libname} PROPERTIES IMPORT_SUFFIX ".lib") endif() + if(APPLE) + # Ad-hoc sign the dylibs + add_custom_command(TARGET ${libname} + POST_BUILD + COMMAND codesign --sign - $<TARGET_FILE:${libname}> + WORKING_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR} + ) + endif() endif() install(TARGETS ${libname} ARCHIVE DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR} ${COMPONENT_OPTION} LIBRARY DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR} ${COMPONENT_OPTION} RUNTIME DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR} ${COMPONENT_OPTION}) # We only want to generate per-library install targets if you aren't using # an IDE because the extra targets get cluttered in IDEs. if(NOT CMAKE_CONFIGURATION_TYPES) add_custom_target(install-${libname} DEPENDS ${libname} COMMAND "${CMAKE_COMMAND}" -DCMAKE_INSTALL_COMPONENT=${libname} -P "${CMAKE_BINARY_DIR}/cmake_install.cmake") # If you have a parent target specified, we bind the new install target # to the parent install target. if(LIB_PARENT_TARGET) add_dependencies(install-${LIB_PARENT_TARGET} install-${libname}) endif() endif() if(APPLE) set_target_properties(${libname} PROPERTIES OSX_ARCHITECTURES "${LIB_ARCHS_${libname}}") endif() if(type STREQUAL "SHARED") rt_externalize_debuginfo(${libname}) endif() endforeach() if(LIB_PARENT_TARGET) add_dependencies(${LIB_PARENT_TARGET} ${libnames}) endif() endfunction() # when cross compiling, COMPILER_RT_TEST_COMPILER_CFLAGS help # in compilation and linking of unittests. string(REPLACE " " ";" COMPILER_RT_UNITTEST_CFLAGS "${COMPILER_RT_TEST_COMPILER_CFLAGS}") set(COMPILER_RT_UNITTEST_LINK_FLAGS ${COMPILER_RT_UNITTEST_CFLAGS}) # Unittests support.
set(COMPILER_RT_GTEST_PATH ${LLVM_MAIN_SRC_DIR}/utils/unittest/googletest) set(COMPILER_RT_GTEST_SOURCE ${COMPILER_RT_GTEST_PATH}/src/gtest-all.cc) set(COMPILER_RT_GTEST_CFLAGS -DGTEST_NO_LLVM_RAW_OSTREAM=1 -DGTEST_HAS_RTTI=0 -I${COMPILER_RT_GTEST_PATH}/include -I${COMPILER_RT_GTEST_PATH} ) append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 COMPILER_RT_UNITTEST_CFLAGS) append_list_if(COMPILER_RT_HAS_WCOVERED_SWITCH_DEFAULT_FLAG -Wno-covered-switch-default COMPILER_RT_UNITTEST_CFLAGS) if(MSVC) # clang doesn't support exceptions on Windows yet. list(APPEND COMPILER_RT_UNITTEST_CFLAGS -D_HAS_EXCEPTIONS=0) # We should teach clang to understand "#pragma intrinsic", see PR19898. list(APPEND COMPILER_RT_UNITTEST_CFLAGS -Wno-undefined-inline) # Clang doesn't support SEH on Windows yet. list(APPEND COMPILER_RT_GTEST_CFLAGS -DGTEST_HAS_SEH=0) # gtest use a lot of stuff marked as deprecated on Windows. list(APPEND COMPILER_RT_GTEST_CFLAGS -Wno-deprecated-declarations) endif() # Link objects into a single executable with COMPILER_RT_TEST_COMPILER, # using specified link flags. Make executable a part of provided # test_suite. # add_compiler_rt_test( # SUBDIR # OBJECTS # DEPS # LINK_FLAGS ) macro(add_compiler_rt_test test_suite test_name) cmake_parse_arguments(TEST "" "SUBDIR" "OBJECTS;DEPS;LINK_FLAGS" "" ${ARGN}) set(output_bin ${CMAKE_CURRENT_BINARY_DIR}) if(TEST_SUBDIR) set(output_bin "${output_bin}/${TEST_SUBDIR}") endif() if(CMAKE_CONFIGURATION_TYPES) set(output_bin "${output_bin}/${CMAKE_CFG_INTDIR}") endif() set(output_bin "${output_bin}/${test_name}") if(MSVC) set(output_bin "${output_bin}.exe") endif() # Use host compiler in a standalone build, and just-built Clang otherwise. if(NOT COMPILER_RT_STANDALONE_BUILD) list(APPEND TEST_DEPS clang) endif() # If we're not on MSVC, include the linker flags from CMAKE but override them # with the provided link flags. This ensures that flags which are required to # link programs at all are included, but the changes needed for the test # trump. With MSVC we can't do that because CMake is set up to run link.exe # when linking, not the compiler. Here, we hack it to use the compiler # because we want to use -fsanitize flags. if(NOT MSVC) set(TEST_LINK_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${TEST_LINK_FLAGS}") separate_arguments(TEST_LINK_FLAGS) endif() add_custom_target(${test_name} COMMAND ${COMPILER_RT_TEST_COMPILER} ${TEST_OBJECTS} -o "${output_bin}" ${TEST_LINK_FLAGS} DEPENDS ${TEST_DEPS}) set_target_properties(${test_name} PROPERTIES FOLDER "Compiler-RT Tests") # Make the test suite depend on the binary. add_dependencies(${test_suite} ${test_name}) endmacro() macro(add_compiler_rt_resource_file target_name file_name component) set(src_file "${CMAKE_CURRENT_SOURCE_DIR}/${file_name}") set(dst_file "${COMPILER_RT_OUTPUT_DIR}/${file_name}") add_custom_command(OUTPUT ${dst_file} DEPENDS ${src_file} COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src_file} ${dst_file} COMMENT "Copying ${file_name}...") add_custom_target(${target_name} DEPENDS ${dst_file}) # Install in Clang resource directory. 
install(FILES ${file_name} DESTINATION ${COMPILER_RT_INSTALL_PATH} COMPONENT ${component}) add_dependencies(${component} ${target_name}) set_target_properties(${target_name} PROPERTIES FOLDER "Compiler-RT Misc") endmacro() macro(add_compiler_rt_script name) set(dst ${COMPILER_RT_EXEC_OUTPUT_DIR}/${name}) set(src ${CMAKE_CURRENT_SOURCE_DIR}/${name}) add_custom_command(OUTPUT ${dst} DEPENDS ${src} COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${dst} COMMENT "Copying ${name}...") add_custom_target(${name} DEPENDS ${dst}) install(FILES ${dst} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE DESTINATION ${COMPILER_RT_INSTALL_PATH}/bin) endmacro(add_compiler_rt_script src name) # Builds custom version of libc++ and installs it in <prefix>. # Can be used to build sanitized versions of libc++ for running unit tests. # add_custom_libcxx(<name> <prefix> # DEPS <deps> # CFLAGS <flags> ) macro(add_custom_libcxx name prefix) if(NOT COMPILER_RT_HAS_LIBCXX_SOURCES) message(FATAL_ERROR "libcxx not found!") endif() cmake_parse_arguments(LIBCXX "" "" "DEPS;CFLAGS" ${ARGN}) foreach(flag ${LIBCXX_CFLAGS}) set(flagstr "${flagstr} ${flag}") endforeach() set(LIBCXX_CFLAGS ${flagstr}) if(NOT COMPILER_RT_STANDALONE_BUILD) list(APPEND LIBCXX_DEPS clang) endif() ExternalProject_Add(${name} PREFIX ${prefix} SOURCE_DIR ${COMPILER_RT_LIBCXX_PATH} CMAKE_ARGS -DCMAKE_MAKE_PROGRAM:STRING=${CMAKE_MAKE_PROGRAM} -DCMAKE_C_COMPILER=${COMPILER_RT_TEST_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER_RT_TEST_CXX_COMPILER} -DCMAKE_C_FLAGS=${LIBCXX_CFLAGS} -DCMAKE_CXX_FLAGS=${LIBCXX_CFLAGS} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR> -DLLVM_PATH=${LLVM_MAIN_SRC_DIR} -DLIBCXX_STANDALONE_BUILD=On LOG_BUILD 1 LOG_CONFIGURE 1 LOG_INSTALL 1 ) set_target_properties(${name} PROPERTIES EXCLUDE_FROM_ALL TRUE) ExternalProject_Add_Step(${name} force-reconfigure DEPENDERS configure ALWAYS 1 ) ExternalProject_Add_Step(${name} clobber COMMAND ${CMAKE_COMMAND} -E remove_directory <BINARY_DIR> COMMAND ${CMAKE_COMMAND} -E make_directory <BINARY_DIR> COMMENT "Clobbering ${name} build directory..." DEPENDERS configure DEPENDS ${LIBCXX_DEPS} ) endmacro() function(rt_externalize_debuginfo name) if(NOT COMPILER_RT_EXTERNALIZE_DEBUGINFO) return() endif() if(NOT COMPILER_RT_EXTERNALIZE_DEBUGINFO_SKIP_STRIP) set(strip_command COMMAND xcrun strip -Sl $<TARGET_FILE:${name}>) endif() if(APPLE) if(CMAKE_CXX_FLAGS MATCHES "-flto" OR CMAKE_CXX_FLAGS_${uppercase_CMAKE_BUILD_TYPE} MATCHES "-flto") set(lto_object ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${name}-lto.o) set_property(TARGET ${name} APPEND_STRING PROPERTY LINK_FLAGS " -Wl,-object_path_lto -Wl,${lto_object}") endif() add_custom_command(TARGET ${name} POST_BUILD COMMAND xcrun dsymutil $<TARGET_FILE:${name}> ${strip_command}) else() message(FATAL_ERROR "COMPILER_RT_EXTERNALIZE_DEBUGINFO isn't implemented for non-darwin platforms!") endif() endfunction() Index: vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h =================================================================== --- vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h (revision 317686) +++ vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h (revision 317687) @@ -1,136 +1,137 @@ //===-- tsan_interface.h ----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details.
// //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Public interface header for TSan. //===----------------------------------------------------------------------===// #ifndef SANITIZER_TSAN_INTERFACE_H #define SANITIZER_TSAN_INTERFACE_H #include #ifdef __cplusplus extern "C" { #endif // __tsan_release establishes a happens-before relation with a preceding // __tsan_acquire on the same address. void __tsan_acquire(void *addr); void __tsan_release(void *addr); // Annotations for custom mutexes. // The annotations allow to get better reports (with sets of locked mutexes), // detect more types of bugs (e.g. mutex misuses, races between lock/unlock and // destruction and potential deadlocks) and improve precision and performance // (by ignoring individual atomic operations in mutex code). However, the // downside is that annotated mutex code itself is not checked for correctness. // Mutex creation flags are passed to __tsan_mutex_create annotation. // If mutex has no constructor and __tsan_mutex_create is not called, // the flags may be passed to __tsan_mutex_pre_lock/__tsan_mutex_post_lock // annotations. // Mutex has static storage duration and no-op constructor and destructor. // This effectively makes tsan ignore destroy annotation. const unsigned __tsan_mutex_linker_init = 1 << 0; // Mutex is write reentrant. const unsigned __tsan_mutex_write_reentrant = 1 << 1; // Mutex is read reentrant. const unsigned __tsan_mutex_read_reentrant = 1 << 2; // Mutex operation flags: // Denotes read lock operation. const unsigned __tsan_mutex_read_lock = 1 << 3; // Denotes try lock operation. const unsigned __tsan_mutex_try_lock = 1 << 4; // Denotes that a try lock operation has failed to acquire the mutex. const unsigned __tsan_mutex_try_lock_failed = 1 << 5; // Denotes that the lock operation acquires multiple recursion levels. // Number of levels is passed in recursion parameter. // This is useful for annotation of e.g. Java builtin monitors, // for which wait operation releases all recursive acquisitions of the mutex. const unsigned __tsan_mutex_recursive_lock = 1 << 6; // Denotes that the unlock operation releases all recursion levels. // Number of released levels is returned and later must be passed to // the corresponding __tsan_mutex_post_lock annotation. const unsigned __tsan_mutex_recursive_unlock = 1 << 7; // Annotate creation of a mutex. // Supported flags: mutex creation flags. void __tsan_mutex_create(void *addr, unsigned flags); // Annotate destruction of a mutex. -// Supported flags: none. +// Supported flags: +// - __tsan_mutex_linker_init void __tsan_mutex_destroy(void *addr, unsigned flags); // Annotate start of lock operation. // Supported flags: // - __tsan_mutex_read_lock // - __tsan_mutex_try_lock // - all mutex creation flags void __tsan_mutex_pre_lock(void *addr, unsigned flags); // Annotate end of lock operation. // Supported flags: // - __tsan_mutex_read_lock (must match __tsan_mutex_pre_lock) // - __tsan_mutex_try_lock (must match __tsan_mutex_pre_lock) // - __tsan_mutex_try_lock_failed // - __tsan_mutex_recursive_lock // - all mutex creation flags void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion); // Annotate start of unlock operation. // Supported flags: // - __tsan_mutex_read_lock // - __tsan_mutex_recursive_unlock int __tsan_mutex_pre_unlock(void *addr, unsigned flags); // Annotate end of unlock operation. 
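To show how these annotations compose in practice, here is a minimal sketch of a custom spinlock instrumented for TSan. The SpinLock class and its members are invented for illustration, and the header is assumed to be installed as <sanitizer/tsan_interface.h>; only the __tsan_mutex_* entry points, their flag and recursion parameters, and their pre/post pairing come from this interface, and the calls are only meaningful when the program runs under the TSan runtime.

  #include <atomic>
  #include <sanitizer/tsan_interface.h>

  // Hypothetical spinlock that reports itself to TSan as a plain,
  // non-recursive write mutex.
  class SpinLock {
   public:
    SpinLock() { __tsan_mutex_create(this, 0); }
    ~SpinLock() { __tsan_mutex_destroy(this, 0); }

    void Lock() {
      __tsan_mutex_pre_lock(this, 0);
      while (locked_.exchange(true, std::memory_order_acquire)) {
        // spin until the flag is released
      }
      __tsan_mutex_post_lock(this, 0, /*recursion=*/0);
    }

    void Unlock() {
      __tsan_mutex_pre_unlock(this, 0);
      locked_.store(false, std::memory_order_release);
      __tsan_mutex_post_unlock(this, 0);
    }

   private:
    std::atomic<bool> locked_{false};
  };

As the comments above note, once a mutex is annotated this way its own implementation is no longer checked for correctness; the payoff is better reports and fewer false positives around the lock.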
// Supported flags: // - __tsan_mutex_read_lock (must match __tsan_mutex_pre_unlock) void __tsan_mutex_post_unlock(void *addr, unsigned flags); // Annotate start/end of notify/signal/broadcast operation. // Supported flags: none. void __tsan_mutex_pre_signal(void *addr, unsigned flags); void __tsan_mutex_post_signal(void *addr, unsigned flags); // Annotate start/end of a region of code where lock/unlock/signal operation // diverts to do something else unrelated to the mutex. This can be used to // annotate, for example, calls into cooperative scheduler or contention // profiling code. // These annotations must be called only from within // __tsan_mutex_pre/post_lock, __tsan_mutex_pre/post_unlock, // __tsan_mutex_pre/post_signal regions. // Supported flags: none. void __tsan_mutex_pre_divert(void *addr, unsigned flags); void __tsan_mutex_post_divert(void *addr, unsigned flags); // External race detection API. // Can be used by non-instrumented libraries to detect when their objects are // being used in an unsafe manner. // - __tsan_external_read/__tsan_external_write annotates the logical reads // and writes of the object at the specified address. 'caller_pc' should // be the PC of the library user, which the library can obtain with e.g. // `__builtin_return_address(0)`. // - __tsan_external_register_tag registers a 'tag' with the specified name, // which is later used in read/write annotations to denote the object type // - __tsan_external_assign_tag can optionally mark a heap object with a tag void *__tsan_external_register_tag(const char *object_type); void __tsan_external_assign_tag(void *addr, void *tag); void __tsan_external_read(void *addr, void *caller_pc, void *tag); void __tsan_external_write(void *addr, void *caller_pc, void *tag); #ifdef __cplusplus } // extern "C" #endif #endif // SANITIZER_TSAN_INTERFACE_H Index: vendor/compiler-rt/dist/include/xray/xray_log_interface.h =================================================================== --- vendor/compiler-rt/dist/include/xray/xray_log_interface.h (revision 317686) +++ vendor/compiler-rt/dist/include/xray/xray_log_interface.h (revision 317687) @@ -1,60 +1,231 @@ //===-- xray_log_interface.h ----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of XRay, a function call tracing system. // // APIs for installing a new logging implementation. +// //===----------------------------------------------------------------------===// +/// +/// XRay allows users to implement their own logging handlers and install them +/// to replace the default runtime-controllable implementation that comes with +/// compiler-rt/xray. The "flight data recorder" (FDR) mode implementation uses +/// this API to install itself in an XRay-enabled binary. See +/// compiler-rt/lib/xray_fdr_logging.{h,cc} for details of that implementation. +/// +/// The high-level usage pattern for these APIs look like the following: +/// +/// // Before we try initializing the log implementation, we must set it as +/// // the log implementation. We provide the function pointers that define +/// // the various initialization, finalization, and other pluggable hooks +/// // that we need. +/// __xray_set_log_impl({...}); +/// +/// // Once that's done, we can now initialize the implementation. 
Each +/// // implementation has a chance to let users customize the implementation +/// // with a struct that their implementation supports. Roughly this might +/// // look like: +/// MyImplementationOptions opts; +/// opts.enable_feature = true; +/// ... +/// auto init_status = __xray_log_init( +/// BufferSize, MaxBuffers, &opts, sizeof opts); +/// if (init_status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) { +/// // deal with the error here, if there is one. +/// } +/// +/// // When the log implementation has had the chance to initialize, we can +/// // now patch the sleds. +/// auto patch_status = __xray_patch(); +/// if (patch_status != XRayPatchingStatus::SUCCESS) { +/// // deal with the error here, if it is an error. +/// } +/// +/// // If we want to stop the implementation, we can then finalize it (before +/// // optionally flushing the log). +/// auto fin_status = __xray_log_finalize(); +/// if (fin_status != XRayLogInitStatus::XRAY_LOG_FINALIZED) { +/// // deal with the error here, if it is an error. +/// } +/// +/// // We can optionally wait before flushing the log to give other threads a +/// // chance to see that the implementation is already finalized. Also, at +/// // this point we can optionally unpatch the sleds to reduce overheads at +/// // runtime. +/// auto unpatch_status = __xray_unpatch(); +/// if (unpatch_status != XRayPatchingStatus::SUCCESS) { +// // deal with the error here, if it is an error. +// } +/// +/// // If there are logs or data to be flushed somewhere, we can do so only +/// // after we've finalized the log. Some implementations may not actually +/// // have anything to log (it might keep the data in memory, or periodically +/// // be logging the data anyway). +/// auto flush_status = __xray_log_flushLog(); +/// if (flush_status != XRayLogFlushStatus::XRAY_LOG_FLUSHED) { +/// // deal with the error here, if it is an error. +/// } +/// +/// +/// NOTE: Before calling __xray_patch() again, consider re-initializing the +/// implementation first. Some implementations might stay in an "off" state when +/// they are finalized, while some might be in an invalid/unknown state. +/// #ifndef XRAY_XRAY_LOG_INTERFACE_H #define XRAY_XRAY_LOG_INTERFACE_H #include "xray/xray_interface.h" #include extern "C" { +/// This enum defines the valid states in which the logging implementation can +/// be at. enum XRayLogInitStatus { + /// The default state is uninitialized, and in case there were errors in the + /// initialization, the implementation MUST return XRAY_LOG_UNINITIALIZED. XRAY_LOG_UNINITIALIZED = 0, + + /// Some implementations support multi-stage init (or asynchronous init), and + /// may return XRAY_LOG_INITIALIZING to signal callers of the API that + /// there's an ongoing initialization routine running. This allows + /// implementations to support concurrent threads attempting to initialize, + /// while only signalling success in one. XRAY_LOG_INITIALIZING = 1, + + /// When an implementation is done initializing, it MUST return + /// XRAY_LOG_INITIALIZED. When users call `__xray_patch()`, they are + /// guaranteed that the implementation installed with + /// `__xray_set_log_impl(...)` has been initialized. XRAY_LOG_INITIALIZED = 2, + + /// Some implementations might support multi-stage finalization (or + /// asynchronous finalization), and may return XRAY_LOG_FINALIZING to signal + /// callers of the API that there's an ongoing finalization routine running. 
+ /// This allows implementations to support concurrent threads attempting to + /// finalize, while only signalling success/completion in one. XRAY_LOG_FINALIZING = 3, + + /// When an implementation is done finalizing, it MUST return + /// XRAY_LOG_FINALIZED. It is up to the implementation to determine what the + /// semantics of a finalized implementation is. Some implementations might + /// allow re-initialization once the log is finalized, while some might always + /// be on (and that finalization is a no-op). XRAY_LOG_FINALIZED = 4, }; +/// This enum allows an implementation to signal log flushing operations via +/// `__xray_log_flushLog()`, and the state of flushing the log. enum XRayLogFlushStatus { XRAY_LOG_NOT_FLUSHING = 0, XRAY_LOG_FLUSHING = 1, XRAY_LOG_FLUSHED = 2, }; +/// A valid XRay logging implementation MUST provide all of the function +/// pointers in XRayLogImpl when being installed through `__xray_set_log_impl`. +/// To be precise, ALL the functions pointers MUST NOT be nullptr. struct XRayLogImpl { + /// The log initialization routine provided by the implementation, always + /// provided with the following parameters: + /// + /// - buffer size + /// - maximum number of buffers + /// - a pointer to an argument struct that the implementation MUST handle + /// - the size of the argument struct + /// + /// See XRayLogInitStatus for details on what the implementation MUST return + /// when called. + /// + /// If the implementation needs to install handlers aside from the 0-argument + /// function call handler, it MUST do so in this initialization handler. + /// + /// See xray_interface.h for available handler installation routines. XRayLogInitStatus (*log_init)(size_t, size_t, void *, size_t); + + /// The log finalization routine provided by the implementation. + /// + /// See XRayLogInitStatus for details on what the implementation MUST return + /// when called. XRayLogInitStatus (*log_finalize)(); + + /// The 0-argument function call handler. XRay logging implementations MUST + /// always have a handler for function entry and exit events. In case the + /// implementation wants to support arg1 (or other future extensions to XRay + /// logging) those MUST be installed by the installed 'log_init' handler. void (*handle_arg0)(int32_t, XRayEntryType); + + /// The log implementation provided routine for when __xray_log_flushLog() is + /// called. + /// + /// See XRayLogFlushStatus for details on what the implementation MUST return + /// when called. XRayLogFlushStatus (*flush_log)(); }; +/// This function installs a new logging implementation that XRay will use. In +/// case there are any nullptr members in Impl, XRay will *uninstall any +/// existing implementations*. It does NOT patch the instrumentation sleds. +/// +/// NOTE: This function does NOT attempt to finalize the currently installed +/// implementation. Use with caution. +/// +/// It is guaranteed safe to call this function in the following states: +/// +/// - When the implementation is UNINITIALIZED. +/// - When the implementation is FINALIZED. +/// - When there is no current implementation installed. +/// +/// It is logging implementation defined what happens when this function is +/// called while in any other states. void __xray_set_log_impl(XRayLogImpl Impl); + +/// This function removes the currently installed implementation. It will also +/// uninstall any handlers that have been previously installed. It does NOT +/// unpatch the instrumentation sleds. 
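To make the shape of XRayLogImpl concrete, below is a minimal sketch of a do-nothing implementation and how it would be installed. The Null* function names and InstallNullXRayLogger are invented for illustration; the four function-pointer slots, their signatures, and __xray_set_log_impl are the ones declared in this header.

  #include "xray/xray_log_interface.h"

  namespace {
  XRayLogInitStatus NullLogInit(size_t, size_t, void *, size_t) {
    // Accept any options struct and report immediate success.
    return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
  }

  XRayLogInitStatus NullLogFinalize() {
    return XRayLogInitStatus::XRAY_LOG_FINALIZED;
  }

  void NullHandleArg0(int32_t /*FuncId*/, XRayEntryType /*Entry*/) {
    // Drop every function entry/exit event.
  }

  XRayLogFlushStatus NullFlush() {
    return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
  }
  }  // namespace

  void InstallNullXRayLogger() {
    XRayLogImpl Impl;
    Impl.log_init = NullLogInit;
    Impl.log_finalize = NullLogFinalize;
    Impl.handle_arg0 = NullHandleArg0;
    Impl.flush_log = NullFlush;
    // All four pointers must be non-null, otherwise the current
    // implementation is uninstalled instead.
    __xray_set_log_impl(Impl);
  }

A real implementation, such as the FDR mode mentioned above, would allocate buffers in log_init, record events in handle_arg0, and write them out in flush_log.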
+/// +/// NOTE: This function does NOT attempt to finalize the currently installed +/// implementation. Use with caution. +/// +/// It is guaranteed safe to call this function in the following states: +/// +/// - When the implementation is UNINITIALIZED. +/// - When the implementation is FINALIZED. +/// - When there is no current implementation installed. +/// +/// It is logging implementation defined what happens when this function is +/// called while in any other states. +void __xray_remove_log_impl(); + +/// Invokes the installed implementation initialization routine. See +/// XRayLogInitStatus for what the return values mean. XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers, void *Args, size_t ArgsSize); + +/// Invokes the installed implementation finalization routine. See +/// XRayLogInitStatus for what the return values mean. XRayLogInitStatus __xray_log_finalize(); + +/// Invokes the install implementation log flushing routine. See +/// XRayLogFlushStatus for what the return values mean. XRayLogFlushStatus __xray_log_flushLog(); } // extern "C" namespace __xray { + // Options used by the LLVM XRay FDR implementation. struct FDRLoggingOptions { bool ReportErrors = false; int Fd = -1; }; } // namespace __xray #endif // XRAY_XRAY_LOG_INTERFACE_H Index: vendor/compiler-rt/dist/lib/asan/asan_globals.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_globals.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/asan/asan_globals.cc (revision 317687) @@ -1,432 +1,452 @@ //===-- asan_globals.cc ---------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Handle globals. //===----------------------------------------------------------------------===// #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_mapping.h" #include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" #include "asan_suppressions.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_mutex.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_symbolizer.h" namespace __asan { typedef __asan_global Global; struct ListOfGlobals { const Global *g; ListOfGlobals *next; }; static BlockingMutex mu_for_globals(LINKER_INITIALIZED); static LowLevelAllocator allocator_for_globals; static ListOfGlobals *list_of_all_globals; static const int kDynamicInitGlobalsInitialCapacity = 512; struct DynInitGlobal { Global g; bool initialized; }; typedef InternalMmapVector VectorOfGlobals; // Lazy-initialized and never deleted. static VectorOfGlobals *dynamic_init_globals; // We want to remember where a certain range of globals was registered. 
struct GlobalRegistrationSite { u32 stack_id; Global *g_first, *g_last; }; typedef InternalMmapVector GlobalRegistrationSiteVector; static GlobalRegistrationSiteVector *global_registration_site_vector; ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) { FastPoisonShadow(g->beg, g->size_with_redzone, value); } ALWAYS_INLINE void PoisonRedZones(const Global &g) { uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY); FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size, kAsanGlobalRedzoneMagic); if (g.size != aligned_size) { FastPoisonShadowPartialRightRedzone( g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY), g.size % SHADOW_GRANULARITY, SHADOW_GRANULARITY, kAsanGlobalRedzoneMagic); } } const uptr kMinimalDistanceFromAnotherGlobal = 64; static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) { if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false; if (addr >= g.beg + g.size_with_redzone) return false; return true; } static void ReportGlobal(const Global &g, const char *prefix) { Report("%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n", prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name, g.module_name, g.has_dynamic_init); if (g.location) { Report(" location (%p): name=%s[%p], %d %d\n", g.location, g.location->filename, g.location->filename, g.location->line_no, g.location->column_no); } } static u32 FindRegistrationSite(const Global *g) { mu_for_globals.CheckLocked(); CHECK(global_registration_site_vector); for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) { GlobalRegistrationSite &grs = (*global_registration_site_vector)[i]; if (g >= grs.g_first && g <= grs.g_last) return grs.stack_id; } return 0; } int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites, int max_globals) { if (!flags()->report_globals) return 0; BlockingMutexLock lock(&mu_for_globals); int res = 0; for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { const Global &g = *l->g; if (flags()->report_globals >= 2) ReportGlobal(g, "Search"); if (IsAddressNearGlobal(addr, g)) { globals[res] = g; if (reg_sites) reg_sites[res] = FindRegistrationSite(&g); res++; if (res == max_globals) break; } } return res; } enum GlobalSymbolState { UNREGISTERED = 0, REGISTERED = 1 }; // Check ODR violation for given global G via special ODR indicator. We use // this method in case compiler instruments global variables through their // local aliases. static void CheckODRViolationViaIndicator(const Global *g) { u8 *odr_indicator = reinterpret_cast(g->odr_indicator); if (*odr_indicator == UNREGISTERED) { *odr_indicator = REGISTERED; return; } // If *odr_indicator is DEFINED, some module have already registered // externally visible symbol with the same name. This is an ODR violation. for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { if (g->odr_indicator == l->g->odr_indicator && (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && !IsODRViolationSuppressed(g->name)) ReportODRViolation(g, FindRegistrationSite(g), l->g, FindRegistrationSite(l->g)); } } // Check ODR violation for given global G by checking if it's already poisoned. // We use this method in case compiler doesn't use private aliases for global // variables. static void CheckODRViolationViaPoisoning(const Global *g) { if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) { // This check may not be enough: if the first global is much larger // the entire redzone of the second global may be within the first global. 
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { if (g->beg == l->g->beg && (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && !IsODRViolationSuppressed(g->name)) ReportODRViolation(g, FindRegistrationSite(g), l->g, FindRegistrationSite(l->g)); } } } // Clang provides two different ways for global variables protection: // it can poison the global itself or its private alias. In former // case we may poison same symbol multiple times, that can help us to // cheaply detect ODR violation: if we try to poison an already poisoned // global, we have ODR violation error. // In latter case, we poison each symbol exactly once, so we use special // indicator symbol to perform similar check. // In either case, compiler provides a special odr_indicator field to Global // structure, that can contain two kinds of values: // 1) Non-zero value. In this case, odr_indicator is an address of // corresponding indicator variable for given global. // 2) Zero. This means that we don't use private aliases for global variables // and can freely check ODR violation with the first method. // // This routine chooses between two different methods of ODR violation // detection. static inline bool UseODRIndicator(const Global *g) { // Use ODR indicator method iff use_odr_indicator flag is set and // indicator symbol address is not 0. return flags()->use_odr_indicator && g->odr_indicator > 0; } // Register a global variable. // This function may be called more than once for every global // so we store the globals in a map. static void RegisterGlobal(const Global *g) { CHECK(asan_inited); if (flags()->report_globals >= 2) ReportGlobal(*g, "Added"); CHECK(flags()->report_globals); CHECK(AddrIsInMem(g->beg)); if (!AddrIsAlignedByGranularity(g->beg)) { Report("The following global variable is not properly aligned.\n"); Report("This may happen if another global with the same name\n"); Report("resides in another non-instrumented module.\n"); Report("Or the global comes from a C file built w/o -fno-common.\n"); Report("In either case this is likely an ODR violation bug,\n"); Report("but AddressSanitizer can not provide more details.\n"); ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g)); CHECK(AddrIsAlignedByGranularity(g->beg)); } CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); if (flags()->detect_odr_violation) { // Try detecting ODR (One Definition Rule) violation, i.e. the situation // where two globals with the same name are defined in different modules. 
if (UseODRIndicator(g)) CheckODRViolationViaIndicator(g); else CheckODRViolationViaPoisoning(g); } if (CanPoisonMemory()) PoisonRedZones(*g); ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals; l->g = g; l->next = list_of_all_globals; list_of_all_globals = l; if (g->has_dynamic_init) { if (!dynamic_init_globals) { dynamic_init_globals = new(allocator_for_globals) VectorOfGlobals(kDynamicInitGlobalsInitialCapacity); } DynInitGlobal dyn_global = { *g, false }; dynamic_init_globals->push_back(dyn_global); } } static void UnregisterGlobal(const Global *g) { CHECK(asan_inited); if (flags()->report_globals >= 2) ReportGlobal(*g, "Removed"); CHECK(flags()->report_globals); CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); if (CanPoisonMemory()) PoisonShadowForGlobal(g, 0); // We unpoison the shadow memory for the global but we do not remove it from // the list because that would require O(n^2) time with the current list // implementation. It might not be worth doing anyway. // Release ODR indicator. if (UseODRIndicator(g)) { u8 *odr_indicator = reinterpret_cast(g->odr_indicator); *odr_indicator = UNREGISTERED; } } void StopInitOrderChecking() { BlockingMutexLock lock(&mu_for_globals); if (!flags()->check_initialization_order || !dynamic_init_globals) return; flags()->check_initialization_order = false; for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; const Global *g = &dyn_g.g; // Unpoison the whole global. PoisonShadowForGlobal(g, 0); // Poison redzones back. PoisonRedZones(*g); } } static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; } const char *MaybeDemangleGlobalName(const char *name) { // We can spoil names of globals with C linkage, so use an heuristic // approach to check if the name should be demangled. bool should_demangle = false; if (name[0] == '_' && name[1] == 'Z') should_demangle = true; else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?') should_demangle = true; return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name; } // Check if the global is a zero-terminated ASCII string. If so, print it. void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) { for (uptr p = g.beg; p < g.beg + g.size - 1; p++) { unsigned char c = *(unsigned char *)p; if (c == '\0' || !IsASCII(c)) return; } if (*(char *)(g.beg + g.size - 1) != '\0') return; str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name), (char *)g.beg); } static const char *GlobalFilename(const __asan_global &g) { const char *res = g.module_name; // Prefer the filename from source location, if is available. if (g.location) res = g.location->filename; CHECK(res); return res; } void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) { str->append("%s", GlobalFilename(g)); if (!g.location) return; if (g.location->line_no) str->append(":%d", g.location->line_no); if (g.location->column_no) str->append(":%d", g.location->column_no); } } // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; // NOLINT // Apply __asan_register_globals to all globals found in the same loaded // executable or shared library as `flag'. The flag tracks whether globals have // already been registered or not for this image. 
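For context, the ODR checks above are aimed at source-level bugs of the following shape (a hypothetical two-library example; file and symbol names are illustrative):

  // libfoo.cc, compiled with -fsanitize=address into libfoo.so
  int kTable[16];   // externally visible definition

  // libbar.cc, compiled with -fsanitize=address into libbar.so
  int kTable[32];   // same name, different size: an ODR violation

  // When both libraries are loaded, each registers an __asan_global named
  // "kTable". RegisterGlobal() notices the second registration (through the
  // already-poisoned redzone or the ODR indicator byte) and, because the
  // sizes differ, calls ReportODRViolation() even at detect_odr_violation=1.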
void __asan_register_image_globals(uptr *flag) { if (*flag) return; AsanApplyToGlobals(__asan_register_globals, flag); *flag = 1; } // This mirrors __asan_register_image_globals. void __asan_unregister_image_globals(uptr *flag) { if (!*flag) return; AsanApplyToGlobals(__asan_unregister_globals, flag); *flag = 0; } +void __asan_register_elf_globals(uptr *flag, void *start, void *stop) { + if (*flag) return; + if (!start) return; + CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global)); + __asan_global *globals_start = (__asan_global*)start; + __asan_global *globals_stop = (__asan_global*)stop; + __asan_register_globals(globals_start, globals_stop - globals_start); + *flag = 1; +} + +void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) { + if (!*flag) return; + if (!start) return; + CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global)); + __asan_global *globals_start = (__asan_global*)start; + __asan_global *globals_stop = (__asan_global*)stop; + __asan_unregister_globals(globals_start, globals_stop - globals_start); + *flag = 0; +} + // Register an array of globals. void __asan_register_globals(__asan_global *globals, uptr n) { if (!flags()->report_globals) return; GET_STACK_TRACE_MALLOC; u32 stack_id = StackDepotPut(stack); BlockingMutexLock lock(&mu_for_globals); if (!global_registration_site_vector) global_registration_site_vector = new(allocator_for_globals) GlobalRegistrationSiteVector(128); GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]}; global_registration_site_vector->push_back(site); if (flags()->report_globals >= 2) { PRINT_CURRENT_STACK(); Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]); } for (uptr i = 0; i < n; i++) { if (SANITIZER_WINDOWS && globals[i].beg == 0) { // The MSVC incremental linker may pad globals out to 256 bytes. As long // as __asan_global is less than 256 bytes large and its size is a power // of two, we can skip over the padding. static_assert( sizeof(__asan_global) < 256 && (sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0, "sizeof(__asan_global) incompatible with incremental linker padding"); // If these are padding bytes, the rest of the global should be zero. CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 && globals[i].name == nullptr && globals[i].module_name == nullptr && globals[i].odr_indicator == 0); continue; } RegisterGlobal(&globals[i]); } } // Unregister an array of globals. // We must do this when a shared objects gets dlclosed. void __asan_unregister_globals(__asan_global *globals, uptr n) { if (!flags()->report_globals) return; BlockingMutexLock lock(&mu_for_globals); for (uptr i = 0; i < n; i++) { if (SANITIZER_WINDOWS && globals[i].beg == 0) { // Skip globals that look like padding from the MSVC incremental linker. // See comment in __asan_register_globals. continue; } UnregisterGlobal(&globals[i]); } } // This method runs immediately prior to dynamic initialization in each TU, // when all dynamically initialized globals are unpoisoned. This method // poisons all global variables not defined in this TU, so that a dynamic // initializer can only touch global variables in the same TU. 
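The new __asan_register_elf_globals/__asan_unregister_elf_globals entry points take a [start, stop) range of __asan_global records rather than an explicit count. A rough sketch of how an instrumented module's constructor could drive them follows; the asan_globals section name, the linker-synthesized __start_/__stop_ symbols, and the uptr stand-in are assumptions made for illustration, not something this file defines.

  // Hypothetical sketch: handing a module's "asan_globals" metadata section
  // to the new ELF registration entry points.
  using uptr = unsigned long;  // stand-in for ASan's internal uptr

  extern "C" {
  extern char __start_asan_globals[];  // assumed: first byte of the section
  extern char __stop_asan_globals[];   // assumed: one past the last byte
  void __asan_register_elf_globals(uptr *flag, void *start, void *stop);
  void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop);
  }

  static uptr module_registered;  // one guard flag per loaded module

  __attribute__((constructor)) static void RegisterAsanGlobals() {
    __asan_register_elf_globals(&module_registered, __start_asan_globals,
                                __stop_asan_globals);
  }

  __attribute__((destructor)) static void UnregisterAsanGlobals() {
    __asan_unregister_elf_globals(&module_registered, __start_asan_globals,
                                  __stop_asan_globals);
  }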
void __asan_before_dynamic_init(const char *module_name) { if (!flags()->check_initialization_order || !CanPoisonMemory() || !dynamic_init_globals) return; bool strict_init_order = flags()->strict_init_order; CHECK(module_name); CHECK(asan_inited); BlockingMutexLock lock(&mu_for_globals); if (flags()->report_globals >= 3) Printf("DynInitPoison module: %s\n", module_name); for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; const Global *g = &dyn_g.g; if (dyn_g.initialized) continue; if (g->module_name != module_name) PoisonShadowForGlobal(g, kAsanInitializationOrderMagic); else if (!strict_init_order) dyn_g.initialized = true; } } // This method runs immediately after dynamic initialization in each TU, when // all dynamically initialized globals except for those defined in the current // TU are poisoned. It simply unpoisons all dynamically initialized globals. void __asan_after_dynamic_init() { if (!flags()->check_initialization_order || !CanPoisonMemory() || !dynamic_init_globals) return; CHECK(asan_inited); BlockingMutexLock lock(&mu_for_globals); // FIXME: Optionally report that we're unpoisoning globals from a module. for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; const Global *g = &dyn_g.g; if (!dyn_g.initialized) { // Unpoison the whole global. PoisonShadowForGlobal(g, 0); // Poison redzones back. PoisonRedZones(*g); } } } Index: vendor/compiler-rt/dist/lib/asan/asan_interceptors.cc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_interceptors.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/asan/asan_interceptors.cc (revision 317687) @@ -1,787 +1,794 @@ //===-- asan_interceptors.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // Intercept various libc functions. //===----------------------------------------------------------------------===// #include "asan_interceptors.h" #include "asan_allocator.h" #include "asan_internal.h" #include "asan_mapping.h" #include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" #include "asan_suppressions.h" #include "lsan/lsan_common.h" #include "sanitizer_common/sanitizer_libc.h" #if SANITIZER_POSIX #include "sanitizer_common/sanitizer_posix.h" #endif #if defined(__i386) && SANITIZER_LINUX #define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" #elif defined(__mips__) && SANITIZER_LINUX #define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2" #endif namespace __asan { // Return true if we can quickly decide that the region is unpoisoned. +// We assume that a redzone is at least 16 bytes. 
static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) { if (size == 0) return true; if (size <= 32) return !AddressIsPoisoned(beg) && !AddressIsPoisoned(beg + size - 1) && + !AddressIsPoisoned(beg + size / 2); + if (size <= 64) + return !AddressIsPoisoned(beg) && + !AddressIsPoisoned(beg + size / 4) && + !AddressIsPoisoned(beg + size - 1) && + !AddressIsPoisoned(beg + 3 * size / 4) && !AddressIsPoisoned(beg + size / 2); return false; } struct AsanInterceptorContext { const char *interceptor_name; }; // We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE, // and ASAN_WRITE_RANGE as macro instead of function so // that no extra frames are created, and stack trace contains // relevant information only. // We check all shadow bytes. #define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \ uptr __offset = (uptr)(offset); \ uptr __size = (uptr)(size); \ uptr __bad = 0; \ if (__offset > __offset + __size) { \ GET_STACK_TRACE_FATAL_HERE; \ ReportStringFunctionSizeOverflow(__offset, __size, &stack); \ } \ if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \ (__bad = __asan_region_is_poisoned(__offset, __size))) { \ AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \ bool suppressed = false; \ if (_ctx) { \ suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \ if (!suppressed && HaveStackTraceBasedSuppressions()) { \ GET_STACK_TRACE_FATAL_HERE; \ suppressed = IsStackTraceSuppressed(&stack); \ } \ } \ if (!suppressed) { \ GET_CURRENT_PC_BP_SP; \ ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\ } \ } \ } while (0) // memcpy is called during __asan_init() from the internals of printf(...). // We do not treat memcpy with to==from as a bug. // See http://llvm.org/bugs/show_bug.cgi?id=11763. #define ASAN_MEMCPY_IMPL(ctx, to, from, size) \ do { \ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \ if (asan_init_is_running) { \ return REAL(memcpy)(to, from, size); \ } \ ENSURE_ASAN_INITED(); \ if (flags()->replace_intrin) { \ if (to != from) { \ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \ } \ ASAN_READ_RANGE(ctx, from, size); \ ASAN_WRITE_RANGE(ctx, to, size); \ } \ return REAL(memcpy)(to, from, size); \ } while (0) // memset is called inside Printf. #define ASAN_MEMSET_IMPL(ctx, block, c, size) \ do { \ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \ if (asan_init_is_running) { \ return REAL(memset)(block, c, size); \ } \ ENSURE_ASAN_INITED(); \ if (flags()->replace_intrin) { \ ASAN_WRITE_RANGE(ctx, block, size); \ } \ return REAL(memset)(block, c, size); \ } while (0) #define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \ do { \ if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \ ENSURE_ASAN_INITED(); \ if (flags()->replace_intrin) { \ ASAN_READ_RANGE(ctx, from, size); \ ASAN_WRITE_RANGE(ctx, to, size); \ } \ return internal_memmove(to, from, size); \ } while (0) #define ASAN_READ_RANGE(ctx, offset, size) \ ACCESS_MEMORY_RANGE(ctx, offset, size, false) #define ASAN_WRITE_RANGE(ctx, offset, size) \ ACCESS_MEMORY_RANGE(ctx, offset, size, true) #define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \ ASAN_READ_RANGE((ctx), (s), \ common_flags()->strict_string_checks ? (len) + 1 : (n)) #define ASAN_READ_STRING(ctx, s, n) \ ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n)) // Behavior of functions like "memcpy" or "strcpy" is undefined // if memory intervals overlap. We report error in this case. // Macro is used to avoid creation of new frames. 
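As a concrete example of the overlap checking wired into ASAN_MEMCPY_IMPL above, the following program performs an overlapping copy with to != from, which CHECK_RANGES_OVERLAP rejects before the copy runs (assuming the default replace_intrin setting; exact report text is not quoted here).

  #include <string.h>

  int main() {
    char buf[32] = {0};
    // Destination [buf, buf+16) and source [buf+8, buf+24) overlap while
    // to != from, so the intercepted memcpy reports the overlap instead of
    // silently performing an undefined overlapping copy.
    memcpy(buf, buf + 8, 16);
    return 0;
  }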
static inline bool RangesOverlap(const char *offset1, uptr length1, const char *offset2, uptr length2) { return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1)); } #define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \ const char *offset1 = (const char*)_offset1; \ const char *offset2 = (const char*)_offset2; \ if (RangesOverlap(offset1, length1, offset2, length2)) { \ GET_STACK_TRACE_FATAL_HERE; \ ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \ offset2, length2, &stack); \ } \ } while (0) static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { #if SANITIZER_INTERCEPT_STRNLEN if (REAL(strnlen)) { return REAL(strnlen)(s, maxlen); } #endif return internal_strnlen(s, maxlen); } void SetThreadName(const char *name) { AsanThread *t = GetCurrentThread(); if (t) asanThreadRegistry().SetThreadName(t->tid(), name); } int OnExit() { // FIXME: ask frontend whether we need to return failure. return 0; } } // namespace __asan // ---------------------- Wrappers ---------------- {{{1 using namespace __asan; // NOLINT DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr) DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) #define ASAN_INTERCEPTOR_ENTER(ctx, func) \ AsanInterceptorContext _ctx = {#func}; \ ctx = (void *)&_ctx; \ (void) ctx; \ #define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name) #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \ ASAN_INTERCEPT_FUNC_VER(name, ver) #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ ASAN_WRITE_RANGE(ctx, ptr, size) #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ ASAN_READ_RANGE(ctx, ptr, size) #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ ASAN_INTERCEPTOR_ENTER(ctx, func); \ do { \ if (asan_init_is_running) \ return REAL(func)(__VA_ARGS__); \ if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \ return REAL(func)(__VA_ARGS__); \ ENSURE_ASAN_INITED(); \ } while (false) #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ do { \ } while (false) #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name) // Should be asanThreadRegistry().SetThreadNameByUserId(thread, name) // But asan does not remember UserId's for threads (pthread_t); // and remembers all ever existed threads, so the linear search by UserId // can be slow. 
#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ do { \ } while (false) #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) // Strict init-order checking is dlopen-hostile: // https://github.com/google/sanitizers/issues/178 #define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \ do { \ if (flags()->strict_init_order) \ StopInitOrderChecking(); \ CheckNoDeepBind(filename, flag); \ } while (false) #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ CoverageUpdateMapping() #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping() #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited) #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ if (AsanThread *t = GetCurrentThread()) { \ *begin = t->tls_begin(); \ *end = t->tls_end(); \ } else { \ *begin = *end = 0; \ } #define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \ do { \ ASAN_INTERCEPTOR_ENTER(ctx, memmove); \ ASAN_MEMMOVE_IMPL(ctx, to, from, size); \ } while (false) #define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \ do { \ ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \ ASAN_MEMCPY_IMPL(ctx, to, from, size); \ } while (false) #define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \ do { \ ASAN_INTERCEPTOR_ENTER(ctx, memset); \ ASAN_MEMSET_IMPL(ctx, block, c, size); \ } while (false) #include "sanitizer_common/sanitizer_common_interceptors.inc" // Syscall interceptors don't have contexts, we don't support suppressions // for them. #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s) #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s) #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ do { \ (void)(p); \ (void)(s); \ } while (false) #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \ do { \ (void)(p); \ (void)(s); \ } while (false) #include "sanitizer_common/sanitizer_common_syscalls.inc" struct ThreadStartParam { atomic_uintptr_t t; atomic_uintptr_t is_registered; }; #if ASAN_INTERCEPT_PTHREAD_CREATE static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { ThreadStartParam *param = reinterpret_cast(arg); AsanThread *t = nullptr; while ((t = reinterpret_cast( atomic_load(¶m->t, memory_order_acquire))) == nullptr) internal_sched_yield(); SetCurrentThread(t); return t->ThreadStart(GetTid(), ¶m->is_registered); } INTERCEPTOR(int, pthread_create, void *thread, void *attr, void *(*start_routine)(void*), void *arg) { EnsureMainThreadIDIsCorrect(); // Strict init-order checking is thread-hostile. if (flags()->strict_init_order) StopInitOrderChecking(); GET_STACK_TRACE_THREAD; int detached = 0; if (attr) REAL(pthread_attr_getdetachstate)(attr, &detached); ThreadStartParam param; atomic_store(¶m.t, 0, memory_order_relaxed); atomic_store(¶m.is_registered, 0, memory_order_relaxed); int result; { // Ignore all allocations made by pthread_create: thread stack/TLS may be // stored by pthread for future reuse even after thread destruction, and // the linked list it's stored in doesn't even hold valid pointers to the // objects, the latter are calculated by obscure pointer arithmetic. 
#if CAN_SANITIZE_LEAKS __lsan::ScopedInterceptorDisabler disabler; #endif result = REAL(pthread_create)(thread, attr, asan_thread_start, &param); } if (result == 0) { u32 current_tid = GetCurrentTidOrInvalid(); AsanThread *t = AsanThread::Create(start_routine, arg, current_tid, &stack, detached); atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release); // Wait until the AsanThread object is initialized and the ThreadRegistry // entry is in "started" state. One reason for this is that after this // interceptor exits, the child thread's stack may be the only thing holding // the |arg| pointer. This may cause LSan to report a leak if leak checking // happens at a point when the interceptor has already exited, but the stack // range for the child thread is not yet known. while (atomic_load(&param.is_registered, memory_order_acquire) == 0) internal_sched_yield(); } return result; } INTERCEPTOR(int, pthread_join, void *t, void **arg) { return real_pthread_join(t, arg); } DEFINE_REAL_PTHREAD_FUNCTIONS #endif // ASAN_INTERCEPT_PTHREAD_CREATE #if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION #if SANITIZER_ANDROID INTERCEPTOR(void*, bsd_signal, int signum, void *handler) { if (!IsHandledDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(bsd_signal)(signum, handler); } return 0; } #endif INTERCEPTOR(void*, signal, int signum, void *handler) { if (!IsHandledDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(signal)(signum, handler); } return nullptr; } INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act, struct sigaction *oldact) { if (!IsHandledDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(sigaction)(signum, act, oldact); } return 0; } namespace __sanitizer { int real_sigaction(int signum, const void *act, void *oldact) { return REAL(sigaction)(signum, (const struct sigaction *)act, (struct sigaction *)oldact); } } // namespace __sanitizer #elif SANITIZER_POSIX // We need to have defined REAL(sigaction) on posix systems. DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act, struct sigaction *oldact) #endif // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION #if ASAN_INTERCEPT_SWAPCONTEXT static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) { // Align to page size. uptr PageSize = GetPageSizeCached(); uptr bottom = stack & ~(PageSize - 1); ssize += stack - bottom; ssize = RoundUpTo(ssize, PageSize); static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) { PoisonShadow(bottom, ssize, 0); } } INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp, struct ucontext_t *ucp) { static bool reported_warning = false; if (!reported_warning) { Report("WARNING: ASan doesn't fully support makecontext/swapcontext " "functions and may produce false positives in some cases!\n"); reported_warning = true; } // Clear shadow memory for new context (it may share stack // with current context). uptr stack, ssize; ReadContextStack(ucp, &stack, &ssize); ClearShadowMemoryForContextStack(stack, ssize); int res = REAL(swapcontext)(oucp, ucp); // swapcontext technically does not return, but program may swap context to // "oucp" later, that would look as if swapcontext() returned 0. // We need to clear shadow for ucp once again, as it may be in arbitrary // state.
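ClearShadowMemoryForContextStack above is applied on either side of the REAL(swapcontext) call; its page rounding is easier to follow with concrete numbers. A small self-contained sketch of that arithmetic (the page size and addresses are made up for illustration):

#include <cassert>
#include <cstdint>

int main() {
  const std::uint64_t kPageSize = 0x1000;               // assume 4 KiB pages
  std::uint64_t stack = 0x7f0000001234, ssize = 0x5000;
  std::uint64_t bottom = stack & ~(kPageSize - 1);      // 0x7f0000001000
  ssize += stack - bottom;                              // add the 0x234 of slack
  ssize = (ssize + kPageSize - 1) & ~(kPageSize - 1);   // round up to 0x6000
  assert(bottom == 0x7f0000001000 && ssize == 0x6000);
  return 0;
}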
ClearShadowMemoryForContextStack(stack, ssize); return res; } #endif // ASAN_INTERCEPT_SWAPCONTEXT INTERCEPTOR(void, longjmp, void *env, int val) { __asan_handle_no_return(); REAL(longjmp)(env, val); } #if ASAN_INTERCEPT__LONGJMP INTERCEPTOR(void, _longjmp, void *env, int val) { __asan_handle_no_return(); REAL(_longjmp)(env, val); } #endif #if ASAN_INTERCEPT_SIGLONGJMP INTERCEPTOR(void, siglongjmp, void *env, int val) { __asan_handle_no_return(); REAL(siglongjmp)(env, val); } #endif #if ASAN_INTERCEPT___CXA_THROW INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) { CHECK(REAL(__cxa_throw)); __asan_handle_no_return(); REAL(__cxa_throw)(a, b, c); } #endif void *__asan_memcpy(void *to, const void *from, uptr size) { ASAN_MEMCPY_IMPL(nullptr, to, from, size); } void *__asan_memset(void *block, int c, uptr size) { ASAN_MEMSET_IMPL(nullptr, block, c, size); } void *__asan_memmove(void *to, const void *from, uptr size) { ASAN_MEMMOVE_IMPL(nullptr, to, from, size); } #if ASAN_INTERCEPT_INDEX # if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX INTERCEPTOR(char*, index, const char *string, int c) ALIAS(WRAPPER_NAME(strchr)); # else # if SANITIZER_MAC DECLARE_REAL(char*, index, const char *string, int c) OVERRIDE_FUNCTION(index, strchr); # else DEFINE_REAL(char*, index, const char *string, int c) # endif # endif #endif // ASAN_INTERCEPT_INDEX // For both strcat() and strncat() we need to check the validity of |to| // argument irrespective of the |from| length. INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_length = REAL(strlen)(from); ASAN_READ_RANGE(ctx, from, from_length + 1); uptr to_length = REAL(strlen)(to); ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); // If the copying actually happens, the |from| string should not overlap // with the resulting string starting at |to|, which has a length of // to_length + from_length + 1. if (from_length > 0) { CHECK_RANGES_OVERLAP("strcat", to, from_length + to_length + 1, from, from_length + 1); } } return REAL(strcat)(to, from); // NOLINT } INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strncat); ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_length = MaybeRealStrnlen(from, size); uptr copy_length = Min(size, from_length + 1); ASAN_READ_RANGE(ctx, from, copy_length); uptr to_length = REAL(strlen)(to); ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); if (from_length > 0) { CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1, from, copy_length); } } return REAL(strncat)(to, from, size); } INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT #endif // strcpy is called from malloc_default_purgeable_zone() // in __asan::ReplaceSystemAlloc() on Mac. 
if (asan_init_is_running) { return REAL(strcpy)(to, from); // NOLINT } ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_size = REAL(strlen)(from) + 1; CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size); ASAN_READ_RANGE(ctx, from, from_size); ASAN_WRITE_RANGE(ctx, to, from_size); } return REAL(strcpy)(to, from); // NOLINT } INTERCEPTOR(char*, strdup, const char *s) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strdup); if (UNLIKELY(!asan_inited)) return internal_strdup(s); ENSURE_ASAN_INITED(); uptr length = REAL(strlen)(s); if (flags()->replace_str) { ASAN_READ_RANGE(ctx, s, length + 1); } GET_STACK_TRACE_MALLOC; void *new_mem = asan_malloc(length + 1, &stack); REAL(memcpy)(new_mem, s, length + 1); return reinterpret_cast(new_mem); } #if ASAN_INTERCEPT___STRDUP INTERCEPTOR(char*, __strdup, const char *s) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strdup); if (UNLIKELY(!asan_inited)) return internal_strdup(s); ENSURE_ASAN_INITED(); uptr length = REAL(strlen)(s); if (flags()->replace_str) { ASAN_READ_RANGE(ctx, s, length + 1); } GET_STACK_TRACE_MALLOC; void *new_mem = asan_malloc(length + 1, &stack); REAL(memcpy)(new_mem, s, length + 1); return reinterpret_cast(new_mem); } #endif // ASAN_INTERCEPT___STRDUP INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, wcslen); SIZE_T length = internal_wcslen(s); if (!asan_init_is_running) { ENSURE_ASAN_INITED(); ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t)); } return length; } INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strncpy); ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1); CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size); ASAN_READ_RANGE(ctx, from, from_size); ASAN_WRITE_RANGE(ctx, to, size); } return REAL(strncpy)(to, from, size); } INTERCEPTOR(long, strtol, const char *nptr, // NOLINT char **endptr, int base) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strtol); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(strtol)(nptr, endptr, base); } char *real_endptr; long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return result; } INTERCEPTOR(int, atoi, const char *nptr) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, atoi); #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr); #endif ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(atoi)(nptr); } char *real_endptr; // "man atoi" tells that behavior of atoi(nptr) is the same as // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the // parsed integer can't be stored in *long* type (even if it's // different from int). So, we just imitate this behavior. 
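As the comment above notes, the interceptor reproduces atoi by calling the real strtol with base 10, and it then uses the endptr reported back to bound the bytes that were actually read from the string. A standalone sketch of that idea against plain libc, outside the runtime (check_range merely stands in for the ASAN_READ_STRING check and just prints the length):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Stand-in for the runtime's range check: report what would be checked.
static void check_range(const char *p, std::size_t n) {
  std::printf("would check %zu byte(s) starting at %p\n", n, (const void *)p);
}

static int atoi_like(const char *nptr) {
  char *real_endptr = nullptr;
  long result = std::strtol(nptr, &real_endptr, 10);  // atoi == strtol(s, 0, 10)
  // The bytes the conversion consumed, plus one for the stopping position,
  // are what the interceptor marks as read.
  check_range(nptr, (std::size_t)(real_endptr - nptr) + 1);
  return (int)result;
}

int main() {
  return atoi_like("1234xyz") == 1234 ? 0 : 1;
}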
int result = REAL(strtol)(nptr, &real_endptr, 10); FixRealStrtolEndptr(nptr, &real_endptr); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } INTERCEPTOR(long, atol, const char *nptr) { // NOLINT void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, atol); #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr); #endif ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(atol)(nptr); } char *real_endptr; long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT FixRealStrtolEndptr(nptr, &real_endptr); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT char **endptr, int base) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strtoll); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(strtoll)(nptr, endptr, base); } char *real_endptr; long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return result; } INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, atoll); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(atoll)(nptr); } char *real_endptr; long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT FixRealStrtolEndptr(nptr, &real_endptr); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } #endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL #if ASAN_INTERCEPT___CXA_ATEXIT static void AtCxaAtexit(void *unused) { (void)unused; StopInitOrderChecking(); } INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, void *dso_handle) { #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle); #endif ENSURE_ASAN_INITED(); int res = REAL(__cxa_atexit)(func, arg, dso_handle); REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr); return res; } #endif // ASAN_INTERCEPT___CXA_ATEXIT #if ASAN_INTERCEPT_FORK INTERCEPTOR(int, fork, void) { ENSURE_ASAN_INITED(); if (common_flags()->coverage) CovBeforeFork(); int pid = REAL(fork)(); if (common_flags()->coverage) CovAfterFork(pid); return pid; } #endif // ASAN_INTERCEPT_FORK // ---------------------- InitializeAsanInterceptors ---------------- {{{1 namespace __asan { void InitializeAsanInterceptors() { static bool was_called_once; CHECK(!was_called_once); was_called_once = true; InitializeCommonInterceptors(); // Intercept str* functions. ASAN_INTERCEPT_FUNC(strcat); // NOLINT ASAN_INTERCEPT_FUNC(strcpy); // NOLINT ASAN_INTERCEPT_FUNC(wcslen); ASAN_INTERCEPT_FUNC(strncat); ASAN_INTERCEPT_FUNC(strncpy); ASAN_INTERCEPT_FUNC(strdup); #if ASAN_INTERCEPT___STRDUP ASAN_INTERCEPT_FUNC(__strdup); #endif #if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX ASAN_INTERCEPT_FUNC(index); #endif ASAN_INTERCEPT_FUNC(atoi); ASAN_INTERCEPT_FUNC(atol); ASAN_INTERCEPT_FUNC(strtol); #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL ASAN_INTERCEPT_FUNC(atoll); ASAN_INTERCEPT_FUNC(strtoll); #endif // Intecept signal- and jump-related functions. ASAN_INTERCEPT_FUNC(longjmp); #if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION ASAN_INTERCEPT_FUNC(sigaction); #if SANITIZER_ANDROID ASAN_INTERCEPT_FUNC(bsd_signal); #endif ASAN_INTERCEPT_FUNC(signal); #endif #if ASAN_INTERCEPT_SWAPCONTEXT ASAN_INTERCEPT_FUNC(swapcontext); #endif #if ASAN_INTERCEPT__LONGJMP ASAN_INTERCEPT_FUNC(_longjmp); #endif #if ASAN_INTERCEPT_SIGLONGJMP ASAN_INTERCEPT_FUNC(siglongjmp); #endif // Intercept exception handling functions. 
#if ASAN_INTERCEPT___CXA_THROW ASAN_INTERCEPT_FUNC(__cxa_throw); #endif // Intercept threading-related functions #if ASAN_INTERCEPT_PTHREAD_CREATE #if defined(ASAN_PTHREAD_CREATE_VERSION) ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION); #else ASAN_INTERCEPT_FUNC(pthread_create); #endif ASAN_INTERCEPT_FUNC(pthread_join); #endif // Intercept atexit function. #if ASAN_INTERCEPT___CXA_ATEXIT ASAN_INTERCEPT_FUNC(__cxa_atexit); #endif #if ASAN_INTERCEPT_FORK ASAN_INTERCEPT_FUNC(fork); #endif InitializePlatformInterceptors(); VReport(1, "AddressSanitizer: libc interceptors initialized\n"); } } // namespace __asan Index: vendor/compiler-rt/dist/lib/asan/asan_interface.inc =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_interface.inc (revision 317686) +++ vendor/compiler-rt/dist/lib/asan/asan_interface.inc (revision 317687) @@ -1,167 +1,169 @@ //===-- asan_interface.inc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Asan interface list. //===----------------------------------------------------------------------===// INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack) INTERFACE_FUNCTION(__asan_address_is_poisoned) INTERFACE_FUNCTION(__asan_after_dynamic_init) INTERFACE_FUNCTION(__asan_alloca_poison) INTERFACE_FUNCTION(__asan_allocas_unpoison) INTERFACE_FUNCTION(__asan_before_dynamic_init) INTERFACE_FUNCTION(__asan_describe_address) INTERFACE_FUNCTION(__asan_exp_load1) INTERFACE_FUNCTION(__asan_exp_load2) INTERFACE_FUNCTION(__asan_exp_load4) INTERFACE_FUNCTION(__asan_exp_load8) INTERFACE_FUNCTION(__asan_exp_load16) INTERFACE_FUNCTION(__asan_exp_loadN) INTERFACE_FUNCTION(__asan_exp_store1) INTERFACE_FUNCTION(__asan_exp_store2) INTERFACE_FUNCTION(__asan_exp_store4) INTERFACE_FUNCTION(__asan_exp_store8) INTERFACE_FUNCTION(__asan_exp_store16) INTERFACE_FUNCTION(__asan_exp_storeN) INTERFACE_FUNCTION(__asan_get_alloc_stack) INTERFACE_FUNCTION(__asan_get_current_fake_stack) INTERFACE_FUNCTION(__asan_get_free_stack) INTERFACE_FUNCTION(__asan_get_report_access_size) INTERFACE_FUNCTION(__asan_get_report_access_type) INTERFACE_FUNCTION(__asan_get_report_address) INTERFACE_FUNCTION(__asan_get_report_bp) INTERFACE_FUNCTION(__asan_get_report_description) INTERFACE_FUNCTION(__asan_get_report_pc) INTERFACE_FUNCTION(__asan_get_report_sp) INTERFACE_FUNCTION(__asan_get_shadow_mapping) INTERFACE_FUNCTION(__asan_handle_no_return) INTERFACE_FUNCTION(__asan_init) INTERFACE_FUNCTION(__asan_load_cxx_array_cookie) INTERFACE_FUNCTION(__asan_load1) INTERFACE_FUNCTION(__asan_load2) INTERFACE_FUNCTION(__asan_load4) INTERFACE_FUNCTION(__asan_load8) INTERFACE_FUNCTION(__asan_load16) INTERFACE_FUNCTION(__asan_loadN) INTERFACE_FUNCTION(__asan_load1_noabort) INTERFACE_FUNCTION(__asan_load2_noabort) INTERFACE_FUNCTION(__asan_load4_noabort) INTERFACE_FUNCTION(__asan_load8_noabort) INTERFACE_FUNCTION(__asan_load16_noabort) INTERFACE_FUNCTION(__asan_loadN_noabort) INTERFACE_FUNCTION(__asan_locate_address) INTERFACE_FUNCTION(__asan_memcpy) INTERFACE_FUNCTION(__asan_memmove) INTERFACE_FUNCTION(__asan_memset) INTERFACE_FUNCTION(__asan_poison_cxx_array_cookie) INTERFACE_FUNCTION(__asan_poison_intra_object_redzone) INTERFACE_FUNCTION(__asan_poison_memory_region) INTERFACE_FUNCTION(__asan_poison_stack_memory) 
INTERFACE_FUNCTION(__asan_print_accumulated_stats) INTERFACE_FUNCTION(__asan_region_is_poisoned) INTERFACE_FUNCTION(__asan_register_globals) +INTERFACE_FUNCTION(__asan_register_elf_globals) INTERFACE_FUNCTION(__asan_register_image_globals) INTERFACE_FUNCTION(__asan_report_error) INTERFACE_FUNCTION(__asan_report_exp_load1) INTERFACE_FUNCTION(__asan_report_exp_load2) INTERFACE_FUNCTION(__asan_report_exp_load4) INTERFACE_FUNCTION(__asan_report_exp_load8) INTERFACE_FUNCTION(__asan_report_exp_load16) INTERFACE_FUNCTION(__asan_report_exp_load_n) INTERFACE_FUNCTION(__asan_report_exp_store1) INTERFACE_FUNCTION(__asan_report_exp_store2) INTERFACE_FUNCTION(__asan_report_exp_store4) INTERFACE_FUNCTION(__asan_report_exp_store8) INTERFACE_FUNCTION(__asan_report_exp_store16) INTERFACE_FUNCTION(__asan_report_exp_store_n) INTERFACE_FUNCTION(__asan_report_load1) INTERFACE_FUNCTION(__asan_report_load2) INTERFACE_FUNCTION(__asan_report_load4) INTERFACE_FUNCTION(__asan_report_load8) INTERFACE_FUNCTION(__asan_report_load16) INTERFACE_FUNCTION(__asan_report_load_n) INTERFACE_FUNCTION(__asan_report_load1_noabort) INTERFACE_FUNCTION(__asan_report_load2_noabort) INTERFACE_FUNCTION(__asan_report_load4_noabort) INTERFACE_FUNCTION(__asan_report_load8_noabort) INTERFACE_FUNCTION(__asan_report_load16_noabort) INTERFACE_FUNCTION(__asan_report_load_n_noabort) INTERFACE_FUNCTION(__asan_report_present) INTERFACE_FUNCTION(__asan_report_store1) INTERFACE_FUNCTION(__asan_report_store2) INTERFACE_FUNCTION(__asan_report_store4) INTERFACE_FUNCTION(__asan_report_store8) INTERFACE_FUNCTION(__asan_report_store16) INTERFACE_FUNCTION(__asan_report_store_n) INTERFACE_FUNCTION(__asan_report_store1_noabort) INTERFACE_FUNCTION(__asan_report_store2_noabort) INTERFACE_FUNCTION(__asan_report_store4_noabort) INTERFACE_FUNCTION(__asan_report_store8_noabort) INTERFACE_FUNCTION(__asan_report_store16_noabort) INTERFACE_FUNCTION(__asan_report_store_n_noabort) INTERFACE_FUNCTION(__asan_set_death_callback) INTERFACE_FUNCTION(__asan_set_error_report_callback) INTERFACE_FUNCTION(__asan_set_shadow_00) INTERFACE_FUNCTION(__asan_set_shadow_f1) INTERFACE_FUNCTION(__asan_set_shadow_f2) INTERFACE_FUNCTION(__asan_set_shadow_f3) INTERFACE_FUNCTION(__asan_set_shadow_f5) INTERFACE_FUNCTION(__asan_set_shadow_f8) INTERFACE_FUNCTION(__asan_stack_free_0) INTERFACE_FUNCTION(__asan_stack_free_1) INTERFACE_FUNCTION(__asan_stack_free_2) INTERFACE_FUNCTION(__asan_stack_free_3) INTERFACE_FUNCTION(__asan_stack_free_4) INTERFACE_FUNCTION(__asan_stack_free_5) INTERFACE_FUNCTION(__asan_stack_free_6) INTERFACE_FUNCTION(__asan_stack_free_7) INTERFACE_FUNCTION(__asan_stack_free_8) INTERFACE_FUNCTION(__asan_stack_free_9) INTERFACE_FUNCTION(__asan_stack_free_10) INTERFACE_FUNCTION(__asan_stack_malloc_0) INTERFACE_FUNCTION(__asan_stack_malloc_1) INTERFACE_FUNCTION(__asan_stack_malloc_2) INTERFACE_FUNCTION(__asan_stack_malloc_3) INTERFACE_FUNCTION(__asan_stack_malloc_4) INTERFACE_FUNCTION(__asan_stack_malloc_5) INTERFACE_FUNCTION(__asan_stack_malloc_6) INTERFACE_FUNCTION(__asan_stack_malloc_7) INTERFACE_FUNCTION(__asan_stack_malloc_8) INTERFACE_FUNCTION(__asan_stack_malloc_9) INTERFACE_FUNCTION(__asan_stack_malloc_10) INTERFACE_FUNCTION(__asan_store1) INTERFACE_FUNCTION(__asan_store2) INTERFACE_FUNCTION(__asan_store4) INTERFACE_FUNCTION(__asan_store8) INTERFACE_FUNCTION(__asan_store16) INTERFACE_FUNCTION(__asan_storeN) INTERFACE_FUNCTION(__asan_store1_noabort) INTERFACE_FUNCTION(__asan_store2_noabort) INTERFACE_FUNCTION(__asan_store4_noabort) 
INTERFACE_FUNCTION(__asan_store8_noabort) INTERFACE_FUNCTION(__asan_store16_noabort) INTERFACE_FUNCTION(__asan_storeN_noabort) INTERFACE_FUNCTION(__asan_unpoison_intra_object_redzone) INTERFACE_FUNCTION(__asan_unpoison_memory_region) INTERFACE_FUNCTION(__asan_unpoison_stack_memory) INTERFACE_FUNCTION(__asan_unregister_globals) +INTERFACE_FUNCTION(__asan_unregister_elf_globals) INTERFACE_FUNCTION(__asan_unregister_image_globals) INTERFACE_FUNCTION(__asan_version_mismatch_check_v8) INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber) INTERFACE_FUNCTION(__sanitizer_print_stack_trace) INTERFACE_FUNCTION(__sanitizer_ptr_cmp) INTERFACE_FUNCTION(__sanitizer_ptr_sub) INTERFACE_FUNCTION(__sanitizer_start_switch_fiber) INTERFACE_FUNCTION(__sanitizer_unaligned_load16) INTERFACE_FUNCTION(__sanitizer_unaligned_load32) INTERFACE_FUNCTION(__sanitizer_unaligned_load64) INTERFACE_FUNCTION(__sanitizer_unaligned_store16) INTERFACE_FUNCTION(__sanitizer_unaligned_store32) INTERFACE_FUNCTION(__sanitizer_unaligned_store64) INTERFACE_WEAK_FUNCTION(__asan_default_options) INTERFACE_WEAK_FUNCTION(__asan_default_suppressions) INTERFACE_WEAK_FUNCTION(__asan_on_error) Index: vendor/compiler-rt/dist/lib/asan/asan_interface_internal.h =================================================================== --- vendor/compiler-rt/dist/lib/asan/asan_interface_internal.h (revision 317686) +++ vendor/compiler-rt/dist/lib/asan/asan_interface_internal.h (revision 317687) @@ -1,250 +1,255 @@ //===-- asan_interface_internal.h -------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // // This header declares the AddressSanitizer runtime interface functions. // The runtime library has to define these functions so the instrumented program // could call them. // // See also include/sanitizer/asan_interface.h //===----------------------------------------------------------------------===// #ifndef ASAN_INTERFACE_INTERNAL_H #define ASAN_INTERFACE_INTERNAL_H #include "sanitizer_common/sanitizer_internal_defs.h" #include "asan_init_version.h" using __sanitizer::uptr; using __sanitizer::u64; using __sanitizer::u32; extern "C" { // This function should be called at the very beginning of the process, // before any instrumented code is executed and before any call to malloc. SANITIZER_INTERFACE_ATTRIBUTE void __asan_init(); // This function exists purely to get a linker/loader error when using // incompatible versions of instrumentation and runtime library. Please note // that __asan_version_mismatch_check is a macro that is replaced with // __asan_version_mismatch_check_vXXX at compile-time. SANITIZER_INTERFACE_ATTRIBUTE void __asan_version_mismatch_check(); // This structure is used to describe the source location of a place where // global was defined. struct __asan_global_source_location { const char *filename; int line_no; int column_no; }; // This structure describes an instrumented global variable. struct __asan_global { uptr beg; // The address of the global. uptr size; // The original size of the global. uptr size_with_redzone; // The size with the redzone. const char *name; // Name as a C string. const char *module_name; // Module name as a C string. This pointer is a // unique identifier of a module. 
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer. __asan_global_source_location *location; // Source location of a global, // or NULL if it is unknown. uptr odr_indicator; // The address of the ODR indicator symbol. }; // These functions can be called on some platforms to find globals in the same // loaded image as `flag' and apply __asan_(un)register_globals to them, // filtering out redundant calls. SANITIZER_INTERFACE_ATTRIBUTE void __asan_register_image_globals(uptr *flag); SANITIZER_INTERFACE_ATTRIBUTE void __asan_unregister_image_globals(uptr *flag); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_register_elf_globals(uptr *flag, void *start, void *stop); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop); + // These two functions should be called by the instrumented code. // 'globals' is an array of structures describing 'n' globals. SANITIZER_INTERFACE_ATTRIBUTE void __asan_register_globals(__asan_global *globals, uptr n); SANITIZER_INTERFACE_ATTRIBUTE void __asan_unregister_globals(__asan_global *globals, uptr n); // These two functions should be called before and after dynamic initializers // of a single module run, respectively. SANITIZER_INTERFACE_ATTRIBUTE void __asan_before_dynamic_init(const char *module_name); SANITIZER_INTERFACE_ATTRIBUTE void __asan_after_dynamic_init(); // Sets bytes of the given range of the shadow memory into specific value. SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_shadow_00(uptr addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_shadow_f1(uptr addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_shadow_f2(uptr addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_shadow_f3(uptr addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_shadow_f5(uptr addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_shadow_f8(uptr addr, uptr size); // These two functions are used by instrumented code in the // use-after-scope mode. They mark memory for local variables as // unaddressable when they leave scope and addressable before the // function exits. SANITIZER_INTERFACE_ATTRIBUTE void __asan_poison_stack_memory(uptr addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_unpoison_stack_memory(uptr addr, uptr size); // Performs cleanup before a NoReturn function. Must be called before things // like _exit and execl to avoid false positives on stack. 
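The __asan_register_elf_globals / __asan_unregister_elf_globals entry points declared above take the bounds of an array of __asan_global descriptors for one loaded image. A plausible sketch of the calling side, assuming GCC/Clang constructor attributes and linker-generated __start_/__stop_ symbols for a descriptor section named asan_globals (the section and symbol names, and unsigned long standing in for uptr, are assumptions for illustration, not taken from this diff):

extern "C" {
void __asan_register_elf_globals(unsigned long *flag, void *start, void *stop);
void __asan_unregister_elf_globals(unsigned long *flag, void *start, void *stop);
// Linker-provided bounds of the descriptor section (byte-typed for brevity;
// real instrumentation would treat them as an array of __asan_global).
extern char __start_asan_globals[];
extern char __stop_asan_globals[];
}

static unsigned long registered_flag;  // lets the runtime filter duplicate calls

__attribute__((constructor)) static void module_register_asan_globals() {
  __asan_register_elf_globals(&registered_flag,
                              __start_asan_globals, __stop_asan_globals);
}

__attribute__((destructor)) static void module_unregister_asan_globals() {
  __asan_unregister_elf_globals(&registered_flag,
                                __start_asan_globals, __stop_asan_globals);
}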
SANITIZER_INTERFACE_ATTRIBUTE void __asan_handle_no_return(); SANITIZER_INTERFACE_ATTRIBUTE void __asan_poison_memory_region(void const volatile *addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_unpoison_memory_region(void const volatile *addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE int __asan_address_is_poisoned(void const volatile *addr); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_region_is_poisoned(uptr beg, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_describe_address(uptr addr); SANITIZER_INTERFACE_ATTRIBUTE int __asan_report_present(); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_report_pc(); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_report_bp(); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_report_sp(); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_report_address(); SANITIZER_INTERFACE_ATTRIBUTE int __asan_get_report_access_type(); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_report_access_size(); SANITIZER_INTERFACE_ATTRIBUTE const char * __asan_get_report_description(); SANITIZER_INTERFACE_ATTRIBUTE const char * __asan_locate_address(uptr addr, char *name, uptr name_size, uptr *region_address, uptr *region_size); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id); SANITIZER_INTERFACE_ATTRIBUTE void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset); SANITIZER_INTERFACE_ATTRIBUTE void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, uptr access_size, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_death_callback(void (*callback)(void)); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_error_report_callback(void (*callback)(const char*)); SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void __asan_on_error(); SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats(); SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char* __asan_default_options(); SANITIZER_INTERFACE_ATTRIBUTE extern uptr __asan_shadow_memory_dynamic_address; // Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return SANITIZER_INTERFACE_ATTRIBUTE extern int __asan_option_detect_stack_use_after_return; SANITIZER_INTERFACE_ATTRIBUTE extern uptr *__asan_test_only_reported_buggy_pointer; SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2_noabort(uptr p); 
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16_noabort(uptr p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN_noabort(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN_noabort(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load1(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load2(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load4(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load8(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load16(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store1(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store2(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store4(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store8(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store16(uptr p, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr p, uptr size, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr p, uptr size, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE void* __asan_memcpy(void *dst, const void *src, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void* __asan_memset(void *s, int c, uptr n); SANITIZER_INTERFACE_ATTRIBUTE void* __asan_memmove(void* dest, const void* src, uptr n); SANITIZER_INTERFACE_ATTRIBUTE void __asan_poison_cxx_array_cookie(uptr p); SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_load_cxx_array_cookie(uptr *p); SANITIZER_INTERFACE_ATTRIBUTE void __asan_poison_intra_object_redzone(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_unpoison_intra_object_redzone(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_alloca_poison(uptr addr, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_allocas_unpoison(uptr top, uptr bottom); SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char* __asan_default_suppressions(); } // extern "C" #endif // ASAN_INTERFACE_INTERNAL_H Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc (revision 317687) @@ -1,684 +1,662 @@ //===-- sanitizer_coverage.cc ---------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Sanitizer Coverage. // This file implements run-time support for a poor man's coverage tool. // // Compiler instrumentation: // For every interesting basic block the compiler injects the following code: // if (Guard < 0) { // __sanitizer_cov(&Guard); // } // At the module start up time __sanitizer_cov_module_init sets the guards // to consecutive negative numbers (-1, -2, -3, ...). // It's fine to call __sanitizer_cov more than once for a given block. // // Run-time: // - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC). // and atomically set Guard to -Guard. // - __sanitizer_cov_dump: dump the coverage data to disk. 
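The guard scheme described in this header comment can be made concrete with a toy model: module init hands out consecutive negative guard values, the injected check calls the runtime only while a guard is still negative, and the runtime flips the sign so later executions of the block cost nothing. This is only an illustration of the scheme sketched above; record_visit stands in for __sanitizer_cov and nothing here is runtime code:

#include <cstdio>

static int guards[3] = {-1, -2, -3};   // as if set by module init
static bool visited[3];

static void record_visit(int *guard) {
  visited[-*guard - 1] = true;  // -guard - 1 recovers the slot index
  *guard = -*guard;             // now positive: later hits skip the call
}

static void block(int id) {
  if (guards[id] < 0) record_visit(&guards[id]);  // the injected check
}

int main() {
  block(0); block(0); block(2);   // block 1 never executes
  for (int i = 0; i < 3; i++)
    std::printf("block %d visited: %s\n", i, visited[i] ? "yes" : "no");
  return 0;
}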
// For every module of the current process that has coverage data // this will create a file module_name.PID.sancov. // // The file format is simple: the first 8 bytes is the magic, // one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the // magic defines the size of the following offsets. // The rest of the data is the offsets in the module. // // Eventually, this coverage implementation should be obsoleted by a more // powerful general purpose Clang/LLVM coverage instrumentation. // Consider this implementation as prototype. // // FIXME: support (or at least test with) dlclose. //===----------------------------------------------------------------------===// #include "sanitizer_allocator_internal.h" #include "sanitizer_common.h" #include "sanitizer_libc.h" #include "sanitizer_mutex.h" #include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" #include "sanitizer_symbolizer.h" #include "sanitizer_flags.h" using namespace __sanitizer; static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL; static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL; static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2; static const u64 kMagic = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32; static atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once. static atomic_uintptr_t coverage_counter; // pc_array is the array containing the covered PCs. // To make the pc_array thread- and async-signal-safe it has to be large enough. // 128M counters "ought to be enough for anybody" (4M on 32-bit). // With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file. // In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping() // dump current memory layout to another file. static bool cov_sandboxed = false; static fd_t cov_fd = kInvalidFd; static unsigned int cov_max_block_size = 0; static bool coverage_enabled = false; static const char *coverage_dir; namespace __sanitizer { class CoverageData { public: void Init(); void Enable(); void Disable(); void ReInit(); void BeforeFork(); void AfterFork(int child_pid); void Extend(uptr npcs); void Add(uptr pc, u32 *guard); - void DumpAsBitSet(); void DumpOffsets(); void DumpAll(); void InitializeGuardArray(s32 *guards); void InitializeGuards(s32 *guards, uptr n, const char *module_name, uptr caller_pc); void ReinitializeGuards(); uptr *data(); uptr size() const; private: struct NamedPcRange { const char *copied_module_name; uptr beg, end; // elements [beg,end) in pc_array. }; void DirectOpen(); void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end); void GetRangeOffsets(const NamedPcRange& r, Symbolizer* s, InternalMmapVector* offsets) const; // Maximal size pc array may ever grow. // We MmapNoReserve this space to ensure that the array is contiguous. static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27); // The amount file mapping for the pc array is grown by. static const uptr kPcArrayMmapSize = 64 * 1024; // pc_array is allocated with MmapNoReserveOrDie and so it uses only as // much RAM as it really needs. uptr *pc_array; // Index of the first available pc_array slot. atomic_uintptr_t pc_array_index; // Array size. atomic_uintptr_t pc_array_size; // Current file mapped size of the pc array. uptr pc_array_mapped_size; // Descriptor of the file mapped pc array. fd_t pc_fd; // Vector of coverage guard arrays, protected by mu. InternalMmapVectorNoCtor guard_array_vec; // Vector of module and compilation unit pc ranges. 
InternalMmapVectorNoCtor comp_unit_name_vec; InternalMmapVectorNoCtor module_name_vec; StaticSpinMutex mu; }; static CoverageData coverage_data; void CovUpdateMapping(const char *path, uptr caller_pc = 0); void CoverageData::DirectOpen() { InternalScopedString path(kMaxPathLength); internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw", coverage_dir, internal_getpid()); pc_fd = OpenFile(path.data(), RdWr); if (pc_fd == kInvalidFd) { Report("Coverage: failed to open %s for reading/writing\n", path.data()); Die(); } pc_array_mapped_size = 0; CovUpdateMapping(coverage_dir); } void CoverageData::Init() { pc_fd = kInvalidFd; + + if (!common_flags()->coverage) return; + Printf("**\n***\n***\n"); + Printf("**WARNING: this implementation of SanitizerCoverage is deprecated\n"); + Printf("**WARNING: and will be removed in future versions\n"); + Printf("**WARNING: See https://clang.llvm.org/docs/SanitizerCoverage.html\n"); + Printf("**\n***\n***\n"); } void CoverageData::Enable() { if (pc_array) return; pc_array = reinterpret_cast( MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit")); atomic_store(&pc_array_index, 0, memory_order_relaxed); if (common_flags()->coverage_direct) { + Report("coverage_direct=1 is deprecated, don't use it.\n"); + Die(); atomic_store(&pc_array_size, 0, memory_order_relaxed); } else { atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed); } } void CoverageData::InitializeGuardArray(s32 *guards) { Enable(); // Make sure coverage is enabled at this point. s32 n = guards[0]; for (s32 j = 1; j <= n; j++) { uptr idx = atomic_load_relaxed(&pc_array_index); atomic_store_relaxed(&pc_array_index, idx + 1); guards[j] = -static_cast(idx + 1); } } void CoverageData::Disable() { if (pc_array) { UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize); pc_array = nullptr; } if (pc_fd != kInvalidFd) { CloseFile(pc_fd); pc_fd = kInvalidFd; } } void CoverageData::ReinitializeGuards() { // Assuming single thread. atomic_store(&pc_array_index, 0, memory_order_relaxed); for (uptr i = 0; i < guard_array_vec.size(); i++) InitializeGuardArray(guard_array_vec[i]); } void CoverageData::ReInit() { Disable(); if (coverage_enabled) { if (common_flags()->coverage_direct) { // In memory-mapped mode we must extend the new file to the known array // size. uptr size = atomic_load(&pc_array_size, memory_order_relaxed); uptr npcs = size / sizeof(uptr); Enable(); if (size) Extend(npcs); if (coverage_enabled) CovUpdateMapping(coverage_dir); } else { Enable(); } } // Re-initialize the guards. // We are single-threaded now, no need to grab any lock. CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0); ReinitializeGuards(); } void CoverageData::BeforeFork() { mu.Lock(); } void CoverageData::AfterFork(int child_pid) { // We are single-threaded so it's OK to release the lock early. mu.Unlock(); if (child_pid == 0) ReInit(); } // Extend coverage PC array to fit additional npcs elements. void CoverageData::Extend(uptr npcs) { if (!common_flags()->coverage_direct) return; SpinMutexLock l(&mu); uptr size = atomic_load(&pc_array_size, memory_order_relaxed); size += npcs * sizeof(uptr); if (coverage_enabled && size > pc_array_mapped_size) { if (pc_fd == kInvalidFd) DirectOpen(); CHECK_NE(pc_fd, kInvalidFd); uptr new_mapped_size = pc_array_mapped_size; while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize; CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize); // Extend the file and map the new space at the end of pc_array. 
uptr res = internal_ftruncate(pc_fd, new_mapped_size); int err; if (internal_iserror(res, &err)) { Printf("failed to extend raw coverage file: %d\n", err); Die(); } uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size; void *p = MapWritableFileToMemory((void *)next_map_base, new_mapped_size - pc_array_mapped_size, pc_fd, pc_array_mapped_size); CHECK_EQ((uptr)p, next_map_base); pc_array_mapped_size = new_mapped_size; } atomic_store(&pc_array_size, size, memory_order_release); } void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end) { auto sym = Symbolizer::GetOrInit(); if (!sym) return; const char *module_name = sym->GetModuleNameForPc(caller_pc); if (!module_name) return; if (module_name_vec.empty() || module_name_vec.back().copied_module_name != module_name) module_name_vec.push_back({module_name, range_beg, range_end}); else module_name_vec.back().end = range_end; } void CoverageData::InitializeGuards(s32 *guards, uptr n, const char *comp_unit_name, uptr caller_pc) { // The array 'guards' has n+1 elements, we use the element zero // to store 'n'. CHECK_LT(n, 1 << 30); guards[0] = static_cast(n); InitializeGuardArray(guards); SpinMutexLock l(&mu); uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed); uptr range_beg = range_end - n; comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end}); guard_array_vec.push_back(guards); UpdateModuleNameVec(caller_pc, range_beg, range_end); } static const uptr kBundleCounterBits = 16; // When coverage_order_pcs==true and SANITIZER_WORDSIZE==64 // we insert the global counter into the first 16 bits of the PC. uptr BundlePcAndCounter(uptr pc, uptr counter) { if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs) return pc; static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1; if (counter > kMaxCounter) counter = kMaxCounter; CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits)); return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits)); } uptr UnbundlePc(uptr bundle) { if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs) return bundle; return (bundle << kBundleCounterBits) >> kBundleCounterBits; } uptr UnbundleCounter(uptr bundle) { if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs) return 0; return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits); } // If guard is negative, atomically set it to -guard and store the PC in // pc_array. void CoverageData::Add(uptr pc, u32 *guard) { atomic_uint32_t *atomic_guard = reinterpret_cast(guard); s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed); if (guard_value >= 0) return; atomic_store(atomic_guard, -guard_value, memory_order_relaxed); if (!pc_array) return; uptr idx = -guard_value - 1; if (idx >= atomic_load(&pc_array_index, memory_order_acquire)) return; // May happen after fork when pc_array_index becomes 0. CHECK_LT(idx, atomic_load(&pc_array_size, memory_order_acquire)); uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed); pc_array[idx] = BundlePcAndCounter(pc, counter); } uptr *CoverageData::data() { return pc_array; } uptr CoverageData::size() const { return atomic_load(&pc_array_index, memory_order_relaxed); } // Block layout for packed file format: header, followed by module name (no // trailing zero), followed by data blob. 
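BundlePcAndCounter and the two Unbundle helpers above pack the (capped) execution counter into the top 16 bits of a 64-bit PC when coverage_order_pcs is in effect. A short worked example of that bit packing, with made-up values:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned kBits = 16;                       // kBundleCounterBits
  std::uint64_t pc = 0x00007f1234567890ULL;        // top 16 bits must be zero
  std::uint64_t counter = 0x0042;                  // already capped to 16 bits
  std::uint64_t bundle = pc | (counter << (64 - kBits));
  assert(((bundle << kBits) >> kBits) == pc);      // what UnbundlePc recovers
  assert((bundle >> (64 - kBits)) == counter);     // what UnbundleCounter recovers
  return 0;
}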
struct CovHeader { int pid; unsigned int module_name_length; unsigned int data_length; }; static void CovWritePacked(int pid, const char *module, const void *blob, unsigned int blob_size) { if (cov_fd == kInvalidFd) return; unsigned module_name_length = internal_strlen(module); CovHeader header = {pid, module_name_length, blob_size}; if (cov_max_block_size == 0) { // Writing to a file. Just go ahead. WriteToFile(cov_fd, &header, sizeof(header)); WriteToFile(cov_fd, module, module_name_length); WriteToFile(cov_fd, blob, blob_size); } else { // Writing to a socket. We want to split the data into appropriately sized // blocks. InternalScopedBuffer block(cov_max_block_size); CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data()); uptr header_size_with_module = sizeof(header) + module_name_length; CHECK_LT(header_size_with_module, cov_max_block_size); unsigned int max_payload_size = cov_max_block_size - header_size_with_module; char *block_pos = block.data(); internal_memcpy(block_pos, &header, sizeof(header)); block_pos += sizeof(header); internal_memcpy(block_pos, module, module_name_length); block_pos += module_name_length; char *block_data_begin = block_pos; const char *blob_pos = (const char *)blob; while (blob_size > 0) { unsigned int payload_size = Min(blob_size, max_payload_size); blob_size -= payload_size; internal_memcpy(block_data_begin, blob_pos, payload_size); blob_pos += payload_size; ((CovHeader *)block.data())->data_length = payload_size; WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size); } } } // If packed = false: .. (name = module name). // If packed = true and name == 0: ... // If packed = true and name != 0: .. (name is // user-supplied). static fd_t CovOpenFile(InternalScopedString *path, bool packed, const char *name, const char *extension = "sancov") { path->clear(); if (!packed) { CHECK(name); path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(), extension); } else { if (!name) path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(), extension); else path->append("%s/%s.%s.packed", coverage_dir, name, extension); } error_t err; fd_t fd = OpenFile(path->data(), WrOnly, &err); if (fd == kInvalidFd) Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n", path->data(), err); return fd; } -void CoverageData::DumpAsBitSet() { - if (!common_flags()->coverage_bitset) return; - if (!size()) return; - InternalScopedBuffer out(size()); - InternalScopedString path(kMaxPathLength); - for (uptr m = 0; m < module_name_vec.size(); m++) { - uptr n_set_bits = 0; - auto r = module_name_vec[m]; - CHECK(r.copied_module_name); - CHECK_LE(r.beg, r.end); - CHECK_LE(r.end, size()); - for (uptr i = r.beg; i < r.end; i++) { - uptr pc = UnbundlePc(pc_array[i]); - out[i] = pc ? 
'1' : '0'; - if (pc) - n_set_bits++; - } - const char *base_name = StripModuleName(r.copied_module_name); - fd_t fd = CovOpenFile(&path, /* packed */false, base_name, "bitset-sancov"); - if (fd == kInvalidFd) return; - WriteToFile(fd, out.data() + r.beg, r.end - r.beg); - CloseFile(fd); - VReport(1, - " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n", - r.end - r.beg, base_name, n_set_bits); - } -} - - void CoverageData::GetRangeOffsets(const NamedPcRange& r, Symbolizer* sym, InternalMmapVector* offsets) const { offsets->clear(); for (uptr i = 0; i < kNumWordsForMagic; i++) offsets->push_back(0); CHECK(r.copied_module_name); CHECK_LE(r.beg, r.end); CHECK_LE(r.end, size()); for (uptr i = r.beg; i < r.end; i++) { uptr pc = UnbundlePc(pc_array[i]); uptr counter = UnbundleCounter(pc_array[i]); if (!pc) continue; // Not visited. uptr offset = 0; sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset); offsets->push_back(BundlePcAndCounter(offset, counter)); } CHECK_GE(offsets->size(), kNumWordsForMagic); SortArray(offsets->data(), offsets->size()); for (uptr i = 0; i < offsets->size(); i++) (*offsets)[i] = UnbundlePc((*offsets)[i]); } static void GenerateHtmlReport(const InternalMmapVector &cov_files) { if (!common_flags()->html_cov_report) { return; } char *sancov_path = FindPathToBinary(common_flags()->sancov_path); if (sancov_path == nullptr) { return; } InternalMmapVector sancov_argv(cov_files.size() * 2 + 3); sancov_argv.push_back(sancov_path); sancov_argv.push_back(internal_strdup("-html-report")); auto argv_deleter = at_scope_exit([&] { for (uptr i = 0; i < sancov_argv.size(); ++i) { InternalFree(sancov_argv[i]); } }); for (const auto &cov_file : cov_files) { sancov_argv.push_back(internal_strdup(cov_file)); } { ListOfModules modules; modules.init(); for (const LoadedModule &module : modules) { sancov_argv.push_back(internal_strdup(module.full_name())); } } InternalScopedString report_path(kMaxPathLength); fd_t report_fd = CovOpenFile(&report_path, false /* packed */, GetProcessName(), "html"); int pid = StartSubprocess(sancov_argv[0], sancov_argv.data(), kInvalidFd /* stdin */, report_fd /* std_out */); if (pid > 0) { int result = WaitForProcess(pid); if (result == 0) Printf("coverage report generated to %s\n", report_path.data()); } } void CoverageData::DumpOffsets() { auto sym = Symbolizer::GetOrInit(); if (!common_flags()->coverage_pcs) return; CHECK_NE(sym, nullptr); InternalMmapVector offsets(0); InternalScopedString path(kMaxPathLength); InternalMmapVector cov_files(module_name_vec.size()); auto cov_files_deleter = at_scope_exit([&] { for (uptr i = 0; i < cov_files.size(); ++i) { InternalFree(cov_files[i]); } }); for (uptr m = 0; m < module_name_vec.size(); m++) { auto r = module_name_vec[m]; GetRangeOffsets(r, sym, &offsets); uptr num_offsets = offsets.size() - kNumWordsForMagic; u64 *magic_p = reinterpret_cast(offsets.data()); CHECK_EQ(*magic_p, 0ULL); // FIXME: we may want to write 32-bit offsets even in 64-mode // if all the offsets are small enough. *magic_p = kMagic; const char *module_name = StripModuleName(r.copied_module_name); if (cov_sandboxed) { if (cov_fd != kInvalidFd) { CovWritePacked(internal_getpid(), module_name, offsets.data(), offsets.size() * sizeof(offsets[0])); VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets); } } else { // One file per module per process. 
fd_t fd = CovOpenFile(&path, false /* packed */, module_name); if (fd == kInvalidFd) continue; WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0])); CloseFile(fd); cov_files.push_back(internal_strdup(path.data())); VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets); } } if (cov_fd != kInvalidFd) CloseFile(cov_fd); GenerateHtmlReport(cov_files); } void CoverageData::DumpAll() { if (!coverage_enabled || common_flags()->coverage_direct) return; if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed)) return; - DumpAsBitSet(); DumpOffsets(); } void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) { if (!args) return; if (!coverage_enabled) return; cov_sandboxed = args->coverage_sandboxed; if (!cov_sandboxed) return; cov_max_block_size = args->coverage_max_block_size; if (args->coverage_fd >= 0) { cov_fd = (fd_t)args->coverage_fd; } else { InternalScopedString path(kMaxPathLength); // Pre-open the file now. The sandbox won't allow us to do it later. cov_fd = CovOpenFile(&path, true /* packed */, nullptr); } } fd_t MaybeOpenCovFile(const char *name) { CHECK(name); if (!coverage_enabled) return kInvalidFd; InternalScopedString path(kMaxPathLength); return CovOpenFile(&path, true /* packed */, name); } void CovBeforeFork() { coverage_data.BeforeFork(); } void CovAfterFork(int child_pid) { coverage_data.AfterFork(child_pid); } static void MaybeDumpCoverage() { if (common_flags()->coverage) __sanitizer_cov_dump(); } void InitializeCoverage(bool enabled, const char *dir) { if (coverage_enabled) return; // May happen if two sanitizer enable coverage in the same process. coverage_enabled = enabled; coverage_dir = dir; coverage_data.Init(); if (enabled) coverage_data.Enable(); if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump); AddDieCallback(MaybeDumpCoverage); } void ReInitializeCoverage(bool enabled, const char *dir) { coverage_enabled = enabled; coverage_dir = dir; coverage_data.ReInit(); } void CoverageUpdateMapping() { if (coverage_enabled) CovUpdateMapping(coverage_dir); } } // namespace __sanitizer extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) { coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()), guard); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) { atomic_uint32_t *atomic_guard = reinterpret_cast(guard); if (static_cast( __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0) coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()), guard); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() { coverage_enabled = true; coverage_dir = common_flags()->coverage_dir; coverage_data.Init(); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { coverage_data.DumpAll(); __sanitizer_dump_trace_pc_guard_coverage(); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters, const char *comp_unit_name) { coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC()); if (!common_flags()->coverage_direct) return; if (SANITIZER_ANDROID && coverage_enabled) { // dlopen/dlclose interceptors do not work on Android, so we rely on // Extend() calls to update .sancov.map. 
CovUpdateMapping(coverage_dir, GET_CALLER_PC()); } coverage_data.Extend(npcs); } SANITIZER_INTERFACE_ATTRIBUTE sptr __sanitizer_maybe_open_cov_file(const char *name) { return (sptr)MaybeOpenCovFile(name); } SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_total_unique_coverage() { return atomic_load(&coverage_counter, memory_order_relaxed); } // Default empty implementations (weak). Users should redefine them. SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {} } // extern "C" Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc (revision 317687) @@ -1,173 +1,169 @@ //===-- sanitizer_coverage_libcdep_new.cc ---------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Sanitizer Coverage Controller for Trace PC Guard. #include "sancov_flags.h" #include "sanitizer_allocator_internal.h" #include "sanitizer_atomic.h" #include "sanitizer_common.h" #include "sanitizer_symbolizer.h" using namespace __sanitizer; using AddressRange = LoadedModule::AddressRange; namespace __sancov { namespace { static const u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL; static const u64 Magic32 = 0xC0BFFFFFFFFFFF32ULL; static const u64 Magic = SANITIZER_WORDSIZE == 64 ? 
Magic64 : Magic32; static fd_t OpenFile(const char* path) { error_t err; fd_t fd = OpenFile(path, WrOnly, &err); if (fd == kInvalidFd) Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n", path, err); return fd; } static void GetCoverageFilename(char* path, const char* name, const char* extension) { CHECK(name); internal_snprintf(path, kMaxPathLength, "%s/%s.%zd.%s", common_flags()->coverage_dir, name, internal_getpid(), extension); } static void WriteModuleCoverage(char* file_path, const char* module_name, const uptr* pcs, uptr len) { GetCoverageFilename(file_path, StripModuleName(module_name), "sancov"); fd_t fd = OpenFile(file_path); WriteToFile(fd, &Magic, sizeof(Magic)); WriteToFile(fd, pcs, len * sizeof(*pcs)); CloseFile(fd); Printf("SanitizerCoverage: %s %zd PCs written\n", file_path, len); } static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) { if (!len) return; char* file_path = static_cast(InternalAlloc(kMaxPathLength)); char* module_name = static_cast(InternalAlloc(kMaxPathLength)); uptr* pcs = static_cast(InternalAlloc(len * sizeof(uptr))); internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr)); SortArray(pcs, len); bool module_found = false; uptr last_base = 0; uptr module_start_idx = 0; for (uptr i = 0; i < len; ++i) { const uptr pc = pcs[i]; if (!pc) continue; if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) { Printf("ERROR: bad pc %x\n", pc); continue; } uptr module_base = pc - pcs[i]; if (module_base != last_base || !module_found) { if (module_found) { WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx], i - module_start_idx); } last_base = module_base; module_start_idx = i; module_found = true; __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength, &pcs[i]); } } if (module_found) { WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx], len - module_start_idx); } InternalFree(file_path); InternalFree(module_name); InternalFree(pcs); - - if (sancov_flags()->symbolize) { - Printf("TODO(aizatsky): call sancov to symbolize\n"); - } } // Collects trace-pc guard coverage. // This class relies on zero-initialization. class TracePcGuardController { public: void Initialize() { CHECK(!initialized); initialized = true; InitializeSancovFlags(); pc_vector.Initialize(0); } void InitTracePcGuard(u32* start, u32* end) { if (!initialized) Initialize(); CHECK(!*start); CHECK_NE(start, end); u32 i = pc_vector.size(); for (u32* p = start; p < end; p++) *p = ++i; pc_vector.resize(i); } void TracePcGuard(u32* guard, uptr pc) { atomic_uint32_t* guard_ptr = reinterpret_cast(guard); u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed); if (!idx) return; // we start indices from 1. 
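The trace-pc-guard path above relies on each instrumentation site owning a guard that the init pass numbers with consecutive 1-based indices; the first execution claims the site by atomically exchanging the guard with 0, so each PC is stored at most once and a zero guard means "already seen". A toy model of that claim-and-record step, separate from the runtime (all names are illustrative):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<std::uintptr_t> recorded_pcs;

static void trace_pc_guard(std::atomic<std::uint32_t> *guard, std::uintptr_t pc) {
  std::uint32_t idx = guard->exchange(0, std::memory_order_relaxed);
  if (!idx) return;            // 0 means this site was already claimed
  recorded_pcs[idx - 1] = pc;  // indices start from 1
}

int main() {
  std::atomic<std::uint32_t> guards[2] = {{1}, {2}};  // "init" numbers the sites
  recorded_pcs.resize(2);
  trace_pc_guard(&guards[0], 0x1111);
  trace_pc_guard(&guards[0], 0x1111);  // duplicate hit: ignored
  trace_pc_guard(&guards[1], 0x2222);
  std::printf("%lx %lx\n", (unsigned long)recorded_pcs[0],
              (unsigned long)recorded_pcs[1]);
  return 0;
}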
pc_vector[idx - 1] = pc; } void Dump() { if (!initialized || !common_flags()->coverage) return; __sanitizer_dump_coverage(pc_vector.data(), pc_vector.size()); } private: bool initialized; InternalMmapVectorNoCtor pc_vector; }; static TracePcGuardController pc_guard_controller; } // namespace } // namespace __sancov extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT const uptr* pcs, uptr len) { return __sancov::SanitizerDumpCoverage(pcs, len); } SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) { if (!*guard) return; __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1); } SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32* start, u32* end) { if (start == end || *start) return; __sancov::pc_guard_controller.InitTracePcGuard(start, end); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() { __sancov::pc_guard_controller.Dump(); } } // extern "C" Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_flags.inc =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_flags.inc (revision 317686) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_flags.inc (revision 317687) @@ -1,239 +1,233 @@ //===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file describes common flags available in all sanitizers. // //===----------------------------------------------------------------------===// #ifndef COMMON_FLAG #error "Define COMMON_FLAG prior to including this file!" #endif // COMMON_FLAG(Type, Name, DefaultValue, Description) // Supported types: bool, const char *, int, uptr. // Default value must be a compile-time constant. // Description must be a string literal. COMMON_FLAG( bool, symbolize, true, "If set, use the online symbolizer from common sanitizer runtime to turn " "virtual addresses to file/line locations.") COMMON_FLAG( const char *, external_symbolizer_path, nullptr, "Path to external symbolizer. If empty, the tool will search $PATH for " "the symbolizer.") COMMON_FLAG( bool, allow_addr2line, false, "If set, allows online symbolizer to run addr2line binary to symbolize " "stack traces (addr2line will only be used if llvm-symbolizer binary is " "unavailable.") COMMON_FLAG(const char *, strip_path_prefix, "", "Strips this prefix from file paths in error reports.") COMMON_FLAG(bool, fast_unwind_on_check, false, "If available, use the fast frame-pointer-based unwinder on " "internal CHECK failures.") COMMON_FLAG(bool, fast_unwind_on_fatal, false, "If available, use the fast frame-pointer-based unwinder on fatal " "errors.") COMMON_FLAG(bool, fast_unwind_on_malloc, true, "If available, use the fast frame-pointer-based unwinder on " "malloc/free.") COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.") COMMON_FLAG(int, malloc_context_size, 1, "Max number of stack frames kept for each allocation/deallocation.") COMMON_FLAG( const char *, log_path, "stderr", "Write logs to \"log_path.pid\". The special values are \"stdout\" and " "\"stderr\". 
The default is \"stderr\".") COMMON_FLAG( bool, log_exe_name, false, "Mention name of executable when reporting error and " "append executable name to logs (as in \"log_path.exe_name.pid\").") COMMON_FLAG( bool, log_to_syslog, SANITIZER_ANDROID || SANITIZER_MAC, "Write all sanitizer output to syslog in addition to other means of " "logging.") COMMON_FLAG( int, verbosity, 0, "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).") COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.") COMMON_FLAG( bool, leak_check_at_exit, true, "Invoke leak checking in an atexit handler. Has no effect if " "detect_leaks=false, or if __lsan_do_leak_check() is called before the " "handler has a chance to run.") COMMON_FLAG(bool, allocator_may_return_null, false, "If false, the allocator will crash instead of returning 0 on " "out-of-memory.") COMMON_FLAG(bool, print_summary, true, "If false, disable printing error summaries in addition to error " "reports.") COMMON_FLAG(int, print_module_map, 0, "OS X only. 0 = don't print, 1 = print only once before process " "exits, 2 = print after each report.") COMMON_FLAG(bool, check_printf, true, "Check printf arguments.") COMMON_FLAG(bool, handle_segv, true, "If set, registers the tool's custom SIGSEGV handler.") COMMON_FLAG(bool, handle_sigbus, true, "If set, registers the tool's custom SIGBUS handler.") COMMON_FLAG(bool, handle_abort, false, "If set, registers the tool's custom SIGABRT handler.") COMMON_FLAG(bool, handle_sigill, false, "If set, registers the tool's custom SIGILL handler.") COMMON_FLAG(bool, handle_sigfpe, true, "If set, registers the tool's custom SIGFPE handler.") COMMON_FLAG(bool, allow_user_segv_handler, false, "If set, allows user to register a SEGV handler even if the tool " "registers one.") COMMON_FLAG(bool, use_sigaltstack, true, "If set, uses alternate stack for signal handling.") COMMON_FLAG(bool, detect_deadlocks, false, "If set, deadlock detection is enabled.") COMMON_FLAG( uptr, clear_shadow_mmap_threshold, 64 * 1024, "Large shadow regions are zero-filled using mmap(NORESERVE) instead of " "memset(). This is the threshold size in bytes.") COMMON_FLAG(const char *, color, "auto", "Colorize reports: (always|never|auto).") COMMON_FLAG( bool, legacy_pthread_cond, false, "Enables support for dynamic libraries linked with libpthread 2.2.5.") COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.") COMMON_FLAG(bool, help, false, "Print the flag descriptions.") COMMON_FLAG(uptr, mmap_limit_mb, 0, "Limit the amount of mmap-ed memory (excluding shadow) in Mb; " "not a user-facing flag, used mosly for testing the tools") COMMON_FLAG(uptr, hard_rss_limit_mb, 0, "Hard RSS limit in Mb." " If non-zero, a background thread is spawned at startup" " which periodically reads RSS and aborts the process if the" " limit is reached") COMMON_FLAG(uptr, soft_rss_limit_mb, 0, "Soft RSS limit in Mb." " If non-zero, a background thread is spawned at startup" " which periodically reads RSS. If the limit is reached" " all subsequent malloc/new calls will fail or return NULL" " (depending on the value of allocator_may_return_null)" " until the RSS goes below the soft limit." " This limit does not affect memory allocations other than" " malloc/new.") COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only") COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever, "Experimental. Only affects a 64-bit allocator. 
If set, tries to " "release unused memory to the OS, but not more often than this " "interval (in milliseconds). Negative values mean do not attempt " "to release memory to the OS.\n") COMMON_FLAG(bool, can_use_proc_maps_statm, true, "If false, do not attempt to read /proc/maps/statm." " Mostly useful for testing sanitizers.") COMMON_FLAG( bool, coverage, false, "If set, coverage information will be dumped at program shutdown (if the " "coverage instrumentation was enabled at compile time).") COMMON_FLAG(bool, coverage_pcs, true, "If set (and if 'coverage' is set too), the coverage information " "will be dumped as a set of PC offsets for every module.") COMMON_FLAG(bool, coverage_order_pcs, false, "If true, the PCs will be dumped in the order they've" " appeared during the execution.") -COMMON_FLAG(bool, coverage_bitset, false, - "If set (and if 'coverage' is set too), the coverage information " - "will also be dumped as a bitset to a separate file.") -COMMON_FLAG(bool, coverage_counters, false, - "If set (and if 'coverage' is set too), the bitmap that corresponds" - " to coverage counters will be dumped.") COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID, "If set, coverage information will be dumped directly to a memory " "mapped file. This way data is not lost even if the process is " "suddenly killed.") COMMON_FLAG(const char *, coverage_dir, ".", "Target directory for coverage dumps. Defaults to the current " "directory.") COMMON_FLAG(bool, full_address_space, false, "Sanitize complete address space; " "by default kernel area on 32-bit platforms will not be sanitized") COMMON_FLAG(bool, print_suppressions, true, "Print matched suppressions at exit.") COMMON_FLAG( bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO, "Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid" " dumping a 16T+ core file. Ignored on OSes that don't dump core by" " default and for sanitizers that don't reserve lots of virtual memory.") COMMON_FLAG(bool, use_madv_dontdump, true, "If set, instructs kernel to not store the (huge) shadow " "in core file.") COMMON_FLAG(bool, symbolize_inline_frames, true, "Print inlined frames in stacktraces. Defaults to true.") COMMON_FLAG(bool, symbolize_vs_style, false, "Print file locations in Visual Studio style (e.g: " " file(10,42): ...") COMMON_FLAG(int, dedup_token_length, 0, "If positive, after printing a stack trace also print a short " "string token based on this number of frames that will simplify " "deduplication of the reports. " "Example: 'DEDUP_TOKEN: foo-bar-main'. Default is 0.") COMMON_FLAG(const char *, stack_trace_format, "DEFAULT", "Format string used to render stack frames. " "See sanitizer_stacktrace_printer.h for the format description. " "Use DEFAULT to get default format.") COMMON_FLAG(bool, no_huge_pages_for_shadow, true, "If true, the shadow is not allowed to use huge pages. 
") COMMON_FLAG(bool, strict_string_checks, false, "If set check that string arguments are properly null-terminated") COMMON_FLAG(bool, intercept_strstr, true, "If set, uses custom wrappers for strstr and strcasestr functions " "to find more errors.") COMMON_FLAG(bool, intercept_strspn, true, "If set, uses custom wrappers for strspn and strcspn function " "to find more errors.") COMMON_FLAG(bool, intercept_strtok, true, "If set, uses a custom wrapper for the strtok function " "to find more errors.") COMMON_FLAG(bool, intercept_strpbrk, true, "If set, uses custom wrappers for strpbrk function " "to find more errors.") COMMON_FLAG(bool, intercept_strlen, true, "If set, uses custom wrappers for strlen and strnlen functions " "to find more errors.") COMMON_FLAG(bool, intercept_strchr, true, "If set, uses custom wrappers for strchr, strchrnul, and strrchr " "functions to find more errors.") COMMON_FLAG(bool, intercept_memcmp, true, "If set, uses custom wrappers for memcmp function " "to find more errors.") COMMON_FLAG(bool, strict_memcmp, true, "If true, assume that memcmp(p1, p2, n) always reads n bytes before " "comparing p1 and p2.") COMMON_FLAG(bool, intercept_memmem, true, "If set, uses a wrapper for memmem() to find more errors.") COMMON_FLAG(bool, intercept_intrin, true, "If set, uses custom wrappers for memset/memcpy/memmove " "intrinsics to find more errors.") COMMON_FLAG(bool, intercept_stat, true, "If set, uses custom wrappers for *stat functions " "to find more errors.") COMMON_FLAG(bool, intercept_send, true, "If set, uses custom wrappers for send* functions " "to find more errors.") COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer " "mappings in /proc/self/maps with " "user-readable names") COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool " "found an error") COMMON_FLAG( bool, abort_on_error, SANITIZER_ANDROID || SANITIZER_MAC, "If set, the tool calls abort() instead of _exit() after printing the " "error report.") COMMON_FLAG(bool, suppress_equal_pcs, true, "Deduplicate multiple reports for single source location in " "halt_on_error=false mode (asan only).") COMMON_FLAG(bool, print_cmdline, false, "Print command line on crash " "(asan only).") COMMON_FLAG(bool, html_cov_report, false, "Generate html coverage report.") COMMON_FLAG(const char *, sancov_path, "sancov", "Sancov tool location.") Index: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.h =================================================================== --- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.h (revision 317686) +++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.h (revision 317687) @@ -1,94 +1,134 @@ //===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Linux-specific syscall wrappers and classes. // //===----------------------------------------------------------------------===// #ifndef SANITIZER_LINUX_H #define SANITIZER_LINUX_H #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_posix.h" #include "sanitizer_platform_limits_posix.h" struct link_map; // Opaque type returned by dlopen(). 
struct sigaltstack; namespace __sanitizer { // Dirent structure for getdents(). Note that this structure is different from // the one in , which is used by readdir(). struct linux_dirent; // Syscall wrappers. uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count); uptr internal_sigaltstack(const struct sigaltstack* ss, struct sigaltstack* oss); uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset); // Linux-only syscalls. #if SANITIZER_LINUX uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5); // Used only by sanitizer_stoptheworld. Signal handlers that are actually used // (like the process-wide error reporting SEGV handler) must use // internal_sigaction instead. int internal_sigaction_norestorer(int signum, const void *act, void *oldact); #if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO // Uses a raw system call to avoid interceptors. int internal_sigaction_syscall(int signum, const void *act, void *oldact); #endif void internal_sigdelset(__sanitizer_sigset_t *set, int signum); #if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \ || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \ || defined(__arm__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr); #endif #endif // SANITIZER_LINUX // This class reads thread IDs from /proc//task using only syscalls. class ThreadLister { public: explicit ThreadLister(int pid); ~ThreadLister(); // GetNextTID returns -1 if the list of threads is exhausted, or if there has // been an error. int GetNextTID(); void Reset(); bool error(); private: bool GetDirectoryEntries(); int pid_; int descriptor_; InternalScopedBuffer buffer_; bool error_; struct linux_dirent* entry_; int bytes_read_; }; // Exposed for testing. uptr ThreadDescriptorSize(); uptr ThreadSelf(); uptr ThreadSelfOffset(); // Matches a library's file name against a base name (stripping path and version // information). bool LibraryNameIs(const char *full_name, const char *base_name); // Call cb for each region mapped by map. void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)); + +#if SANITIZER_ANDROID + +#if defined(__aarch64__) +# define __get_tls() \ + ({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; }) +#elif defined(__arm__) +# define __get_tls() \ + ({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; }) +#elif defined(__mips__) +// On mips32r1, this goes via a kernel illegal instruction trap that's +// optimized for v1. +# define __get_tls() \ + ({ register void** __v asm("v1"); \ + __asm__(".set push\n" \ + ".set mips32r2\n" \ + "rdhwr %0,$29\n" \ + ".set pop\n" : "=r"(__v)); \ + __v; }) +#elif defined(__i386__) +# define __get_tls() \ + ({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; }) +#elif defined(__x86_64__) +# define __get_tls() \ + ({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; }) +#else +#error "Unsupported architecture." +#endif + +// The Android Bionic team has allocated a TLS slot for TSan starting with N, +// given that Android currently doesn't support ELF TLS. It is used to store +// Sanitizers thread specific data. 
+static const int TLS_SLOT_TSAN = 8; + +ALWAYS_INLINE uptr *get_android_tls_ptr() { + return reinterpret_cast(&__get_tls()[TLS_SLOT_TSAN]); +} + +#endif // SANITIZER_ANDROID + } // namespace __sanitizer #endif // SANITIZER_FREEBSD || SANITIZER_LINUX #endif // SANITIZER_LINUX_H Index: vendor/compiler-rt/dist/lib/scudo/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/lib/scudo/CMakeLists.txt (revision 317686) +++ vendor/compiler-rt/dist/lib/scudo/CMakeLists.txt (revision 317687) @@ -1,42 +1,43 @@ add_compiler_rt_component(scudo) include_directories(..) set(SCUDO_CFLAGS ${SANITIZER_COMMON_CFLAGS}) # SANITIZER_COMMON_CFLAGS include -fno-builtin, but we actually want builtins! list(APPEND SCUDO_CFLAGS -fbuiltin) append_rtti_flag(OFF SCUDO_CFLAGS) set(SCUDO_SOURCES scudo_allocator.cpp scudo_flags.cpp scudo_crc32.cpp scudo_interceptors.cpp scudo_new_delete.cpp scudo_termination.cpp + scudo_tls_linux.cpp scudo_utils.cpp) # Enable the SSE 4.2 instruction set for scudo_crc32.cpp, if available. if (COMPILER_RT_HAS_MSSE4_2_FLAG) set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -msse4.2) endif() # Enable the AArch64 CRC32 feature for scudo_crc32.cpp, if available. # Note that it is enabled by default starting with armv8.1-a. if (COMPILER_RT_HAS_MCRC_FLAG) set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -mcrc) endif() if(COMPILER_RT_HAS_SCUDO) foreach(arch ${SCUDO_SUPPORTED_ARCH}) add_compiler_rt_runtime(clang_rt.scudo STATIC ARCHS ${arch} SOURCES ${SCUDO_SOURCES} $ $ $ CFLAGS ${SCUDO_CFLAGS} PARENT_TARGET scudo) endforeach() endif() Index: vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp (revision 317686) +++ vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp (revision 317687) @@ -1,734 +1,705 @@ //===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Scudo Hardened Allocator implementation. /// It uses the sanitizer_common allocator as a base and aims at mitigating /// heap corruption vulnerabilities. It provides a checksum-guarded chunk /// header, a delayed free list, and additional sanity checks. /// //===----------------------------------------------------------------------===// #include "scudo_allocator.h" +#include "scudo_tls.h" #include "scudo_utils.h" #include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_quarantine.h" #include #include #include namespace __scudo { -#if SANITIZER_CAN_USE_ALLOCATOR64 -const uptr AllocatorSpace = ~0ULL; -const uptr AllocatorSize = 0x40000000000ULL; -typedef DefaultSizeClassMap SizeClassMap; -struct AP { - static const uptr kSpaceBeg = AllocatorSpace; - static const uptr kSpaceSize = AllocatorSize; - static const uptr kMetadataSize = 0; - typedef __scudo::SizeClassMap SizeClassMap; - typedef NoOpMapUnmapCallback MapUnmapCallback; - static const uptr kFlags = - SizeClassAllocator64FlagMasks::kRandomShuffleChunks; -}; -typedef SizeClassAllocator64 PrimaryAllocator; -#else -// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the -// security improvements brought to the 64-bit one. 
This makes the 32-bit -// version of Scudo slightly less toughened. -static const uptr RegionSizeLog = 20; -static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog; -# if SANITIZER_WORDSIZE == 32 -typedef FlatByteMap ByteMap; -# elif SANITIZER_WORDSIZE == 64 -typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap; -# endif // SANITIZER_WORDSIZE -typedef DefaultSizeClassMap SizeClassMap; -typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap, - RegionSizeLog, ByteMap> PrimaryAllocator; -#endif // SANITIZER_CAN_USE_ALLOCATOR64 - -typedef SizeClassAllocatorLocalCache AllocatorCache; -typedef ScudoLargeMmapAllocator SecondaryAllocator; -typedef CombinedAllocator - ScudoBackendAllocator; - -static ScudoBackendAllocator &getBackendAllocator(); - -static thread_local Xorshift128Plus Prng; // Global static cookie, initialized at start-up. static uptr Cookie; // We default to software CRC32 if the alternatives are not supported, either // at compilation or at runtime. static atomic_uint8_t HashAlgorithm = { CRC32Software }; SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data); INLINE u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) { // If SSE4.2 is defined here, it was enabled everywhere, as opposed to only // for scudo_crc32.cpp. This means that other SSE instructions were likely // emitted at other places, and as a result there is no reason to not use // the hardware version of the CRC32. #if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) return computeHardwareCRC32(Crc, Data); #else if (computeHardwareCRC32 && HashType == CRC32Hardware) return computeHardwareCRC32(Crc, Data); else return computeSoftwareCRC32(Crc, Data); #endif // defined(__SSE4_2__) } +static ScudoBackendAllocator &getBackendAllocator(); + struct ScudoChunk : UnpackedHeader { // We can't use the offset member of the chunk itself, as we would double // fetch it without any warranty that it wouldn't have been tampered. To // prevent this, we work with a local copy of the header. void *getAllocBeg(UnpackedHeader *Header) { return reinterpret_cast( reinterpret_cast(this) - (Header->Offset << MinAlignmentLog)); } // Returns the usable size for a chunk, meaning the amount of bytes from the // beginning of the user data to the end of the backend allocated chunk. uptr getUsableSize(UnpackedHeader *Header) { uptr Size = getBackendAllocator().GetActuallyAllocatedSize( getAllocBeg(Header)); if (Size == 0) return 0; return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog); } // Compute the checksum of the Chunk pointer and its ChunkHeader. u16 computeChecksum(UnpackedHeader *Header) const { UnpackedHeader ZeroChecksumHeader = *Header; ZeroChecksumHeader.Checksum = 0; uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)]; memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder)); u8 HashType = atomic_load_relaxed(&HashAlgorithm); u32 Crc = computeCRC32(Cookie, reinterpret_cast(this), HashType); for (uptr i = 0; i < ARRAY_SIZE(HeaderHolder); i++) Crc = computeCRC32(Crc, HeaderHolder[i], HashType); return static_cast(Crc); } // Checks the validity of a chunk by verifying its checksum. It doesn't // incur termination in the event of an invalid chunk. 
bool isValid() { UnpackedHeader NewUnpackedHeader; const AtomicPackedHeader *AtomicHeader = reinterpret_cast(this); PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader); NewUnpackedHeader = bit_cast(NewPackedHeader); return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader)); } // Nulls out a chunk header. When returning the chunk to the backend, there // is no need to store a valid ChunkAvailable header, as this would be // computationally expensive. Zeroing out serves the same purpose by making // the header invalid. In the extremely rare event where 0 would be a valid // checksum for the chunk, the state of the chunk is ChunkAvailable anyway. COMPILER_CHECK(ChunkAvailable == 0); void eraseHeader() { PackedHeader NullPackedHeader = 0; AtomicPackedHeader *AtomicHeader = reinterpret_cast(this); atomic_store_relaxed(AtomicHeader, NullPackedHeader); } // Loads and unpacks the header, verifying the checksum in the process. void loadHeader(UnpackedHeader *NewUnpackedHeader) const { const AtomicPackedHeader *AtomicHeader = reinterpret_cast(this); PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader); *NewUnpackedHeader = bit_cast(NewPackedHeader); if (UNLIKELY(NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader))) { dieWithMessage("ERROR: corrupted chunk header at address %p\n", this); } } // Packs and stores the header, computing the checksum in the process. void storeHeader(UnpackedHeader *NewUnpackedHeader) { NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader); PackedHeader NewPackedHeader = bit_cast(*NewUnpackedHeader); AtomicPackedHeader *AtomicHeader = reinterpret_cast(this); atomic_store_relaxed(AtomicHeader, NewPackedHeader); } // Packs and stores the header, computing the checksum in the process. We // compare the current header with the expected provided one to ensure that // we are not being raced by a corruption occurring in another thread. 
void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader, UnpackedHeader *OldUnpackedHeader) { NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader); PackedHeader NewPackedHeader = bit_cast(*NewUnpackedHeader); PackedHeader OldPackedHeader = bit_cast(*OldUnpackedHeader); AtomicPackedHeader *AtomicHeader = reinterpret_cast(this); if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader, &OldPackedHeader, NewPackedHeader, memory_order_relaxed))) { dieWithMessage("ERROR: race on chunk header at address %p\n", this); } } }; ScudoChunk *getScudoChunk(uptr UserBeg) { return reinterpret_cast(UserBeg - AlignedChunkHeaderSize); } -static bool ScudoInitIsRunning = false; +struct AllocatorOptions { + u32 QuarantineSizeMb; + u32 ThreadLocalQuarantineSizeKb; + bool MayReturnNull; + s32 ReleaseToOSIntervalMs; + bool DeallocationTypeMismatch; + bool DeleteSizeMismatch; + bool ZeroContents; -static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT; -static pthread_key_t PThreadKey; + void setFrom(const Flags *f, const CommonFlags *cf); + void copyTo(Flags *f, CommonFlags *cf) const; +}; -static thread_local bool ThreadInited = false; -static thread_local bool ThreadTornDown = false; -static thread_local AllocatorCache Cache; +void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) { + MayReturnNull = cf->allocator_may_return_null; + ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms; + QuarantineSizeMb = f->QuarantineSizeMb; + ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb; + DeallocationTypeMismatch = f->DeallocationTypeMismatch; + DeleteSizeMismatch = f->DeleteSizeMismatch; + ZeroContents = f->ZeroContents; +} -static void teardownThread(void *p) { - uptr v = reinterpret_cast(p); - // The glibc POSIX thread-local-storage deallocation routine calls user - // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS. - // We want to be called last since other destructors might call free and the - // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the - // quarantine and swallowing the cache. - if (v < PTHREAD_DESTRUCTOR_ITERATIONS) { - pthread_setspecific(PThreadKey, reinterpret_cast(v + 1)); - return; - } - drainQuarantine(); - getBackendAllocator().DestroyCache(&Cache); - ThreadTornDown = true; +void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const { + cf->allocator_may_return_null = MayReturnNull; + cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs; + f->QuarantineSizeMb = QuarantineSizeMb; + f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb; + f->DeallocationTypeMismatch = DeallocationTypeMismatch; + f->DeleteSizeMismatch = DeleteSizeMismatch; + f->ZeroContents = ZeroContents; } -static void initInternal() { +static void initScudoInternal(const AllocatorOptions &Options); + +static bool ScudoInitIsRunning = false; + +void initScudo() { SanitizerToolName = "Scudo"; CHECK(!ScudoInitIsRunning && "Scudo init calls itself!"); ScudoInitIsRunning = true; // Check is SSE4.2 is supported, if so, opt for the CRC32 hardware version. if (testCPUFeature(CRC32CPUFeature)) { atomic_store_relaxed(&HashAlgorithm, CRC32Hardware); } initFlags(); AllocatorOptions Options; Options.setFrom(getFlags(), common_flags()); - initAllocator(Options); + initScudoInternal(Options); - MaybeStartBackgroudThread(); + // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use. 
ScudoInitIsRunning = false; } -static void initGlobal() { - pthread_key_create(&PThreadKey, teardownThread); - initInternal(); -} - -static void NOINLINE initThread() { - pthread_once(&GlobalInited, initGlobal); - pthread_setspecific(PThreadKey, reinterpret_cast(1)); - getBackendAllocator().InitCache(&Cache); - ThreadInited = true; -} - struct QuarantineCallback { explicit QuarantineCallback(AllocatorCache *Cache) : Cache_(Cache) {} // Chunk recycling function, returns a quarantined chunk to the backend. void Recycle(ScudoChunk *Chunk) { UnpackedHeader Header; Chunk->loadHeader(&Header); if (UNLIKELY(Header.State != ChunkQuarantine)) { dieWithMessage("ERROR: invalid chunk state when recycling address %p\n", Chunk); } Chunk->eraseHeader(); void *Ptr = Chunk->getAllocBeg(&Header); getBackendAllocator().Deallocate(Cache_, Ptr); } /// Internal quarantine allocation and deallocation functions. void *Allocate(uptr Size) { // TODO(kostyak): figure out the best way to protect the batches. return getBackendAllocator().Allocate(Cache_, Size, MinAlignment); } void Deallocate(void *Ptr) { getBackendAllocator().Deallocate(Cache_, Ptr); } AllocatorCache *Cache_; }; typedef Quarantine ScudoQuarantine; typedef ScudoQuarantine::Cache ScudoQuarantineCache; -static thread_local ScudoQuarantineCache ThreadQuarantineCache; +COMPILER_CHECK(sizeof(ScudoQuarantineCache) <= + sizeof(ScudoThreadContext::QuarantineCachePlaceHolder)); -void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) { - MayReturnNull = cf->allocator_may_return_null; - ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms; - QuarantineSizeMb = f->QuarantineSizeMb; - ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb; - DeallocationTypeMismatch = f->DeallocationTypeMismatch; - DeleteSizeMismatch = f->DeleteSizeMismatch; - ZeroContents = f->ZeroContents; +AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) { + return &ThreadContext->Cache; } -void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const { - cf->allocator_may_return_null = MayReturnNull; - cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs; - f->QuarantineSizeMb = QuarantineSizeMb; - f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb; - f->DeallocationTypeMismatch = DeallocationTypeMismatch; - f->DeleteSizeMismatch = DeleteSizeMismatch; - f->ZeroContents = ZeroContents; +ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) { + return reinterpret_cast< + ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder); } +Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) { + return &ThreadContext->Prng; +} + struct ScudoAllocator { static const uptr MaxAllowedMallocSize = FIRST_32_SECOND_64(2UL << 30, 1ULL << 40); ScudoBackendAllocator BackendAllocator; ScudoQuarantine AllocatorQuarantine; // The fallback caches are used when the thread local caches have been // 'detroyed' on thread tear-down. They are protected by a Mutex as they can // be accessed by different threads. StaticSpinMutex FallbackMutex; AllocatorCache FallbackAllocatorCache; ScudoQuarantineCache FallbackQuarantineCache; + Xorshift128Plus FallbackPrng; bool DeallocationTypeMismatch; bool ZeroContents; bool DeleteSizeMismatch; explicit ScudoAllocator(LinkerInitialized) : AllocatorQuarantine(LINKER_INITIALIZED), FallbackQuarantineCache(LINKER_INITIALIZED) {} void init(const AllocatorOptions &Options) { // Verify that the header offset field can hold the maximum offset. 
In the // case of the Secondary allocator, it takes care of alignment and the // offset will always be 0. In the case of the Primary, the worst case // scenario happens in the last size class, when the backend allocation // would already be aligned on the requested alignment, which would happen // to be the maximum alignment that would fit in that size class. As a // result, the maximum offset will be at most the maximum alignment for the // last size class minus the header size, in multiples of MinAlignment. UnpackedHeader Header = {}; uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex( SizeClassMap::kMaxSize - MinAlignment); uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog; Header.Offset = MaxOffset; if (Header.Offset != MaxOffset) { dieWithMessage("ERROR: the maximum possible offset doesn't fit in the " "header\n"); } // Verify that we can fit the maximum size or amount of unused bytes in the // header. Given that the Secondary fits the allocation to a page, the worst // case scenario happens in the Primary. It will depend on the second to // last and last class sizes, as well as the dynamic base for the Primary. // The following is an over-approximation that works for our needs. uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1; Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes; if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) { dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in " "the header\n"); } DeallocationTypeMismatch = Options.DeallocationTypeMismatch; DeleteSizeMismatch = Options.DeleteSizeMismatch; ZeroContents = Options.ZeroContents; BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs); AllocatorQuarantine.Init( static_cast(Options.QuarantineSizeMb) << 20, static_cast(Options.ThreadLocalQuarantineSizeKb) << 10); BackendAllocator.InitCache(&FallbackAllocatorCache); - Cookie = Prng.getNext(); + FallbackPrng.initFromURandom(); + Cookie = FallbackPrng.getNext(); } // Helper function that checks for a valid Scudo chunk. nullptr isn't. bool isValidPointer(const void *UserPtr) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); if (!UserPtr) return false; uptr UserBeg = reinterpret_cast(UserPtr); if (!IsAligned(UserBeg, MinAlignment)) return false; return getScudoChunk(UserBeg)->isValid(); } // Allocates a chunk. void *allocate(uptr Size, uptr Alignment, AllocType Type, bool ForceZeroContents = false) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); if (UNLIKELY(!IsPowerOfTwo(Alignment))) { dieWithMessage("ERROR: alignment is not a power of 2\n"); } if (Alignment > MaxAlignment) return BackendAllocator.ReturnNullOrDieOnBadRequest(); if (Alignment < MinAlignment) Alignment = MinAlignment; if (Size >= MaxAllowedMallocSize) return BackendAllocator.ReturnNullOrDieOnBadRequest(); if (Size == 0) Size = 1; uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize; if (Alignment > MinAlignment) NeededSize += Alignment; if (NeededSize >= MaxAllowedMallocSize) return BackendAllocator.ReturnNullOrDieOnBadRequest(); // Primary backed and Secondary backed allocations have a different // treatment. We deal with alignment requirements of Primary serviced // allocations here, but the Secondary will take care of its own alignment // needs, which means we also have to work around some limitations of the // combined allocator to accommodate the situation. 
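    // Concretely: for a Primary-backed request with Alignment > MinAlignment,
    // the extra Alignment bytes added to NeededSize above leave room to round
    // UserBeg up to the requested alignment; the distance from AllocBeg to the
    // chunk header is what ends up in Header.Offset, in MinAlignment units.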
bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment); void *Ptr; + uptr Salt; uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment; - if (LIKELY(!ThreadTornDown)) { - Ptr = BackendAllocator.Allocate(&Cache, NeededSize, AllocationAlignment); + ScudoThreadContext *ThreadContext = getThreadContext(); + if (LIKELY(ThreadContext)) { + Salt = getPrng(ThreadContext)->getNext(); + Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext), + NeededSize, AllocationAlignment); } else { SpinMutexLock l(&FallbackMutex); + Salt = FallbackPrng.getNext(); Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize, AllocationAlignment); } if (!Ptr) return BackendAllocator.ReturnNullOrDieOnOOM(); uptr AllocBeg = reinterpret_cast(Ptr); // If the allocation was serviced by the secondary, the returned pointer // accounts for ChunkHeaderSize to pass the alignment check of the combined // allocator. Adjust it here. if (!FromPrimary) { AllocBeg -= AlignedChunkHeaderSize; if (Alignment > MinAlignment) NeededSize -= Alignment; } // If requested, we will zero out the entire contents of the returned chunk. if ((ForceZeroContents || ZeroContents) && FromPrimary) memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr)); uptr UserBeg = AllocBeg + AlignedChunkHeaderSize; if (!IsAligned(UserBeg, Alignment)) UserBeg = RoundUpTo(UserBeg, Alignment); CHECK_LE(UserBeg + Size, AllocBeg + NeededSize); UnpackedHeader Header = {}; Header.State = ChunkAllocated; uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg; Header.Offset = Offset >> MinAlignmentLog; Header.AllocType = Type; if (FromPrimary) { Header.FromPrimary = FromPrimary; Header.SizeOrUnusedBytes = Size; } else { // The secondary fits the allocations to a page, so the amount of unused // bytes is the difference between the end of the user allocation and the // next page boundary. uptr PageSize = GetPageSizeCached(); uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1); if (TrailingBytes) Header.SizeOrUnusedBytes = PageSize - TrailingBytes; } - Header.Salt = static_cast(Prng.getNext()); + Header.Salt = static_cast(Salt); getScudoChunk(UserBeg)->storeHeader(&Header); void *UserPtr = reinterpret_cast(UserBeg); // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size); return UserPtr; } // Place a chunk in the quarantine. In the event of a zero-sized quarantine, // we directly deallocate the chunk, otherwise the flow would lead to the - // chunk being checksummed twice, once before Put and once in Recycle, with - // no additional security value. + // chunk being loaded (and checked) twice, and stored (and checksummed) once, + // with no additional security value. 
void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header, uptr Size) { bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0); if (BypassQuarantine) { Chunk->eraseHeader(); void *Ptr = Chunk->getAllocBeg(Header); - if (LIKELY(!ThreadTornDown)) { - getBackendAllocator().Deallocate(&Cache, Ptr); + ScudoThreadContext *ThreadContext = getThreadContext(); + if (LIKELY(ThreadContext)) { + getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr); } else { SpinMutexLock Lock(&FallbackMutex); getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr); } } else { UnpackedHeader NewHeader = *Header; NewHeader.State = ChunkQuarantine; Chunk->compareExchangeHeader(&NewHeader, Header); - if (LIKELY(!ThreadTornDown)) { - AllocatorQuarantine.Put(&ThreadQuarantineCache, - QuarantineCallback(&Cache), Chunk, Size); + ScudoThreadContext *ThreadContext = getThreadContext(); + if (LIKELY(ThreadContext)) { + AllocatorQuarantine.Put(getQuarantineCache(ThreadContext), + QuarantineCallback( + getAllocatorCache(ThreadContext)), + Chunk, Size); } else { SpinMutexLock l(&FallbackMutex); AllocatorQuarantine.Put(&FallbackQuarantineCache, QuarantineCallback(&FallbackAllocatorCache), Chunk, Size); } } } // Deallocates a Chunk, which means adding it to the delayed free list (or // Quarantine). void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr); if (!UserPtr) return; uptr UserBeg = reinterpret_cast(UserPtr); if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) { dieWithMessage("ERROR: attempted to deallocate a chunk not properly " "aligned at address %p\n", UserPtr); } ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader OldHeader; Chunk->loadHeader(&OldHeader); if (UNLIKELY(OldHeader.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when deallocating address " "%p\n", UserPtr); } if (DeallocationTypeMismatch) { // The deallocation type has to match the allocation one. if (OldHeader.AllocType != Type) { // With the exception of memalign'd Chunks, that can be still be free'd. if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) { dieWithMessage("ERROR: allocation type mismatch on address %p\n", UserPtr); } } } uptr Size = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes : Chunk->getUsableSize(&OldHeader) - OldHeader.SizeOrUnusedBytes; if (DeleteSizeMismatch) { if (DeleteSize && DeleteSize != Size) { dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n", UserPtr); } } // If a small memory amount was allocated with a larger alignment, we want // to take that into account. Otherwise the Quarantine would be filled with // tiny chunks, taking a lot of VA memory. This is an approximation of the // usable size, that allows us to not call GetActuallyAllocatedSize. uptr LiableSize = Size + (OldHeader.Offset << MinAlignment); quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize); } // Reallocates a chunk. We can save on a new allocation if the new requested // size still fits in the chunk. 
void *reallocate(void *OldPtr, uptr NewSize) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); uptr UserBeg = reinterpret_cast(OldPtr); if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) { dieWithMessage("ERROR: attempted to reallocate a chunk not properly " "aligned at address %p\n", OldPtr); } ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader OldHeader; Chunk->loadHeader(&OldHeader); if (UNLIKELY(OldHeader.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when reallocating address " "%p\n", OldPtr); } if (UNLIKELY(OldHeader.AllocType != FromMalloc)) { dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n", OldPtr); } uptr UsableSize = Chunk->getUsableSize(&OldHeader); // The new size still fits in the current chunk, and the size difference // is reasonable. if (NewSize <= UsableSize && (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) { UnpackedHeader NewHeader = OldHeader; NewHeader.SizeOrUnusedBytes = OldHeader.FromPrimary ? NewSize : UsableSize - NewSize; Chunk->compareExchangeHeader(&NewHeader, &OldHeader); return OldPtr; } // Otherwise, we have to allocate a new chunk and copy the contents of the // old one. void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); if (NewPtr) { uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes : UsableSize - OldHeader.SizeOrUnusedBytes; memcpy(NewPtr, OldPtr, Min(NewSize, OldSize)); quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize); } return NewPtr; } // Helper function that returns the actual usable size of a chunk. uptr getUsableSize(const void *Ptr) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); if (!Ptr) return 0; uptr UserBeg = reinterpret_cast(Ptr); ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader Header; Chunk->loadHeader(&Header); // Getting the usable size of a chunk only makes sense if it's allocated. 
if (UNLIKELY(Header.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when sizing address %p\n", Ptr); } return Chunk->getUsableSize(&Header); } void *calloc(uptr NMemB, uptr Size) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); uptr Total = NMemB * Size; if (Size != 0 && Total / Size != NMemB) // Overflow check return BackendAllocator.ReturnNullOrDieOnBadRequest(); return allocate(Total, MinAlignment, FromMalloc, true); } - void drainQuarantine() { - AllocatorQuarantine.Drain(&ThreadQuarantineCache, - QuarantineCallback(&Cache)); + void commitBack(ScudoThreadContext *ThreadContext) { + AllocatorCache *Cache = getAllocatorCache(ThreadContext); + AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext), + QuarantineCallback(Cache)); + BackendAllocator.DestroyCache(Cache); } uptr getStats(AllocatorStat StatType) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); uptr stats[AllocatorStatCount]; BackendAllocator.GetStats(stats); return stats[StatType]; } }; static ScudoAllocator Instance(LINKER_INITIALIZED); static ScudoBackendAllocator &getBackendAllocator() { return Instance.BackendAllocator; } -void initAllocator(const AllocatorOptions &Options) { +static void initScudoInternal(const AllocatorOptions &Options) { Instance.init(Options); } -void drainQuarantine() { - Instance.drainQuarantine(); +void ScudoThreadContext::init() { + getBackendAllocator().InitCache(&Cache); + Prng.initFromURandom(); + memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder)); +} + +void ScudoThreadContext::commitBack() { + Instance.commitBack(this); } void *scudoMalloc(uptr Size, AllocType Type) { return Instance.allocate(Size, MinAlignment, Type); } void scudoFree(void *Ptr, AllocType Type) { Instance.deallocate(Ptr, 0, Type); } void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) { Instance.deallocate(Ptr, Size, Type); } void *scudoRealloc(void *Ptr, uptr Size) { if (!Ptr) return Instance.allocate(Size, MinAlignment, FromMalloc); if (Size == 0) { Instance.deallocate(Ptr, 0, FromMalloc); return nullptr; } return Instance.reallocate(Ptr, Size); } void *scudoCalloc(uptr NMemB, uptr Size) { return Instance.calloc(NMemB, Size); } void *scudoValloc(uptr Size) { return Instance.allocate(Size, GetPageSizeCached(), FromMemalign); } void *scudoMemalign(uptr Alignment, uptr Size) { return Instance.allocate(Size, Alignment, FromMemalign); } void *scudoPvalloc(uptr Size) { uptr PageSize = GetPageSizeCached(); Size = RoundUpTo(Size, PageSize); if (Size == 0) { // pvalloc(0) should allocate one page. Size = PageSize; } return Instance.allocate(Size, PageSize, FromMemalign); } int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) { *MemPtr = Instance.allocate(Size, Alignment, FromMemalign); return 0; } void *scudoAlignedAlloc(uptr Alignment, uptr Size) { // size must be a multiple of the alignment. To avoid a division, we first // make sure that alignment is a power of 2. 
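  // For a power-of-two Alignment, (Size & (Alignment - 1)) == 0 is equivalent
  // to Size % Alignment == 0, so the multiple-of-alignment check below needs
  // no division.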
CHECK(IsPowerOfTwo(Alignment)); CHECK_EQ((Size & (Alignment - 1)), 0); return Instance.allocate(Size, Alignment, FromMalloc); } uptr scudoMallocUsableSize(void *Ptr) { return Instance.getUsableSize(Ptr); } } // namespace __scudo using namespace __scudo; // MallocExtension helper functions uptr __sanitizer_get_current_allocated_bytes() { return Instance.getStats(AllocatorStatAllocated); } uptr __sanitizer_get_heap_size() { return Instance.getStats(AllocatorStatMapped); } uptr __sanitizer_get_free_bytes() { return 1; } uptr __sanitizer_get_unmapped_bytes() { return 1; } uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } int __sanitizer_get_ownership(const void *Ptr) { return Instance.isValidPointer(Ptr); } uptr __sanitizer_get_allocated_size(const void *Ptr) { return Instance.getUsableSize(Ptr); } Index: vendor/compiler-rt/dist/lib/scudo/scudo_allocator.h =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_allocator.h (revision 317686) +++ vendor/compiler-rt/dist/lib/scudo/scudo_allocator.h (revision 317687) @@ -1,105 +1,126 @@ //===-- scudo_allocator.h ---------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Header for scudo_allocator.cpp. /// //===----------------------------------------------------------------------===// #ifndef SCUDO_ALLOCATOR_H_ #define SCUDO_ALLOCATOR_H_ #include "scudo_flags.h" #include "sanitizer_common/sanitizer_allocator.h" #if !SANITIZER_LINUX # error "The Scudo hardened allocator is currently only supported on Linux." #endif namespace __scudo { enum AllocType : u8 { FromMalloc = 0, // Memory block came from malloc, realloc, calloc, etc. FromNew = 1, // Memory block came from operator new. FromNewArray = 2, // Memory block came from operator new []. FromMemalign = 3, // Memory block came from memalign, posix_memalign, etc. }; enum ChunkState : u8 { ChunkAvailable = 0, ChunkAllocated = 1, ChunkQuarantine = 2 }; // Our header requires 64 bits of storage. Having the offset saves us from // using functions such as GetBlockBegin, that is fairly costly. Our first // implementation used the MetaData as well, which offers the advantage of // being stored away from the chunk itself, but accessing it was costly as // well. The header will be atomically loaded and stored. typedef u64 PackedHeader; struct UnpackedHeader { u64 Checksum : 16; u64 SizeOrUnusedBytes : 19; // Size for Primary backed allocations, amount of // unused bytes in the chunk for Secondary ones. u64 FromPrimary : 1; u64 State : 2; // available, allocated, or quarantined u64 AllocType : 2; // malloc, new, new[], or memalign u64 Offset : 16; // Offset from the beginning of the backend // allocation to the beginning of the chunk // itself, in multiples of MinAlignment. See - /// comment about its maximum value and in init(). + // comment about its maximum value and in init(). 
u64 Salt : 8; }; typedef atomic_uint64_t AtomicPackedHeader; COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader)); // Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4); -const uptr MaxAlignmentLog = 24; // 16 MB +const uptr MaxAlignmentLog = 24; // 16 MB const uptr MinAlignment = 1 << MinAlignmentLog; const uptr MaxAlignment = 1 << MaxAlignmentLog; const uptr ChunkHeaderSize = sizeof(PackedHeader); const uptr AlignedChunkHeaderSize = (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1); -struct AllocatorOptions { - u32 QuarantineSizeMb; - u32 ThreadLocalQuarantineSizeKb; - bool MayReturnNull; - s32 ReleaseToOSIntervalMs; - bool DeallocationTypeMismatch; - bool DeleteSizeMismatch; - bool ZeroContents; - - void setFrom(const Flags *f, const CommonFlags *cf); - void copyTo(Flags *f, CommonFlags *cf) const; +#if SANITIZER_CAN_USE_ALLOCATOR64 +const uptr AllocatorSpace = ~0ULL; +const uptr AllocatorSize = 0x40000000000ULL; // 4TB. +typedef DefaultSizeClassMap SizeClassMap; +struct AP { + static const uptr kSpaceBeg = AllocatorSpace; + static const uptr kSpaceSize = AllocatorSize; + static const uptr kMetadataSize = 0; + typedef __scudo::SizeClassMap SizeClassMap; + typedef NoOpMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = + SizeClassAllocator64FlagMasks::kRandomShuffleChunks; }; +typedef SizeClassAllocator64 PrimaryAllocator; +#else +// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the +// security improvements brought to the 64-bit one. This makes the 32-bit +// version of Scudo slightly less toughened. +static const uptr RegionSizeLog = 20; +static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog; +# if SANITIZER_WORDSIZE == 32 +typedef FlatByteMap ByteMap; +# elif SANITIZER_WORDSIZE == 64 +typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap; +# endif // SANITIZER_WORDSIZE +typedef DefaultSizeClassMap SizeClassMap; +typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap, + RegionSizeLog, ByteMap> PrimaryAllocator; +#endif // SANITIZER_CAN_USE_ALLOCATOR64 -void initAllocator(const AllocatorOptions &options); -void drainQuarantine(); +#include "scudo_allocator_secondary.h" +typedef SizeClassAllocatorLocalCache AllocatorCache; +typedef ScudoLargeMmapAllocator SecondaryAllocator; +typedef CombinedAllocator + ScudoBackendAllocator; + +void initScudo(); + void *scudoMalloc(uptr Size, AllocType Type); void scudoFree(void *Ptr, AllocType Type); void scudoSizedFree(void *Ptr, uptr Size, AllocType Type); void *scudoRealloc(void *Ptr, uptr Size); void *scudoCalloc(uptr NMemB, uptr Size); void *scudoMemalign(uptr Alignment, uptr Size); void *scudoValloc(uptr Size); void *scudoPvalloc(uptr Size); int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size); void *scudoAlignedAlloc(uptr Alignment, uptr Size); uptr scudoMallocUsableSize(void *Ptr); - -#include "scudo_allocator_secondary.h" } // namespace __scudo #endif // SCUDO_ALLOCATOR_H_ Index: vendor/compiler-rt/dist/lib/scudo/scudo_tls.h =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_tls.h (nonexistent) +++ vendor/compiler-rt/dist/lib/scudo/scudo_tls.h (revision 317687) @@ -0,0 +1,40 @@ +//===-- scudo_tls.h ---------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Scudo thread local structure definition. +/// Implementation will differ based on the thread local storage primitives +/// offered by the underlying platform. +/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_TLS_H_ +#define SCUDO_TLS_H_ + +#include "scudo_allocator.h" +#include "scudo_utils.h" + +namespace __scudo { + +struct ALIGNED(64) ScudoThreadContext { + public: + AllocatorCache Cache; + Xorshift128Plus Prng; + uptr QuarantineCachePlaceHolder[4]; + void init(); + void commitBack(); +}; + +void initThread(); + +// Fastpath functions are defined in the following platform specific headers. +#include "scudo_tls_linux.h" + +} // namespace __scudo + +#endif // SCUDO_TLS_H_ Property changes on: vendor/compiler-rt/dist/lib/scudo/scudo_tls.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.cpp =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.cpp (nonexistent) +++ vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.cpp (revision 317687) @@ -0,0 +1,62 @@ +//===-- scudo_tls_linux.cpp -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Scudo thread local structure implementation for platforms supporting +/// thread_local. +/// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" + +#if SANITIZER_LINUX + +#include "scudo_tls.h" + +#include +#include + +namespace __scudo { + +static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT; +static pthread_key_t PThreadKey; + +thread_local ThreadState ScudoThreadState = ThreadNotInitialized; +thread_local ScudoThreadContext ThreadLocalContext; + +static void teardownThread(void *Ptr) { + uptr Iteration = reinterpret_cast(Ptr); + // The glibc POSIX thread-local-storage deallocation routine calls user + // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS. + // We want to be called last since other destructors might call free and the + // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the + // quarantine and swallowing the cache. 
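+  // The key's value doubles as an iteration counter: each destructor pass
+  // below re-registers itself with an incremented value, and only the final
+  // pass commits the thread's cache and quarantine back.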
+ if (Iteration < PTHREAD_DESTRUCTOR_ITERATIONS) { + pthread_setspecific(PThreadKey, reinterpret_cast(Iteration + 1)); + return; + } + ThreadLocalContext.commitBack(); + ScudoThreadState = ThreadTornDown; +} + + +static void initOnce() { + CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0); + initScudo(); +} + +void initThread() { + pthread_once(&GlobalInitialized, initOnce); + pthread_setspecific(PThreadKey, reinterpret_cast(1)); + ThreadLocalContext.init(); + ScudoThreadState = ThreadInitialized; +} + +} // namespace __scudo + +#endif // SANITIZER_LINUX Property changes on: vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.cpp ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.h =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.h (nonexistent) +++ vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.h (revision 317687) @@ -0,0 +1,48 @@ +//===-- scudo_tls_linux.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Scudo thread local structure fastpath functions implementation for platforms +/// supporting thread_local. +/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_TLS_LINUX_H_ +#define SCUDO_TLS_LINUX_H_ + +#ifndef SCUDO_TLS_H_ +# error "This file must be included inside scudo_tls.h." +#endif // SCUDO_TLS_H_ + +#include "sanitizer_common/sanitizer_platform.h" + +#if SANITIZER_LINUX + +enum ThreadState : u8 { + ThreadNotInitialized = 0, + ThreadInitialized, + ThreadTornDown, +}; +extern thread_local ThreadState ScudoThreadState; +extern thread_local ScudoThreadContext ThreadLocalContext; + +ALWAYS_INLINE void initThreadMaybe() { + if (LIKELY(ScudoThreadState != ThreadNotInitialized)) + return; + initThread(); +} + +ALWAYS_INLINE ScudoThreadContext *getThreadContext() { + if (UNLIKELY(ScudoThreadState == ThreadTornDown)) + return nullptr; + return &ThreadLocalContext; +} + +#endif // SANITIZER_LINUX + +#endif // SCUDO_TLS_LINUX_H_ Property changes on: vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/lib/scudo/scudo_utils.cpp =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_utils.cpp (revision 317686) +++ vendor/compiler-rt/dist/lib/scudo/scudo_utils.cpp (revision 317687) @@ -1,162 +1,162 @@ //===-- scudo_utils.cpp -----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. 
// //===----------------------------------------------------------------------===// /// /// Platform specific utility functions. /// //===----------------------------------------------------------------------===// #include "scudo_utils.h" #include #include #include #include #if defined(__x86_64__) || defined(__i386__) # include #endif #if defined(__arm__) || defined(__aarch64__) # include #endif // TODO(kostyak): remove __sanitizer *Printf uses in favor for our own less // complicated string formatting code. The following is a // temporary workaround to be able to use __sanitizer::VSNPrintf. namespace __sanitizer { extern int VSNPrintf(char *buff, int buff_length, const char *format, va_list args); } // namespace __sanitizer namespace __scudo { FORMAT(1, 2) void NORETURN dieWithMessage(const char *Format, ...) { // Our messages are tiny, 256 characters is more than enough. char Message[256]; va_list Args; va_start(Args, Format); __sanitizer::VSNPrintf(Message, sizeof(Message), Format, Args); va_end(Args); RawWrite(Message); Die(); } #if defined(__x86_64__) || defined(__i386__) // i386 and x86_64 specific code to detect CRC32 hardware support via CPUID. // CRC32 requires the SSE 4.2 instruction set. typedef struct { u32 Eax; u32 Ebx; u32 Ecx; u32 Edx; } CPUIDRegs; static void getCPUID(CPUIDRegs *Regs, u32 Level) { __get_cpuid(Level, &Regs->Eax, &Regs->Ebx, &Regs->Ecx, &Regs->Edx); } CPUIDRegs getCPUFeatures() { CPUIDRegs VendorRegs = {}; getCPUID(&VendorRegs, 0); bool IsIntel = (VendorRegs.Ebx == signature_INTEL_ebx) && (VendorRegs.Edx == signature_INTEL_edx) && (VendorRegs.Ecx == signature_INTEL_ecx); bool IsAMD = (VendorRegs.Ebx == signature_AMD_ebx) && (VendorRegs.Edx == signature_AMD_edx) && (VendorRegs.Ecx == signature_AMD_ecx); // Default to an empty feature set if not on a supported CPU. CPUIDRegs FeaturesRegs = {}; if (IsIntel || IsAMD) { getCPUID(&FeaturesRegs, 1); } return FeaturesRegs; } #ifndef bit_SSE4_2 # define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines. #endif bool testCPUFeature(CPUFeature Feature) { CPUIDRegs FeaturesRegs = getCPUFeatures(); switch (Feature) { case CRC32CPUFeature: // CRC32 is provided by SSE 4.2. return !!(FeaturesRegs.Ecx & bit_SSE4_2); default: break; } return false; } #elif defined(__arm__) || defined(__aarch64__) // For ARM and AArch64, hardware CRC32 support is indicated in the // AT_HWVAL auxiliary vector. #ifndef HWCAP_CRC32 # define HWCAP_CRC32 (1<<7) // HWCAP_CRC32 is missing on older platforms. #endif bool testCPUFeature(CPUFeature Feature) { uptr HWCap = getauxval(AT_HWCAP); switch (Feature) { case CRC32CPUFeature: return !!(HWCap & HWCAP_CRC32); default: break; } return false; } #else bool testCPUFeature(CPUFeature Feature) { return false; } #endif // defined(__x86_64__) || defined(__i386__) // readRetry will attempt to read Count bytes from the Fd specified, and if // interrupted will retry to read additional bytes to reach Count. 
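// ---- Illustrative aside (editor's sketch); not part of the r317687 diff. ----
// A minimal sketch of how testCPUFeature(CRC32CPUFeature) above can be used to
// select a checksum routine once at startup. hardwareCRC32() is a hypothetical
// wrapper for the SSE 4.2 / ARMv8 CRC32 intrinsics and is stubbed out with the
// table-driven computeSoftwareCRC32() from scudo_utils.h so the sketch stays
// self-contained; the real dispatch is done elsewhere in Scudo.
#include "scudo_utils.h"

namespace __scudo {

typedef u32 (*CRC32Func)(u32 Crc, uptr Data);

static u32 hardwareCRC32(u32 Crc, uptr Data) {
  // Stand-in body: a real version would use _mm_crc32_u64 or __crc32cd here.
  return computeSoftwareCRC32(Crc, Data);
}

static CRC32Func pickCRC32Routine() {
  // The CPU feature set cannot change at runtime, so the check runs once.
  return testCPUFeature(CRC32CPUFeature) ? hardwareCRC32 : computeSoftwareCRC32;
}

}  // namespace __scudo
// ---- End of aside. ----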
static ssize_t readRetry(int Fd, u8 *Buffer, size_t Count) { ssize_t AmountRead = 0; while (static_cast(AmountRead) < Count) { ssize_t Result = read(Fd, Buffer + AmountRead, Count - AmountRead); if (Result > 0) AmountRead += Result; else if (!Result) break; else if (errno != EINTR) { AmountRead = -1; break; } } return AmountRead; } static void fillRandom(u8 *Data, ssize_t Size) { int Fd = open("/dev/urandom", O_RDONLY); if (Fd < 0) { dieWithMessage("ERROR: failed to open /dev/urandom.\n"); } bool Success = readRetry(Fd, Data, Size) == Size; close(Fd); if (!Success) { dieWithMessage("ERROR: failed to read enough data from /dev/urandom.\n"); } } -// Default constructor for Xorshift128Plus seeds the state with /dev/urandom. +// Seeds the xorshift state with /dev/urandom. // TODO(kostyak): investigate using getrandom() if available. -Xorshift128Plus::Xorshift128Plus() { +void Xorshift128Plus::initFromURandom() { fillRandom(reinterpret_cast(State), sizeof(State)); } } // namespace __scudo Index: vendor/compiler-rt/dist/lib/scudo/scudo_utils.h =================================================================== --- vendor/compiler-rt/dist/lib/scudo/scudo_utils.h (revision 317686) +++ vendor/compiler-rt/dist/lib/scudo/scudo_utils.h (revision 317687) @@ -1,117 +1,117 @@ //===-- scudo_utils.h -------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// Header for scudo_utils.cpp. /// //===----------------------------------------------------------------------===// #ifndef SCUDO_UTILS_H_ #define SCUDO_UTILS_H_ #include #include "sanitizer_common/sanitizer_common.h" namespace __scudo { template inline Dest bit_cast(const Source& source) { static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!"); Dest dest; memcpy(&dest, &source, sizeof(dest)); return dest; } void NORETURN dieWithMessage(const char *Format, ...); enum CPUFeature { CRC32CPUFeature = 0, MaxCPUFeature, }; bool testCPUFeature(CPUFeature feature); // Tiny PRNG based on https://en.wikipedia.org/wiki/Xorshift#xorshift.2B // The state (128 bits) will be stored in thread local storage. 
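// ---- Illustrative aside (editor's sketch); not part of the r317687 diff. ----
// The hunk above replaces the seeding constructor with an explicit
// initFromURandom() call. A plausible reason (an assumption, not stated in the
// diff) is that the PRNG is now a member of the thread_local ScudoThreadContext
// introduced in this commit, where an explicit init from the context's own
// init() path is cheaper and more predictable than a non-trivial thread_local
// constructor. A standalone usage sketch:
#include "scudo_utils.h"

namespace __scudo {

static u64 drawRandom64() {
  // Seed lazily on first use in each thread, then draw 64 bits per call.
  static thread_local Xorshift128Plus Prng;
  static thread_local bool Seeded = false;
  if (UNLIKELY(!Seeded)) {
    Prng.initFromURandom();
    Seeded = true;
  }
  return Prng.getNext();
}

}  // namespace __scudo
// ---- End of aside. ----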
struct Xorshift128Plus { public: - Xorshift128Plus(); + void initFromURandom(); u64 getNext() { u64 x = State[0]; const u64 y = State[1]; State[0] = y; x ^= x << 23; State[1] = x ^ y ^ (x >> 17) ^ (y >> 26); return State[1] + y; } private: u64 State[2]; }; enum : u8 { CRC32Software = 0, CRC32Hardware = 1, }; const static u32 CRC32Table[] = { 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d }; INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) { for (uptr i = 0; i < sizeof(Data); i++) { Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ 
(Crc >> 8); Data >>= 8; } return Crc; } } // namespace __scudo #endif // SCUDO_UTILS_H_ Index: vendor/compiler-rt/dist/lib/tsan/go/buildgo.sh =================================================================== --- vendor/compiler-rt/dist/lib/tsan/go/buildgo.sh (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/go/buildgo.sh (revision 317687) @@ -1,135 +1,136 @@ #!/bin/sh set -e SRCS=" tsan_go.cc ../rtl/tsan_clock.cc + ../rtl/tsan_external.cc ../rtl/tsan_flags.cc ../rtl/tsan_interface_atomic.cc ../rtl/tsan_md5.cc ../rtl/tsan_mutex.cc ../rtl/tsan_report.cc ../rtl/tsan_rtl.cc ../rtl/tsan_rtl_mutex.cc ../rtl/tsan_rtl_report.cc ../rtl/tsan_rtl_thread.cc ../rtl/tsan_rtl_proc.cc ../rtl/tsan_stack_trace.cc ../rtl/tsan_stat.cc ../rtl/tsan_suppressions.cc ../rtl/tsan_sync.cc ../../sanitizer_common/sanitizer_allocator.cc ../../sanitizer_common/sanitizer_common.cc ../../sanitizer_common/sanitizer_common_libcdep.cc ../../sanitizer_common/sanitizer_deadlock_detector2.cc ../../sanitizer_common/sanitizer_flag_parser.cc ../../sanitizer_common/sanitizer_flags.cc ../../sanitizer_common/sanitizer_libc.cc ../../sanitizer_common/sanitizer_persistent_allocator.cc ../../sanitizer_common/sanitizer_printf.cc ../../sanitizer_common/sanitizer_suppressions.cc ../../sanitizer_common/sanitizer_thread_registry.cc ../../sanitizer_common/sanitizer_stackdepot.cc ../../sanitizer_common/sanitizer_stacktrace.cc ../../sanitizer_common/sanitizer_symbolizer.cc ../../sanitizer_common/sanitizer_termination.cc " if [ "`uname -a | grep Linux`" != "" ]; then SUFFIX="linux_amd64" OSCFLAGS="-fPIC -ffreestanding -Wno-maybe-uninitialized -Wno-unused-const-variable -Werror -Wno-unknown-warning-option" OSLDFLAGS="-lpthread -fPIC -fpie" SRCS=" $SRCS ../rtl/tsan_platform_linux.cc ../../sanitizer_common/sanitizer_posix.cc ../../sanitizer_common/sanitizer_posix_libcdep.cc ../../sanitizer_common/sanitizer_procmaps_common.cc ../../sanitizer_common/sanitizer_procmaps_linux.cc ../../sanitizer_common/sanitizer_linux.cc ../../sanitizer_common/sanitizer_linux_libcdep.cc ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc " elif [ "`uname -a | grep FreeBSD`" != "" ]; then SUFFIX="freebsd_amd64" OSCFLAGS="-fno-strict-aliasing -fPIC -Werror" OSLDFLAGS="-lpthread -fPIC -fpie" SRCS=" $SRCS ../rtl/tsan_platform_linux.cc ../../sanitizer_common/sanitizer_posix.cc ../../sanitizer_common/sanitizer_posix_libcdep.cc ../../sanitizer_common/sanitizer_procmaps_common.cc ../../sanitizer_common/sanitizer_procmaps_freebsd.cc ../../sanitizer_common/sanitizer_linux.cc ../../sanitizer_common/sanitizer_linux_libcdep.cc ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc " elif [ "`uname -a | grep Darwin`" != "" ]; then SUFFIX="darwin_amd64" OSCFLAGS="-fPIC -Wno-unused-const-variable -Wno-unknown-warning-option -mmacosx-version-min=10.7" OSLDFLAGS="-lpthread -fPIC -fpie -mmacosx-version-min=10.7" SRCS=" $SRCS ../rtl/tsan_platform_mac.cc ../../sanitizer_common/sanitizer_mac.cc ../../sanitizer_common/sanitizer_posix.cc ../../sanitizer_common/sanitizer_posix_libcdep.cc ../../sanitizer_common/sanitizer_procmaps_mac.cc " elif [ "`uname -a | grep MINGW`" != "" ]; then SUFFIX="windows_amd64" OSCFLAGS="-Wno-error=attributes -Wno-attributes -Wno-unused-const-variable -Wno-unknown-warning-option" OSLDFLAGS="" SRCS=" $SRCS ../rtl/tsan_platform_windows.cc ../../sanitizer_common/sanitizer_win.cc " else echo Unknown platform exit 1 fi CC=${CC:-gcc} IN_TMPDIR=${IN_TMPDIR:-0} SILENT=${SILENT:-0} if [ $IN_TMPDIR != "0" ]; then DIR=$(mktemp -qd 
/tmp/gotsan.XXXXXXXXXX) cleanup() { rm -rf $DIR } trap cleanup EXIT else DIR=. fi SRCS="$SRCS $ADD_SRCS" rm -f $DIR/gotsan.cc for F in $SRCS; do cat $F >> $DIR/gotsan.cc done FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS" if [ "$DEBUG" = "" ]; then FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -msse3 -fomit-frame-pointer" else FLAGS="$FLAGS -DSANITIZER_DEBUG=1 -g" fi if [ "$SILENT" != "1" ]; then echo $CC gotsan.cc -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS fi $CC $DIR/gotsan.cc -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS $CC $OSCFLAGS test.c $DIR/race_$SUFFIX.syso -m64 -g -o $DIR/test $OSLDFLAGS export GORACE="exitcode=0 atexit_sleep_ms=0" if [ "$SILENT" != "1" ]; then $DIR/test else $DIR/test 2>/dev/null fi Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_external.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_external.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_external.cc (revision 317687) @@ -1,81 +1,94 @@ //===-- tsan_external.cc --------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #include "tsan_rtl.h" #include "tsan_interceptors.h" namespace __tsan { #define CALLERPC ((uptr)__builtin_return_address(0)) -const uptr kMaxTag = 128; // Limited to 65,536, since MBlock only stores tags - // as 16-bit values, see tsan_defs.h. +const char *registered_tags[kExternalTagMax]; +static atomic_uint32_t used_tags{kExternalTagFirstUserAvailable}; // NOLINT. -const char *registered_tags[kMaxTag]; -static atomic_uint32_t used_tags{1}; // Tag 0 means "no tag". NOLINT - const char *GetObjectTypeFromTag(uptr tag) { if (tag == 0) return nullptr; // Invalid/corrupted tag? Better return NULL and let the caller deal with it. 
if (tag >= atomic_load(&used_tags, memory_order_relaxed)) return nullptr; return registered_tags[tag]; } +void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) { + FuncEntry(thr, (uptr)®istered_tags[tag]); +} + +uptr TagFromShadowStackFrame(uptr pc) { + uptr tag_count = atomic_load(&used_tags, memory_order_relaxed); + void *pc_ptr = (void *)pc; + if (pc_ptr < ®istered_tags[0] || pc_ptr >= ®istered_tags[tag_count]) + return 0; + return (const char **)pc_ptr - ®istered_tags[0]; +} + +#if !SANITIZER_GO + typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int); void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) { CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed)); ThreadState *thr = cur_thread(); - thr->external_tag = (uptr)tag; if (caller_pc) FuncEntry(thr, (uptr)caller_pc); + InsertShadowStackFrameForTag(thr, (uptr)tag); bool in_ignored_lib; if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) { access(thr, CALLERPC, (uptr)addr, kSizeLog1); } + FuncExit(thr); if (caller_pc) FuncExit(thr); - thr->external_tag = 0; } extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void *__tsan_external_register_tag(const char *object_type) { uptr new_tag = atomic_fetch_add(&used_tags, 1, memory_order_relaxed); - CHECK_LT(new_tag, kMaxTag); + CHECK_LT(new_tag, kExternalTagMax); registered_tags[new_tag] = internal_strdup(object_type); return (void *)new_tag; } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_external_assign_tag(void *addr, void *tag) { CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed)); Allocator *a = allocator(); MBlock *b = nullptr; if (a->PointerIsMine((void *)addr)) { void *block_begin = a->GetBlockBegin((void *)addr); if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin); } if (b) { b->tag = (uptr)tag; } } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_external_read(void *addr, void *caller_pc, void *tag) { ExternalAccess(addr, caller_pc, tag, MemoryRead); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_external_write(void *addr, void *caller_pc, void *tag) { ExternalAccess(addr, caller_pc, tag, MemoryWrite); } } // extern "C" + +#endif // !SANITIZER_GO } // namespace __tsan Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_ann.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_ann.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_ann.cc (revision 317687) @@ -1,553 +1,553 @@ //===-- tsan_interface_ann.cc ---------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
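// ---- Illustrative aside (editor's sketch); not part of the r317687 diff. ----
// A minimal sketch of how an instrumented library could drive the external-tag
// entry points shown above: register a tag once, attach it to heap objects it
// hands out, and report accesses through __tsan_external_read/write so races
// are reported as "race on a library object" with the registered type name.
// MyHandle and its accessors are hypothetical; only the __tsan_external_*
// prototypes come from the diff above.
extern "C" {
void *__tsan_external_register_tag(const char *object_type);
void __tsan_external_assign_tag(void *addr, void *tag);
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
void __tsan_external_write(void *addr, void *caller_pc, void *tag);
}

namespace {

void *handle_tag;  // Registered once, e.g. from the library's init hook.

struct MyHandle {
  int payload;
};

MyHandle *createHandle() {
  if (!handle_tag)
    handle_tag = __tsan_external_register_tag("MyHandle");
  MyHandle *H = new MyHandle();
  // Tagging only takes effect for heap blocks owned by TSan's allocator.
  __tsan_external_assign_tag(H, handle_tag);
  return H;
}

int readHandle(MyHandle *H) {
  __tsan_external_read(H, __builtin_return_address(0), handle_tag);
  return H->payload;
}

void writeHandle(MyHandle *H, int V) {
  __tsan_external_write(H, __builtin_return_address(0), handle_tag);
  H->payload = V;
}

}  // namespace
// ---- End of aside. ----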
// //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "tsan_interface_ann.h" #include "tsan_mutex.h" #include "tsan_report.h" #include "tsan_rtl.h" #include "tsan_mman.h" #include "tsan_flags.h" #include "tsan_platform.h" #include "tsan_vector.h" #define CALLERPC ((uptr)__builtin_return_address(0)) using namespace __tsan; // NOLINT namespace __tsan { class ScopedAnnotation { public: ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc) : thr_(thr) { FuncEntry(thr_, pc); DPrintf("#%d: annotation %s()\n", thr_->tid, aname); } ~ScopedAnnotation() { FuncExit(thr_); CheckNoLocks(thr_); } private: ThreadState *const thr_; }; #define SCOPED_ANNOTATION_RET(typ, ret) \ if (!flags()->enable_annotations) \ return ret; \ ThreadState *thr = cur_thread(); \ const uptr caller_pc = (uptr)__builtin_return_address(0); \ StatInc(thr, StatAnnotation); \ StatInc(thr, Stat##typ); \ ScopedAnnotation sa(thr, __func__, caller_pc); \ const uptr pc = StackTrace::GetCurrentPc(); \ (void)pc; \ /**/ #define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, ) static const int kMaxDescLen = 128; struct ExpectRace { ExpectRace *next; ExpectRace *prev; atomic_uintptr_t hitcount; atomic_uintptr_t addcount; uptr addr; uptr size; char *file; int line; char desc[kMaxDescLen]; }; struct DynamicAnnContext { Mutex mtx; ExpectRace expect; ExpectRace benign; DynamicAnnContext() : mtx(MutexTypeAnnotations, StatMtxAnnotations) { } }; static DynamicAnnContext *dyn_ann_ctx; static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64); static void AddExpectRace(ExpectRace *list, char *f, int l, uptr addr, uptr size, char *desc) { ExpectRace *race = list->next; for (; race != list; race = race->next) { if (race->addr == addr && race->size == size) { atomic_store_relaxed(&race->addcount, atomic_load_relaxed(&race->addcount) + 1); return; } } race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace)); race->addr = addr; race->size = size; race->file = f; race->line = l; race->desc[0] = 0; atomic_store_relaxed(&race->hitcount, 0); atomic_store_relaxed(&race->addcount, 1); if (desc) { int i = 0; for (; i < kMaxDescLen - 1 && desc[i]; i++) race->desc[i] = desc[i]; race->desc[i] = 0; } race->prev = list; race->next = list->next; race->next->prev = race; list->next = race; } static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) { for (ExpectRace *race = list->next; race != list; race = race->next) { uptr maxbegin = max(race->addr, addr); uptr minend = min(race->addr + race->size, addr + size); if (maxbegin < minend) return race; } return 0; } static bool CheckContains(ExpectRace *list, uptr addr, uptr size) { ExpectRace *race = FindRace(list, addr, size); if (race == 0) return false; DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n", race->desc, race->addr, (int)race->size, race->file, race->line); atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed); return true; } static void InitList(ExpectRace *list) { list->next = list; list->prev = list; } void InitializeDynamicAnnotations() { dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext; InitList(&dyn_ann_ctx->expect); InitList(&dyn_ann_ctx->benign); } bool IsExpectedReport(uptr addr, uptr size) { ReadLock lock(&dyn_ann_ctx->mtx); if (CheckContains(&dyn_ann_ctx->expect, addr, size)) 
return true; if (CheckContains(&dyn_ann_ctx->benign, addr, size)) return true; return false; } static void CollectMatchedBenignRaces(Vector *matched, int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) { ExpectRace *list = &dyn_ann_ctx->benign; for (ExpectRace *race = list->next; race != list; race = race->next) { (*unique_count)++; const uptr cnt = atomic_load_relaxed(&(race->*counter)); if (cnt == 0) continue; *hit_count += cnt; uptr i = 0; for (; i < matched->Size(); i++) { ExpectRace *race0 = &(*matched)[i]; if (race->line == race0->line && internal_strcmp(race->file, race0->file) == 0 && internal_strcmp(race->desc, race0->desc) == 0) { atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed); break; } } if (i == matched->Size()) matched->PushBack(*race); } } void PrintMatchedBenignRaces() { Lock lock(&dyn_ann_ctx->mtx); int unique_count = 0; int hit_count = 0; int add_count = 0; Vector hit_matched(MBlockScopedBuf); CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count, &ExpectRace::hitcount); Vector add_matched(MBlockScopedBuf); CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count, &ExpectRace::addcount); if (hit_matched.Size()) { Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n", hit_count, (int)internal_getpid()); for (uptr i = 0; i < hit_matched.Size(); i++) { Printf("%d %s:%d %s\n", atomic_load_relaxed(&hit_matched[i].hitcount), hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc); } } if (hit_matched.Size()) { Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique" " (pid=%d):\n", add_count, unique_count, (int)internal_getpid()); for (uptr i = 0; i < add_matched.Size(); i++) { Printf("%d %s:%d %s\n", atomic_load_relaxed(&add_matched[i].addcount), add_matched[i].file, add_matched[i].line, add_matched[i].desc); } } } static void ReportMissedExpectedRace(ExpectRace *race) { Printf("==================\n"); Printf("WARNING: ThreadSanitizer: missed expected data race\n"); Printf(" %s addr=%zx %s:%d\n", race->desc, race->addr, race->file, race->line); Printf("==================\n"); } } // namespace __tsan using namespace __tsan; // NOLINT extern "C" { void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensBefore); Release(thr, pc, addr); } void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensAfter); Acquire(thr, pc, addr); } void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) { SCOPED_ANNOTATION(AnnotateCondVarSignal); } void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) { SCOPED_ANNOTATION(AnnotateCondVarSignalAll); } void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) { SCOPED_ANNOTATION(AnnotateMutexIsNotPHB); } void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv, uptr lock) { SCOPED_ANNOTATION(AnnotateCondVarWait); } void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockCreate); MutexCreate(thr, pc, m, MutexFlagWriteReentrant); } void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockCreateStatic); MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit); } void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockDestroy); MutexDestroy(thr, pc, m); } void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) { 
SCOPED_ANNOTATION(AnnotateRWLockAcquired); if (is_w) MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock); else MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock); } void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m, uptr is_w) { SCOPED_ANNOTATION(AnnotateRWLockReleased); if (is_w) MutexUnlock(thr, pc, m); else MutexReadUnlock(thr, pc, m); } void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) { SCOPED_ANNOTATION(AnnotateTraceMemory); } void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) { SCOPED_ANNOTATION(AnnotateFlushState); } void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem, uptr size) { SCOPED_ANNOTATION(AnnotateNewMemory); } void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) { SCOPED_ANNOTATION(AnnotateNoOp); } void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) { SCOPED_ANNOTATION(AnnotateFlushExpectedRaces); Lock lock(&dyn_ann_ctx->mtx); while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) { ExpectRace *race = dyn_ann_ctx->expect.next; if (atomic_load_relaxed(&race->hitcount) == 0) { ctx->nmissed_expected++; ReportMissedExpectedRace(race); } race->prev->next = race->next; race->next->prev = race->prev; internal_free(race); } } void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection( char *f, int l, int enable) { SCOPED_ANNOTATION(AnnotateEnableRaceDetection); // FIXME: Reconsider this functionality later. It may be irrelevant. } void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar( char *f, int l, uptr mu) { SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar); } void INTERFACE_ATTRIBUTE AnnotatePCQGet( char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQGet); } void INTERFACE_ATTRIBUTE AnnotatePCQPut( char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQPut); } void INTERFACE_ATTRIBUTE AnnotatePCQDestroy( char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQDestroy); } void INTERFACE_ATTRIBUTE AnnotatePCQCreate( char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQCreate); } void INTERFACE_ATTRIBUTE AnnotateExpectRace( char *f, int l, uptr mem, char *desc) { SCOPED_ANNOTATION(AnnotateExpectRace); Lock lock(&dyn_ann_ctx->mtx); AddExpectRace(&dyn_ann_ctx->expect, f, l, mem, 1, desc); DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l); } static void BenignRaceImpl( char *f, int l, uptr mem, uptr size, char *desc) { Lock lock(&dyn_ann_ctx->mtx); AddExpectRace(&dyn_ann_ctx->benign, f, l, mem, size, desc); DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l); } // FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm. 
void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized( char *f, int l, uptr mem, uptr size, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRaceSized); BenignRaceImpl(f, l, mem, size, desc); } void INTERFACE_ATTRIBUTE AnnotateBenignRace( char *f, int l, uptr mem, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRace); BenignRaceImpl(f, l, mem, 1, desc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin); ThreadIgnoreBegin(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd); ThreadIgnoreEnd(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin); ThreadIgnoreBegin(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd); ThreadIgnoreEnd(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin); ThreadIgnoreSyncBegin(thr, pc); } void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd); ThreadIgnoreSyncEnd(thr, pc); } void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange( char *f, int l, uptr addr, uptr size) { SCOPED_ANNOTATION(AnnotatePublishMemoryRange); } void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange( char *f, int l, uptr addr, uptr size) { SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange); } void INTERFACE_ATTRIBUTE AnnotateThreadName( char *f, int l, char *name) { SCOPED_ANNOTATION(AnnotateThreadName); ThreadSetName(thr, name); } // We deliberately omit the implementation of WTFAnnotateHappensBefore() and // WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate // atomic operations, which should be handled by ThreadSanitizer correctly. void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensBefore); } void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensAfter); } void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized( char *f, int l, uptr mem, uptr sz, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRaceSized); BenignRaceImpl(f, l, mem, sz, desc); } int INTERFACE_ATTRIBUTE RunningOnValgrind() { return flags()->running_on_valgrind; } double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) { return 10.0; } const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) { if (internal_strcmp(query, "pure_happens_before") == 0) return "1"; else return "0"; } void INTERFACE_ATTRIBUTE AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {} void INTERFACE_ATTRIBUTE AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {} // Note: the parameter is called flagz, because flags is already taken // by the global function that returns flags. 
INTERFACE_ATTRIBUTE void __tsan_mutex_create(void *m, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_create); MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask); } INTERFACE_ATTRIBUTE void __tsan_mutex_destroy(void *m, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_destroy); - MutexDestroy(thr, pc, (uptr)m); + MutexDestroy(thr, pc, (uptr)m, flagz); } INTERFACE_ATTRIBUTE void __tsan_mutex_pre_lock(void *m, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_pre_lock); if (!(flagz & MutexFlagTryLock)) { if (flagz & MutexFlagReadLock) MutexPreReadLock(thr, pc, (uptr)m); else MutexPreLock(thr, pc, (uptr)m); } ThreadIgnoreBegin(thr, pc, false); ThreadIgnoreSyncBegin(thr, pc, false); } INTERFACE_ATTRIBUTE void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) { SCOPED_ANNOTATION(__tsan_mutex_post_lock); ThreadIgnoreSyncEnd(thr, pc); ThreadIgnoreEnd(thr, pc); if (!(flagz & MutexFlagTryLockFailed)) { if (flagz & MutexFlagReadLock) MutexPostReadLock(thr, pc, (uptr)m, flagz); else MutexPostLock(thr, pc, (uptr)m, flagz, rec); } } INTERFACE_ATTRIBUTE int __tsan_mutex_pre_unlock(void *m, unsigned flagz) { SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0); int ret = 0; if (flagz & MutexFlagReadLock) { CHECK(!(flagz & MutexFlagRecursiveUnlock)); MutexReadUnlock(thr, pc, (uptr)m); } else { ret = MutexUnlock(thr, pc, (uptr)m, flagz); } ThreadIgnoreBegin(thr, pc, false); ThreadIgnoreSyncBegin(thr, pc, false); return ret; } INTERFACE_ATTRIBUTE void __tsan_mutex_post_unlock(void *m, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_post_unlock); ThreadIgnoreSyncEnd(thr, pc); ThreadIgnoreEnd(thr, pc); } INTERFACE_ATTRIBUTE void __tsan_mutex_pre_signal(void *addr, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_pre_signal); ThreadIgnoreBegin(thr, pc, false); ThreadIgnoreSyncBegin(thr, pc, false); } INTERFACE_ATTRIBUTE void __tsan_mutex_post_signal(void *addr, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_post_signal); ThreadIgnoreSyncEnd(thr, pc); ThreadIgnoreEnd(thr, pc); } INTERFACE_ATTRIBUTE void __tsan_mutex_pre_divert(void *addr, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_pre_divert); // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal. ThreadIgnoreSyncEnd(thr, pc); ThreadIgnoreEnd(thr, pc); } INTERFACE_ATTRIBUTE void __tsan_mutex_post_divert(void *addr, unsigned flagz) { SCOPED_ANNOTATION(__tsan_mutex_post_divert); ThreadIgnoreBegin(thr, pc, false); ThreadIgnoreSyncBegin(thr, pc, false); } } // extern "C" Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_linux.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_linux.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_linux.cc (revision 317687) @@ -1,407 +1,393 @@ //===-- tsan_platform_linux.cc --------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Linux- and FreeBSD-specific code. 
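// ---- Illustrative aside (editor's sketch); not part of the r317687 diff. ----
// A minimal sketch of a custom lock annotated with the __tsan_mutex_* entry
// points defined above, assuming the public <sanitizer/tsan_interface.h>
// header that declares them. Flag value 0 (a plain, non-recursive write lock)
// is used throughout; note that the hunk above starts forwarding the flagz
// argument of __tsan_mutex_destroy() down to MutexDestroy().
#include <atomic>
#include <sanitizer/tsan_interface.h>

class AnnotatedSpinLock {
 public:
  AnnotatedSpinLock() { __tsan_mutex_create(this, 0); }
  ~AnnotatedSpinLock() { __tsan_mutex_destroy(this, 0); }

  void Lock() {
    __tsan_mutex_pre_lock(this, 0);
    // Accesses in the spin loop are ignored by TSan between pre_lock and
    // post_lock; the acquisition itself is modeled by post_lock.
    while (locked_.exchange(true, std::memory_order_acquire)) {
    }
    __tsan_mutex_post_lock(this, 0, 0);
  }

  void Unlock() {
    __tsan_mutex_pre_unlock(this, 0);
    locked_.store(false, std::memory_order_release);
    __tsan_mutex_post_unlock(this, 0);
  }

 private:
  std::atomic<bool> locked_{false};
};
// ---- End of aside. ----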
//===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_LINUX || SANITIZER_FREEBSD #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_flags.h" #include #include #include #include #include #include #include #include #if SANITIZER_LINUX #include #include #endif #include #include #include #include #include #include #include #include #include #include #if SANITIZER_LINUX #define __need_res_state #include #endif #ifdef sa_handler # undef sa_handler #endif #ifdef sa_sigaction # undef sa_sigaction #endif #if SANITIZER_FREEBSD extern "C" void *__libc_stack_end; void *__libc_stack_end = 0; #endif #if SANITIZER_LINUX && defined(__aarch64__) void InitializeGuardPtr() __attribute__((visibility("hidden"))); #endif namespace __tsan { #ifdef TSAN_RUNTIME_VMA // Runtime detected VMA size. uptr vmaSize; #endif enum { MemTotal = 0, MemShadow = 1, MemMeta = 2, MemFile = 3, MemMmap = 4, MemTrace = 5, MemHeap = 6, MemOther = 7, MemCount = 8, }; void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem, uptr stats_size) { mem[MemTotal] += rss; if (p >= ShadowBeg() && p < ShadowEnd()) mem[MemShadow] += rss; else if (p >= MetaShadowBeg() && p < MetaShadowEnd()) mem[MemMeta] += rss; #if !SANITIZER_GO else if (p >= HeapMemBeg() && p < HeapMemEnd()) mem[MemHeap] += rss; else if (p >= LoAppMemBeg() && p < LoAppMemEnd()) mem[file ? MemFile : MemMmap] += rss; else if (p >= HiAppMemBeg() && p < HiAppMemEnd()) mem[file ? MemFile : MemMmap] += rss; #else else if (p >= AppMemBeg() && p < AppMemEnd()) mem[file ? MemFile : MemMmap] += rss; #endif else if (p >= TraceMemBeg() && p < TraceMemEnd()) mem[MemTrace] += rss; else mem[MemOther] += rss; } void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { uptr mem[MemCount]; internal_memset(mem, 0, sizeof(mem[0]) * MemCount); __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7); StackDepotStats *stacks = StackDepotGetStats(); internal_snprintf(buf, buf_size, "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd" " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n", mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20, mem[MemOther] >> 20, stacks->allocated >> 20, stacks->n_uniq_ids, nlive, nthread); } #if SANITIZER_LINUX void FlushShadowMemoryCallback( const SuspendedThreadsList &suspended_threads_list, void *argument) { ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd()); } #endif void FlushShadowMemory() { #if SANITIZER_LINUX StopTheWorld(FlushShadowMemoryCallback, 0); #endif } #if !SANITIZER_GO // Mark shadow for .rodata sections with the special kShadowRodata marker. // Accesses to .rodata can't race, so this saves time, memory and trace space. static void MapRodata() { // First create temp file. 
const char *tmpdir = GetEnv("TMPDIR"); if (tmpdir == 0) tmpdir = GetEnv("TEST_TMPDIR"); #ifdef P_tmpdir if (tmpdir == 0) tmpdir = P_tmpdir; #endif if (tmpdir == 0) return; char name[256]; internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d", tmpdir, (int)internal_getpid()); uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600); if (internal_iserror(openrv)) return; internal_unlink(name); // Unlink it now, so that we can reuse the buffer. fd_t fd = openrv; // Fill the file with kShadowRodata. const uptr kMarkerSize = 512 * 1024 / sizeof(u64); InternalScopedBuffer marker(kMarkerSize); // volatile to prevent insertion of memset for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++) *p = kShadowRodata; internal_write(fd, marker.data(), marker.size()); // Map the file into memory. uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, fd, 0); if (internal_iserror(page)) { internal_close(fd); return; } // Map the file into shadow of .rodata sections. MemoryMappingLayout proc_maps(/*cache_enabled*/true); uptr start, end, offset, prot; // Reusing the buffer 'name'. while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) { if (name[0] != 0 && name[0] != '[' && (prot & MemoryMappingLayout::kProtectionRead) && (prot & MemoryMappingLayout::kProtectionExecute) && !(prot & MemoryMappingLayout::kProtectionWrite) && IsAppMem(start)) { // Assume it's .rodata char *shadow_start = (char*)MemToShadow(start); char *shadow_end = (char*)MemToShadow(end); for (char *p = shadow_start; p < shadow_end; p += marker.size()) { internal_mmap(p, Min(marker.size(), shadow_end - p), PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0); } } } internal_close(fd); } void InitializeShadowMemoryPlatform() { MapRodata(); } #endif // #if !SANITIZER_GO void InitializePlatformEarly() { #ifdef TSAN_RUNTIME_VMA vmaSize = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); #if defined(__aarch64__) if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize); Die(); } #elif defined(__powerpc64__) if (vmaSize != 44 && vmaSize != 46) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); Printf("FATAL: Found %d - Supported 44 and 46\n", vmaSize); Die(); } #endif #endif } void InitializePlatform() { DisableCoreDumperIfNecessary(); // Go maps shadow memory lazily and works fine with limited address space. // Unlimited stack is not a problem as well, because the executable // is not compiled with -pie. if (!SANITIZER_GO) { bool reexec = false; // TSan doesn't play well with unlimited stack size (as stack // overlaps with shadow memory). If we detect unlimited stack size, // we re-exec the program with limited stack size as a best effort. if (StackSizeIsUnlimited()) { const uptr kMaxStackSize = 32 * 1024 * 1024; VReport(1, "Program is run with unlimited stack size, which wouldn't " "work with ThreadSanitizer.\n" "Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize); SetStackSizeLimitInBytes(kMaxStackSize); reexec = true; } if (!AddressSpaceIsUnlimited()) { Report("WARNING: Program is run with limited virtual address space," " which wouldn't work with ThreadSanitizer.\n"); Report("Re-execing with unlimited virtual address space.\n"); SetAddressSpaceUnlimited(); reexec = true; } #if SANITIZER_LINUX && defined(__aarch64__) // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." 
is introduced in // linux kernel, the random gap between stack and mapped area is increased // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover // this big range, we should disable randomized virtual space on aarch64. int old_personality = personality(0xffffffff); if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) { VReport(1, "WARNING: Program is run with randomized virtual address " "space, which wouldn't work with ThreadSanitizer.\n" "Re-execing with fixed virtual address space.\n"); CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); reexec = true; } // Initialize the guard pointer used in {sig}{set,long}jump. InitializeGuardPtr(); #endif if (reexec) ReExec(); } #if !SANITIZER_GO CheckAndProtect(); InitTlsSize(); #endif } #if !SANITIZER_GO // Extract file descriptors passed to glibc internal __res_iclose function. // This is required to properly "close" the fds, because we do not see internal // closes within glibc. The code is a pure hack. int ExtractResolvFDs(void *state, int *fds, int nfd) { #if SANITIZER_LINUX && !SANITIZER_ANDROID int cnt = 0; __res_state *statp = (__res_state*)state; for (int i = 0; i < MAXNS && cnt < nfd; i++) { if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1) fds[cnt++] = statp->_u._ext.nssocks[i]; } return cnt; #else return 0; #endif } // Extract file descriptors passed via UNIX domain sockets. // This is requried to properly handle "open" of these fds. // see 'man recvmsg' and 'man 3 cmsg'. int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) { int res = 0; msghdr *msg = (msghdr*)msgp; struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) continue; int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]); for (int i = 0; i < n; i++) { fds[res++] = ((int*)CMSG_DATA(cmsg))[i]; if (res == nfd) return res; } } return res; } // Note: this function runs with async signals enabled, // so it must not touch any tsan state. int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg) { // pthread_cleanup_push/pop are hardcore macros mess. // We can't intercept nor call them w/o including pthread.h. int res; pthread_cleanup_push(cleanup, arg); res = fn(c, m, abstime); pthread_cleanup_pop(0); return res; } #endif #if !SANITIZER_GO void ReplaceSystemMalloc() { } #endif #if !SANITIZER_GO #if SANITIZER_ANDROID - -#if defined(__aarch64__) -# define __get_tls() \ - ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; }) -#elif defined(__x86_64__) -# define __get_tls() \ - ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; }) -#else -#error unsupported architecture -#endif - -// On Android, __thread is not supported. So we store the pointer to ThreadState -// in TLS_SLOT_TSAN, which is the tls slot allocated by Android bionic for tsan. -static const int TLS_SLOT_TSAN = 8; // On Android, one thread can call intercepted functions after // DestroyThreadState(), so add a fake thread state for "dead" threads. 
static ThreadState *dead_thread_state = nullptr; ThreadState *cur_thread() { - ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN]; + ThreadState* thr = reinterpret_cast(*get_android_tls_ptr()); if (thr == nullptr) { __sanitizer_sigset_t emptyset; internal_sigfillset(&emptyset); __sanitizer_sigset_t oldset; CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset)); - thr = reinterpret_cast(__get_tls()[TLS_SLOT_TSAN]); + thr = reinterpret_cast(*get_android_tls_ptr()); if (thr == nullptr) { thr = reinterpret_cast(MmapOrDie(sizeof(ThreadState), "ThreadState")); - __get_tls()[TLS_SLOT_TSAN] = thr; + *get_android_tls_ptr() = reinterpret_cast(thr); if (dead_thread_state == nullptr) { dead_thread_state = reinterpret_cast( MmapOrDie(sizeof(ThreadState), "ThreadState")); dead_thread_state->fast_state.SetIgnoreBit(); dead_thread_state->ignore_interceptors = 1; dead_thread_state->is_dead = true; *const_cast(&dead_thread_state->tid) = -1; CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState), PROT_READ)); } } CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr)); } return thr; } void cur_thread_finalize() { __sanitizer_sigset_t emptyset; internal_sigfillset(&emptyset); __sanitizer_sigset_t oldset; CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset)); - ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN]; + ThreadState* thr = reinterpret_cast(*get_android_tls_ptr()); if (thr != dead_thread_state) { - __get_tls()[TLS_SLOT_TSAN] = dead_thread_state; + *get_android_tls_ptr() = reinterpret_cast(dead_thread_state); UnmapOrDie(thr, sizeof(ThreadState)); } CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr)); } #endif // SANITIZER_ANDROID #endif // if !SANITIZER_GO } // namespace __tsan #endif // SANITIZER_LINUX || SANITIZER_FREEBSD Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.cc (revision 317687) @@ -1,485 +1,485 @@ //===-- tsan_report.cc ----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
// //===----------------------------------------------------------------------===// #include "tsan_report.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stacktrace_printer.h" namespace __tsan { ReportStack::ReportStack() : frames(nullptr), suppressable(false) {} ReportStack *ReportStack::New() { void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack)); return new(mem) ReportStack(); } ReportLocation::ReportLocation(ReportLocationType type) : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0), fd(0), suppressable(false), stack(nullptr) {} ReportLocation *ReportLocation::New(ReportLocationType type) { void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation)); return new(mem) ReportLocation(type); } class Decorator: public __sanitizer::SanitizerCommonDecorator { public: Decorator() : SanitizerCommonDecorator() { } const char *Warning() { return Red(); } const char *EndWarning() { return Default(); } const char *Access() { return Blue(); } const char *EndAccess() { return Default(); } const char *ThreadDescription() { return Cyan(); } const char *EndThreadDescription() { return Default(); } const char *Location() { return Green(); } const char *EndLocation() { return Default(); } const char *Sleep() { return Yellow(); } const char *EndSleep() { return Default(); } const char *Mutex() { return Magenta(); } const char *EndMutex() { return Default(); } }; ReportDesc::ReportDesc() : stacks(MBlockReportStack) , mops(MBlockReportMop) , locs(MBlockReportLoc) , mutexes(MBlockReportMutex) , threads(MBlockReportThread) , unique_tids(MBlockReportThread) , sleep() , count() { } ReportMop::ReportMop() : mset(MBlockReportMutex) { } ReportDesc::~ReportDesc() { // FIXME(dvyukov): it must be leaking a lot of memory. } #if !SANITIZER_GO const int kThreadBufSize = 32; const char *thread_name(char *buf, int tid) { if (tid == 0) return "main thread"; internal_snprintf(buf, kThreadBufSize, "thread T%d", tid); return buf; } static const char *ReportTypeString(ReportType typ) { if (typ == ReportTypeRace) return "data race"; if (typ == ReportTypeVptrRace) return "data race on vptr (ctor/dtor vs virtual call)"; if (typ == ReportTypeUseAfterFree) return "heap-use-after-free"; if (typ == ReportTypeVptrUseAfterFree) return "heap-use-after-free (virtual call vs free)"; if (typ == ReportTypeExternalRace) return "race on a library object"; if (typ == ReportTypeThreadLeak) return "thread leak"; if (typ == ReportTypeMutexDestroyLocked) return "destroy of a locked mutex"; if (typ == ReportTypeMutexDoubleLock) return "double lock of a mutex"; if (typ == ReportTypeMutexInvalidAccess) return "use of an invalid mutex (e.g. 
uninitialized or destroyed)"; if (typ == ReportTypeMutexBadUnlock) return "unlock of an unlocked mutex (or by a wrong thread)"; if (typ == ReportTypeMutexBadReadLock) return "read lock of a write locked mutex"; if (typ == ReportTypeMutexBadReadUnlock) return "read unlock of a write locked mutex"; if (typ == ReportTypeSignalUnsafe) return "signal-unsafe call inside of a signal"; if (typ == ReportTypeErrnoInSignal) return "signal handler spoils errno"; if (typ == ReportTypeDeadlock) return "lock-order-inversion (potential deadlock)"; return ""; } #if SANITIZER_MAC static const char *const kInterposedFunctionPrefix = "wrap_"; #else static const char *const kInterposedFunctionPrefix = "__interceptor_"; #endif void PrintStack(const ReportStack *ent) { if (ent == 0 || ent->frames == 0) { Printf(" [failed to restore the stack]\n\n"); return; } SymbolizedStack *frame = ent->frames; for (int i = 0; frame && frame->info.address; frame = frame->next, i++) { InternalScopedString res(2 * GetPageSizeCached()); RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info, common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix, kInterposedFunctionPrefix); Printf("%s\n", res.data()); } Printf("\n"); } static void PrintMutexSet(Vector const& mset) { for (uptr i = 0; i < mset.Size(); i++) { if (i == 0) Printf(" (mutexes:"); const ReportMopMutex m = mset[i]; Printf(" %s M%llu", m.write ? "write" : "read", m.id); Printf(i == mset.Size() - 1 ? ")" : ","); } } static const char *MopDesc(bool first, bool write, bool atomic) { return atomic ? (first ? (write ? "Atomic write" : "Atomic read") : (write ? "Previous atomic write" : "Previous atomic read")) : (first ? (write ? "Write" : "Read") : (write ? "Previous write" : "Previous read")); } static const char *ExternalMopDesc(bool first, bool write) { return first ? (write ? "Mutating" : "Read-only") : (write ? 
"Previous mutating" : "Previous read-only"); } static void PrintMop(const ReportMop *mop, bool first) { Decorator d; char thrbuf[kThreadBufSize]; Printf("%s", d.Access()); const char *object_type = GetObjectTypeFromTag(mop->external_tag); - if (!object_type) { + if (mop->external_tag == kExternalTagNone || !object_type) { Printf(" %s of size %d at %p by %s", MopDesc(first, mop->write, mop->atomic), mop->size, (void *)mop->addr, thread_name(thrbuf, mop->tid)); } else { Printf(" %s access of %s at %p by %s", ExternalMopDesc(first, mop->write), object_type, (void *)mop->addr, thread_name(thrbuf, mop->tid)); } PrintMutexSet(mop->mset); Printf(":\n"); Printf("%s", d.EndAccess()); PrintStack(mop->stack); } static void PrintLocation(const ReportLocation *loc) { Decorator d; char thrbuf[kThreadBufSize]; bool print_stack = false; Printf("%s", d.Location()); if (loc->type == ReportLocationGlobal) { const DataInfo &global = loc->global; if (global.size != 0) Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n", global.name, global.size, global.start, StripModuleName(global.module), global.module_offset); else Printf(" Location is global '%s' at %p (%s+%p)\n\n", global.name, global.start, StripModuleName(global.module), global.module_offset); } else if (loc->type == ReportLocationHeap) { char thrbuf[kThreadBufSize]; const char *object_type = GetObjectTypeFromTag(loc->external_tag); if (!object_type) { Printf(" Location is heap block of size %zu at %p allocated by %s:\n", loc->heap_chunk_size, loc->heap_chunk_start, thread_name(thrbuf, loc->tid)); } else { Printf(" Location is %s of size %zu at %p allocated by %s:\n", object_type, loc->heap_chunk_size, loc->heap_chunk_start, thread_name(thrbuf, loc->tid)); } print_stack = true; } else if (loc->type == ReportLocationStack) { Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid)); } else if (loc->type == ReportLocationTLS) { Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid)); } else if (loc->type == ReportLocationFD) { Printf(" Location is file descriptor %d created by %s at:\n", loc->fd, thread_name(thrbuf, loc->tid)); print_stack = true; } Printf("%s", d.EndLocation()); if (print_stack) PrintStack(loc->stack); } static void PrintMutexShort(const ReportMutex *rm, const char *after) { Decorator d; Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.EndMutex(), after); } static void PrintMutexShortWithAddress(const ReportMutex *rm, const char *after) { Decorator d; Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.EndMutex(), after); } static void PrintMutex(const ReportMutex *rm) { Decorator d; if (rm->destroyed) { Printf("%s", d.Mutex()); Printf(" Mutex M%llu is already destroyed.\n\n", rm->id); Printf("%s", d.EndMutex()); } else { Printf("%s", d.Mutex()); Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr); Printf("%s", d.EndMutex()); PrintStack(rm->stack); } } static void PrintThread(const ReportThread *rt) { Decorator d; if (rt->id == 0) // Little sense in describing the main thread. return; Printf("%s", d.ThreadDescription()); Printf(" Thread T%d", rt->id); if (rt->name && rt->name[0] != '\0') Printf(" '%s'", rt->name); char thrbuf[kThreadBufSize]; const char *thread_status = rt->running ? 
"running" : "finished"; if (rt->workerthread) { Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status); Printf("\n"); Printf("%s", d.EndThreadDescription()); return; } Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status, thread_name(thrbuf, rt->parent_tid)); if (rt->stack) Printf(" at:"); Printf("\n"); Printf("%s", d.EndThreadDescription()); PrintStack(rt->stack); } static void PrintSleep(const ReportStack *s) { Decorator d; Printf("%s", d.Sleep()); Printf(" As if synchronized via sleep:\n"); Printf("%s", d.EndSleep()); PrintStack(s); } static ReportStack *ChooseSummaryStack(const ReportDesc *rep) { if (rep->mops.Size()) return rep->mops[0]->stack; if (rep->stacks.Size()) return rep->stacks[0]; if (rep->mutexes.Size()) return rep->mutexes[0]->stack; if (rep->threads.Size()) return rep->threads[0]->stack; return 0; } static bool FrameIsInternal(const SymbolizedStack *frame) { if (frame == 0) return false; const char *file = frame->info.file; const char *module = frame->info.module; if (file != 0 && (internal_strstr(file, "tsan_interceptors.cc") || internal_strstr(file, "sanitizer_common_interceptors.inc") || internal_strstr(file, "tsan_interface_"))) return true; if (module != 0 && (internal_strstr(module, "libclang_rt.tsan_"))) return true; return false; } static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) { while (FrameIsInternal(frames) && frames->next) frames = frames->next; return frames; } void PrintReport(const ReportDesc *rep) { Decorator d; Printf("==================\n"); const char *rep_typ_str = ReportTypeString(rep->typ); Printf("%s", d.Warning()); Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str, (int)internal_getpid()); Printf("%s", d.EndWarning()); if (rep->typ == ReportTypeDeadlock) { char thrbuf[kThreadBufSize]; Printf(" Cycle in lock order graph: "); for (uptr i = 0; i < rep->mutexes.Size(); i++) PrintMutexShortWithAddress(rep->mutexes[i], " => "); PrintMutexShort(rep->mutexes[0], "\n\n"); CHECK_GT(rep->mutexes.Size(), 0U); CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 
2 : 1), rep->stacks.Size()); for (uptr i = 0; i < rep->mutexes.Size(); i++) { Printf(" Mutex "); PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()], " acquired here while holding mutex "); PrintMutexShort(rep->mutexes[i], " in "); Printf("%s", d.ThreadDescription()); Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i])); Printf("%s", d.EndThreadDescription()); if (flags()->second_deadlock_stack) { PrintStack(rep->stacks[2*i]); Printf(" Mutex "); PrintMutexShort(rep->mutexes[i], " previously acquired by the same thread here:\n"); PrintStack(rep->stacks[2*i+1]); } else { PrintStack(rep->stacks[i]); if (i == 0) Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 " "to get more informative warning message\n\n"); } } } else { for (uptr i = 0; i < rep->stacks.Size(); i++) { if (i) Printf(" and:\n"); PrintStack(rep->stacks[i]); } } for (uptr i = 0; i < rep->mops.Size(); i++) PrintMop(rep->mops[i], i == 0); if (rep->sleep) PrintSleep(rep->sleep); for (uptr i = 0; i < rep->locs.Size(); i++) PrintLocation(rep->locs[i]); if (rep->typ != ReportTypeDeadlock) { for (uptr i = 0; i < rep->mutexes.Size(); i++) PrintMutex(rep->mutexes[i]); } for (uptr i = 0; i < rep->threads.Size(); i++) PrintThread(rep->threads[i]); if (rep->typ == ReportTypeThreadLeak && rep->count > 1) Printf(" And %d more similar thread leaks.\n\n", rep->count - 1); if (ReportStack *stack = ChooseSummaryStack(rep)) { if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames)) ReportErrorSummary(rep_typ_str, frame->info); } if (common_flags()->print_module_map == 2) PrintModuleMap(); Printf("==================\n"); } #else // #if !SANITIZER_GO const int kMainThreadId = 1; void PrintStack(const ReportStack *ent) { if (ent == 0 || ent->frames == 0) { Printf(" [failed to restore the stack]\n"); return; } SymbolizedStack *frame = ent->frames; for (int i = 0; frame; frame = frame->next, i++) { const AddressInfo &info = frame->info; Printf(" %s()\n %s:%d +0x%zx\n", info.function, StripPathPrefix(info.file, common_flags()->strip_path_prefix), info.line, (void *)info.module_offset); } } static void PrintMop(const ReportMop *mop, bool first) { Printf("\n"); Printf("%s at %p by ", (first ? (mop->write ? "Write" : "Read") : (mop->write ? "Previous write" : "Previous read")), mop->addr); if (mop->tid == kMainThreadId) Printf("main goroutine:\n"); else Printf("goroutine %d:\n", mop->tid); PrintStack(mop->stack); } static void PrintLocation(const ReportLocation *loc) { switch (loc->type) { case ReportLocationHeap: { Printf("\n"); Printf("Heap block of size %zu at %p allocated by ", loc->heap_chunk_size, loc->heap_chunk_start); if (loc->tid == kMainThreadId) Printf("main goroutine:\n"); else Printf("goroutine %d:\n", loc->tid); PrintStack(loc->stack); break; } case ReportLocationGlobal: { Printf("\n"); Printf("Global var %s of size %zu at %p declared at %s:%zu\n", loc->global.name, loc->global.size, loc->global.start, loc->global.file, loc->global.line); break; } default: break; } } static void PrintThread(const ReportThread *rt) { if (rt->id == kMainThreadId) return; Printf("\n"); Printf("Goroutine %d (%s) created at:\n", rt->id, rt->running ? 
"running" : "finished"); PrintStack(rt->stack); } void PrintReport(const ReportDesc *rep) { Printf("==================\n"); if (rep->typ == ReportTypeRace) { Printf("WARNING: DATA RACE"); for (uptr i = 0; i < rep->mops.Size(); i++) PrintMop(rep->mops[i], i == 0); for (uptr i = 0; i < rep->locs.Size(); i++) PrintLocation(rep->locs[i]); for (uptr i = 0; i < rep->threads.Size(); i++) PrintThread(rep->threads[i]); } else if (rep->typ == ReportTypeDeadlock) { Printf("WARNING: DEADLOCK\n"); for (uptr i = 0; i < rep->mutexes.Size(); i++) { Printf("Goroutine %d lock mutex %d while holding mutex %d:\n", 999, rep->mutexes[i]->id, rep->mutexes[(i+1) % rep->mutexes.Size()]->id); PrintStack(rep->stacks[2*i]); Printf("\n"); Printf("Mutex %d was previously locked here:\n", rep->mutexes[(i+1) % rep->mutexes.Size()]->id); PrintStack(rep->stacks[2*i + 1]); Printf("\n"); } } Printf("==================\n"); } #endif } // namespace __tsan Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.cc (revision 317687) @@ -1,1055 +1,1054 @@ //===-- tsan_rtl.cc -------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Main file (entry points) for the TSan run-time. //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include "tsan_defs.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_mman.h" #include "tsan_suppressions.h" #include "tsan_symbolize.h" #include "ubsan/ubsan_init.h" #ifdef __SSE3__ // transitively includes , // and it's prohibited to include std headers into tsan runtime. // So we do this dirty trick. #define _MM_MALLOC_H_INCLUDED #define __MM_MALLOC_H #include typedef __m128i m128; #endif volatile int __tsan_resumed = 0; extern "C" void __tsan_resume() { __tsan_resumed = 1; } namespace __tsan { #if !SANITIZER_GO && !SANITIZER_MAC __attribute__((tls_model("initial-exec"))) THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64); #endif static char ctx_placeholder[sizeof(Context)] ALIGNED(64); Context *ctx; // Can be overriden by a front-end. #ifdef TSAN_EXTERNAL_HOOKS bool OnFinalize(bool failed); void OnInitialize(); #else SANITIZER_WEAK_CXX_DEFAULT_IMPL bool OnFinalize(bool failed) { return failed; } SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnInitialize() {} #endif static char thread_registry_placeholder[sizeof(ThreadRegistry)]; static ThreadContextBase *CreateThreadContext(u32 tid) { // Map thread trace when context is created. 
char name[50]; internal_snprintf(name, sizeof(name), "trace %u", tid); MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name); const uptr hdr = GetThreadTraceHeader(tid); internal_snprintf(name, sizeof(name), "trace header %u", tid); MapThreadTrace(hdr, sizeof(Trace), name); new((void*)hdr) Trace(); // We are going to use only a small part of the trace with the default // value of history_size. However, the constructor writes to the whole trace. // Unmap the unused part. uptr hdr_end = hdr + sizeof(Trace); hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts()); hdr_end = RoundUp(hdr_end, GetPageSizeCached()); if (hdr_end < hdr + sizeof(Trace)) UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end); void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); return new(mem) ThreadContext(tid); } #if !SANITIZER_GO static const u32 kThreadQuarantineSize = 16; #else static const u32 kThreadQuarantineSize = 64; #endif Context::Context() : initialized() , report_mtx(MutexTypeReport, StatMtxReport) , nreported() , nmissed_expected() , thread_registry(new(thread_registry_placeholder) ThreadRegistry( CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)) , racy_mtx(MutexTypeRacy, StatMtxRacy) , racy_stacks(MBlockRacyStacks) , racy_addresses(MBlockRacyAddresses) , fired_suppressions_mtx(MutexTypeFired, StatMtxFired) , fired_suppressions(8) { } // The objects are allocated in TLS, so one may rely on zero-initialization. ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, unsigned reuse_count, uptr stk_addr, uptr stk_size, uptr tls_addr, uptr tls_size) : fast_state(tid, epoch) // Do not touch these, rely on zero initialization, // they may be accessed before the ctor. // , ignore_reads_and_writes() // , ignore_interceptors() , clock(tid, reuse_count) #if !SANITIZER_GO , jmp_bufs(MBlockJmpBuf) #endif , tid(tid) , unique_id(unique_id) , stk_addr(stk_addr) , stk_size(stk_size) , tls_addr(tls_addr) , tls_size(tls_size) #if !SANITIZER_GO , last_sleep_clock(tid) #endif { } #if !SANITIZER_GO static void MemoryProfiler(Context *ctx, fd_t fd, int i) { uptr n_threads; uptr n_running_threads; ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads); InternalScopedBuffer buf(4096); WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads); WriteToFile(fd, buf.data(), internal_strlen(buf.data())); } static void BackgroundThread(void *arg) { // This is a non-initialized non-user thread, nothing to see here. // We don't use ScopedIgnoreInterceptors, because we want ignores to be // enabled even when the thread function exits (e.g. during pthread thread // shutdown code). cur_thread()->ignore_interceptors++; const u64 kMs2Ns = 1000 * 1000; fd_t mprof_fd = kInvalidFd; if (flags()->profile_memory && flags()->profile_memory[0]) { if (internal_strcmp(flags()->profile_memory, "stdout") == 0) { mprof_fd = 1; } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) { mprof_fd = 2; } else { InternalScopedString filename(kMaxPathLength); filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid()); fd_t fd = OpenFile(filename.data(), WrOnly); if (fd == kInvalidFd) { Printf("ThreadSanitizer: failed to open memory profile file '%s'\n", &filename[0]); } else { mprof_fd = fd; } } } u64 last_flush = NanoTime(); uptr last_rss = 0; for (int i = 0; atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0; i++) { SleepForMillis(100); u64 now = NanoTime(); // Flush memory if requested. 
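// Timing note (illustrative, not in the upstream file): the *_ms flags are
// compared against NanoTime() after scaling by kMs2Ns (1e6 ns per ms), so
// e.g. flush_memory_ms=1000 flushes shadow at most once per second, while
// the loop itself only wakes every 100ms (SleepForMillis(100) above).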
if (flags()->flush_memory_ms > 0) { if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) { VPrintf(1, "ThreadSanitizer: periodic memory flush\n"); FlushShadowMemory(); last_flush = NanoTime(); } } // GetRSS can be expensive on huge programs, so don't do it every 100ms. if (flags()->memory_limit_mb > 0) { uptr rss = GetRSS(); uptr limit = uptr(flags()->memory_limit_mb) << 20; VPrintf(1, "ThreadSanitizer: memory flush check" " RSS=%llu LAST=%llu LIMIT=%llu\n", (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20); if (2 * rss > limit + last_rss) { VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n"); FlushShadowMemory(); rss = GetRSS(); VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20); } last_rss = rss; } // Write memory profile if requested. if (mprof_fd != kInvalidFd) MemoryProfiler(ctx, mprof_fd, i); // Flush symbolizer cache if requested. if (flags()->flush_symbolizer_ms > 0) { u64 last = atomic_load(&ctx->last_symbolize_time_ns, memory_order_relaxed); if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) { Lock l(&ctx->report_mtx); SpinMutexLock l2(&CommonSanitizerReportMutex); SymbolizeFlush(); atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed); } } } } static void StartBackgroundThread() { ctx->background_thread = internal_start_thread(&BackgroundThread, 0); } #ifndef __mips__ static void StopBackgroundThread() { atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed); internal_join_thread(ctx->background_thread); ctx->background_thread = 0; } #endif #endif void DontNeedShadowFor(uptr addr, uptr size) { ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size)); } void MapShadow(uptr addr, uptr size) { // Global data is not 64K aligned, but there are no adjacent mappings, // so we can get away with unaligned mapping. // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment const uptr kPageSize = GetPageSizeCached(); uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize); uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize); MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"); // Meta shadow is 2:1, so tread carefully. static bool data_mapped = false; static uptr mapped_meta_end = 0; uptr meta_begin = (uptr)MemToMeta(addr); uptr meta_end = (uptr)MemToMeta(addr + size); meta_begin = RoundDownTo(meta_begin, 64 << 10); meta_end = RoundUpTo(meta_end, 64 << 10); if (!data_mapped) { // First call maps data+bss. data_mapped = true; MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); } else { // Mapping continous heap. // Windows wants 64K alignment. 
meta_begin = RoundDownTo(meta_begin, 64 << 10); meta_end = RoundUpTo(meta_end, 64 << 10); if (meta_end <= mapped_meta_end) return; if (meta_begin < mapped_meta_end) meta_begin = mapped_meta_end; MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); mapped_meta_end = meta_end; } VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n", addr, addr+size, meta_begin, meta_end); } void MapThreadTrace(uptr addr, uptr size, const char *name) { DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size); CHECK_GE(addr, TraceMemBeg()); CHECK_LE(addr + size, TraceMemEnd()); CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name); if (addr1 != addr) { Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n", addr, size, addr1); Die(); } } static void CheckShadowMapping() { uptr beg, end; for (int i = 0; GetUserRegion(i, &beg, &end); i++) { // Skip cases for empty regions (heap definition for architectures that // do not use 64-bit allocator). if (beg == end) continue; VPrintf(3, "checking shadow region %p-%p\n", beg, end); uptr prev = 0; for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) { for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) { const uptr p = RoundDown(p0 + x, kShadowCell); if (p < beg || p >= end) continue; const uptr s = MemToShadow(p); const uptr m = (uptr)MemToMeta(p); VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m); CHECK(IsAppMem(p)); CHECK(IsShadowMem(s)); CHECK_EQ(p, ShadowToMem(s)); CHECK(IsMetaMem(m)); if (prev) { // Ensure that shadow and meta mappings are linear within a single // user range. Lots of code that processes memory ranges assumes it. const uptr prev_s = MemToShadow(prev); const uptr prev_m = (uptr)MemToMeta(prev); CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier); CHECK_EQ((m - prev_m) / kMetaShadowSize, (p - prev) / kMetaShadowCell); } prev = p; } } } } void Initialize(ThreadState *thr) { // Thread safe because done before all threads exist. static bool is_initialized = false; if (is_initialized) return; is_initialized = true; // We are not ready to handle interceptors yet. ScopedIgnoreInterceptors ignore; SanitizerToolName = "ThreadSanitizer"; // Install tool-specific callbacks in sanitizer_common. SetCheckFailedCallback(TsanCheckFailed); ctx = new(ctx_placeholder) Context; const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS"); CacheBinaryName(); InitializeFlags(&ctx->flags, options); AvoidCVE_2016_2143(); InitializePlatformEarly(); #if !SANITIZER_GO // Re-exec ourselves if we need to set additional env or command line args. MaybeReexec(); InitializeAllocator(); ReplaceSystemMalloc(); #endif if (common_flags()->detect_deadlocks) ctx->dd = DDetector::Create(flags()); Processor *proc = ProcCreate(); ProcWire(proc, thr); InitializeInterceptors(); CheckShadowMapping(); InitializePlatform(); InitializeMutex(); InitializeDynamicAnnotations(); #if !SANITIZER_GO InitializeShadowMemory(); InitializeAllocatorLate(); #endif // Setup correct file descriptor for error reports. __sanitizer_set_report_path(common_flags()->log_path); InitializeSuppressions(); #if !SANITIZER_GO InitializeLibIgnore(); Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); // On MIPS, TSan initialization is run before // __pthread_initialize_minimal_internal() is finished, so we can not spawn // new threads. 
#ifndef __mips__ StartBackgroundThread(); SetSandboxingCallback(StopBackgroundThread); #endif #endif VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n", (int)internal_getpid()); // Initialize thread 0. int tid = ThreadCreate(thr, 0, 0, true); CHECK_EQ(tid, 0); ThreadStart(thr, tid, GetTid(), /*workerthread*/ false); #if TSAN_CONTAINS_UBSAN __ubsan::InitAsPlugin(); #endif ctx->initialized = true; #if !SANITIZER_GO Symbolizer::LateInitialize(); #endif if (flags()->stop_on_start) { Printf("ThreadSanitizer is suspended at startup (pid %d)." " Call __tsan_resume().\n", (int)internal_getpid()); while (__tsan_resumed == 0) {} } OnInitialize(); } int Finalize(ThreadState *thr) { bool failed = false; if (common_flags()->print_module_map == 1) PrintModuleMap(); if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1) SleepForMillis(flags()->atexit_sleep_ms); // Wait for pending reports. ctx->report_mtx.Lock(); CommonSanitizerReportMutex.Lock(); CommonSanitizerReportMutex.Unlock(); ctx->report_mtx.Unlock(); #if !SANITIZER_GO if (Verbosity()) AllocatorPrintStats(); #endif ThreadFinalize(thr); if (ctx->nreported) { failed = true; #if !SANITIZER_GO Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported); #else Printf("Found %d data race(s)\n", ctx->nreported); #endif } if (ctx->nmissed_expected) { failed = true; Printf("ThreadSanitizer: missed %d expected races\n", ctx->nmissed_expected); } if (common_flags()->print_suppressions) PrintMatchedSuppressions(); #if !SANITIZER_GO if (flags()->print_benign) PrintMatchedBenignRaces(); #endif failed = OnFinalize(failed); #if TSAN_COLLECT_STATS StatAggregate(ctx->stat, thr->stat); StatOutput(ctx->stat); #endif return failed ? common_flags()->exitcode : 0; } #if !SANITIZER_GO void ForkBefore(ThreadState *thr, uptr pc) { ctx->thread_registry->Lock(); ctx->report_mtx.Lock(); } void ForkParentAfter(ThreadState *thr, uptr pc) { ctx->report_mtx.Unlock(); ctx->thread_registry->Unlock(); } void ForkChildAfter(ThreadState *thr, uptr pc) { ctx->report_mtx.Unlock(); ctx->thread_registry->Unlock(); uptr nthread = 0; ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */); VPrintf(1, "ThreadSanitizer: forked new process with pid %d," " parent had %d threads\n", (int)internal_getpid(), (int)nthread); if (nthread == 1) { StartBackgroundThread(); } else { // We've just forked a multi-threaded process. We cannot reasonably function // after that (some mutexes may be locked before fork). So just enable // ignores for everything in the hope that we will exec soon. ctx->after_multithreaded_fork = true; thr->ignore_interceptors++; ThreadIgnoreBegin(thr, pc); ThreadIgnoreSyncBegin(thr, pc); } } #endif #if SANITIZER_GO NOINLINE void GrowShadowStack(ThreadState *thr) { const int sz = thr->shadow_stack_end - thr->shadow_stack; const int newsz = 2 * sz; uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack, newsz * sizeof(uptr)); internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr)); internal_free(thr->shadow_stack); thr->shadow_stack = newstack; thr->shadow_stack_pos = newstack + sz; thr->shadow_stack_end = newstack + newsz; } #endif u32 CurrentStackId(ThreadState *thr, uptr pc) { if (!thr->is_inited) // May happen during bootstrap. 
return 0; if (pc != 0) { #if !SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #else if (thr->shadow_stack_pos == thr->shadow_stack_end) GrowShadowStack(thr); #endif thr->shadow_stack_pos[0] = pc; thr->shadow_stack_pos++; } u32 id = StackDepotPut( StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack)); if (pc != 0) thr->shadow_stack_pos--; return id; } void TraceSwitch(ThreadState *thr) { thr->nomalloc++; Trace *thr_trace = ThreadTrace(thr->tid); Lock l(&thr_trace->mtx); unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); TraceHeader *hdr = &thr_trace->headers[trace]; hdr->epoch0 = thr->fast_state.epoch(); ObtainCurrentStack(thr, 0, &hdr->stack0); hdr->mset0 = thr->mset; thr->nomalloc--; } Trace *ThreadTrace(int tid) { return (Trace*)GetThreadTraceHeader(tid); } uptr TraceTopPC(ThreadState *thr) { Event *events = (Event*)GetThreadTrace(thr->tid); uptr pc = events[thr->fast_state.GetTracePos()]; return pc; } uptr TraceSize() { return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1)); } uptr TraceParts() { return TraceSize() / kTracePartSize; } #if !SANITIZER_GO extern "C" void __tsan_trace_switch() { TraceSwitch(cur_thread()); } extern "C" void __tsan_report_race() { ReportRace(cur_thread()); } #endif ALWAYS_INLINE Shadow LoadShadow(u64 *p) { u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed); return Shadow(raw); } ALWAYS_INLINE void StoreShadow(u64 *sp, u64 s) { atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed); } ALWAYS_INLINE void StoreIfNotYetStored(u64 *sp, u64 *s) { StoreShadow(sp, *s); *s = 0; } ALWAYS_INLINE void HandleRace(ThreadState *thr, u64 *shadow_mem, Shadow cur, Shadow old) { thr->racy_state[0] = cur.raw(); thr->racy_state[1] = old.raw(); thr->racy_shadow_addr = shadow_mem; #if !SANITIZER_GO HACKY_CALL(__tsan_report_race); #else ReportRace(thr); #endif } static inline bool HappensBefore(Shadow old, ThreadState *thr) { return thr->clock.get(old.TidWithIgnore()) >= old.epoch(); } ALWAYS_INLINE void MemoryAccessImpl1(ThreadState *thr, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur) { StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); // This potentially can live in an MMX/SSE scratch register. // The required intrinsics are: // __m128i _mm_move_epi64(__m128i*); // _mm_storel_epi64(u64*, __m128i); u64 store_word = cur.raw(); // scan all the shadow values and dispatch to 4 categories: // same, replace, candidate and race (see comments below). // we consider only 3 cases regarding access sizes: // equal, intersect and not intersect. initially I considered // larger and smaller as well, it allowed to replace some // 'candidates' with 'same' or 'replace', but I think // it's just not worth it (performance- and complexity-wise). Shadow old(0); // It release mode we manually unroll the loop, // because empirically gcc generates better code this way. // However, we can't afford unrolling in debug mode, because the function // consumes almost 4K of stack. Gtest gives only 4K of stack to death test // threads, which is not enough for the unrolled loop. 
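// Illustrative example (editor sketch) of the race decision made inside the
// included shadow-word update: the HappensBefore(old, thr) test above
// reduces to
//   thr->clock.get(old.TidWithIgnore()) >= old.epoch()
// e.g. if the old shadow value was written by tid 3 at epoch 120 and the
// current thread has synchronized with tid 3 up to epoch 150, the pair is
// ordered; if it has only seen epoch 100, the pair falls into the 'race'
// category and is dispatched to the RACE: label below.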
#if SANITIZER_DEBUG for (int idx = 0; idx < 4; idx++) { #include "tsan_update_shadow_word_inl.h" } #else int idx = 0; #include "tsan_update_shadow_word_inl.h" idx = 1; #include "tsan_update_shadow_word_inl.h" idx = 2; #include "tsan_update_shadow_word_inl.h" idx = 3; #include "tsan_update_shadow_word_inl.h" #endif // we did not find any races and had already stored // the current access info, so we are done if (LIKELY(store_word == 0)) return; // choose a random candidate slot and replace it StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word); StatInc(thr, StatShadowReplace); return; RACE: HandleRace(thr, shadow_mem, cur, old); return; } void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, int size, bool kAccessIsWrite, bool kIsAtomic) { while (size) { int size1 = 1; int kAccessSizeLog = kSizeLog1; if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) { size1 = 8; kAccessSizeLog = kSizeLog8; } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) { size1 = 4; kAccessSizeLog = kSizeLog4; } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) { size1 = 2; kAccessSizeLog = kSizeLog2; } MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic); addr += size1; size -= size1; } } ALWAYS_INLINE bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) { Shadow cur(a); for (uptr i = 0; i < kShadowCnt; i++) { Shadow old(LoadShadow(&s[i])); if (Shadow::Addr0AndSizeAreEqual(cur, old) && old.TidWithIgnore() == cur.TidWithIgnore() && old.epoch() > sync_epoch && old.IsAtomic() == cur.IsAtomic() && old.IsRead() <= cur.IsRead()) return true; } return false; } #if defined(__SSE3__) #define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \ _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64)) ALWAYS_INLINE bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) { // This is an optimized version of ContainsSameAccessSlow. // load current access into access[0:63] const m128 access = _mm_cvtsi64_si128(a); // duplicate high part of access in addr0: // addr0[0:31] = access[32:63] // addr0[32:63] = access[32:63] // addr0[64:95] = access[32:63] // addr0[96:127] = access[32:63] const m128 addr0 = SHUF(access, access, 1, 1, 1, 1); // load 4 shadow slots const m128 shadow0 = _mm_load_si128((__m128i*)s); const m128 shadow1 = _mm_load_si128((__m128i*)s + 1); // load high parts of 4 shadow slots into addr_vect: // addr_vect[0:31] = shadow0[32:63] // addr_vect[32:63] = shadow0[96:127] // addr_vect[64:95] = shadow1[32:63] // addr_vect[96:127] = shadow1[96:127] m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3); if (!is_write) { // set IsRead bit in addr_vect const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15); const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0); addr_vect = _mm_or_si128(addr_vect, rw_mask); } // addr0 == addr_vect? const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect); // epoch1[0:63] = sync_epoch const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch); // epoch[0:31] = sync_epoch[0:31] // epoch[32:63] = sync_epoch[0:31] // epoch[64:95] = sync_epoch[0:31] // epoch[96:127] = sync_epoch[0:31] const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0); // load low parts of shadow cell epochs into epoch_vect: // epoch_vect[0:31] = shadow0[0:31] // epoch_vect[32:63] = shadow0[64:95] // epoch_vect[64:95] = shadow1[0:31] // epoch_vect[96:127] = shadow1[64:95] const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2); // epoch_vect >= sync_epoch? 
const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch); // addr_res & epoch_res const m128 res = _mm_and_si128(addr_res, epoch_res); // mask[0] = res[7] // mask[1] = res[15] // ... // mask[15] = res[127] const int mask = _mm_movemask_epi8(res); return mask != 0; } #endif ALWAYS_INLINE bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) { #if defined(__SSE3__) bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write); // NOTE: this check can fail if the shadow is concurrently mutated // by other threads. But it still can be useful if you modify // ContainsSameAccessFast and want to ensure that it's not completely broken. // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write)); return res; #else return ContainsSameAccessSlow(s, a, sync_epoch, is_write); #endif } ALWAYS_INLINE USED void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) { u64 *shadow_mem = (u64*)MemToShadow(addr); DPrintf2("#%d: MemoryAccess: @%p %p size=%d" " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n", (int)thr->fast_state.tid(), (void*)pc, (void*)addr, (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem, (uptr)shadow_mem[0], (uptr)shadow_mem[1], (uptr)shadow_mem[2], (uptr)shadow_mem[3]); #if SANITIZER_DEBUG if (!IsAppMem(addr)) { Printf("Access to non app mem %zx\n", addr); DCHECK(IsAppMem(addr)); } if (!IsShadowMem((uptr)shadow_mem)) { Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); DCHECK(IsShadowMem((uptr)shadow_mem)); } #endif if (!SANITIZER_GO && *shadow_mem == kShadowRodata) { // Access to .rodata section, no races here. // Measurements show that it can be 10-20% of all memory accesses. StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); StatInc(thr, StatMopRodata); return; } FastState fast_state = thr->fast_state; if (fast_state.GetIgnoreBit()) { StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); StatInc(thr, StatMopIgnored); return; } Shadow cur(fast_state); cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog); cur.SetWrite(kAccessIsWrite); cur.SetAtomic(kIsAtomic); if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch, kAccessIsWrite))) { StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); StatInc(thr, StatMopSame); return; } if (kCollectHistory) { fast_state.IncrementEpoch(); thr->fast_state = fast_state; TraceAddEvent(thr, fast_state, EventTypeMop, pc); cur.IncrementEpoch(); } MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic, shadow_mem, cur); } // Called by MemoryAccessRange in tsan_rtl_thread.cc ALWAYS_INLINE USED void MemoryAccessImpl(ThreadState *thr, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur) { if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch, kAccessIsWrite))) { StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); StatInc(thr, StatMopSame); return; } MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic, shadow_mem, cur); } static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size, u64 val) { (void)thr; (void)pc; if (size == 0) return; // FIXME: fix me. 
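// Worked example for the alignment that follows (illustrative; kShadowCell
// is the 8-byte application granule implied by the 'addr & 7' / 'addr % 8'
// checks in this file): for addr=0x1004, size=0x20 the 4 bytes up to the
// next cell boundary are skipped, leaving addr=0x1008, size=0x1c, which the
// later round-up turns into 0x20 so a whole number of cells is overwritten.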
uptr offset = addr % kShadowCell; if (offset) { offset = kShadowCell - offset; if (size <= offset) return; addr += offset; size -= offset; } DCHECK_EQ(addr % 8, 0); // If a user passes some insane arguments (memset(0)), // let it just crash as usual. if (!IsAppMem(addr) || !IsAppMem(addr + size - 1)) return; // Don't want to touch lots of shadow memory. // If a program maps 10MB stack, there is no need reset the whole range. size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1); - // UnmapOrDie/MmapFixedNoReserve does not work on Windows, - // so we do it only for C/C++. - if (SANITIZER_GO || size < common_flags()->clear_shadow_mmap_threshold) { + // UnmapOrDie/MmapFixedNoReserve does not work on Windows. + if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) { u64 *p = (u64*)MemToShadow(addr); CHECK(IsShadowMem((uptr)p)); CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1))); // FIXME: may overwrite a part outside the region for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) { p[i++] = val; for (uptr j = 1; j < kShadowCnt; j++) p[i++] = 0; } } else { // The region is big, reset only beginning and end. const uptr kPageSize = GetPageSizeCached(); u64 *begin = (u64*)MemToShadow(addr); u64 *end = begin + size / kShadowCell * kShadowCnt; u64 *p = begin; // Set at least first kPageSize/2 to page boundary. while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) { *p++ = val; for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0; } // Reset middle part. u64 *p1 = p; p = RoundDown(end, kPageSize); UnmapOrDie((void*)p1, (uptr)p - (uptr)p1); MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1); // Set the ending. while (p < end) { *p++ = val; for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0; } } } void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) { MemoryRangeSet(thr, pc, addr, size, 0); } void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) { // Processing more than 1k (4k of shadow) is expensive, // can cause excessive memory consumption (user does not necessary touch // the whole range) and most likely unnecessary. if (size > 1024) size = 1024; CHECK_EQ(thr->is_freeing, false); thr->is_freeing = true; MemoryAccessRange(thr, pc, addr, size, true); thr->is_freeing = false; if (kCollectHistory) { thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); } Shadow s(thr->fast_state); s.ClearIgnoreBit(); s.MarkAsFreed(); s.SetWrite(true); s.SetAddr0AndSizeLog(0, 3); MemoryRangeSet(thr, pc, addr, size, s.raw()); } void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) { if (kCollectHistory) { thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); } Shadow s(thr->fast_state); s.ClearIgnoreBit(); s.SetWrite(true); s.SetAddr0AndSizeLog(0, 3); MemoryRangeSet(thr, pc, addr, size, s.raw()); } ALWAYS_INLINE USED void FuncEntry(ThreadState *thr, uptr pc) { StatInc(thr, StatFuncEnter); DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc); if (kCollectHistory) { thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc); } // Shadow stack maintenance can be replaced with // stack unwinding during trace switch (which presumably must be faster). 
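// Sketch of the shadow-stack discipline referenced above (illustrative):
// FuncEntry pushes pc and FuncExit pops it, so shadow_stack_pos always
// points one past the innermost frame. In the C/C++ runtime the stack is a
// fixed-size region embedded in the Trace mapping (hence the DCHECK against
// shadow_stack_end below); in the Go runtime it is malloc-allocated and
// GrowShadowStack() above doubles it on demand (sz -> 2*sz, with an
// internal_memcpy of the old contents).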
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack); #if !SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #else if (thr->shadow_stack_pos == thr->shadow_stack_end) GrowShadowStack(thr); #endif thr->shadow_stack_pos[0] = pc; thr->shadow_stack_pos++; } ALWAYS_INLINE USED void FuncExit(ThreadState *thr) { StatInc(thr, StatFuncExit); DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid()); if (kCollectHistory) { thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0); } DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack); #if !SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #endif thr->shadow_stack_pos--; } void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) { DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid); thr->ignore_reads_and_writes++; CHECK_GT(thr->ignore_reads_and_writes, 0); thr->fast_state.SetIgnoreBit(); #if !SANITIZER_GO if (save_stack && !ctx->after_multithreaded_fork) thr->mop_ignore_set.Add(CurrentStackId(thr, pc)); #endif } void ThreadIgnoreEnd(ThreadState *thr, uptr pc) { DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid); CHECK_GT(thr->ignore_reads_and_writes, 0); thr->ignore_reads_and_writes--; if (thr->ignore_reads_and_writes == 0) { thr->fast_state.ClearIgnoreBit(); #if !SANITIZER_GO thr->mop_ignore_set.Reset(); #endif } } #if !SANITIZER_GO extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __tsan_testonly_shadow_stack_current_size() { ThreadState *thr = cur_thread(); return thr->shadow_stack_pos - thr->shadow_stack; } #endif void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) { DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid); thr->ignore_sync++; CHECK_GT(thr->ignore_sync, 0); #if !SANITIZER_GO if (save_stack && !ctx->after_multithreaded_fork) thr->sync_ignore_set.Add(CurrentStackId(thr, pc)); #endif } void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) { DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid); CHECK_GT(thr->ignore_sync, 0); thr->ignore_sync--; #if !SANITIZER_GO if (thr->ignore_sync == 0) thr->sync_ignore_set.Reset(); #endif } bool MD5Hash::operator==(const MD5Hash &other) const { return hash[0] == other.hash[0] && hash[1] == other.hash[1]; } #if SANITIZER_DEBUG void build_consistency_debug() {} #else void build_consistency_release() {} #endif #if TSAN_COLLECT_STATS void build_consistency_stats() {} #else void build_consistency_nostats() {} #endif } // namespace __tsan #if !SANITIZER_GO // Must be included in this file to make sure everything is inlined. #include "tsan_interface_inl.h" #endif Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h (revision 317687) @@ -1,828 +1,852 @@ //===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // // Main internal TSan header file. // // Ground rules: // - C++ run-time should not be used (static CTORs, RTTI, exceptions, static // function-scope locals) // - All functions/classes/etc reside in namespace __tsan, except for those // declared in tsan_interface.h. 
// - Platform-specific files should be used instead of ifdefs (*). // - No system headers included in header files (*). // - Platform specific headres included only into platform-specific files (*). // // (*) Except when inlining is critical for performance. //===----------------------------------------------------------------------===// #ifndef TSAN_RTL_H #define TSAN_RTL_H #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_asm.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_deadlock_detector_interface.h" #include "sanitizer_common/sanitizer_libignore.h" #include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_thread_registry.h" #include "tsan_clock.h" #include "tsan_defs.h" #include "tsan_flags.h" #include "tsan_sync.h" #include "tsan_trace.h" #include "tsan_vector.h" #include "tsan_report.h" #include "tsan_platform.h" #include "tsan_mutexset.h" #include "tsan_ignoreset.h" #include "tsan_stack_trace.h" #if SANITIZER_WORDSIZE != 64 # error "ThreadSanitizer is supported only on 64-bit platforms" #endif namespace __tsan { #if !SANITIZER_GO struct MapUnmapCallback; #if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) static const uptr kAllocatorSpace = 0; static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE; static const uptr kAllocatorRegionSizeLog = 20; static const uptr kAllocatorNumRegions = kAllocatorSize >> kAllocatorRegionSizeLog; typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12, MapUnmapCallback> ByteMap; typedef SizeClassAllocator32 PrimaryAllocator; #else struct AP64 { // Allocator64 parameters. Deliberately using a short name. static const uptr kSpaceBeg = Mapping::kHeapMemBeg; static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg; static const uptr kMetadataSize = 0; typedef DefaultSizeClassMap SizeClassMap; typedef __tsan::MapUnmapCallback MapUnmapCallback; static const uptr kFlags = 0; }; typedef SizeClassAllocator64 PrimaryAllocator; #endif typedef SizeClassAllocatorLocalCache AllocatorCache; typedef LargeMmapAllocator SecondaryAllocator; typedef CombinedAllocator Allocator; Allocator *allocator(); #endif void TsanCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); const u64 kShadowRodata = (u64)-1; // .rodata shadow marker // FastState (from most significant bit): // ignore : 1 // tid : kTidBits // unused : - // history_size : 3 // epoch : kClkBits class FastState { public: FastState(u64 tid, u64 epoch) { x_ = tid << kTidShift; x_ |= epoch; DCHECK_EQ(tid, this->tid()); DCHECK_EQ(epoch, this->epoch()); DCHECK_EQ(GetIgnoreBit(), false); } explicit FastState(u64 x) : x_(x) { } u64 raw() const { return x_; } u64 tid() const { u64 res = (x_ & ~kIgnoreBit) >> kTidShift; return res; } u64 TidWithIgnore() const { u64 res = x_ >> kTidShift; return res; } u64 epoch() const { u64 res = x_ & ((1ull << kClkBits) - 1); return res; } void IncrementEpoch() { u64 old_epoch = epoch(); x_ += 1; DCHECK_EQ(old_epoch + 1, epoch()); (void)old_epoch; } void SetIgnoreBit() { x_ |= kIgnoreBit; } void ClearIgnoreBit() { x_ &= ~kIgnoreBit; } bool GetIgnoreBit() const { return (s64)x_ < 0; } void SetHistorySize(int hs) { CHECK_GE(hs, 0); CHECK_LE(hs, 7); x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift); } ALWAYS_INLINE int GetHistorySize() const { return (int)((x_ >> kHistoryShift) & kHistoryMask); } void ClearHistorySize() { 
SetHistorySize(0); } ALWAYS_INLINE u64 GetTracePos() const { const int hs = GetHistorySize(); // When hs == 0, the trace consists of 2 parts. const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1; return epoch() & mask; } private: friend class Shadow; static const int kTidShift = 64 - kTidBits - 1; static const u64 kIgnoreBit = 1ull << 63; static const u64 kFreedBit = 1ull << 63; static const u64 kHistoryShift = kClkBits; static const u64 kHistoryMask = 7; u64 x_; }; // Shadow (from most significant bit): // freed : 1 // tid : kTidBits // is_atomic : 1 // is_read : 1 // size_log : 2 // addr0 : 3 // epoch : kClkBits class Shadow : public FastState { public: explicit Shadow(u64 x) : FastState(x) { } explicit Shadow(const FastState &s) : FastState(s.x_) { ClearHistorySize(); } void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) { DCHECK_EQ((x_ >> kClkBits) & 31, 0); DCHECK_LE(addr0, 7); DCHECK_LE(kAccessSizeLog, 3); x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits; DCHECK_EQ(kAccessSizeLog, size_log()); DCHECK_EQ(addr0, this->addr0()); } void SetWrite(unsigned kAccessIsWrite) { DCHECK_EQ(x_ & kReadBit, 0); if (!kAccessIsWrite) x_ |= kReadBit; DCHECK_EQ(kAccessIsWrite, IsWrite()); } void SetAtomic(bool kIsAtomic) { DCHECK(!IsAtomic()); if (kIsAtomic) x_ |= kAtomicBit; DCHECK_EQ(IsAtomic(), kIsAtomic); } bool IsAtomic() const { return x_ & kAtomicBit; } bool IsZero() const { return x_ == 0; } static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) { u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift; DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore()); return shifted_xor == 0; } static ALWAYS_INLINE bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) { u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31; return masked_xor == 0; } static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2, unsigned kS2AccessSize) { bool res = false; u64 diff = s1.addr0() - s2.addr0(); if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT // if (s1.addr0() + size1) > s2.addr0()) return true; if (s1.size() > -diff) res = true; } else { // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true; if (kS2AccessSize > diff) res = true; } DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2)); DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1)); return res; } u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; } u64 ALWAYS_INLINE size() const { return 1ull << size_log(); } bool ALWAYS_INLINE IsWrite() const { return !IsRead(); } bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; } // The idea behind the freed bit is as follows. // When the memory is freed (or otherwise unaccessible) we write to the shadow // values with tid/epoch related to the free and the freed bit set. // During memory accesses processing the freed bit is considered // as msb of tid. So any access races with shadow with freed bit set // (it is as if write from a thread with which we never synchronized before). // This allows us to detect accesses to freed memory w/o additional // overheads in memory access processing and at the same time restore // tid/epoch of free. 
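// Illustration of the freed-bit trick described above (editor sketch):
// MemoryRangeFreed() in tsan_rtl.cc stamps a freed range with a shadow value
// built as
//   Shadow s(thr->fast_state); s.ClearIgnoreBit(); s.MarkAsFreed();
//   s.SetWrite(true); s.SetAddr0AndSizeLog(0, 3);  // whole 8-byte cell
// Because kFreedBit is the top bit and is treated as the msb of the tid, a
// later access compares against a "thread" it has never synchronized with,
// so it is reported as a race while the embedded tid/epoch still identify
// where the free happened.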
void MarkAsFreed() { x_ |= kFreedBit; } bool IsFreed() const { return x_ & kFreedBit; } bool GetFreedAndReset() { bool res = x_ & kFreedBit; x_ &= ~kFreedBit; return res; } bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const { bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift) | (u64(kIsAtomic) << kAtomicShift)); DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic)); return v; } bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const { bool v = ((x_ >> kReadShift) & 3) <= u64((kIsWrite ^ 1) | (kIsAtomic << 1)); DCHECK_EQ(v, (IsAtomic() < kIsAtomic) || (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite)); return v; } bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const { bool v = ((x_ >> kReadShift) & 3) >= u64((kIsWrite ^ 1) | (kIsAtomic << 1)); DCHECK_EQ(v, (IsAtomic() > kIsAtomic) || (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite)); return v; } private: static const u64 kReadShift = 5 + kClkBits; static const u64 kReadBit = 1ull << kReadShift; static const u64 kAtomicShift = 6 + kClkBits; static const u64 kAtomicBit = 1ull << kAtomicShift; u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; } static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) { if (s1.addr0() == s2.addr0()) return true; if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0()) return true; if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0()) return true; return false; } }; struct ThreadSignalContext; struct JmpBuf { uptr sp; uptr mangled_sp; int int_signal_send; bool in_blocking_func; uptr in_signal_handler; uptr *shadow_stack_pos; }; // A Processor represents a physical thread, or a P for Go. // It is used to store internal resources like allocate cache, and does not // participate in race-detection logic (invisible to end user). // In C++ it is tied to an OS thread just like ThreadState, however ideally // it should be tied to a CPU (this way we will have fewer allocator caches). // In Go it is tied to a P, so there are significantly fewer Processor's than // ThreadState's (which are tied to Gs). // A ThreadState must be wired with a Processor to handle events. struct Processor { ThreadState *thr; // currently wired thread, or nullptr #if !SANITIZER_GO AllocatorCache alloc_cache; InternalAllocatorCache internal_alloc_cache; #endif DenseSlabAllocCache block_cache; DenseSlabAllocCache sync_cache; DenseSlabAllocCache clock_cache; DDPhysicalThread *dd_pt; }; #if !SANITIZER_GO // ScopedGlobalProcessor temporary setups a global processor for the current // thread, if it does not have one. Intended for interceptors that can run // at the very thread end, when we already destroyed the thread processor. struct ScopedGlobalProcessor { ScopedGlobalProcessor(); ~ScopedGlobalProcessor(); }; #endif // This struct is stored in TLS. struct ThreadState { FastState fast_state; // Synch epoch represents the threads's epoch before the last synchronization // action. It allows to reduce number of shadow state updates. // For example, fast_synch_epoch=100, last write to addr X was at epoch=150, // if we are processing write to X from the same thread at epoch=200, // we do nothing, because both writes happen in the same 'synch epoch'. // That is, if another memory access does not race with the former write, // it does not race with the latter as well. // QUESTION: can we can squeeze this into ThreadState::Fast? // E.g. 
ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are // taken by epoch between synchs. // This way we can save one load from tls. u64 fast_synch_epoch; // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read. // We do not distinguish beteween ignoring reads and writes // for better performance. int ignore_reads_and_writes; int ignore_sync; int suppress_reports; // Go does not support ignores. #if !SANITIZER_GO IgnoreSet mop_ignore_set; IgnoreSet sync_ignore_set; #endif // C/C++ uses fixed size shadow stack embed into Trace. // Go uses malloc-allocated shadow stack with dynamic size. uptr *shadow_stack; uptr *shadow_stack_end; uptr *shadow_stack_pos; u64 *racy_shadow_addr; u64 racy_state[2]; MutexSet mset; ThreadClock clock; #if !SANITIZER_GO Vector jmp_bufs; int ignore_interceptors; #endif #if TSAN_COLLECT_STATS u64 stat[StatCnt]; #endif const int tid; const int unique_id; bool in_symbolizer; bool in_ignored_lib; bool is_inited; bool is_dead; bool is_freeing; bool is_vptr_access; - uptr external_tag; const uptr stk_addr; const uptr stk_size; const uptr tls_addr; const uptr tls_size; ThreadContext *tctx; #if SANITIZER_DEBUG && !SANITIZER_GO InternalDeadlockDetector internal_deadlock_detector; #endif DDLogicalThread *dd_lt; // Current wired Processor, or nullptr. Required to handle any events. Processor *proc1; #if !SANITIZER_GO Processor *proc() { return proc1; } #else Processor *proc(); #endif atomic_uintptr_t in_signal_handler; ThreadSignalContext *signal_ctx; #if !SANITIZER_GO u32 last_sleep_stack_id; ThreadClock last_sleep_clock; #endif // Set in regions of runtime that must be signal-safe and fork-safe. // If set, malloc must not be called. int nomalloc; const ReportDesc *current_report; explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, unsigned reuse_count, uptr stk_addr, uptr stk_size, uptr tls_addr, uptr tls_size); }; #if !SANITIZER_GO #if SANITIZER_MAC || SANITIZER_ANDROID ThreadState *cur_thread(); void cur_thread_finalize(); #else __attribute__((tls_model("initial-exec"))) extern THREADLOCAL char cur_thread_placeholder[]; INLINE ThreadState *cur_thread() { return reinterpret_cast(&cur_thread_placeholder); } INLINE void cur_thread_finalize() { } #endif // SANITIZER_MAC || SANITIZER_ANDROID #endif // SANITIZER_GO class ThreadContext : public ThreadContextBase { public: explicit ThreadContext(int tid); ~ThreadContext(); ThreadState *thr; u32 creation_stack_id; SyncClock sync; // Epoch at which the thread had started. // If we see an event from the thread stamped by an older epoch, // the event is from a dead thread that shared tid with this thread. u64 epoch0; u64 epoch1; // Override superclass callbacks. 
void OnDead() override; void OnJoined(void *arg) override; void OnFinished() override; void OnStarted(void *arg) override; void OnCreated(void *arg) override; void OnReset() override; void OnDetached(void *arg) override; }; struct RacyStacks { MD5Hash hash[2]; bool operator==(const RacyStacks &other) const { if (hash[0] == other.hash[0] && hash[1] == other.hash[1]) return true; if (hash[0] == other.hash[1] && hash[1] == other.hash[0]) return true; return false; } }; struct RacyAddress { uptr addr_min; uptr addr_max; }; struct FiredSuppression { ReportType type; uptr pc_or_addr; Suppression *supp; }; struct Context { Context(); bool initialized; bool after_multithreaded_fork; MetaMap metamap; Mutex report_mtx; int nreported; int nmissed_expected; atomic_uint64_t last_symbolize_time_ns; void *background_thread; atomic_uint32_t stop_background_thread; ThreadRegistry *thread_registry; Mutex racy_mtx; Vector racy_stacks; Vector racy_addresses; // Number of fired suppressions may be large enough. Mutex fired_suppressions_mtx; InternalMmapVector fired_suppressions; DDetector *dd; ClockAlloc clock_alloc; Flags flags; u64 stat[StatCnt]; u64 int_alloc_cnt[MBlockTypeCount]; u64 int_alloc_siz[MBlockTypeCount]; }; extern Context *ctx; // The one and the only global runtime context. ALWAYS_INLINE Flags *flags() { return &ctx->flags; } struct ScopedIgnoreInterceptors { ScopedIgnoreInterceptors() { #if !SANITIZER_GO cur_thread()->ignore_interceptors++; #endif } ~ScopedIgnoreInterceptors() { #if !SANITIZER_GO cur_thread()->ignore_interceptors--; #endif } }; +enum ExternalTag : uptr { + kExternalTagNone = 0, + kExternalTagFirstUserAvailable = 1, + kExternalTagMax = 1024, + // Don't set kExternalTagMax over 65,536, since MBlock only stores tags + // as 16-bit values, see tsan_defs.h. +}; +const char *GetObjectTypeFromTag(uptr tag); +uptr TagFromShadowStackFrame(uptr pc); + class ScopedReport { public: explicit ScopedReport(ReportType typ); ~ScopedReport(); void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack, const MutexSet *mset); void AddStack(StackTrace stack, bool suppressable = false); void AddThread(const ThreadContext *tctx, bool suppressable = false); void AddThread(int unique_tid, bool suppressable = false); void AddUniqueTid(int unique_tid); void AddMutex(const SyncVar *s); u64 AddMutex(u64 id); void AddLocation(uptr addr, uptr size); void AddSleep(u32 stack_id); void SetCount(int count); const ReportDesc *GetReport() const; private: ReportDesc *rep_; // Symbolizer makes lots of intercepted calls. If we try to process them, // at best it will cause deadlocks on internal mutexes. ScopedIgnoreInterceptors ignore_interceptors_; void AddDeadMutex(u64 id); ScopedReport(const ScopedReport&); void operator = (const ScopedReport&); }; ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack); void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, - MutexSet *mset); + MutexSet *mset, uptr *tag = nullptr); +// The stack could look like: +// |
| | tag | +// This will extract the tag and keep: +// |
| | template -void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) { +void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) { + if (stack->size < 2) return; + uptr possible_tag_pc = stack->trace[stack->size - 2]; + uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc); + if (possible_tag == kExternalTagNone) return; + stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1]; + stack->size -= 1; + if (tag) *tag = possible_tag; +} + +template +void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack, + uptr *tag = nullptr) { uptr size = thr->shadow_stack_pos - thr->shadow_stack; uptr start = 0; if (size + !!toppc > kStackTraceMax) { start = size + !!toppc - kStackTraceMax; size = kStackTraceMax - !!toppc; } stack->Init(&thr->shadow_stack[start], size, toppc); + ExtractTagFromStack(stack, tag); } #if TSAN_COLLECT_STATS void StatAggregate(u64 *dst, u64 *src); void StatOutput(u64 *stat); #endif void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) { #if TSAN_COLLECT_STATS thr->stat[typ] += n; #endif } void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) { #if TSAN_COLLECT_STATS thr->stat[typ] = n; #endif } void MapShadow(uptr addr, uptr size); void MapThreadTrace(uptr addr, uptr size, const char *name); void DontNeedShadowFor(uptr addr, uptr size); void InitializeShadowMemory(); void InitializeInterceptors(); void InitializeLibIgnore(); void InitializeDynamicAnnotations(); void ForkBefore(ThreadState *thr, uptr pc); void ForkParentAfter(ThreadState *thr, uptr pc); void ForkChildAfter(ThreadState *thr, uptr pc); void ReportRace(ThreadState *thr); bool OutputReport(ThreadState *thr, const ScopedReport &srep); bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace); bool IsExpectedReport(uptr addr, uptr size); void PrintMatchedBenignRaces(); -const char *GetObjectTypeFromTag(uptr tag); - #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1 # define DPrintf Printf #else # define DPrintf(...) #endif #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2 # define DPrintf2 Printf #else # define DPrintf2(...) 
#endif u32 CurrentStackId(ThreadState *thr, uptr pc); ReportStack *SymbolizeStackId(u32 stack_id); void PrintCurrentStack(ThreadState *thr, uptr pc); void PrintCurrentStackSlow(uptr pc); // uses libunwind void Initialize(ThreadState *thr); int Finalize(ThreadState *thr); void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write); void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write); void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic); void MemoryAccessImpl(ThreadState *thr, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur); void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size, bool is_write); void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr, uptr size, uptr step, bool is_write); void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, int size, bool kAccessIsWrite, bool kIsAtomic); const int kSizeLog1 = 0; const int kSizeLog2 = 1; const int kSizeLog4 = 2; const int kSizeLog8 = 3; void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog) { MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false); } void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog) { MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false); } void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog) { MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true); } void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog) { MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true); } void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size); void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true); void ThreadIgnoreEnd(ThreadState *thr, uptr pc); void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true); void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc); void FuncEntry(ThreadState *thr, uptr pc); void FuncExit(ThreadState *thr); int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached); void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread); void ThreadFinish(ThreadState *thr); int ThreadTid(ThreadState *thr, uptr pc, uptr uid); void ThreadJoin(ThreadState *thr, uptr pc, int tid); void ThreadDetach(ThreadState *thr, uptr pc, int tid); void ThreadFinalize(ThreadState *thr); void ThreadSetName(ThreadState *thr, const char *name); int ThreadCount(ThreadState *thr); void ProcessPendingSignals(ThreadState *thr); Processor *ProcCreate(); void ProcDestroy(Processor *proc); void ProcWire(Processor *proc, ThreadState *thr); void ProcUnwire(Processor *proc, ThreadState *thr); // Note: the parameter is called flagz, because flags is already taken // by the global function that returns flags. 
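// Illustration of the flagz convention (hypothetical call; the flag bits are
// the MutexFlag* constants used in tsan_rtl_mutex.cc): flags are OR-ed
// together, e.g. a blocking lock whose wrapper did not call MutexPreLock()
// can pass
//   MutexPostLock(thr, pc, addr, MutexFlagDoPreLockOnPostLock);
// so the deadlock detector's before-lock hook still runs on the post-lock
// path (the post-lock code later in this diff skips that hook when
// MutexFlagTryLock is also set); plain callers pass 0.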
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); -void MutexDestroy(ThreadState *thr, uptr pc, uptr addr); +void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0, int rec = 1); int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr); void Acquire(ThreadState *thr, uptr pc, uptr addr); // AcquireGlobal synchronizes the current thread with all other threads. // In terms of happens-before relation, it draws a HB edge from all threads // (where they happen to execute right now) to the current thread. We use it to // handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal // right before executing finalizers. This provides a coarse, but simple // approximation of the actual required synchronization. void AcquireGlobal(ThreadState *thr, uptr pc); void Release(ThreadState *thr, uptr pc, uptr addr); void ReleaseStore(ThreadState *thr, uptr pc, uptr addr); void AfterSleep(ThreadState *thr, uptr pc); void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c); void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c); void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c); void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c); // The hacky call uses custom calling convention and an assembly thunk. // It is considerably faster that a normal call for the caller // if it is not executed (it is intended for slow paths from hot functions). // The trick is that the call preserves all registers and the compiler // does not treat it as a call. // If it does not work for you, use normal call. #if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC // The caller may not create the stack frame for itself at all, // so we create a reserve stack frame for it (1024b must be enough). 
#define HACKY_CALL(f) \ __asm__ __volatile__("sub $1024, %%rsp;" \ CFI_INL_ADJUST_CFA_OFFSET(1024) \ ".hidden " #f "_thunk;" \ "call " #f "_thunk;" \ "add $1024, %%rsp;" \ CFI_INL_ADJUST_CFA_OFFSET(-1024) \ ::: "memory", "cc"); #else #define HACKY_CALL(f) f() #endif void TraceSwitch(ThreadState *thr); uptr TraceTopPC(ThreadState *thr); uptr TraceSize(); uptr TraceParts(); Trace *ThreadTrace(int tid); extern "C" void __tsan_trace_switch(); void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs, EventType typ, u64 addr) { if (!kCollectHistory) return; DCHECK_GE((int)typ, 0); DCHECK_LE((int)typ, 7); DCHECK_EQ(GetLsb(addr, 61), addr); StatInc(thr, StatEvents); u64 pos = fs.GetTracePos(); if (UNLIKELY((pos % kTracePartSize) == 0)) { #if !SANITIZER_GO HACKY_CALL(__tsan_trace_switch); #else TraceSwitch(thr); #endif } Event *trace = (Event*)GetThreadTrace(fs.tid()); Event *evp = &trace[pos]; Event ev = (u64)addr | ((u64)typ << 61); *evp = ev; } #if !SANITIZER_GO uptr ALWAYS_INLINE HeapEnd() { return HeapMemEnd() + PrimaryAllocator::AdditionalSize(); } #endif } // namespace __tsan #endif // TSAN_RTL_H Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_mutex.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_mutex.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_mutex.cc (revision 317687) @@ -1,538 +1,538 @@ //===-- tsan_rtl_mutex.cc -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #include #include #include "tsan_rtl.h" #include "tsan_flags.h" #include "tsan_sync.h" #include "tsan_report.h" #include "tsan_symbolize.h" #include "tsan_platform.h" namespace __tsan { void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r); struct Callback : DDCallback { ThreadState *thr; uptr pc; Callback(ThreadState *thr, uptr pc) : thr(thr) , pc(pc) { DDCallback::pt = thr->proc()->dd_pt; DDCallback::lt = thr->dd_lt; } u32 Unwind() override { return CurrentStackId(thr, pc); } int UniqueTid() override { return thr->unique_id; } }; void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) { Callback cb(thr, pc); ctx->dd->MutexInit(&cb, &s->dd); s->dd.ctx = s->GetId(); } static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ, uptr addr, u64 mid) { // In Go, these misuses are either impossible, or detected by std lib, // or false positives (e.g. unlock in a different thread). 
if (SANITIZER_GO) return; ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(typ); rep.AddMutex(mid); VarSizeStackTrace trace; ObtainCurrentStack(thr, pc, &trace); rep.AddStack(trace, true); rep.AddLocation(addr, 1); OutputReport(thr, rep); } void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz); StatInc(thr, StatMutexCreate); if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) { CHECK(!thr->is_freeing); thr->is_freeing = true; MemoryWrite(thr, pc, addr, kSizeLog1); thr->is_freeing = false; } SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); s->SetFlags(flagz & MutexCreationFlagMask); if (!SANITIZER_GO && s->creation_stack_id == 0) s->creation_stack_id = CurrentStackId(thr, pc); s->mtx.Unlock(); } -void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { +void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr); StatInc(thr, StatMutexDestroy); SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true); if (s == 0) return; - if (s->IsFlagSet(MutexFlagLinkerInit)) { + if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit)) { // Destroy is no-op for linker-initialized mutexes. s->mtx.Unlock(); return; } if (common_flags()->detect_deadlocks) { Callback cb(thr, pc); ctx->dd->MutexDestroy(&cb, &s->dd); ctx->dd->MutexInit(&cb, &s->dd); } bool unlock_locked = false; if (flags()->report_destroy_locked && s->owner_tid != SyncVar::kInvalidTid && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); unlock_locked = true; } u64 mid = s->GetId(); u32 last_lock = s->last_lock; if (!unlock_locked) s->Reset(thr->proc()); // must not reset it before the report is printed s->mtx.Unlock(); if (unlock_locked) { ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeMutexDestroyLocked); rep.AddMutex(mid); VarSizeStackTrace trace; ObtainCurrentStack(thr, pc, &trace); rep.AddStack(trace); FastState last(last_lock); RestoreStack(last.tid(), last.epoch(), &trace, 0); rep.AddStack(trace, true); rep.AddLocation(addr, 1); OutputReport(thr, rep); SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true); if (s != 0) { s->Reset(thr->proc()); s->mtx.Unlock(); } } thr->mset.Remove(mid); // Imitate a memory write to catch unlock-destroy races. // Do this outside of sync mutex, because it can report a race which locks // sync mutexes. if (IsAppMem(addr)) { CHECK(!thr->is_freeing); thr->is_freeing = true; MemoryWrite(thr, pc, addr, kSizeLog1); thr->is_freeing = false; } // s will be destroyed and freed in MetaMap::FreeBlock. 
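// Note on the new flagz parameter: a caller that knows the mutex is
// linker-initialized can now say so at destroy time, even if no MutexCreate
// carrying that flag was ever observed; a hypothetical call site (m being the
// application mutex) would be:
//
//   MutexDestroy(thr, pc, (uptr)m, MutexFlagLinkerInit);  // no-op destroy,
//                                                         // sync state kept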
} void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); s->UpdateFlags(flagz); if (s->owner_tid != thr->tid) { Callback cb(thr, pc); ctx->dd->MutexBeforeLock(&cb, &s->dd, true); s->mtx.ReadUnlock(); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } else { s->mtx.ReadUnlock(); } } } void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) { DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n", thr->tid, addr, flagz, rec); if (flagz & MutexFlagRecursiveLock) CHECK_GT(rec, 0); else rec = 1; if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); s->UpdateFlags(flagz); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId()); bool report_double_lock = false; if (s->owner_tid == SyncVar::kInvalidTid) { CHECK_EQ(s->recursion, 0); s->owner_tid = thr->tid; s->last_lock = thr->fast_state.raw(); } else if (s->owner_tid == thr->tid) { CHECK_GT(s->recursion, 0); } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_double_lock = true; } const bool first = s->recursion == 0; s->recursion += rec; if (first) { StatInc(thr, StatMutexLock); AcquireImpl(thr, pc, &s->clock); AcquireImpl(thr, pc, &s->read_clock); } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) { StatInc(thr, StatMutexRecLock); } thr->mset.Add(s->GetId(), true, thr->fast_state.epoch()); bool pre_lock = false; if (first && common_flags()->detect_deadlocks) { pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock); Callback cb(thr, pc); if (pre_lock) ctx->dd->MutexBeforeLock(&cb, &s->dd, true); ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock); } u64 mid = s->GetId(); s->mtx.Unlock(); // Can't touch s after this point. s = 0; if (report_double_lock) ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid); if (first && pre_lock && common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); int rec = 0; bool report_bad_unlock = false; if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) { if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_bad_unlock = true; } } else { rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1; s->recursion -= rec; if (s->recursion == 0) { StatInc(thr, StatMutexUnlock); s->owner_tid = SyncVar::kInvalidTid; ReleaseStoreImpl(thr, pc, &s->clock); } else { StatInc(thr, StatMutexRecUnlock); } } thr->mset.Del(s->GetId(), true); if (common_flags()->detect_deadlocks && s->recursion == 0 && !report_bad_unlock) { Callback cb(thr, pc); ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true); } u64 mid = s->GetId(); s->mtx.Unlock(); // Can't touch s after this point. 
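// The recursion count returned here is designed to round-trip with
// MutexPostLock's `rec` argument; a hypothetical caller that must fully
// release a recursive mutex and later restore it would do roughly:
//
//   int rec = MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
//   ...                                    // mutex fully released here
//   MutexPostLock(thr, pc, addr, MutexFlagRecursiveLock, rec);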
if (report_bad_unlock) ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid); if (common_flags()->detect_deadlocks && !report_bad_unlock) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } return rec; } void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); s->UpdateFlags(flagz); Callback cb(thr, pc); ctx->dd->MutexBeforeLock(&cb, &s->dd, false); s->mtx.ReadUnlock(); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); StatInc(thr, StatMutexReadLock); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); s->UpdateFlags(flagz); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId()); bool report_bad_lock = false; if (s->owner_tid != SyncVar::kInvalidTid) { if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_bad_lock = true; } } AcquireImpl(thr, pc, &s->clock); s->last_lock = thr->fast_state.raw(); thr->mset.Add(s->GetId(), false, thr->fast_state.epoch()); bool pre_lock = false; if (common_flags()->detect_deadlocks) { pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock); Callback cb(thr, pc); if (pre_lock) ctx->dd->MutexBeforeLock(&cb, &s->dd, false); ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock); } u64 mid = s->GetId(); s->mtx.ReadUnlock(); // Can't touch s after this point. s = 0; if (report_bad_lock) ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid); if (pre_lock && common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr); StatInc(thr, StatMutexReadUnlock); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); bool report_bad_unlock = false; if (s->owner_tid != SyncVar::kInvalidTid) { if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_bad_unlock = true; } } ReleaseImpl(thr, pc, &s->read_clock); if (common_flags()->detect_deadlocks && s->recursion == 0) { Callback cb(thr, pc); ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false); } u64 mid = s->GetId(); s->mtx.Unlock(); // Can't touch s after this point. thr->mset.Del(mid, false); if (report_bad_unlock) ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid); if (common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); bool write = true; bool report_bad_unlock = false; if (s->owner_tid == SyncVar::kInvalidTid) { // Seems to be read unlock. 
write = false; StatInc(thr, StatMutexReadUnlock); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); ReleaseImpl(thr, pc, &s->read_clock); } else if (s->owner_tid == thr->tid) { // Seems to be write unlock. thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); CHECK_GT(s->recursion, 0); s->recursion--; if (s->recursion == 0) { StatInc(thr, StatMutexUnlock); s->owner_tid = SyncVar::kInvalidTid; ReleaseImpl(thr, pc, &s->clock); } else { StatInc(thr, StatMutexRecUnlock); } } else if (!s->IsFlagSet(MutexFlagBroken)) { s->SetFlags(MutexFlagBroken); report_bad_unlock = true; } thr->mset.Del(s->GetId(), write); if (common_flags()->detect_deadlocks && s->recursion == 0) { Callback cb(thr, pc); ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write); } u64 mid = s->GetId(); s->mtx.Unlock(); // Can't touch s after this point. if (report_bad_unlock) ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid); if (common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } void MutexRepair(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); s->owner_tid = SyncVar::kInvalidTid; s->recursion = 0; s->mtx.Unlock(); } void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); u64 mid = s->GetId(); s->mtx.Unlock(); ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid); } void Acquire(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: Acquire %zx\n", thr->tid, addr); if (thr->ignore_sync) return; SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false); if (!s) return; AcquireImpl(thr, pc, &s->clock); s->mtx.ReadUnlock(); } static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) { ThreadState *thr = reinterpret_cast(arg); ThreadContext *tctx = static_cast(tctx_base); if (tctx->status == ThreadStatusRunning) thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch()); else thr->clock.set(tctx->tid, tctx->epoch1); } void AcquireGlobal(ThreadState *thr, uptr pc) { DPrintf("#%d: AcquireGlobal\n", thr->tid); if (thr->ignore_sync) return; ThreadRegistryLock l(ctx->thread_registry); ctx->thread_registry->RunCallbackForEachThreadLocked( UpdateClockCallback, thr); } void Release(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: Release %zx\n", thr->tid, addr); if (thr->ignore_sync) return; SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); ReleaseImpl(thr, pc, &s->clock); s->mtx.Unlock(); } void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr); if (thr->ignore_sync) return; SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. 
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); ReleaseStoreImpl(thr, pc, &s->clock); s->mtx.Unlock(); } #if !SANITIZER_GO static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) { ThreadState *thr = reinterpret_cast(arg); ThreadContext *tctx = static_cast(tctx_base); if (tctx->status == ThreadStatusRunning) thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch()); else thr->last_sleep_clock.set(tctx->tid, tctx->epoch1); } void AfterSleep(ThreadState *thr, uptr pc) { DPrintf("#%d: AfterSleep %zx\n", thr->tid); if (thr->ignore_sync) return; thr->last_sleep_stack_id = CurrentStackId(thr, pc); ThreadRegistryLock l(ctx->thread_registry); ctx->thread_registry->RunCallbackForEachThreadLocked( UpdateSleepClockCallback, thr); } #endif void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) { if (thr->ignore_sync) return; thr->clock.set(thr->fast_state.epoch()); thr->clock.acquire(&thr->proc()->clock_cache, c); StatInc(thr, StatSyncAcquire); } void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) { if (thr->ignore_sync) return; thr->clock.set(thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); thr->clock.release(&thr->proc()->clock_cache, c); StatInc(thr, StatSyncRelease); } void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) { if (thr->ignore_sync) return; thr->clock.set(thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); thr->clock.ReleaseStore(&thr->proc()->clock_cache, c); StatInc(thr, StatSyncRelease); } void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) { if (thr->ignore_sync) return; thr->clock.set(thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); thr->clock.acq_rel(&thr->proc()->clock_cache, c); StatInc(thr, StatSyncAcquire); StatInc(thr, StatSyncRelease); } void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) { if (r == 0) return; ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeDeadlock); for (int i = 0; i < r->n; i++) { rep.AddMutex(r->loop[i].mtx_ctx0); rep.AddUniqueTid((int)r->loop[i].thr_ctx); rep.AddThread((int)r->loop[i].thr_ctx); } uptr dummy_pc = 0x42; for (int i = 0; i < r->n; i++) { for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) { u32 stk = r->loop[i].stk[j]; if (stk && stk != 0xffffffff) { rep.AddStack(StackDepotGet(stk), true); } else { // Sometimes we fail to extract the stack trace (FIXME: investigate), // but we should still produce some stack trace in the report. rep.AddStack(StackTrace(&dummy_pc, 1), true); } } } OutputReport(thr, rep); } } // namespace __tsan Index: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc (revision 317687) @@ -1,725 +1,729 @@ //===-- tsan_rtl_report.cc ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. 
// //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_suppressions.h" #include "tsan_symbolize.h" #include "tsan_report.h" #include "tsan_sync.h" #include "tsan_mman.h" #include "tsan_flags.h" #include "tsan_fd.h" namespace __tsan { using namespace __sanitizer; // NOLINT static ReportStack *SymbolizeStack(StackTrace trace); void TsanCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) { // There is high probability that interceptors will check-fail as well, // on the other hand there is no sense in processing interceptors // since we are going to die soon. ScopedIgnoreInterceptors ignore; #if !SANITIZER_GO cur_thread()->ignore_sync++; cur_thread()->ignore_reads_and_writes++; #endif Printf("FATAL: ThreadSanitizer CHECK failed: " "%s:%d \"%s\" (0x%zx, 0x%zx)\n", file, line, cond, (uptr)v1, (uptr)v2); PrintCurrentStackSlow(StackTrace::GetCurrentPc()); Die(); } // Can be overriden by an application/test to intercept reports. #ifdef TSAN_EXTERNAL_HOOKS bool OnReport(const ReportDesc *rep, bool suppressed); #else SANITIZER_WEAK_CXX_DEFAULT_IMPL bool OnReport(const ReportDesc *rep, bool suppressed) { (void)rep; return suppressed; } #endif SANITIZER_WEAK_DEFAULT_IMPL void __tsan_on_report(const ReportDesc *rep) { (void)rep; } static void StackStripMain(SymbolizedStack *frames) { SymbolizedStack *last_frame = nullptr; SymbolizedStack *last_frame2 = nullptr; for (SymbolizedStack *cur = frames; cur; cur = cur->next) { last_frame2 = last_frame; last_frame = cur; } if (last_frame2 == 0) return; #if !SANITIZER_GO const char *last = last_frame->info.function; const char *last2 = last_frame2->info.function; // Strip frame above 'main' if (last2 && 0 == internal_strcmp(last2, "main")) { last_frame->ClearAll(); last_frame2->next = nullptr; // Strip our internal thread start routine. } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) { last_frame->ClearAll(); last_frame2->next = nullptr; // Strip global ctors init. } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) { last_frame->ClearAll(); last_frame2->next = nullptr; // If both are 0, then we probably just failed to symbolize. } else if (last || last2) { // Ensure that we recovered stack completely. Trimmed stack // can actually happen if we do not instrument some code, // so it's only a debug print. However we must try hard to not miss it // due to our fault. DPrintf("Bottom stack frame is missed\n"); } #else // The last frame always point into runtime (gosched0, goexit0, runtime.main). last_frame->ClearAll(); last_frame2->next = nullptr; #endif } ReportStack *SymbolizeStackId(u32 stack_id) { if (stack_id == 0) return 0; StackTrace stack = StackDepotGet(stack_id); if (stack.trace == nullptr) return nullptr; return SymbolizeStack(stack); } static ReportStack *SymbolizeStack(StackTrace trace) { if (trace.size == 0) return 0; SymbolizedStack *top = nullptr; for (uptr si = 0; si < trace.size; si++) { const uptr pc = trace.trace[si]; uptr pc1 = pc; // We obtain the return address, but we're interested in the previous // instruction. 
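// (On x86, for example, the return address points just past the call, so
// stepping back one byte via GetPreviousInstructionPc still lands inside the
// call instruction and symbolizes to the calling source line. External PCs
// carry kExternalPCBit and are deliberately left unadjusted.)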
if ((pc & kExternalPCBit) == 0) pc1 = StackTrace::GetPreviousInstructionPc(pc); SymbolizedStack *ent = SymbolizeCode(pc1); CHECK_NE(ent, 0); SymbolizedStack *last = ent; while (last->next) { last->info.address = pc; // restore original pc for report last = last->next; } last->info.address = pc; // restore original pc for report last->next = top; top = ent; } StackStripMain(top); ReportStack *stack = ReportStack::New(); stack->frames = top; return stack; } ScopedReport::ScopedReport(ReportType typ) { ctx->thread_registry->CheckLocked(); void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc)); rep_ = new(mem) ReportDesc; rep_->typ = typ; ctx->report_mtx.Lock(); CommonSanitizerReportMutex.Lock(); } ScopedReport::~ScopedReport() { CommonSanitizerReportMutex.Unlock(); ctx->report_mtx.Unlock(); DestroyAndFree(rep_); } void ScopedReport::AddStack(StackTrace stack, bool suppressable) { ReportStack **rs = rep_->stacks.PushBack(); *rs = SymbolizeStack(stack); (*rs)->suppressable = suppressable; } void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack, const MutexSet *mset) { void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop)); ReportMop *mop = new(mem) ReportMop; rep_->mops.PushBack(mop); mop->tid = s.tid(); mop->addr = addr + s.addr0(); mop->size = s.size(); mop->write = s.IsWrite(); mop->atomic = s.IsAtomic(); mop->stack = SymbolizeStack(stack); mop->external_tag = external_tag; if (mop->stack) mop->stack->suppressable = true; for (uptr i = 0; i < mset->Size(); i++) { MutexSet::Desc d = mset->Get(i); u64 mid = this->AddMutex(d.id); ReportMopMutex mtx = {mid, d.write}; mop->mset.PushBack(mtx); } } void ScopedReport::AddUniqueTid(int unique_tid) { rep_->unique_tids.PushBack(unique_tid); } void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) { for (uptr i = 0; i < rep_->threads.Size(); i++) { if ((u32)rep_->threads[i]->id == tctx->tid) return; } void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); ReportThread *rt = new(mem) ReportThread; rep_->threads.PushBack(rt); rt->id = tctx->tid; rt->os_id = tctx->os_id; rt->running = (tctx->status == ThreadStatusRunning); rt->name = internal_strdup(tctx->name); rt->parent_tid = tctx->parent_tid; rt->workerthread = tctx->workerthread; rt->stack = 0; rt->stack = SymbolizeStackId(tctx->creation_stack_id); if (rt->stack) rt->stack->suppressable = suppressable; } #if !SANITIZER_GO static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) { int unique_id = *(int *)arg; return tctx->unique_id == (u32)unique_id; } static ThreadContext *FindThreadByUidLocked(int unique_id) { ctx->thread_registry->CheckLocked(); return static_cast( ctx->thread_registry->FindThreadContextLocked( FindThreadByUidLockedCallback, &unique_id)); } static ThreadContext *FindThreadByTidLocked(int tid) { ctx->thread_registry->CheckLocked(); return static_cast( ctx->thread_registry->GetThreadLocked(tid)); } static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) { uptr addr = (uptr)arg; ThreadContext *tctx = static_cast(tctx_base); if (tctx->status != ThreadStatusRunning) return false; ThreadState *thr = tctx->thr; CHECK(thr); return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) || (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size)); } ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) { ctx->thread_registry->CheckLocked(); ThreadContext *tctx = static_cast( ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls, (void*)addr)); 
if (!tctx) return 0; ThreadState *thr = tctx->thr; CHECK(thr); *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size); return tctx; } #endif void ScopedReport::AddThread(int unique_tid, bool suppressable) { #if !SANITIZER_GO if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid)) AddThread(tctx, suppressable); #endif } void ScopedReport::AddMutex(const SyncVar *s) { for (uptr i = 0; i < rep_->mutexes.Size(); i++) { if (rep_->mutexes[i]->id == s->uid) return; } void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); ReportMutex *rm = new(mem) ReportMutex; rep_->mutexes.PushBack(rm); rm->id = s->uid; rm->addr = s->addr; rm->destroyed = false; rm->stack = SymbolizeStackId(s->creation_stack_id); } u64 ScopedReport::AddMutex(u64 id) { u64 uid = 0; u64 mid = id; uptr addr = SyncVar::SplitId(id, &uid); SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true); // Check that the mutex is still alive. // Another mutex can be created at the same address, // so check uid as well. if (s && s->CheckId(uid)) { mid = s->uid; AddMutex(s); } else { AddDeadMutex(id); } if (s) s->mtx.Unlock(); return mid; } void ScopedReport::AddDeadMutex(u64 id) { for (uptr i = 0; i < rep_->mutexes.Size(); i++) { if (rep_->mutexes[i]->id == id) return; } void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); ReportMutex *rm = new(mem) ReportMutex; rep_->mutexes.PushBack(rm); rm->id = id; rm->addr = 0; rm->destroyed = true; rm->stack = 0; } void ScopedReport::AddLocation(uptr addr, uptr size) { if (addr == 0) return; #if !SANITIZER_GO int fd = -1; int creat_tid = -1; u32 creat_stack = 0; if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) { ReportLocation *loc = ReportLocation::New(ReportLocationFD); loc->fd = fd; loc->tid = creat_tid; loc->stack = SymbolizeStackId(creat_stack); rep_->locs.PushBack(loc); ThreadContext *tctx = FindThreadByUidLocked(creat_tid); if (tctx) AddThread(tctx); return; } MBlock *b = 0; Allocator *a = allocator(); if (a->PointerIsMine((void*)addr)) { void *block_begin = a->GetBlockBegin((void*)addr); if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin); } if (b != 0) { ThreadContext *tctx = FindThreadByTidLocked(b->tid); ReportLocation *loc = ReportLocation::New(ReportLocationHeap); loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr); loc->heap_chunk_size = b->siz; loc->external_tag = b->tag; loc->tid = tctx ? tctx->tid : b->tid; loc->stack = SymbolizeStackId(b->stk); rep_->locs.PushBack(loc); if (tctx) AddThread(tctx); return; } bool is_stack = false; if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) { ReportLocation *loc = ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS); loc->tid = tctx->tid; rep_->locs.PushBack(loc); AddThread(tctx); } #endif if (ReportLocation *loc = SymbolizeData(addr)) { loc->suppressable = true; rep_->locs.PushBack(loc); return; } } #if !SANITIZER_GO void ScopedReport::AddSleep(u32 stack_id) { rep_->sleep = SymbolizeStackId(stack_id); } #endif void ScopedReport::SetCount(int count) { rep_->count = count; } const ReportDesc *ScopedReport::GetReport() const { return rep_; } void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, - MutexSet *mset) { + MutexSet *mset, uptr *tag) { // This function restores stack trace and mutex set for the thread/epoch. // It does so by getting stack trace and mutex set at the beginning of // trace part, and then replaying the trace till the given epoch. 
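// Events are decoded from the same packed 64-bit form that TraceAddEvent
// writes:
//
//   Event ev      = (u64)addr | ((u64)typ << 61);        // encode (writer)
//   EventType typ = (EventType)(ev >> 61);               // decode (below)
//   uptr pc       = (uptr)(ev & ((1ull << 61) - 1));
//
// The new `tag` out-parameter is filled from the restored stack by
// ExtractTagFromStack (presumably by recognizing external-API marker frames);
// callers that do not need it can leave the parameter at its default, as the
// MutexDestroy report path still does.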
Trace* trace = ThreadTrace(tid); ReadLock l(&trace->mtx); const int partidx = (epoch / kTracePartSize) % TraceParts(); TraceHeader* hdr = &trace->headers[partidx]; if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize) return; CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0); const u64 epoch0 = RoundDown(epoch, TraceSize()); const u64 eend = epoch % TraceSize(); const u64 ebegin = RoundDown(eend, kTracePartSize); DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx); Vector stack(MBlockReportStack); stack.Resize(hdr->stack0.size + 64); for (uptr i = 0; i < hdr->stack0.size; i++) { stack[i] = hdr->stack0.trace[i]; DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]); } if (mset) *mset = hdr->mset0; uptr pos = hdr->stack0.size; Event *events = (Event*)GetThreadTrace(tid); for (uptr i = ebegin; i <= eend; i++) { Event ev = events[i]; EventType typ = (EventType)(ev >> 61); uptr pc = (uptr)(ev & ((1ull << 61) - 1)); DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc); if (typ == EventTypeMop) { stack[pos] = pc; } else if (typ == EventTypeFuncEnter) { if (stack.Size() < pos + 2) stack.Resize(pos + 2); stack[pos++] = pc; } else if (typ == EventTypeFuncExit) { if (pos > 0) pos--; } if (mset) { if (typ == EventTypeLock) { mset->Add(pc, true, epoch0 + i); } else if (typ == EventTypeUnlock) { mset->Del(pc, true); } else if (typ == EventTypeRLock) { mset->Add(pc, false, epoch0 + i); } else if (typ == EventTypeRUnlock) { mset->Del(pc, false); } } for (uptr j = 0; j <= pos; j++) DPrintf2(" #%zu: %zx\n", j, stack[j]); } if (pos == 0 && stack[0] == 0) return; pos++; stk->Init(&stack[0], pos); + ExtractTagFromStack(stk, tag); } static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], uptr addr_min, uptr addr_max) { bool equal_stack = false; RacyStacks hash; bool equal_address = false; RacyAddress ra0 = {addr_min, addr_max}; { ReadLock lock(&ctx->racy_mtx); if (flags()->suppress_equal_stacks) { hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { if (hash == ctx->racy_stacks[i]) { VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n"); equal_stack = true; break; } } } if (flags()->suppress_equal_addresses) { for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { RacyAddress ra2 = ctx->racy_addresses[i]; uptr maxbeg = max(ra0.addr_min, ra2.addr_min); uptr minend = min(ra0.addr_max, ra2.addr_max); if (maxbeg < minend) { VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n"); equal_address = true; break; } } } } if (!equal_stack && !equal_address) return false; if (!equal_stack) { Lock lock(&ctx->racy_mtx); ctx->racy_stacks.PushBack(hash); } if (!equal_address) { Lock lock(&ctx->racy_mtx); ctx->racy_addresses.PushBack(ra0); } return true; } static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], uptr addr_min, uptr addr_max) { Lock lock(&ctx->racy_mtx); if (flags()->suppress_equal_stacks) { RacyStacks hash; hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); ctx->racy_stacks.PushBack(hash); } if (flags()->suppress_equal_addresses) { RacyAddress ra0 = {addr_min, addr_max}; ctx->racy_addresses.PushBack(ra0); } } bool OutputReport(ThreadState *thr, const ScopedReport &srep) { if (!flags()->report_bugs || thr->suppress_reports) return false; 
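// The IsSuppressed() checks below consume user suppressions, typically passed
// as TSAN_OPTIONS=suppressions=<file>; e.g. a file with entries such as
//
//   race:MyNamespace::Cache::Get
//   mutex:legacy_lock.cc
//
// (names here are illustrative) yields a non-zero pc/addr for matching
// reports, which is then remembered in ctx->fired_suppressions so that later
// identical reports can be skipped cheaply.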
atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime()); const ReportDesc *rep = srep.GetReport(); CHECK_EQ(thr->current_report, nullptr); thr->current_report = rep; Suppression *supp = 0; uptr pc_or_addr = 0; for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++) pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp); for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++) pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp); for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++) pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp); for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++) pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp); if (pc_or_addr != 0) { Lock lock(&ctx->fired_suppressions_mtx); FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp}; ctx->fired_suppressions.push_back(s); } { bool old_is_freeing = thr->is_freeing; thr->is_freeing = false; bool suppressed = OnReport(rep, pc_or_addr != 0); thr->is_freeing = old_is_freeing; if (suppressed) { thr->current_report = nullptr; return false; } } PrintReport(rep); __tsan_on_report(rep); ctx->nreported++; if (flags()->halt_on_error) Die(); thr->current_report = nullptr; return true; } bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) { ReadLock lock(&ctx->fired_suppressions_mtx); for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { if (ctx->fired_suppressions[k].type != type) continue; for (uptr j = 0; j < trace.size; j++) { FiredSuppression *s = &ctx->fired_suppressions[k]; if (trace.trace[j] == s->pc_or_addr) { if (s->supp) atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed); return true; } } } return false; } static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) { ReadLock lock(&ctx->fired_suppressions_mtx); for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { if (ctx->fired_suppressions[k].type != type) continue; FiredSuppression *s = &ctx->fired_suppressions[k]; if (addr == s->pc_or_addr) { if (s->supp) atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed); return true; } } return false; } static bool RaceBetweenAtomicAndFree(ThreadState *thr) { Shadow s0(thr->racy_state[0]); Shadow s1(thr->racy_state[1]); CHECK(!(s0.IsAtomic() && s1.IsAtomic())); if (!s0.IsAtomic() && !s1.IsAtomic()) return true; if (s0.IsAtomic() && s1.IsFreed()) return true; if (s1.IsAtomic() && thr->is_freeing) return true; return false; } void ReportRace(ThreadState *thr) { CheckNoLocks(thr); // Symbolizer makes lots of intercepted calls. If we try to process them, // at best it will cause deadlocks on internal mutexes. 
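// The per-access tags recovered further down (tags[0]/tags[1]) normally
// originate from the __tsan_external_* annotation API; a rough sketch of a
// library producing them (the object-type string is hypothetical):
//
//   void *tag = __tsan_external_register_tag("MyLib::Container");
//   __tsan_external_read(obj, /*caller_pc=*/nullptr, tag);
//
// When either racing access carries such a tag, the report is emitted as
// ReportTypeExternalRace instead of a plain data race.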
ScopedIgnoreInterceptors ignore; if (!flags()->report_bugs) return; if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr)) return; bool freed = false; { Shadow s(thr->racy_state[1]); freed = s.GetFreedAndReset(); thr->racy_state[1] = s.raw(); } uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr); uptr addr_min = 0; uptr addr_max = 0; { uptr a0 = addr + Shadow(thr->racy_state[0]).addr0(); uptr a1 = addr + Shadow(thr->racy_state[1]).addr0(); uptr e0 = a0 + Shadow(thr->racy_state[0]).size(); uptr e1 = a1 + Shadow(thr->racy_state[1]).size(); addr_min = min(a0, a1); addr_max = max(e0, e1); if (IsExpectedReport(addr_min, addr_max - addr_min)) return; } ReportType typ = ReportTypeRace; if (thr->is_vptr_access && freed) typ = ReportTypeVptrUseAfterFree; else if (thr->is_vptr_access) typ = ReportTypeVptrRace; else if (freed) typ = ReportTypeUseAfterFree; - else if (thr->external_tag > 0) - typ = ReportTypeExternalRace; if (IsFiredSuppression(ctx, typ, addr)) return; const uptr kMop = 2; VarSizeStackTrace traces[kMop]; + uptr tags[kMop] = {kExternalTagNone}; const uptr toppc = TraceTopPC(thr); - ObtainCurrentStack(thr, toppc, &traces[0]); + ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]); if (IsFiredSuppression(ctx, typ, traces[0])) return; // MutexSet is too large to live on stack. Vector mset_buffer(MBlockScopedBuf); mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1); MutexSet *mset2 = new(&mset_buffer[0]) MutexSet(); Shadow s2(thr->racy_state[1]); - RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2); + RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]); if (IsFiredSuppression(ctx, typ, traces[1])) return; if (HandleRacyStacks(thr, traces, addr_min, addr_max)) return; + // If any of the two accesses has a tag, treat this as an "external" race. + if (tags[0] != kExternalTagNone || tags[1] != kExternalTagNone) + typ = ReportTypeExternalRace; + ThreadRegistryLock l0(ctx->thread_registry); ScopedReport rep(typ); for (uptr i = 0; i < kMop; i++) { Shadow s(thr->racy_state[i]); - rep.AddMemoryAccess(addr, thr->external_tag, s, traces[i], + rep.AddMemoryAccess(addr, tags[i], s, traces[i], i == 0 ? &thr->mset : mset2); } for (uptr i = 0; i < kMop; i++) { FastState s(thr->racy_state[i]); ThreadContext *tctx = static_cast( ctx->thread_registry->GetThreadLocked(s.tid())); if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1) continue; rep.AddThread(tctx); } rep.AddLocation(addr_min, addr_max - addr_min); #if !SANITIZER_GO { // NOLINT Shadow s(thr->racy_state[1]); if (s.epoch() <= thr->last_sleep_clock.get(s.tid())) rep.AddSleep(thr->last_sleep_stack_id); } #endif if (!OutputReport(thr, rep)) return; AddRacyStacks(thr, traces, addr_min, addr_max); } void PrintCurrentStack(ThreadState *thr, uptr pc) { VarSizeStackTrace trace; ObtainCurrentStack(thr, pc, &trace); PrintStack(SymbolizeStack(trace)); } // Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes // __sanitizer_print_stack_trace exists in the actual unwinded stack, but // tail-call to PrintCurrentStackSlow breaks this assumption because // __sanitizer_print_stack_trace disappears after tail-call. // However, this solution is not reliable enough, please see dvyukov's comment // http://reviews.llvm.org/D19148#406208 // Also see PR27280 comment 2 and 3 for breaking examples and analysis. 
ALWAYS_INLINE void PrintCurrentStackSlow(uptr pc) { #if !SANITIZER_GO BufferedStackTrace *ptrace = new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace))) BufferedStackTrace(); ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false); for (uptr i = 0; i < ptrace->size / 2; i++) { uptr tmp = ptrace->trace_buffer[i]; ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1]; ptrace->trace_buffer[ptrace->size - i - 1] = tmp; } PrintStack(SymbolizeStack(*ptrace)); #endif } } // namespace __tsan using namespace __tsan; extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_print_stack_trace() { PrintCurrentStackSlow(StackTrace::GetCurrentPc()); } } // extern "C" Index: vendor/compiler-rt/dist/lib/tsan/tests/rtl/tsan_posix.cc =================================================================== --- vendor/compiler-rt/dist/lib/tsan/tests/rtl/tsan_posix.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/tsan/tests/rtl/tsan_posix.cc (revision 317687) @@ -1,154 +1,155 @@ //===-- tsan_posix.cc -----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// #include "tsan_interface.h" #include "tsan_posix_util.h" #include "tsan_test_util.h" #include "gtest/gtest.h" #include struct thread_key { pthread_key_t key; pthread_mutex_t *mtx; int val; int *cnt; thread_key(pthread_key_t key, pthread_mutex_t *mtx, int val, int *cnt) : key(key) , mtx(mtx) , val(val) , cnt(cnt) { } }; static void thread_secific_dtor(void *v) { thread_key *k = (thread_key *)v; EXPECT_EQ(__interceptor_pthread_mutex_lock(k->mtx), 0); (*k->cnt)++; __tsan_write4(&k->cnt); EXPECT_EQ(__interceptor_pthread_mutex_unlock(k->mtx), 0); if (k->val == 42) { // Okay. 
} else if (k->val == 43 || k->val == 44) { k->val--; EXPECT_EQ(pthread_setspecific(k->key, k), 0); } else { ASSERT_TRUE(false); } } static void *dtors_thread(void *p) { thread_key *k = (thread_key *)p; EXPECT_EQ(pthread_setspecific(k->key, k), 0); return 0; } TEST(Posix, ThreadSpecificDtors) { int cnt = 0; pthread_key_t key; EXPECT_EQ(pthread_key_create(&key, thread_secific_dtor), 0); pthread_mutex_t mtx; EXPECT_EQ(__interceptor_pthread_mutex_init(&mtx, 0), 0); pthread_t th[3]; thread_key k1 = thread_key(key, &mtx, 42, &cnt); thread_key k2 = thread_key(key, &mtx, 43, &cnt); thread_key k3 = thread_key(key, &mtx, 44, &cnt); EXPECT_EQ(__interceptor_pthread_create(&th[0], 0, dtors_thread, &k1), 0); EXPECT_EQ(__interceptor_pthread_create(&th[1], 0, dtors_thread, &k2), 0); EXPECT_EQ(__interceptor_pthread_join(th[0], 0), 0); EXPECT_EQ(__interceptor_pthread_create(&th[2], 0, dtors_thread, &k3), 0); EXPECT_EQ(__interceptor_pthread_join(th[1], 0), 0); EXPECT_EQ(__interceptor_pthread_join(th[2], 0), 0); EXPECT_EQ(pthread_key_delete(key), 0); EXPECT_EQ(6, cnt); } #if !defined(__aarch64__) && !defined(__APPLE__) static __thread int local_var; static void *local_thread(void *p) { __tsan_write1(&local_var); __tsan_write1(&p); if (p == 0) return 0; const int kThreads = 4; pthread_t th[kThreads]; for (int i = 0; i < kThreads; i++) EXPECT_EQ(__interceptor_pthread_create(&th[i], 0, local_thread, (void*)((long)p - 1)), 0); // NOLINT for (int i = 0; i < kThreads; i++) EXPECT_EQ(__interceptor_pthread_join(th[i], 0), 0); return 0; } #endif TEST(Posix, ThreadLocalAccesses) { // The test is failing with high thread count for aarch64. // FIXME: track down the issue and re-enable the test. // On Darwin, we're running unit tests without interceptors and __thread is -// using malloc and free, which causes false data race reports. -#if !defined(__aarch64__) && !defined(__APPLE__) +// using malloc and free, which causes false data race reports. On rare +// occasions on powerpc64le this test also fails. 
+#if !defined(__aarch64__) && !defined(__APPLE__) && !defined(powerpc64le) local_thread((void*)2); #endif } struct CondContext { pthread_mutex_t m; pthread_cond_t c; int data; }; static void *cond_thread(void *p) { CondContext &ctx = *static_cast(p); EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0); EXPECT_EQ(ctx.data, 0); ctx.data = 1; EXPECT_EQ(__interceptor_pthread_cond_signal(&ctx.c), 0); EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0); EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0); while (ctx.data != 2) EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0); EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0); EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0); ctx.data = 3; EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0); EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0); return 0; } TEST(Posix, CondBasic) { CondContext ctx; EXPECT_EQ(__interceptor_pthread_mutex_init(&ctx.m, 0), 0); EXPECT_EQ(__interceptor_pthread_cond_init(&ctx.c, 0), 0); ctx.data = 0; pthread_t th; EXPECT_EQ(__interceptor_pthread_create(&th, 0, cond_thread, &ctx), 0); EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0); while (ctx.data != 1) EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0); ctx.data = 2; EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0); EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0); EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0); while (ctx.data != 3) EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0); EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0); EXPECT_EQ(__interceptor_pthread_join(th, 0), 0); EXPECT_EQ(__interceptor_pthread_cond_destroy(&ctx.c), 0); EXPECT_EQ(__interceptor_pthread_mutex_destroy(&ctx.m), 0); } Index: vendor/compiler-rt/dist/lib/ubsan/ubsan_diag.cc =================================================================== --- vendor/compiler-rt/dist/lib/ubsan/ubsan_diag.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/ubsan/ubsan_diag.cc (revision 317687) @@ -1,428 +1,429 @@ //===-- ubsan_diag.cc -----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Diagnostic reporting for the UBSan runtime. // //===----------------------------------------------------------------------===// #include "ubsan_platform.h" #if CAN_SANITIZE_UB #include "ubsan_diag.h" #include "ubsan_init.h" #include "ubsan_flags.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace_printer.h" #include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include using namespace __ubsan; static void MaybePrintStackTrace(uptr pc, uptr bp) { // We assume that flags are already parsed, as UBSan runtime // will definitely be called when we print the first diagnostics message. if (!flags()->print_stacktrace) return; - // We can only use slow unwind, as we don't have any information about stack - // top/bottom. - // FIXME: It's better to respect "fast_unwind_on_fatal" runtime flag and - // fetch stack top/bottom information if we have it (e.g. if we're running - // under ASan). 
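// With the replacement below, the frame-pointer-based fast unwinder is used
// when the user opts in, e.g.:
//
//   UBSAN_OPTIONS=print_stacktrace=1:fast_unwind_on_fatal=1 ./a.out
//
// in which case GetThreadStackTopAndBottom() supplies the stack bounds the
// fast unwinder requires; otherwise the slow unwinder keeps being used.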
- if (StackTrace::WillUseFastUnwind(false)) - return; + + uptr top = 0; + uptr bottom = 0; + bool request_fast_unwind = common_flags()->fast_unwind_on_fatal; + if (request_fast_unwind) + __sanitizer::GetThreadStackTopAndBottom(false, &top, &bottom); + BufferedStackTrace stack; - stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false); + stack.Unwind(kStackTraceMax, pc, bp, nullptr, top, bottom, + request_fast_unwind); stack.Print(); } static const char *ConvertTypeToString(ErrorType Type) { switch (Type) { #define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) \ case ErrorType::Name: \ return SummaryKind; #include "ubsan_checks.inc" #undef UBSAN_CHECK } UNREACHABLE("unknown ErrorType!"); } static const char *ConvertTypeToFlagName(ErrorType Type) { switch (Type) { #define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) \ case ErrorType::Name: \ return FSanitizeFlagName; #include "ubsan_checks.inc" #undef UBSAN_CHECK } UNREACHABLE("unknown ErrorType!"); } static void MaybeReportErrorSummary(Location Loc, ErrorType Type) { if (!common_flags()->print_summary) return; if (!flags()->report_error_type) Type = ErrorType::GenericUB; const char *ErrorKind = ConvertTypeToString(Type); if (Loc.isSourceLocation()) { SourceLocation SLoc = Loc.getSourceLocation(); if (!SLoc.isInvalid()) { AddressInfo AI; AI.file = internal_strdup(SLoc.getFilename()); AI.line = SLoc.getLine(); AI.column = SLoc.getColumn(); AI.function = internal_strdup(""); // Avoid printing ?? as function name. ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName()); AI.Clear(); return; } } else if (Loc.isSymbolizedStack()) { const AddressInfo &AI = Loc.getSymbolizedStack()->info; ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName()); return; } ReportErrorSummary(ErrorKind, GetSanititizerToolName()); } namespace { class Decorator : public SanitizerCommonDecorator { public: Decorator() : SanitizerCommonDecorator() {} const char *Highlight() const { return Green(); } const char *EndHighlight() const { return Default(); } const char *Note() const { return Black(); } const char *EndNote() const { return Default(); } }; } SymbolizedStack *__ubsan::getSymbolizedLocation(uptr PC) { InitAsStandaloneIfNecessary(); return Symbolizer::GetOrInit()->SymbolizePC(PC); } Diag &Diag::operator<<(const TypeDescriptor &V) { return AddArg(V.getTypeName()); } Diag &Diag::operator<<(const Value &V) { if (V.getType().isSignedIntegerTy()) AddArg(V.getSIntValue()); else if (V.getType().isUnsignedIntegerTy()) AddArg(V.getUIntValue()); else if (V.getType().isFloatTy()) AddArg(V.getFloatValue()); else AddArg(""); return *this; } /// Hexadecimal printing for numbers too large for Printf to handle directly. 
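// For instance, assuming HAVE_INT128_T, the 128-bit value
// 0x0123456789abcdef0011223344556677 is rendered as four zero-padded 32-bit
// chunks (shifts by 96, 64, 32 and 0 bits, each truncated to unsigned int),
// reproducing "0x0123456789abcdef0011223344556677".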
static void RenderHex(InternalScopedString *Buffer, UIntMax Val) { #if HAVE_INT128_T Buffer->append("0x%08x%08x%08x%08x", (unsigned int)(Val >> 96), (unsigned int)(Val >> 64), (unsigned int)(Val >> 32), (unsigned int)(Val)); #else UNREACHABLE("long long smaller than 64 bits?"); #endif } static void RenderLocation(InternalScopedString *Buffer, Location Loc) { switch (Loc.getKind()) { case Location::LK_Source: { SourceLocation SLoc = Loc.getSourceLocation(); if (SLoc.isInvalid()) Buffer->append(""); else RenderSourceLocation(Buffer, SLoc.getFilename(), SLoc.getLine(), SLoc.getColumn(), common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix); return; } case Location::LK_Memory: Buffer->append("%p", Loc.getMemoryLocation()); return; case Location::LK_Symbolized: { const AddressInfo &Info = Loc.getSymbolizedStack()->info; if (Info.file) RenderSourceLocation(Buffer, Info.file, Info.line, Info.column, common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix); else if (Info.module) RenderModuleLocation(Buffer, Info.module, Info.module_offset, Info.module_arch, common_flags()->strip_path_prefix); else Buffer->append("%p", Info.address); return; } case Location::LK_Null: Buffer->append(""); return; } } static void RenderText(InternalScopedString *Buffer, const char *Message, const Diag::Arg *Args) { for (const char *Msg = Message; *Msg; ++Msg) { if (*Msg != '%') { Buffer->append("%c", *Msg); continue; } const Diag::Arg &A = Args[*++Msg - '0']; switch (A.Kind) { case Diag::AK_String: Buffer->append("%s", A.String); break; case Diag::AK_TypeName: { if (SANITIZER_WINDOWS) // The Windows implementation demangles names early. Buffer->append("'%s'", A.String); else Buffer->append("'%s'", Symbolizer::GetOrInit()->Demangle(A.String)); break; } case Diag::AK_SInt: // 'long long' is guaranteed to be at least 64 bits wide. if (A.SInt >= INT64_MIN && A.SInt <= INT64_MAX) Buffer->append("%lld", (long long)A.SInt); else RenderHex(Buffer, A.SInt); break; case Diag::AK_UInt: if (A.UInt <= UINT64_MAX) Buffer->append("%llu", (unsigned long long)A.UInt); else RenderHex(Buffer, A.UInt); break; case Diag::AK_Float: { // FIXME: Support floating-point formatting in sanitizer_common's // printf, and stop using snprintf here. char FloatBuffer[32]; #if SANITIZER_WINDOWS sprintf_s(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float); #else snprintf(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float); #endif Buffer->append("%s", FloatBuffer); break; } case Diag::AK_Pointer: Buffer->append("%p", A.Pointer); break; } } } /// Find the earliest-starting range in Ranges which ends after Loc. static Range *upperBound(MemoryLocation Loc, Range *Ranges, unsigned NumRanges) { Range *Best = 0; for (unsigned I = 0; I != NumRanges; ++I) if (Ranges[I].getEnd().getMemoryLocation() > Loc && (!Best || Best->getStart().getMemoryLocation() > Ranges[I].getStart().getMemoryLocation())) Best = &Ranges[I]; return Best; } static inline uptr subtractNoOverflow(uptr LHS, uptr RHS) { return (LHS < RHS) ? 0 : LHS - RHS; } static inline uptr addNoOverflow(uptr LHS, uptr RHS) { const uptr Limit = (uptr)-1; return (LHS > Limit - RHS) ? Limit : LHS + RHS; } /// Render a snippet of the address space near a location. static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc, Range *Ranges, unsigned NumRanges, const Diag::Arg *Args) { // Show at least the 8 bytes surrounding Loc. 
const unsigned MinBytesNearLoc = 4; MemoryLocation Min = subtractNoOverflow(Loc, MinBytesNearLoc); MemoryLocation Max = addNoOverflow(Loc, MinBytesNearLoc); MemoryLocation OrigMin = Min; for (unsigned I = 0; I < NumRanges; ++I) { Min = __sanitizer::Min(Ranges[I].getStart().getMemoryLocation(), Min); Max = __sanitizer::Max(Ranges[I].getEnd().getMemoryLocation(), Max); } // If we have too many interesting bytes, prefer to show bytes after Loc. const unsigned BytesToShow = 32; if (Max - Min > BytesToShow) Min = __sanitizer::Min(Max - BytesToShow, OrigMin); Max = addNoOverflow(Min, BytesToShow); if (!IsAccessibleMemoryRange(Min, Max - Min)) { Printf("\n"); return; } // Emit data. InternalScopedString Buffer(1024); for (uptr P = Min; P != Max; ++P) { unsigned char C = *reinterpret_cast(P); Buffer.append("%s%02x", (P % 8 == 0) ? " " : " ", C); } Buffer.append("\n"); // Emit highlights. Buffer.append(Decor.Highlight()); Range *InRange = upperBound(Min, Ranges, NumRanges); for (uptr P = Min; P != Max; ++P) { char Pad = ' ', Byte = ' '; if (InRange && InRange->getEnd().getMemoryLocation() == P) InRange = upperBound(P, Ranges, NumRanges); if (!InRange && P > Loc) break; if (InRange && InRange->getStart().getMemoryLocation() < P) Pad = '~'; if (InRange && InRange->getStart().getMemoryLocation() <= P) Byte = '~'; if (P % 8 == 0) Buffer.append("%c", Pad); Buffer.append("%c", Pad); Buffer.append("%c", P == Loc ? '^' : Byte); Buffer.append("%c", Byte); } Buffer.append("%s\n", Decor.EndHighlight()); // Go over the line again, and print names for the ranges. InRange = 0; unsigned Spaces = 0; for (uptr P = Min; P != Max; ++P) { if (!InRange || InRange->getEnd().getMemoryLocation() == P) InRange = upperBound(P, Ranges, NumRanges); if (!InRange) break; Spaces += (P % 8) == 0 ? 2 : 1; if (InRange && InRange->getStart().getMemoryLocation() == P) { while (Spaces--) Buffer.append(" "); RenderText(&Buffer, InRange->getText(), Args); Buffer.append("\n"); // FIXME: We only support naming one range for now! break; } Spaces += 2; } Printf("%s", Buffer.data()); // FIXME: Print names for anything we can identify within the line: // // * If we can identify the memory itself as belonging to a particular // global, stack variable, or dynamic allocation, then do so. // // * If we have a pointer-size, pointer-aligned range highlighted, // determine whether the value of that range is a pointer to an // entity which we can name, and if so, print that name. // // This needs an external symbolizer, or (preferably) ASan instrumentation. } Diag::~Diag() { // All diagnostics should be printed under report mutex. 
CommonSanitizerReportMutex.CheckLocked(); Decorator Decor; InternalScopedString Buffer(1024); Buffer.append(Decor.Bold()); RenderLocation(&Buffer, Loc); Buffer.append(":"); switch (Level) { case DL_Error: Buffer.append("%s runtime error: %s%s", Decor.Warning(), Decor.EndWarning(), Decor.Bold()); break; case DL_Note: Buffer.append("%s note: %s", Decor.Note(), Decor.EndNote()); break; } RenderText(&Buffer, Message, Args); Buffer.append("%s\n", Decor.Default()); Printf("%s", Buffer.data()); if (Loc.isMemoryLocation()) PrintMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges, NumRanges, Args); } ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc, ErrorType Type) : Opts(Opts), SummaryLoc(SummaryLoc), Type(Type) { InitAsStandaloneIfNecessary(); CommonSanitizerReportMutex.Lock(); } ScopedReport::~ScopedReport() { MaybePrintStackTrace(Opts.pc, Opts.bp); MaybeReportErrorSummary(SummaryLoc, Type); CommonSanitizerReportMutex.Unlock(); if (flags()->halt_on_error) Die(); } ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; static SuppressionContext *suppression_ctx = nullptr; static const char kVptrCheck[] = "vptr_check"; static const char *kSuppressionTypes[] = { #define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) FSanitizeFlagName, #include "ubsan_checks.inc" #undef UBSAN_CHECK kVptrCheck, }; void __ubsan::InitializeSuppressions() { CHECK_EQ(nullptr, suppression_ctx); suppression_ctx = new (suppression_placeholder) // NOLINT SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); suppression_ctx->ParseFromFile(flags()->suppressions); } bool __ubsan::IsVptrCheckSuppressed(const char *TypeName) { InitAsStandaloneIfNecessary(); CHECK(suppression_ctx); Suppression *s; return suppression_ctx->Match(TypeName, kVptrCheck, &s); } bool __ubsan::IsPCSuppressed(ErrorType ET, uptr PC, const char *Filename) { InitAsStandaloneIfNecessary(); CHECK(suppression_ctx); const char *SuppType = ConvertTypeToFlagName(ET); // Fast path: don't symbolize PC if there is no suppressions for given UB // type. if (!suppression_ctx->HasSuppressionType(SuppType)) return false; Suppression *s = nullptr; // Suppress by file name known to runtime. if (Filename != nullptr && suppression_ctx->Match(Filename, SuppType, &s)) return true; // Suppress by module name. if (const char *Module = Symbolizer::GetOrInit()->GetModuleNameForPc(PC)) { if (suppression_ctx->Match(Module, SuppType, &s)) return true; } // Suppress by function or source file name from debug info. SymbolizedStackHolder Stack(Symbolizer::GetOrInit()->SymbolizePC(PC)); const AddressInfo &AI = Stack.get()->info; return suppression_ctx->Match(AI.function, SuppType, &s) || suppression_ctx->Match(AI.file, SuppType, &s); } #endif // CAN_SANITIZE_UB Index: vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.cc =================================================================== --- vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.cc (revision 317687) @@ -1,614 +1,614 @@ //===-- ubsan_handlers.cc -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Error logging entry points for the UBSan runtime. 
// //===----------------------------------------------------------------------===// #include "ubsan_platform.h" #if CAN_SANITIZE_UB #include "ubsan_handlers.h" #include "ubsan_diag.h" #include "sanitizer_common/sanitizer_common.h" using namespace __sanitizer; using namespace __ubsan; namespace __ubsan { bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET) { // We are not allowed to skip error report: if we are in unrecoverable // handler, we have to terminate the program right now, and therefore // have to print some diagnostic. // // Even if source location is disabled, it doesn't mean that we have // already report an error to the user: some concurrently running // thread could have acquired it, but not yet printed the report. if (Opts.FromUnrecoverableHandler) return false; return SLoc.isDisabled() || IsPCSuppressed(ET, Opts.pc, SLoc.getFilename()); } const char *TypeCheckKinds[] = { "load of", "store to", "reference binding to", "member access within", "member call on", "constructor call on", "downcast of", "downcast of", "upcast of", "cast to virtual base of", "_Nonnull binding to"}; } static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, ReportOptions Opts) { Location Loc = Data->Loc.acquire(); uptr Alignment = (uptr)1 << Data->LogAlignment; ErrorType ET; if (!Pointer) ET = ErrorType::NullPointerUse; else if (Pointer & (Alignment - 1)) ET = ErrorType::MisalignedPointerUse; else ET = ErrorType::InsufficientObjectSize; // Use the SourceLocation from Data to track deduplication, even if it's // invalid. if (ignoreReport(Loc.getSourceLocation(), Opts, ET)) return; SymbolizedStackHolder FallbackLoc; if (Data->Loc.isInvalid()) { FallbackLoc.reset(getCallerLocation(Opts.pc)); Loc = FallbackLoc; } ScopedReport R(Opts, Loc, ET); switch (ET) { case ErrorType::NullPointerUse: Diag(Loc, DL_Error, "%0 null pointer of type %1") << TypeCheckKinds[Data->TypeCheckKind] << Data->Type; break; case ErrorType::MisalignedPointerUse: Diag(Loc, DL_Error, "%0 misaligned address %1 for type %3, " "which requires %2 byte alignment") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Alignment << Data->Type; break; case ErrorType::InsufficientObjectSize: Diag(Loc, DL_Error, "%0 address %1 with insufficient space " "for an object of type %2") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Data->Type; break; default: UNREACHABLE("unexpected error type!"); } if (Pointer) Diag(Pointer, DL_Note, "pointer points here"); } void __ubsan::__ubsan_handle_type_mismatch_v1(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(false); handleTypeMismatchImpl(Data, Pointer, Opts); } void __ubsan::__ubsan_handle_type_mismatch_v1_abort(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(true); handleTypeMismatchImpl(Data, Pointer, Opts); Die(); } /// \brief Common diagnostic emission for various forms of integer overflow. template static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS, const char *Operator, T RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); bool IsSigned = Data->Type.isSignedIntegerTy(); ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow : ErrorType::UnsignedIntegerOverflow; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "%0 integer overflow: " "%1 %2 %3 cannot be represented in type %4") << (IsSigned ? 
"signed" : "unsigned") << Value(Data->Type, LHS) << Operator << RHS << Data->Type; } #define UBSAN_OVERFLOW_HANDLER(handler_name, op, unrecoverable) \ void __ubsan::handler_name(OverflowData *Data, ValueHandle LHS, \ ValueHandle RHS) { \ GET_REPORT_OPTIONS(unrecoverable); \ handleIntegerOverflowImpl(Data, LHS, op, Value(Data->Type, RHS), Opts); \ if (unrecoverable) \ Die(); \ } UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow_abort, "+", true) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow, "-", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow_abort, "-", true) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow, "*", false) UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow_abort, "*", true) static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); bool IsSigned = Data->Type.isSignedIntegerTy(); ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow : ErrorType::UnsignedIntegerOverflow; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); if (IsSigned) Diag(Loc, DL_Error, "negation of %0 cannot be represented in type %1; " "cast to an unsigned type to negate this value to itself") << Value(Data->Type, OldVal) << Data->Type; else Diag(Loc, DL_Error, "negation of %0 cannot be represented in type %1") << Value(Data->Type, OldVal) << Data->Type; } void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data, ValueHandle OldVal) { GET_REPORT_OPTIONS(false); handleNegateOverflowImpl(Data, OldVal, Opts); } void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data, ValueHandle OldVal) { GET_REPORT_OPTIONS(true); handleNegateOverflowImpl(Data, OldVal, Opts); Die(); } static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS, ValueHandle RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); Value LHSVal(Data->Type, LHS); Value RHSVal(Data->Type, RHS); ErrorType ET; if (RHSVal.isMinusOne()) ET = ErrorType::SignedIntegerOverflow; else if (Data->Type.isIntegerTy()) ET = ErrorType::IntegerDivideByZero; else ET = ErrorType::FloatDivideByZero; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); switch (ET) { case ErrorType::SignedIntegerOverflow: Diag(Loc, DL_Error, "division of %0 by -1 cannot be represented in type %1") << LHSVal << Data->Type; break; default: Diag(Loc, DL_Error, "division by zero"); break; } } void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(false); handleDivremOverflowImpl(Data, LHS, RHS, Opts); } void __ubsan::__ubsan_handle_divrem_overflow_abort(OverflowData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(true); handleDivremOverflowImpl(Data, LHS, RHS, Opts); Die(); } static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); Value LHSVal(Data->LHSType, LHS); Value RHSVal(Data->RHSType, RHS); ErrorType ET; if (RHSVal.isNegative() || RHSVal.getPositiveIntValue() >= Data->LHSType.getIntegerBitWidth()) ET = ErrorType::InvalidShiftExponent; else ET = ErrorType::InvalidShiftBase; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); if (ET == ErrorType::InvalidShiftExponent) { if (RHSVal.isNegative()) Diag(Loc, DL_Error, "shift exponent %0 is negative") << RHSVal; else Diag(Loc, DL_Error, "shift exponent %0 is too large for %1-bit type 
%2") << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType; } else { if (LHSVal.isNegative()) Diag(Loc, DL_Error, "left shift of negative value %0") << LHSVal; else Diag(Loc, DL_Error, "left shift of %0 by %1 places cannot be represented in type %2") << LHSVal << RHSVal << Data->LHSType; } } void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(false); handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts); } void __ubsan::__ubsan_handle_shift_out_of_bounds_abort( ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) { GET_REPORT_OPTIONS(true); handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts); Die(); } static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::OutOfBoundsIndex; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Value IndexVal(Data->IndexType, Index); Diag(Loc, DL_Error, "index %0 out of bounds for type %1") << IndexVal << Data->ArrayType; } void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data, ValueHandle Index) { GET_REPORT_OPTIONS(false); handleOutOfBoundsImpl(Data, Index, Opts); } void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data, ValueHandle Index) { GET_REPORT_OPTIONS(true); handleOutOfBoundsImpl(Data, Index, Opts); Die(); } static void handleBuiltinUnreachableImpl(UnreachableData *Data, ReportOptions Opts) { ScopedReport R(Opts, Data->Loc, ErrorType::UnreachableCall); Diag(Data->Loc, DL_Error, "execution reached a __builtin_unreachable() call"); } void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) { GET_REPORT_OPTIONS(true); handleBuiltinUnreachableImpl(Data, Opts); Die(); } static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) { ScopedReport R(Opts, Data->Loc, ErrorType::MissingReturn); Diag(Data->Loc, DL_Error, "execution reached the end of a value-returning function " "without returning a value"); } void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) { GET_REPORT_OPTIONS(true); handleMissingReturnImpl(Data, Opts); Die(); } static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::NonPositiveVLAIndex; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "variable length array bound evaluates to " "non-positive value %0") << Value(Data->Type, Bound); } void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data, ValueHandle Bound) { GET_REPORT_OPTIONS(false); handleVLABoundNotPositive(Data, Bound, Opts); } void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data, ValueHandle Bound) { GET_REPORT_OPTIONS(true); handleVLABoundNotPositive(Data, Bound, Opts); Die(); } static bool looksLikeFloatCastOverflowDataV1(void *Data) { // First field is either a pointer to filename or a pointer to a // TypeDescriptor. u8 *FilenameOrTypeDescriptor; internal_memcpy(&FilenameOrTypeDescriptor, Data, sizeof(FilenameOrTypeDescriptor)); // Heuristic: For float_cast_overflow, the TypeKind will be either TK_Integer // (0x0), TK_Float (0x1) or TK_Unknown (0xff). If both types are known, // adding both bytes will be 0 or 1 (for BE or LE). If it were a filename, // adding two printable characters will not yield such a value. Otherwise, // if one of them is 0xff, this is most likely TK_Unknown type descriptor. 
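  // For example: in the old layout the pointer read above refers to a
  // TypeDescriptor, whose leading u16 TypeKind is 0x0000 (TK_Integer) or
  // 0x0001 (TK_Float), so the two bytes sum to 0 or 1 whatever the byte order;
  // in the new layout the pointer is the source file name itself, and two
  // printable characters such as '/' (0x2f) and 't' (0x74) sum to 0xa3,
  // well above 1.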
u16 MaybeFromTypeKind = FilenameOrTypeDescriptor[0] + FilenameOrTypeDescriptor[1]; return MaybeFromTypeKind < 2 || FilenameOrTypeDescriptor[0] == 0xff || FilenameOrTypeDescriptor[1] == 0xff; } static void handleFloatCastOverflow(void *DataPtr, ValueHandle From, ReportOptions Opts) { SymbolizedStackHolder CallerLoc; Location Loc; const TypeDescriptor *FromType, *ToType; ErrorType ET = ErrorType::FloatCastOverflow; if (looksLikeFloatCastOverflowDataV1(DataPtr)) { auto Data = reinterpret_cast(DataPtr); CallerLoc.reset(getCallerLocation(Opts.pc)); Loc = CallerLoc; FromType = &Data->FromType; ToType = &Data->ToType; } else { auto Data = reinterpret_cast(DataPtr); SourceLocation SLoc = Data->Loc.acquire(); if (ignoreReport(SLoc, Opts, ET)) return; Loc = SLoc; FromType = &Data->FromType; ToType = &Data->ToType; } ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, - "value %0 is outside the range of representable values of type %2") + "%0 is outside the range of representable values of type %2") << Value(*FromType, From) << *FromType << *ToType; } void __ubsan::__ubsan_handle_float_cast_overflow(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(false); handleFloatCastOverflow(Data, From, Opts); } void __ubsan::__ubsan_handle_float_cast_overflow_abort(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(true); handleFloatCastOverflow(Data, From, Opts); Die(); } static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val, ReportOptions Opts) { SourceLocation Loc = Data->Loc.acquire(); // This check could be more precise if we used different handlers for // -fsanitize=bool and -fsanitize=enum. bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")); ErrorType ET = IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "load of value %0, which is not a valid value for type %1") << Value(Data->Type, Val) << Data->Type; } void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data, ValueHandle Val) { GET_REPORT_OPTIONS(false); handleLoadInvalidValue(Data, Val, Opts); } void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data, ValueHandle Val) { GET_REPORT_OPTIONS(true); handleLoadInvalidValue(Data, Val, Opts); Die(); } static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data, ValueHandle Function, ReportOptions Opts) { SourceLocation CallLoc = Data->Loc.acquire(); ErrorType ET = ErrorType::FunctionTypeMismatch; if (ignoreReport(CallLoc, Opts, ET)) return; ScopedReport R(Opts, CallLoc, ET); SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); const char *FName = FLoc.get()->info.function; if (!FName) FName = "(unknown)"; Diag(CallLoc, DL_Error, "call to function %0 through pointer to incorrect function type %1") << FName << Data->Type; Diag(FLoc, DL_Note, "%0 defined here") << FName; } void __ubsan::__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data, ValueHandle Function) { GET_REPORT_OPTIONS(false); handleFunctionTypeMismatch(Data, Function, Opts); } void __ubsan::__ubsan_handle_function_type_mismatch_abort( FunctionTypeMismatchData *Data, ValueHandle Function) { GET_REPORT_OPTIONS(true); handleFunctionTypeMismatch(Data, Function, Opts); Die(); } static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts, bool IsAttr) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::InvalidNullReturn; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, 
DL_Error, "null pointer returned from function declared to never " "return null"); if (!Data->AttrLoc.isInvalid()) Diag(Data->AttrLoc, DL_Note, "%0 specified here") << (IsAttr ? "returns_nonnull attribute" : "_Nonnull return type annotation"); } void __ubsan::__ubsan_handle_nonnull_return(NonNullReturnData *Data) { GET_REPORT_OPTIONS(false); handleNonNullReturn(Data, Opts, true); } void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) { GET_REPORT_OPTIONS(true); handleNonNullReturn(Data, Opts, true); Die(); } void __ubsan::__ubsan_handle_nullability_return(NonNullReturnData *Data) { GET_REPORT_OPTIONS(false); handleNonNullReturn(Data, Opts, false); } void __ubsan::__ubsan_handle_nullability_return_abort(NonNullReturnData *Data) { GET_REPORT_OPTIONS(true); handleNonNullReturn(Data, Opts, false); Die(); } static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts, bool IsAttr) { SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::InvalidNullArgument; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "null pointer passed as argument %0, which is declared to " "never be null") << Data->ArgIndex; if (!Data->AttrLoc.isInvalid()) Diag(Data->AttrLoc, DL_Note, "%0 specified here") << (IsAttr ? "nonnull attribute" : "_Nonnull type annotation"); } void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) { GET_REPORT_OPTIONS(false); handleNonNullArg(Data, Opts, true); } void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) { GET_REPORT_OPTIONS(true); handleNonNullArg(Data, Opts, true); Die(); } void __ubsan::__ubsan_handle_nullability_arg(NonNullArgData *Data) { GET_REPORT_OPTIONS(false); handleNonNullArg(Data, Opts, false); } void __ubsan::__ubsan_handle_nullability_arg_abort(NonNullArgData *Data) { GET_REPORT_OPTIONS(true); handleNonNullArg(Data, Opts, false); Die(); } static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function, ReportOptions Opts) { if (Data->CheckKind != CFITCK_ICall) Die(); SourceLocation Loc = Data->Loc.acquire(); ErrorType ET = ErrorType::CFIBadType; if (ignoreReport(Loc, Opts, ET)) return; ScopedReport R(Opts, Loc, ET); Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during " "indirect function call") << Data->Type; SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); const char *FName = FLoc.get()->info.function; if (!FName) FName = "(unknown)"; Diag(FLoc, DL_Note, "%0 defined here") << FName; } namespace __ubsan { #ifdef UBSAN_CAN_USE_CXXABI SANITIZER_WEAK_ATTRIBUTE void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts); #else static void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts) { Die(); } #endif } // namespace __ubsan void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data, ValueHandle Value, uptr ValidVtable) { GET_REPORT_OPTIONS(false); if (Data->CheckKind == CFITCK_ICall) handleCFIBadIcall(Data, Value, Opts); else HandleCFIBadType(Data, Value, ValidVtable, Opts); } void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data, ValueHandle Value, uptr ValidVtable) { GET_REPORT_OPTIONS(true); if (Data->CheckKind == CFITCK_ICall) handleCFIBadIcall(Data, Value, Opts); else HandleCFIBadType(Data, Value, ValidVtable, Opts); Die(); } #endif // CAN_SANITIZE_UB Index: vendor/compiler-rt/dist/lib/xray/xray_log_interface.cc =================================================================== --- 
vendor/compiler-rt/dist/lib/xray/xray_log_interface.cc (revision 317686) +++ vendor/compiler-rt/dist/lib/xray/xray_log_interface.cc (revision 317687) @@ -1,59 +1,69 @@ //===-- xray_log_interface.cc ---------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of XRay, a function call tracing system. // //===----------------------------------------------------------------------===// #include "xray/xray_log_interface.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_mutex.h" #include "xray/xray_interface.h" #include "xray_defs.h" #include __sanitizer::SpinMutex XRayImplMutex; std::unique_ptr GlobalXRayImpl; void __xray_set_log_impl(XRayLogImpl Impl) XRAY_NEVER_INSTRUMENT { if (Impl.log_init == nullptr || Impl.log_finalize == nullptr || Impl.handle_arg0 == nullptr || Impl.flush_log == nullptr) { __sanitizer::SpinMutexLock Guard(&XRayImplMutex); GlobalXRayImpl.reset(); + __xray_remove_handler(); + __xray_remove_handler_arg1(); return; } __sanitizer::SpinMutexLock Guard(&XRayImplMutex); GlobalXRayImpl.reset(new XRayLogImpl); *GlobalXRayImpl = Impl; + __xray_set_handler(Impl.handle_arg0); +} + +void __xray_remove_log_impl() XRAY_NEVER_INSTRUMENT { + __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + GlobalXRayImpl.reset(); + __xray_remove_handler(); + __xray_remove_handler_arg1(); } XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers, void *Args, size_t ArgsSize) XRAY_NEVER_INSTRUMENT { __sanitizer::SpinMutexLock Guard(&XRayImplMutex); if (!GlobalXRayImpl) return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; return GlobalXRayImpl->log_init(BufferSize, MaxBuffers, Args, ArgsSize); } XRayLogInitStatus __xray_log_finalize() XRAY_NEVER_INSTRUMENT { __sanitizer::SpinMutexLock Guard(&XRayImplMutex); if (!GlobalXRayImpl) return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; return GlobalXRayImpl->log_finalize(); } XRayLogFlushStatus __xray_log_flushLog() XRAY_NEVER_INSTRUMENT { __sanitizer::SpinMutexLock Guard(&XRayImplMutex); if (!GlobalXRayImpl) return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; return GlobalXRayImpl->flush_log(); } Index: vendor/compiler-rt/dist/test/asan/CMakeLists.txt =================================================================== --- vendor/compiler-rt/dist/test/asan/CMakeLists.txt (revision 317686) +++ vendor/compiler-rt/dist/test/asan/CMakeLists.txt (revision 317687) @@ -1,122 +1,171 @@ set(ASAN_LIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(ASAN_TESTSUITES) set(ASAN_DYNAMIC_TESTSUITES) # FIXME: Shadow memory for 64-bit asan easily exhausts swap on most machines. # Find a way to make these tests pass reliably, and re-enable them. 
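Stepping back to the xray_log_interface.cc hunk above, here is a minimal sketch of how a client might register and later remove a logging implementation through this interface. The field names and return values are taken from the calls visible in the diff (log_init, log_finalize, handle_arg0, flush_log, __xray_set_handler); the handle_arg0 signature is an assumption, so treat this as an illustration rather than a copy of the real header.

    #include "xray/xray_log_interface.h"

    // No-op implementation; shapes inferred from the wrapper functions shown
    // in the diff above, so this is only a sketch of the real interface.
    static XRayLogInitStatus NopInit(size_t BufferSize, size_t MaxBuffers,
                                     void *Args, size_t ArgsSize) {
      return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
    }
    static XRayLogInitStatus NopFinalize() {
      return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
    }
    // Assumed handler shape for handle_arg0.
    static void NopEntry(int32_t FuncId, XRayEntryType Type) {}
    static XRayLogFlushStatus NopFlush() {
      return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
    }

    void InstallNopXRayLog() {
      XRayLogImpl Impl;
      Impl.log_init = NopInit;
      Impl.log_finalize = NopFinalize;
      Impl.handle_arg0 = NopEntry;  // also wired into __xray_set_handler above
      Impl.flush_log = NopFlush;
      __xray_set_log_impl(Impl);    // leaving any field null unregisters instead
    }

    void UninstallXRayLog() {
      __xray_remove_log_impl();     // new entry point added by this revision
    }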
if(OS_NAME MATCHES "Windows" AND CMAKE_SIZEOF_VOID_P EQUAL 8) set(EXCLUDE_FROM_ALL TRUE) endif() macro(get_bits_for_arch arch bits) if (${arch} MATCHES "i386|i686|arm|mips|mipsel") set(${bits} 32) elseif (${arch} MATCHES "x86_64|powerpc64|powerpc64le|aarch64|mips64|mips64el|s390x") set(${bits} 64) else() message(FATAL_ERROR "Unknown target architecture: ${arch}") endif() endmacro() set(ASAN_TEST_DEPS ${SANITIZER_COMMON_LIT_TEST_DEPS}) if(NOT COMPILER_RT_STANDALONE_BUILD) list(APPEND ASAN_TEST_DEPS asan) - if(WIN32 AND COMPILER_RT_HAS_LLD) + if(NOT APPLE AND COMPILER_RT_HAS_LLD) list(APPEND ASAN_TEST_DEPS lld ) endif() endif() set(ASAN_DYNAMIC_TEST_DEPS ${ASAN_TEST_DEPS}) set(ASAN_TEST_ARCH ${ASAN_SUPPORTED_ARCH}) if(APPLE) darwin_filter_host_archs(ASAN_SUPPORTED_ARCH ASAN_TEST_ARCH) endif() foreach(arch ${ASAN_TEST_ARCH}) if(ANDROID) set(ASAN_TEST_TARGET_ARCH ${arch}-android) else() set(ASAN_TEST_TARGET_ARCH ${arch}) endif() + + set(ASAN_TEST_IOS "0") + pythonize_bool(ASAN_TEST_IOS) + set(ASAN_TEST_IOSSIM "0") + pythonize_bool(ASAN_TEST_IOSSIM) + string(TOLOWER "-${arch}-${OS_NAME}" ASAN_TEST_CONFIG_SUFFIX) get_bits_for_arch(${arch} ASAN_TEST_BITS) get_test_cc_for_arch(${arch} ASAN_TEST_TARGET_CC ASAN_TEST_TARGET_CFLAGS) if(ANDROID) set(ASAN_TEST_DYNAMIC True) else() set(ASAN_TEST_DYNAMIC False) endif() string(TOUPPER ${arch} ARCH_UPPER_CASE) set(CONFIG_NAME ${ARCH_UPPER_CASE}${OS_NAME}Config) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg ) list(APPEND ASAN_TESTSUITES ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}) if(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME) string(TOLOWER "-${arch}-${OS_NAME}-dynamic" ASAN_TEST_CONFIG_SUFFIX) set(ASAN_TEST_DYNAMIC True) set(CONFIG_NAME ${ARCH_UPPER_CASE}${OS_NAME}DynamicConfig) configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg) list(APPEND ASAN_DYNAMIC_TESTSUITES ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}) endif() endforeach() + +# iOS and iOS simulator test suites +# These are not added into "check-all", in order to run these tests, you have to +# manually call (from the build directory). 
They also require an extra env +# variable to select which iOS device or simulator to use, e.g.: +# $ SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER=BBE44C1C-8AAA-4000-8D06-91C89ED58172 +# $ ./bin/llvm-lit ./projects/compiler-rt/test/asan/IOSSimI386Config +if(APPLE) + set(ASAN_TEST_TARGET_CC ${COMPILER_RT_TEST_COMPILER}) + set(ASAN_TEST_IOS "1") + pythonize_bool(ASAN_TEST_IOS) + set(ASAN_TEST_DYNAMIC True) + + foreach(arch ${DARWIN_iossim_ARCHS}) + set(ASAN_TEST_IOSSIM "1") + pythonize_bool(ASAN_TEST_IOSSIM) + set(ASAN_TEST_TARGET_ARCH ${arch}) + set(ASAN_TEST_TARGET_CFLAGS "-arch ${arch} -isysroot ${DARWIN_iossim_SYSROOT} ${COMPILER_RT_TEST_COMPILER_CFLAGS}") + set(ASAN_TEST_CONFIG_SUFFIX "-${arch}-iossim") + get_bits_for_arch(${arch} ASAN_TEST_BITS) + string(TOUPPER ${arch} ARCH_UPPER_CASE) + set(CONFIG_NAME "IOSSim${ARCH_UPPER_CASE}Config") + configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg + ) + endforeach() + + foreach (arch ${DARWIN_ios_ARCHS}) + set(ASAN_TEST_IOSSIM "0") + pythonize_bool(ASAN_TEST_IOSSIM) + set(ASAN_TEST_TARGET_ARCH ${arch}) + set(ASAN_TEST_TARGET_CFLAGS "-arch ${arch} -isysroot ${DARWIN_ios_SYSROOT} ${COMPILER_RT_TEST_COMPILER_CFLAGS}") + set(ASAN_TEST_CONFIG_SUFFIX "-${arch}-ios") + get_bits_for_arch(${arch} ASAN_TEST_BITS) + string(TOUPPER ${arch} ARCH_UPPER_CASE) + set(CONFIG_NAME "IOS${ARCH_UPPER_CASE}Config") + configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_NAME}/lit.site.cfg + ) + endforeach() +endif() # Add unit tests.
if(OS_NAME MATCHES "Windows" AND CMAKE_SIZEOF_VOID_P EQUAL 8) set(EXCLUDE_FROM_ALL FALSE) endif() Index: vendor/compiler-rt/dist/test/asan/TestCases/coverage-levels.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/coverage-levels.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/coverage-levels.cc (nonexistent) @@ -1,31 +0,0 @@ -// Test various levels of coverage -// -// RUN: %clangxx_asan -O1 -fsanitize-coverage=func %s -o %t -// RUN: %env_asan_opts=coverage=1:coverage_bitset=1:verbosity=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK1 -// RUN: %clangxx_asan -O1 -fsanitize-coverage=bb %s -o %t -// RUN: %env_asan_opts=coverage=1:coverage_bitset=1:verbosity=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2 -// RUN: %clangxx_asan -O1 -fsanitize-coverage=edge %s -o %t -// RUN: %env_asan_opts=coverage=1:coverage_bitset=1:verbosity=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK3 -// RUN: %clangxx_asan -O1 -fsanitize-coverage=edge -mllvm -sanitizer-coverage-block-threshold=0 %s -o %t -// RUN: %env_asan_opts=coverage=1:coverage_bitset=1:verbosity=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK3 - -// RUN: %env_asan_opts=coverage=1:coverage_bitset=0:verbosity=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK3_NOBITSET -// RUN: %env_asan_opts=coverage=1:verbosity=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK3_NOBITSET -// RUN: %env_asan_opts=coverage=1:coverage_pcs=0:verbosity=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK3_NOPCS -// -// REQUIRES: asan-64-bits -// UNSUPPORTED: android -volatile int sink; -int main(int argc, char **argv) { - if (argc == 0) - sink = 0; -} - -// CHECK1: CovDump: bitset of 1 bits written for '{{.*}}', 1 bits are set -// CHECK1: 1 PCs written -// CHECK2: CovDump: bitset of 2 bits written for '{{.*}}', 1 bits are set -// CHECK2: 1 PCs written -// CHECK3: CovDump: bitset of 3 bits written for '{{.*}}', 2 bits are set -// CHECK3: 2 PCs written -// CHECK3_NOBITSET-NOT: bitset of -// CHECK3_NOPCS-NOT: PCs written Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/coverage-levels.cc ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Darwin/dead-strip.c =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Darwin/dead-strip.c (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Darwin/dead-strip.c (revision 317687) @@ -1,22 +1,23 @@ // Test that AddressSanitizer does not re-animate dead globals when dead // stripping is turned on. // // This test verifies that an out-of-bounds access on a global variable is // detected after dead stripping has been performed. This proves that the // runtime is able to register globals in the __DATA,__asan_globals section. 
// REQUIRES: osx-ld64-live_support +// UNSUPPORTED: ios // RUN: %clang_asan -mmacosx-version-min=10.11 -Xlinker -dead_strip -o %t %s // RUN: llvm-nm -format=posix %t | FileCheck --check-prefix NM-CHECK %s // RUN: not %run %t 2>&1 | FileCheck --check-prefix ASAN-CHECK %s int alive[1] = {}; int dead[1] = {}; // NM-CHECK: {{^_alive }} // NM-CHECK-NOT: {{^_dead }} int main(int argc, char *argv[]) { alive[argc] = 0; // ASAN-CHECK: {{0x.* is located 0 bytes to the right of global variable}} return 0; } Index: vendor/compiler-rt/dist/test/asan/TestCases/Darwin/dump_registers.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Darwin/dump_registers.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Darwin/dump_registers.cc (revision 317687) @@ -1,26 +1,26 @@ // Check that ASan dumps the CPU registers on a SIGSEGV. // RUN: %clangxx_asan %s -o %t // RUN: not %run %t 2>&1 | FileCheck %s #include #include int main() { fprintf(stderr, "Hello\n"); char *ptr; if (sizeof(void *) == 8) ptr = (char *)0x6666666666666666; else if (sizeof(void *) == 4) ptr = (char *)0x55555555; else assert(0 && "Your computer is weird."); char c = *ptr; // BOOM - // CHECK: ERROR: AddressSanitizer: SEGV + // CHECK: ERROR: AddressSanitizer: {{SEGV|BUS}} // CHECK: Register values: // CHECK: {{0x55555555|0x6666666666666666}} fprintf(stderr, "World\n"); return c; } Index: vendor/compiler-rt/dist/test/asan/TestCases/Darwin/reexec-insert-libraries-env.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Darwin/reexec-insert-libraries-env.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Darwin/reexec-insert-libraries-env.cc (revision 317687) @@ -1,25 +1,25 @@ // Make sure ASan doesn't hang in an exec loop if DYLD_INSERT_LIBRARIES is set. // This is a regression test for // https://code.google.com/p/address-sanitizer/issues/detail?id=159 // RUN: %clangxx_asan %s -o %t // RUN: %clangxx -DSHARED_LIB %s \ // RUN: -dynamiclib -o darwin-dummy-shared-lib-so.dylib // FIXME: the following command line may hang in the case of a regression. 
-// RUN: env DYLD_INSERT_LIBRARIES=darwin-dummy-shared-lib-so.dylib \ +// RUN: %env DYLD_INSERT_LIBRARIES=darwin-dummy-shared-lib-so.dylib \ // RUN: %run %t 2>&1 | FileCheck %s || exit 1 #if !defined(SHARED_LIB) #include #include int main() { const char kEnvName[] = "DYLD_INSERT_LIBRARIES"; printf("%s=%s\n", kEnvName, getenv(kEnvName)); // CHECK: {{DYLD_INSERT_LIBRARIES=.*darwin-dummy-shared-lib-so.dylib.*}} return 0; } #else // SHARED_LIB void foo() {} #endif // SHARED_LIB Index: vendor/compiler-rt/dist/test/asan/TestCases/Darwin/scribble.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Darwin/scribble.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Darwin/scribble.cc (revision 317687) @@ -1,58 +1,58 @@ // RUN: %clang_asan -O2 %s -o %t // RUN: %run %t 2>&1 | FileCheck --check-prefix=CHECK-NOSCRIBBLE %s -// RUN: env MallocScribble=1 MallocPreScribble=1 %run %t 2>&1 | FileCheck --check-prefix=CHECK-SCRIBBLE %s +// RUN: %env MallocScribble=1 MallocPreScribble=1 %run %t 2>&1 | FileCheck --check-prefix=CHECK-SCRIBBLE %s // RUN: %env_asan_opts=max_free_fill_size=4096 %run %t 2>&1 | FileCheck --check-prefix=CHECK-SCRIBBLE %s #include #include #include #include struct Isa { const char *class_name; }; struct MyClass { long padding; Isa *isa; long data; void print_my_class_name(); }; __attribute__((no_sanitize("address"))) void MyClass::print_my_class_name() { fprintf(stderr, "this = %p\n", this); fprintf(stderr, "padding = 0x%lx\n", this->padding); fprintf(stderr, "isa = %p\n", this->isa); if ((uint32_t)(uintptr_t)this->isa != 0x55555555) { fprintf(stderr, "class name: %s\n", this->isa->class_name); } } int main() { Isa *my_class_isa = (Isa *)malloc(sizeof(Isa)); memset(my_class_isa, 0x77, sizeof(Isa)); my_class_isa->class_name = "MyClass"; MyClass *my_object = (MyClass *)malloc(sizeof(MyClass)); memset(my_object, 0x88, sizeof(MyClass)); my_object->isa = my_class_isa; my_object->data = 42; my_object->print_my_class_name(); // CHECK-SCRIBBLE: class name: MyClass // CHECK-NOSCRIBBLE: class name: MyClass free(my_object); my_object->print_my_class_name(); // CHECK-NOSCRIBBLE: class name: MyClass // CHECK-SCRIBBLE: isa = {{(0x)?}}{{5555555555555555|55555555}} fprintf(stderr, "okthxbai!\n"); // CHECK-SCRIBBLE: okthxbai! // CHECK-NOSCRIBBLE: okthxbai! free(my_class_isa); } Index: vendor/compiler-rt/dist/test/asan/TestCases/Darwin/unset-insert-libraries-on-exec.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Darwin/unset-insert-libraries-on-exec.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Darwin/unset-insert-libraries-on-exec.cc (revision 317687) @@ -1,25 +1,25 @@ // Make sure ASan removes the runtime library from DYLD_INSERT_LIBRARIES before // executing other programs. // RUN: %clangxx_asan %s -o %t // RUN: %clangxx %p/../Helpers/echo-env.cc -o %T/echo-env // RUN: %clangxx -DSHARED_LIB %s \ // RUN: -dynamiclib -o %t-darwin-dummy-shared-lib-so.dylib // Make sure DYLD_INSERT_LIBRARIES doesn't contain the runtime library before // execl(). 
// RUN: %run %t %T/echo-env >/dev/null 2>&1 -// RUN: env DYLD_INSERT_LIBRARIES=%t-darwin-dummy-shared-lib-so.dylib \ +// RUN: %env DYLD_INSERT_LIBRARIES=%t-darwin-dummy-shared-lib-so.dylib \ // RUN: %run %t %T/echo-env 2>&1 | FileCheck %s || exit 1 #if !defined(SHARED_LIB) #include int main(int argc, char *argv[]) { execl(argv[1], argv[1], "DYLD_INSERT_LIBRARIES", NULL); // CHECK: {{DYLD_INSERT_LIBRARIES = .*darwin-dummy-shared-lib-so.dylib.*}} return 0; } #else // SHARED_LIB void foo() {} #endif // SHARED_LIB Index: vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections.cc (nonexistent) @@ -1,13 +0,0 @@ -// RUN: %clangxx_asan %s -o %t -Wl,--gc-sections -ffunction-sections -mllvm -asan-globals=0 -// RUN: %clangxx_asan %s -o %t -Wl,--gc-sections -ffunction-sections -mllvm -asan-globals=1 - -// https://code.google.com/p/address-sanitizer/issues/detail?id=260 -// XFAIL: * - -int undefined(); - -int (*unused)() = undefined; - -int main() { - return 0; -} Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections.cc ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-bfd.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-bfd.cc (nonexistent) +++ vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-bfd.cc (revision 317687) @@ -0,0 +1,18 @@ +// Test that gc-sections-friendly instrumentation of globals does not introduce +// false negatives with the BFD linker. +// RUN: %clangxx_asan -fuse-ld=bfd -Wl,-gc-sections -ffunction-sections -fdata-sections -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s + +#include +int main(int argc, char **argv) { + static char XXX[10]; + static char YYY[10]; + static char ZZZ[10]; + memset(XXX, 0, 10); + memset(YYY, 0, 10); + memset(ZZZ, 0, 10); + int res = YYY[argc * 10]; // BOOOM + // CHECK: {{READ of size 1 at}} + // CHECK: {{located 0 bytes to the right of global variable}} + res += XXX[argc] + ZZZ[argc]; + return res; +} Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-bfd.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-lld.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-lld.cc (nonexistent) +++ vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-lld.cc (revision 317687) @@ -0,0 +1,19 @@ +// Test that gc-sections-friendly instrumentation of globals does not introduce +// false negatives with the LLD linker. 
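// Put differently: the gc-sections-friendly scheme ties each global's ASan
// registration metadata to the global itself, so the linker can drop both
// together when a global is garbage-collected. The failure mode these two
// linker-specific tests guard against is the metadata of a live global being
// discarded, which would leave its redzones unpoisoned and let the
// out-of-bounds read of YYY in these tests go unreported (a false negative).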
+// RUN: %clangxx_asan -fuse-ld=lld -Wl,-gc-sections -ffunction-sections -fdata-sections -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s +// REQUIRES: lld + +#include +int main(int argc, char **argv) { + static char XXX[10]; + static char YYY[10]; + static char ZZZ[10]; + memset(XXX, 0, 10); + memset(YYY, 0, 10); + memset(ZZZ, 0, 10); + int res = YYY[argc * 10]; // BOOOM + // CHECK: {{READ of size 1 at}} + // CHECK: {{located 0 bytes to the right of global variable}} + res += XXX[argc] + ZZZ[argc]; + return res; +} Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-lld.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections-lld.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections-lld.cc (nonexistent) +++ vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections-lld.cc (revision 317687) @@ -0,0 +1,15 @@ +// RUN: %clangxx_asan %s -o %t -Wl,--gc-sections -fuse-ld=lld -ffunction-sections -fdata-sections -mllvm -asan-globals=0 +// RUN: %clangxx_asan %s -o %t -Wl,--gc-sections -fuse-ld=lld -ffunction-sections -fdata-sections -mllvm -asan-globals=1 + +// https://code.google.com/p/address-sanitizer/issues/detail?id=260 +// REQUIRES: lld + +int undefined(); + +// On i386 clang adds --export-dynamic when linking with ASan, which adds all +// non-hidden globals to GC roots. +__attribute__((visibility("hidden"))) int (*unused)() = undefined; + +int main() { + return 0; +} Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections-lld.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-fork-direct.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-fork-direct.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-fork-direct.cc (nonexistent) @@ -1,38 +0,0 @@ -// RUN: %clangxx_asan -fsanitize-coverage=func %s -o %t -// RUN: rm -rf %T/coverage-fork-direct -// RUN: mkdir -p %T/coverage-fork-direct && cd %T/coverage-fork-direct -// RUN: (%env_asan_opts=coverage=1:coverage_direct=1:verbosity=1 %run %t; \ -// RUN: %sancov rawunpack *.sancov.raw; %sancov print *.sancov) 2>&1 -// -// XFAIL: android - -#include -#include -#include - -__attribute__((noinline)) -void foo() { printf("foo\n"); } - -__attribute__((noinline)) -void bar() { printf("bar\n"); } - -__attribute__((noinline)) -void baz() { printf("baz\n"); } - -int main(int argc, char **argv) { - pid_t child_pid = fork(); - if (child_pid == 0) { - fprintf(stderr, "Child PID: %d\n", getpid()); - baz(); - } else { - fprintf(stderr, "Parent PID: %d\n", getpid()); - foo(); - bar(); - } - return 0; -} - -// CHECK-DAG: Child PID: [[ChildPID:[0-9]+]] -// CHECK-DAG: Parent PID: [[ParentPID:[0-9]+]] -// CHECK-DAG: read 3 PCs from {{.*}}.[[ParentPID]].sancov 
-// CHECK-DAG: read 1 PCs from {{.*}}.[[ChildPID]].sancov Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-fork-direct.cc ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct.cc (nonexistent) @@ -1,83 +0,0 @@ -// Test for direct coverage writing with dlopen at coverage level 1 to 3. - -// RUN: %clangxx_asan -fsanitize-coverage=func -DSHARED %s -shared -o %dynamiclib -fPIC -// RUN: %clangxx_asan -fsanitize-coverage=func %s %libdl -o %t - -// RUN: rm -rf %T/coverage-direct - -// RUN: mkdir -p %T/coverage-direct/normal -// RUN: %env_asan_opts=coverage=1:coverage_direct=0:coverage_dir=%T/coverage-direct/normal:verbosity=1 %run %t %dynamiclib -// RUN: %sancov print %T/coverage-direct/normal/*.sancov >%T/coverage-direct/normal/out.txt - -// RUN: mkdir -p %T/coverage-direct/direct -// RUN: %env_asan_opts=coverage=1:coverage_direct=1:coverage_dir=%T/coverage-direct/direct:verbosity=1 %run %t %dynamiclib -// RUN: cd %T/coverage-direct/direct -// RUN: %sancov rawunpack *.sancov.raw -// RUN: %sancov print *.sancov >out.txt -// RUN: cd ../.. - -// RUN: diff -u coverage-direct/normal/out.txt coverage-direct/direct/out.txt - - -// RUN: %clangxx_asan -fsanitize-coverage=bb -DSHARED %s -shared -o %dynamiclib -fPIC -// RUN: %clangxx_asan -fsanitize-coverage=bb -DSO_DIR=\"%T\" %s %libdl -o %t - -// RUN: rm -rf %T/coverage-direct - -// RUN: mkdir -p %T/coverage-direct/normal -// RUN: %env_asan_opts=coverage=1:coverage_direct=0:coverage_dir=%T/coverage-direct/normal:verbosity=1 %run %t %dynamiclib -// RUN: %sancov print %T/coverage-direct/normal/*.sancov >%T/coverage-direct/normal/out.txt - -// RUN: mkdir -p %T/coverage-direct/direct -// RUN: %env_asan_opts=coverage=1:coverage_direct=1:coverage_dir=%T/coverage-direct/direct:verbosity=1 %run %t %dynamiclib -// RUN: cd %T/coverage-direct/direct -// RUN: %sancov rawunpack *.sancov.raw -// RUN: %sancov print *.sancov >out.txt -// RUN: cd ../.. - -// RUN: diff -u coverage-direct/normal/out.txt coverage-direct/direct/out.txt - - -// RUN: %clangxx_asan -fsanitize-coverage=edge -DSHARED %s -shared -o %dynamiclib -fPIC -// RUN: %clangxx_asan -fsanitize-coverage=edge -DSO_DIR=\"%T\" %s %libdl -o %t - -// RUN: rm -rf %T/coverage-direct - -// RUN: mkdir -p %T/coverage-direct/normal -// RUN: %env_asan_opts=coverage=1:coverage_direct=0:coverage_dir=%T/coverage-direct/normal:verbosity=1 %run %t %dynamiclib -// RUN: %sancov print %T/coverage-direct/normal/*.sancov >%T/coverage-direct/normal/out.txt - -// RUN: mkdir -p %T/coverage-direct/direct -// RUN: %env_asan_opts=coverage=1:coverage_direct=1:coverage_dir=%T/coverage-direct/direct:verbosity=1 %run %t %dynamiclib -// RUN: cd %T/coverage-direct/direct -// RUN: %sancov rawunpack *.sancov.raw -// RUN: %sancov print *.sancov >out.txt -// RUN: cd ../.. 
- -// RUN: diff -u coverage-direct/normal/out.txt coverage-direct/direct/out.txt - -// XFAIL: android - -#include -#include -#include -#include - -#ifdef SHARED -extern "C" { -void bar() { printf("bar\n"); } -} -#else - -int main(int argc, char **argv) { - fprintf(stderr, "PID: %d\n", getpid()); - assert(argc > 1); - void *handle1 = dlopen(argv[1], RTLD_LAZY); - assert(handle1); - void (*bar1)() = (void (*)())dlsym(handle1, "bar"); - assert(bar1); - bar1(); - - return 0; -} -#endif Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct.cc ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-activation.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-activation.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-activation.cc (nonexistent) @@ -1,59 +0,0 @@ -// Test for direct coverage writing enabled at activation time. - -// RUN: %clangxx_asan -fsanitize-coverage=func -DSHARED %s -shared -o %dynamiclib -fPIC -// RUN: %clangxx -c -DSO_DIR=\"%T\" %s -o %t.o -// RUN: %clangxx_asan -fsanitize-coverage=func %t.o %libdl -o %t - -// RUN: rm -rf %T/coverage-direct-activation - -// RUN: mkdir -p %T/coverage-direct-activation/normal -// RUN: %env_asan_opts=coverage=1,coverage_direct=0,coverage_dir=%T/coverage-direct-activation/normal:verbosity=1 %run %t %dynamiclib -// RUN: %sancov print %T/coverage-direct-activation/normal/*.sancov >%T/coverage-direct-activation/normal/out.txt - -// RUN: mkdir -p %T/coverage-direct-activation/direct -// RUN: %env_asan_opts=start_deactivated=1,coverage_direct=1,verbosity=1 \ -// RUN: ASAN_ACTIVATION_OPTIONS=coverage=1,coverage_dir=%T/coverage-direct-activation/direct %run %t %dynamiclib -// RUN: cd %T/coverage-direct-activation/direct -// RUN: %sancov rawunpack *.sancov.raw -// RUN: %sancov print *.sancov >out.txt -// RUN: cd ../.. - -// Test start_deactivated=1,coverage=1 in ASAN_OPTIONS. - -// RUN: diff -u coverage-direct-activation/normal/out.txt coverage-direct-activation/direct/out.txt - -// RUN: mkdir -p %T/coverage-direct-activation/direct2 -// RUN: %env_asan_opts=start_deactivated=1,coverage=1,coverage_direct=1,verbosity=1 \ -// RUN: ASAN_ACTIVATION_OPTIONS=coverage_dir=%T/coverage-direct-activation/direct2 %run %t %dynamiclib -// RUN: cd %T/coverage-direct-activation/direct2 -// RUN: %sancov rawunpack *.sancov.raw -// RUN: %sancov print *.sancov >out.txt -// RUN: cd ../.. 
- -// RUN: diff -u coverage-direct-activation/normal/out.txt coverage-direct-activation/direct2/out.txt - -// XFAIL: android - -#include -#include -#include -#include - -#ifdef SHARED -extern "C" { -void bar() { printf("bar\n"); } -} -#else - -int main(int argc, char **argv) { - fprintf(stderr, "PID: %d\n", getpid()); - assert(argc > 1); - void *handle1 = dlopen(argv[1], RTLD_LAZY); // %dynamiclib - assert(handle1); - void (*bar1)() = (void (*)())dlsym(handle1, "bar"); - assert(bar1); - bar1(); - - return 0; -} -#endif Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-activation.cc ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-large.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-large.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-large.cc (nonexistent) @@ -1,65 +0,0 @@ -// Test for direct coverage writing with lots of data. -// Current implementation maps output file in chunks of 64K. This test overflows -// 1 chunk. - -// RUN: %clangxx_asan -fsanitize-coverage=func -O0 -DSHARED %s -shared -o %dynamiclib -fPIC -// RUN: %clangxx_asan -fsanitize-coverage=func -O0 %s %libdl -o %t - -// RUN: rm -rf %T/coverage-direct-large - -// RUN: mkdir -p %T/coverage-direct-large/normal && cd %T/coverage-direct-large/normal -// RUN: %env_asan_opts=coverage=1:coverage_direct=0:verbosity=1 %run %t %dynamiclib -// RUN: %sancov print *.sancov >out.txt -// RUN: cd ../.. - -// RUN: mkdir -p %T/coverage-direct-large/direct && cd %T/coverage-direct-large/direct -// RUN: %env_asan_opts=coverage=1:coverage_direct=1:verbosity=1 %run %t %dynamiclib -// RUN: %sancov rawunpack *.sancov.raw -// RUN: %sancov print *.sancov >out.txt -// RUN: cd ../.. 
- -// RUN: diff -u coverage-direct-large/normal/out.txt coverage-direct-large/direct/out.txt -// -// XFAIL: android - -#define F0(Q, x) Q(x) -#define F1(Q, x) \ - F0(Q, x##0) F0(Q, x##1) F0(Q, x##2) F0(Q, x##3) F0(Q, x##4) F0(Q, x##5) \ - F0(Q, x##6) F0(Q, x##7) F0(Q, x##8) F0(Q, x##9) -#define F2(Q, x) \ - F1(Q, x##0) F1(Q, x##1) F1(Q, x##2) F1(Q, x##3) F1(Q, x##4) F1(Q, x##5) \ - F1(Q, x##6) F1(Q, x##7) F1(Q, x##8) F1(Q, x##9) -#define F3(Q, x) \ - F2(Q, x##0) F2(Q, x##1) F2(Q, x##2) F2(Q, x##3) F2(Q, x##4) F2(Q, x##5) \ - F2(Q, x##6) F2(Q, x##7) F2(Q, x##8) F2(Q, x##9) -#define F4(Q, x) \ - F3(Q, x##0) F3(Q, x##1) F3(Q, x##2) F3(Q, x##3) F3(Q, x##4) F3(Q, x##5) \ - F3(Q, x##6) F3(Q, x##7) F3(Q, x##8) F3(Q, x##9) - -#define DECL(x) __attribute__((noinline)) static void x() {} -#define CALL(x) x(); - -F4(DECL, f) - -#ifdef SHARED -extern "C" void so_entry() { - F4(CALL, f) -} -#else - -#include -#include -#include -int main(int argc, char **argv) { - F4(CALL, f) - assert(argc > 1); - void *handle1 = dlopen(argv[1], RTLD_LAZY); // %dynamiclib - assert(handle1); - void (*so_entry)() = (void (*)())dlsym(handle1, "so_entry"); - assert(so_entry); - so_entry(); - - return 0; -} - -#endif // SHARED Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-large.cc ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/Posix/asan-sigbus.cpp =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Posix/asan-sigbus.cpp (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/asan-sigbus.cpp (revision 317687) @@ -1,40 +1,42 @@ // Check handle_bus flag // Defaults to true // RUN: %clangxx_asan -std=c++11 %s -o %t // RUN: not %run %t %T/file 2>&1 | FileCheck %s -check-prefix=CHECK-BUS // RUN: %env_asan_opts=handle_sigbus=false not --crash %run %t %T/file 2>&1 | FileCheck %s +// UNSUPPORTED: ios + #include #include #include #include #include #include char array[4096]; int main(int argc, char **argv) { assert(argc > 1); int fd = open(argv[1], O_RDWR | O_CREAT, 0700); if (fd < 0) { perror("open"); exit(1); } assert(write(fd, array, sizeof(array)) == sizeof(array)); // Write some zeroes to the file, then mmap it while it has a 4KiB size char *addr = (char *)mmap(nullptr, sizeof(array), PROT_READ, MAP_FILE | MAP_SHARED, fd, 0); if (addr == MAP_FAILED) { perror("mmap"); exit(1); } // Truncate the file so our memory isn't valid any more assert(ftruncate(fd, 0) == 0); // Try to access the memory return addr[42]; // CHECK-NOT: DEADLYSIGNAL // CHECK-BUS: DEADLYSIGNAL // CHECK-BUS: ERROR: AddressSanitizer: BUS } Index: vendor/compiler-rt/dist/test/asan/TestCases/Posix/current_allocated_bytes.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Posix/current_allocated_bytes.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/current_allocated_bytes.cc (revision 317687) @@ -1,44 +1,47 @@ // RUN: %clangxx_asan -O0 %s -pthread -o %t && %run %t // RUN: %clangxx_asan -O2 %s -pthread -o %t && %run %t // REQUIRES: stable-runtime +// UNSUPPORTED: powerpc64le +// FIXME: This test occasionally fails on powerpc64 LE possibly starting with 
+// r279664. Re-enable the test once the problem(s) have been fixed. #include #include #include #include #include const size_t kLargeAlloc = 1UL << 20; void* allocate(void *arg) { volatile void *ptr = malloc(kLargeAlloc); free((void*)ptr); return 0; } void* check_stats(void *arg) { assert(__sanitizer_get_current_allocated_bytes() > 0); return 0; } int main() { size_t used_mem = __sanitizer_get_current_allocated_bytes(); printf("Before: %zu\n", used_mem); const int kNumIterations = 1000; for (int iter = 0; iter < kNumIterations; iter++) { pthread_t thr[4]; for (int j = 0; j < 4; j++) { assert(0 == pthread_create(&thr[j], 0, (j < 2) ? allocate : check_stats, 0)); } for (int j = 0; j < 4; j++) assert(0 == pthread_join(thr[j], 0)); used_mem = __sanitizer_get_current_allocated_bytes(); if (used_mem > kLargeAlloc) { printf("After iteration %d: %zu\n", iter, used_mem); return 1; } } printf("Success after %d iterations\n", kNumIterations); return 0; } Index: vendor/compiler-rt/dist/test/asan/TestCases/Posix/fread_fwrite.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/Posix/fread_fwrite.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/fread_fwrite.cc (revision 317687) @@ -1,34 +1,34 @@ // RUN: %clangxx_asan -g %s -o %t -// RUN: not %t 2>&1 | FileCheck %s --check-prefix=CHECK-FWRITE -// RUN: not %t 1 2>&1 | FileCheck %s --check-prefix=CHECK-FREAD +// RUN: not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-FWRITE +// RUN: not %run %t 1 2>&1 | FileCheck %s --check-prefix=CHECK-FREAD #include #include int test_fread() { FILE *f = fopen("/dev/zero", "r"); char buf[2]; fread(buf, sizeof(buf), 2, f); // BOOM fclose(f); return 0; } int test_fwrite() { FILE *f = fopen("/dev/null", "w"); char buf[2]; fwrite(buf, sizeof(buf), 2, f); // BOOM return fclose(f); } int main(int argc, char *argv[]) { if (argc > 1) test_fread(); else test_fwrite(); return 0; } // CHECK-FREAD: {{.*ERROR: AddressSanitizer: stack-buffer-overflow}} // CHECK-FREAD: #{{.*}} in {{(wrap_|__interceptor_)?}}fread // CHECK-FWRITE: {{.*ERROR: AddressSanitizer: stack-buffer-overflow}} // CHECK-FWRITE: #{{.*}} in {{(wrap_|__interceptor_)?}}fwrite Index: vendor/compiler-rt/dist/test/asan/TestCases/coverage-disabled.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/coverage-disabled.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/coverage-disabled.cc (revision 317687) @@ -1,20 +1,15 @@ // Test that no data is collected without a runtime flag. 
// // RUN: %clangxx_asan -fsanitize-coverage=func %s -o %t // // RUN: rm -rf %T/coverage-disabled // // RUN: mkdir -p %T/coverage-disabled/normal // RUN: %env_asan_opts=coverage_direct=0:coverage_dir='"%T/coverage-disabled/normal"':verbosity=1 %run %t // RUN: not %sancov print %T/coverage-disabled/normal/*.sancov 2>&1 // -// RUN: mkdir -p %T/coverage-disabled/direct -// RUN: %env_asan_opts=coverage_direct=1:coverage_dir='"%T/coverage-disabled/direct"':verbosity=1 %run %t -// RUN: cd %T/coverage-disabled/direct -// RUN: not %sancov rawunpack *.sancov -// // UNSUPPORTED: android int main(int argc, char **argv) { return 0; } Index: vendor/compiler-rt/dist/test/asan/TestCases/initialization-bug.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/initialization-bug.cc (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/initialization-bug.cc (revision 317687) @@ -1,50 +1,51 @@ // Test to make sure basic initialization order errors are caught. // RUN: %clangxx_asan %macos_min_target_10_11 -O0 %s %p/Helpers/initialization-bug-extra2.cc -o %t-INIT-ORDER-EXE // RUN: %env_asan_opts=check_initialization_order=true not %run %t-INIT-ORDER-EXE 2>&1 | FileCheck %s // Do not test with optimization -- the error may be optimized away. // FIXME: https://code.google.com/p/address-sanitizer/issues/detail?id=186 // XFAIL: win32 // The test is expected to fail on OS X Yosemite and older // UNSUPPORTED: osx-no-ld64-live_support +// UNSUPPORTED: ios #include // The structure of the test is: // "x", "y", "z" are dynamically initialized globals. // Value of "x" depends on "y", value of "y" depends on "z". // "x" and "z" are defined in this TU, "y" is defined in another one. // Thus we should stably report the initialization order fiasco independently of // the translation unit order. int initZ() { return 5; } int z = initZ(); // 'y' is a dynamically initialized global residing in a different TU. This // dynamic initializer will read the value of 'y' before main starts. The // result is undefined behavior, which should be caught by initialization order // checking. extern int y; int __attribute__((noinline)) initX() { return y + 1; // CHECK: {{AddressSanitizer: initialization-order-fiasco}} // CHECK: {{READ of size .* at 0x.* thread T0}} // CHECK: {{0x.* is located 0 bytes inside of global variable .*(y|z).*}} // CHECK: registered at: // CHECK: 0x{{.*}} in __asan_register_globals } // This initializer begins our initialization order problems. static int x = initX(); int main() { // ASan should have caused an exit before main runs. printf("PASS\n"); // CHECK-NOT: PASS return 0; } Index: vendor/compiler-rt/dist/test/asan/TestCases/small_memcpy_test.cc =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/small_memcpy_test.cc (nonexistent) +++ vendor/compiler-rt/dist/test/asan/TestCases/small_memcpy_test.cc (revision 317687) @@ -0,0 +1,28 @@ +// Test that small memcpy works correctly.
+ +// RUN: %clangxx_asan %s -o %t +// RUN: not %run %t 8 24 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: not %run %t 16 32 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: not %run %t 24 40 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: not %run %t 32 48 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: not %run %t 40 56 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: not %run %t 48 64 2>&1 | FileCheck %s --check-prefix=CHECK +#include +#include +#include +#include + +#include + +int main(int argc, char **argv) { + assert(argc == 3); + size_t poison_from = atoi(argv[1]); + size_t poison_to = atoi(argv[2]); + assert(poison_from <= poison_to); + char A1[64], A2[64]; + fprintf(stderr, "%zd %zd\n", poison_from, poison_to - poison_from); + __asan_poison_memory_region(&A1[0] + poison_from, poison_to - poison_from); + memcpy(A1, A2, sizeof(A1)); +// CHECK: AddressSanitizer: use-after-poison + return 0; +} Property changes on: vendor/compiler-rt/dist/test/asan/TestCases/small_memcpy_test.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/asan/TestCases/strtok.c =================================================================== --- vendor/compiler-rt/dist/test/asan/TestCases/strtok.c (revision 317686) +++ vendor/compiler-rt/dist/test/asan/TestCases/strtok.c (revision 317687) @@ -1,103 +1,103 @@ // RUN: %clang_asan %s -o %t // Test overflows with strict_string_checks // RUN: %env_asan_opts=strict_string_checks=true not %run %t test1 2>&1 | \ // RUN: FileCheck %s --check-prefix=CHECK1 -// RUN: %env_asan_opts=intercept_strtok=false%run %t test1 2>&1 +// RUN: %env_asan_opts=intercept_strtok=false %run %t test1 2>&1 // RUN: %env_asan_opts=strict_string_checks=true not %run %t test2 2>&1 | \ // RUN: FileCheck %s --check-prefix=CHECK2 // RUN: %env_asan_opts=intercept_strtok=false %run %t test2 2>&1 // RUN: %env_asan_opts=strict_string_checks=true not %run %t test3 2>&1 | \ // RUN: FileCheck %s --check-prefix=CHECK3 // RUN: %env_asan_opts=intercept_strtok=false %run %t test3 2>&1 // RUN: %env_asan_opts=strict_string_checks=true %run %t test4 2>&1 // RUN: %env_asan_opts=intercept_strtok=false %run %t test4 2>&1 // Test overflows with !strict_string_checks // RUN: %env_asan_opts=strict_string_checks=false not %run %t test5 2>&1 | \ // RUN: FileCheck %s --check-prefix=CHECK5 // RUN: %env_asan_opts=intercept_strtok=false %run %t test5 2>&1 // RUN: %env_asan_opts=strict_string_checks=false not %run %t test6 2>&1 | \ // RUN: FileCheck %s --check-prefix=CHECK6 // RUN: %env_asan_opts=intercept_strtok=false %run %t test6 2>&1 #include #include #include // Check that we find overflows in the delimiters on the first call // with strict_string_checks. void test1() { char *token; char s[4] = "abc"; char token_delimiter[2] = "b"; __asan_poison_memory_region ((char *)&token_delimiter[1], 2); token = strtok(s, token_delimiter); // CHECK1: 'token_delimiter'{{.*}} <== Memory access at offset {{[0-9]+}} partially overflows this variable } // Check that we find overflows in the delimiters on the second call (str == NULL) // with strict_string_checks. 
void test2() { char *token; char s[4] = "abc"; char token_delimiter[2] = "b"; token = strtok(s, token_delimiter); assert(strcmp(token, "a") == 0); __asan_poison_memory_region ((char *)&token_delimiter[1], 2); token = strtok(NULL, token_delimiter); // CHECK2: 'token_delimiter'{{.*}} <== Memory access at offset {{[0-9]+}} partially overflows this variable } // Check that we find overflows in the string (only on the first call) with strict_string_checks. void test3() { char *token; char s[4] = "abc"; char token_delimiter[2] = "b"; __asan_poison_memory_region ((char *)&s[3], 2); token = strtok(s, token_delimiter); // CHECK3: 's'{{.*}} <== Memory access at offset {{[0-9]+}} partially overflows this variable } // Check that we do not crash when strtok returns NULL with strict_string_checks. void test4() { char *token; char s[] = ""; char token_delimiter[] = "a"; token = strtok(s, token_delimiter); assert(token == NULL); } // Check that we find overflows in the string (only on the first call) with !strict_string_checks. void test5() { char *token; char s[4] = "abc"; char token_delimiter[2] = "d"; __asan_poison_memory_region ((char *)&s[2], 2); __asan_poison_memory_region ((char *)&token_delimiter[1], 2); token = strtok(s, token_delimiter); // CHECK5: 's'{{.*}} <== Memory access at offset {{[0-9]+}} partially overflows this variable } // Check that we find overflows in the delimiters (only on the first call) with !strict_string_checks. void test6() { char *token; char s[4] = "abc"; char token_delimiter[1] = {'d'}; __asan_poison_memory_region ((char *)&token_delimiter[1], 2); token = strtok(s, &token_delimiter[1]); // CHECK6: 'token_delimiter'{{.*}} <== Memory access at offset {{[0-9]+}} overflows this variable } int main(int argc, char **argv) { if (argc != 2) return 1; if (!strcmp(argv[1], "test1")) test1(); if (!strcmp(argv[1], "test2")) test2(); if (!strcmp(argv[1], "test3")) test3(); if (!strcmp(argv[1], "test4")) test4(); if (!strcmp(argv[1], "test5")) test5(); if (!strcmp(argv[1], "test6")) test6(); return 0; } Index: vendor/compiler-rt/dist/test/asan/lit.cfg =================================================================== --- vendor/compiler-rt/dist/test/asan/lit.cfg (revision 317686) +++ vendor/compiler-rt/dist/test/asan/lit.cfg (revision 317687) @@ -1,258 +1,256 @@ # -*- Python -*- import os import platform import re import lit.formats def get_required_attr(config, attr_name): attr_value = getattr(config, attr_name, None) if attr_value == None: lit_config.fatal( "No attribute %r in test configuration! You may need to run " "tests from your build directory or add this attribute " "to lit.site.cfg " % attr_name) return attr_value def push_dynamic_library_lookup_path(config, new_path): if platform.system() == 'Windows': dynamic_library_lookup_var = 'PATH' elif platform.system() == 'Darwin': dynamic_library_lookup_var = 'DYLD_LIBRARY_PATH' else: dynamic_library_lookup_var = 'LD_LIBRARY_PATH' new_ld_library_path = os.path.pathsep.join( (new_path, config.environment.get(dynamic_library_lookup_var, ''))) config.environment[dynamic_library_lookup_var] = new_ld_library_path # Setup config name. config.name = 'AddressSanitizer' + config.name_suffix # Platform-specific default ASAN_OPTIONS for lit tests. default_asan_opts = '' if config.host_os == 'Darwin': # On Darwin, we default to `abort_on_error=1`, which would make tests run # much slower. Let's override this and run lit tests with 'abort_on_error=0'. # Also, make sure we do not overwhelm the syslog while testing. 
default_asan_opts = 'abort_on_error=0' default_asan_opts += ':log_to_syslog=0' elif config.android: # The same as on Darwin, we default to "abort_on_error=1" which slows down # testing. Also, all existing tests are using "not" instead of "not --crash" # which does not work for abort()-terminated programs. default_asan_opts = 'abort_on_error=0' if default_asan_opts: config.environment['ASAN_OPTIONS'] = default_asan_opts default_asan_opts += ':' config.substitutions.append(('%env_asan_opts=', 'env ASAN_OPTIONS=' + default_asan_opts)) # Setup source root. config.test_source_root = os.path.dirname(__file__) # There is no libdl on FreeBSD. if config.host_os != 'FreeBSD': libdl_flag = "-ldl" else: libdl_flag = "" # GCC-ASan doesn't link in all the necessary libraries automatically, so # we have to do it ourselves. if config.compiler_id == 'GNU': extra_link_flags = ["-pthread", "-lstdc++", libdl_flag] else: extra_link_flags = [] # BFD linker in 64-bit android toolchains fails to find libm.so, which is a # transitive shared library dependency (via asan runtime). if config.android: extra_link_flags += ["-lm"] # Setup default compiler flags used with -fsanitize=address option. # FIXME: Review the set of required flags and check if it can be reduced. target_cflags = [get_required_attr(config, "target_cflags")] + extra_link_flags target_cxxflags = config.cxx_mode_flags + target_cflags clang_asan_static_cflags = (["-fsanitize=address", "-mno-omit-leaf-frame-pointer", "-fno-omit-frame-pointer", "-fno-optimize-sibling-calls"] + config.debug_info_flags + target_cflags) if config.target_arch == 's390x': clang_asan_static_cflags.append("-mbackchain") clang_asan_static_cxxflags = config.cxx_mode_flags + clang_asan_static_cflags asan_dynamic_flags = [] if config.asan_dynamic: asan_dynamic_flags = ["-shared-libasan"] # On Windows, we need to simulate "clang-cl /MD" on the clang driver side. if platform.system() == 'Windows': asan_dynamic_flags += ["-D_MT", "-D_DLL", "-Wl,-nodefaultlib:libcmt,-defaultlib:msvcrt,-defaultlib:oldnames"] config.available_features.add("asan-dynamic-runtime") else: config.available_features.add("asan-static-runtime") clang_asan_cflags = clang_asan_static_cflags + asan_dynamic_flags clang_asan_cxxflags = clang_asan_static_cxxflags + asan_dynamic_flags # Add win32-(static|dynamic)-asan features to mark tests as passing or failing # in those modes. lit doesn't support logical feature test combinations. if platform.system() == 'Windows': if config.asan_dynamic: win_runtime_feature = "win32-dynamic-asan" else: win_runtime_feature = "win32-static-asan" config.available_features.add(win_runtime_feature) asan_lit_source_dir = get_required_attr(config, "asan_lit_source_dir") if config.android == "1": config.available_features.add('android') - clang_wrapper = os.path.join(asan_lit_source_dir, - "android_commands", "android_compile.py") + " " + compile_wrapper = os.path.join(asan_lit_source_dir, "android_commands", "android_compile.py") + " " else: config.available_features.add('not-android') - clang_wrapper = "" def build_invocation(compile_flags): - return " " + " ".join([clang_wrapper, config.clang] + compile_flags) + " " + return " " + " ".join([config.compile_wrapper, config.clang] + compile_flags) + " " # Clang driver link 'x86' (i686) architecture to 'i386'. 
target_arch = config.target_arch if (target_arch == "i686"): target_arch = "i386" config.substitutions.append( ("%clang ", build_invocation(target_cflags)) ) config.substitutions.append( ("%clangxx ", build_invocation(target_cxxflags)) ) config.substitutions.append( ("%clang_asan ", build_invocation(clang_asan_cflags)) ) config.substitutions.append( ("%clangxx_asan ", build_invocation(clang_asan_cxxflags)) ) config.substitutions.append( ("%shared_libasan", "libclang_rt.asan-%s.so" % target_arch)) if config.asan_dynamic: config.substitutions.append( ("%clang_asan_static ", build_invocation(clang_asan_static_cflags)) ) config.substitutions.append( ("%clangxx_asan_static ", build_invocation(clang_asan_static_cxxflags)) ) # Windows-specific tests might also use the clang-cl.exe driver. if platform.system() == 'Windows': clang_cl_cxxflags = ["-Wno-deprecated-declarations", "-WX", "-D_HAS_EXCEPTIONS=0", "-Zi"] + target_cflags clang_cl_asan_cxxflags = ["-fsanitize=address"] + clang_cl_cxxflags if config.asan_dynamic: clang_cl_asan_cxxflags.append("-MD") clang_cl_invocation = build_invocation(clang_cl_cxxflags) clang_cl_invocation = clang_cl_invocation.replace("clang.exe","clang-cl.exe") config.substitutions.append( ("%clang_cl ", clang_cl_invocation) ) clang_cl_asan_invocation = build_invocation(clang_cl_asan_cxxflags) clang_cl_asan_invocation = clang_cl_asan_invocation.replace("clang.exe","clang-cl.exe") config.substitutions.append( ("%clang_cl_asan ", clang_cl_asan_invocation) ) base_lib = os.path.join(config.compiler_rt_libdir, "clang_rt.asan%%s-%s.lib" % config.target_arch) config.substitutions.append( ("%asan_lib", base_lib % "") ) config.substitutions.append( ("%asan_cxx_lib", base_lib % "_cxx") ) config.substitutions.append( ("%asan_dll_thunk", base_lib % "_dll_thunk") ) if platform.system() == 'Windows': # Don't use -std=c++11 on Windows, as the driver will detect the appropriate # default needed to use with the STL. config.substitutions.append(("%stdcxx11 ", "")) else: # Some tests uses C++11 features such as lambdas and need to pass -std=c++11. config.substitutions.append(("%stdcxx11 ", "-std=c++11 ")) # FIXME: De-hardcode this path. asan_source_dir = os.path.join( get_required_attr(config, "compiler_rt_src_root"), "lib", "asan") # Setup path to asan_symbolize.py script. asan_symbolize = os.path.join(asan_source_dir, "scripts", "asan_symbolize.py") if not os.path.exists(asan_symbolize): lit_config.fatal("Can't find script on path %r" % asan_symbolize) python_exec = get_required_attr(config, "python_executable") config.substitutions.append( ("%asan_symbolize", python_exec + " " + asan_symbolize + " ") ) # Setup path to sancov.py script. 
sanitizer_common_source_dir = os.path.join( get_required_attr(config, "compiler_rt_src_root"), "lib", "sanitizer_common") sancov = os.path.join(sanitizer_common_source_dir, "scripts", "sancov.py") if not os.path.exists(sancov): lit_config.fatal("Can't find script on path %r" % sancov) python_exec = get_required_attr(config, "python_executable") config.substitutions.append( ("%sancov ", python_exec + " " + sancov + " ") ) # Determine kernel bitness if config.host_arch.find('64') != -1 and config.android != "1": kernel_bits = '64' else: kernel_bits = '32' config.substitutions.append( ('CHECK-%kernel_bits', ("CHECK-kernel-" + kernel_bits + "-bits"))) config.substitutions.append( ("%libdl", libdl_flag) ) config.available_features.add("asan-" + config.bits + "-bits") if config.host_os == 'Darwin': config.substitutions.append( ("%ld_flags_rpath_exe", '-Wl,-rpath,@executable_path/ %dynamiclib') ) config.substitutions.append( ("%ld_flags_rpath_so", '-install_name @rpath/`basename %dynamiclib`') ) elif config.host_os == 'FreeBSD': config.substitutions.append( ("%ld_flags_rpath_exe", "-Wl,-z,origin -Wl,-rpath,\$ORIGIN -L%T -l%xdynamiclib_namespec") ) config.substitutions.append( ("%ld_flags_rpath_so", '') ) elif config.host_os == 'Linux': config.substitutions.append( ("%ld_flags_rpath_exe", "-Wl,-rpath,\$ORIGIN -L%T -l%xdynamiclib_namespec") ) config.substitutions.append( ("%ld_flags_rpath_so", '') ) # Must be defined after the substitutions that use %dynamiclib. config.substitutions.append( ("%dynamiclib", '%T/%xdynamiclib_filename') ) config.substitutions.append( ("%xdynamiclib_filename", 'lib%xdynamiclib_namespec.so') ) config.substitutions.append( ("%xdynamiclib_namespec", '%basename_t.dynamic') ) # Allow tests to use REQUIRES=stable-runtime. For use when you cannot use XFAIL # because the test hangs. Adding armhf as we now have two modes. if config.target_arch != 'arm' and config.target_arch != 'armhf' and config.target_arch != 'aarch64': config.available_features.add('stable-runtime') # Fast unwinder doesn't work with Thumb if re.search('mthumb', config.target_cflags) is not None: config.available_features.add('fast-unwinder-works') # Turn on leak detection on 64-bit Linux. if config.host_os == 'Linux' and (config.target_arch == 'x86_64' or config.target_arch == 'i386'): config.available_features.add('leak-detection') # Set LD_LIBRARY_PATH to pick dynamic runtime up properly. push_dynamic_library_lookup_path(config, config.compiler_rt_libdir) # GCC-ASan uses dynamic runtime by default. if config.compiler_id == 'GNU': gcc_dir = os.path.dirname(config.clang) libasan_dir = os.path.join(gcc_dir, "..", "lib" + config.bits) push_dynamic_library_lookup_path(config, libasan_dir) # Add the RT libdir to PATH directly so that we can successfully run the gtest # binary to list its tests. if config.host_os == 'Windows' and config.asan_dynamic: os.environ['PATH'] = os.path.pathsep.join([config.compiler_rt_libdir, os.environ.get('PATH', '')]) # Default test suffixes. config.suffixes = ['.c', '.cc', '.cpp'] if config.host_os == 'Darwin': config.suffixes.append('.mm') if config.host_os == 'Windows': config.substitutions.append(('%fPIC', '')) config.substitutions.append(('%fPIE', '')) config.substitutions.append(('%pie', '')) else: config.substitutions.append(('%fPIC', '-fPIC')) config.substitutions.append(('%fPIE', '-fPIE')) config.substitutions.append(('%pie', '-pie')) # Only run the tests on supported OSs. 
if config.host_os not in ['Linux', 'Darwin', 'FreeBSD', 'Windows']: config.unsupported = True if config.host_os == 'Darwin' and config.target_arch in ["x86_64", "x86_64h"]: config.parallelism_group = "darwin-64bit-sanitizer" Index: vendor/compiler-rt/dist/test/asan/lit.site.cfg.in =================================================================== --- vendor/compiler-rt/dist/test/asan/lit.site.cfg.in (revision 317686) +++ vendor/compiler-rt/dist/test/asan/lit.site.cfg.in (revision 317687) @@ -1,17 +1,19 @@ @LIT_SITE_CFG_IN_HEADER@ # Tool-specific config options. config.name_suffix = "@ASAN_TEST_CONFIG_SUFFIX@" config.asan_lit_source_dir = "@ASAN_LIT_SOURCE_DIR@" config.target_cflags = "@ASAN_TEST_TARGET_CFLAGS@" config.clang = "@ASAN_TEST_TARGET_CC@" config.bits = "@ASAN_TEST_BITS@" config.android = "@ANDROID@" +config.ios = @ASAN_TEST_IOS_PYBOOL@ +config.iossim = @ASAN_TEST_IOSSIM_PYBOOL@ config.asan_dynamic = @ASAN_TEST_DYNAMIC@ config.target_arch = "@ASAN_TEST_TARGET_ARCH@" # Load common config for all compiler-rt lit tests. lit_config.load_config(config, "@COMPILER_RT_BINARY_DIR@/test/lit.common.configured") # Load tool-specific config that would do the real work. lit_config.load_config(config, "@ASAN_LIT_SOURCE_DIR@/lit.cfg") Index: vendor/compiler-rt/dist/test/lit.common.cfg =================================================================== --- vendor/compiler-rt/dist/test/lit.common.cfg (revision 317686) +++ vendor/compiler-rt/dist/test/lit.common.cfg (revision 317687) @@ -1,254 +1,273 @@ # -*- Python -*- # Configuration file for 'lit' test runner. # This file contains common rules for various compiler-rt testsuites. # It is mostly copied from lit.cfg used by Clang. import os import platform import re import subprocess import lit.formats import lit.util # Choose between lit's internal shell pipeline runner and a real shell. If # LIT_USE_INTERNAL_SHELL is in the environment, we use that as an override. use_lit_shell = os.environ.get("LIT_USE_INTERNAL_SHELL") if use_lit_shell: # 0 is external, "" is default, and everything else is internal. execute_external = (use_lit_shell == "0") else: # Otherwise we default to internal on Windows and external elsewhere, as # bash on Windows is usually very slow. execute_external = (not sys.platform in ['win32']) # Setup test format. config.test_format = lit.formats.ShTest(execute_external) if execute_external: config.available_features.add('shell') # Setup clang binary. compiler_path = getattr(config, 'clang', None) if (not compiler_path) or (not os.path.exists(compiler_path)): lit_config.fatal("Can't find compiler on path %r" % compiler_path) compiler_id = getattr(config, 'compiler_id', None) if compiler_id == "Clang": if platform.system() != 'Windows': config.cxx_mode_flags = ["--driver-mode=g++"] else: config.cxx_mode_flags = [] # We assume that sanitizers should provide good enough error # reports and stack traces even with minimal debug info. config.debug_info_flags = ["-gline-tables-only"] if platform.system() == 'Windows': config.debug_info_flags.append("-gcodeview") elif compiler_id == 'GNU': config.cxx_mode_flags = ["-x c++"] config.debug_info_flags = ["-g"] else: lit_config.fatal("Unsupported compiler id: %r" % compiler_id) # Add compiler ID to the list of available features. config.available_features.add(compiler_id) # Clear some environment variables that might affect Clang. 
possibly_dangerous_env_vars = ['ASAN_OPTIONS', 'DFSAN_OPTIONS', 'LSAN_OPTIONS', 'MSAN_OPTIONS', 'UBSAN_OPTIONS', 'COMPILER_PATH', 'RC_DEBUG_OPTIONS', 'CINDEXTEST_PREAMBLE_FILE', 'LIBRARY_PATH', 'CPATH', 'C_INCLUDE_PATH', 'CPLUS_INCLUDE_PATH', 'OBJC_INCLUDE_PATH', 'OBJCPLUS_INCLUDE_PATH', 'LIBCLANG_TIMING', 'LIBCLANG_OBJTRACKING', 'LIBCLANG_LOGGING', 'LIBCLANG_BGPRIO_INDEX', 'LIBCLANG_BGPRIO_EDIT', 'LIBCLANG_NOTHREADS', 'LIBCLANG_RESOURCE_USAGE', 'LIBCLANG_CODE_COMPLETION_LOGGING', 'XRAY_OPTIONS'] # Clang/Win32 may refer to %INCLUDE%. vsvarsall.bat sets it. if platform.system() != 'Windows': possibly_dangerous_env_vars.append('INCLUDE') for name in possibly_dangerous_env_vars: if name in config.environment: del config.environment[name] # Tweak PATH to include llvm tools dir. llvm_tools_dir = getattr(config, 'llvm_tools_dir', None) if (not llvm_tools_dir) or (not os.path.exists(llvm_tools_dir)): lit_config.fatal("Invalid llvm_tools_dir config attribute: %r" % llvm_tools_dir) path = os.path.pathsep.join((llvm_tools_dir, config.environment['PATH'])) config.environment['PATH'] = path # Help MSVS link.exe find the standard libraries. # Make sure we only try to use it when targetting Windows. if platform.system() == 'Windows' and '-win' in config.target_triple: config.environment['LIB'] = os.environ['LIB'] if re.match(r'^x86_64.*-linux', config.target_triple): config.available_features.add("x86_64-linux") # Use ugly construction to explicitly prohibit "clang", "clang++" etc. # in RUN lines. config.substitutions.append( (' clang', """\n\n*** Do not use 'clangXXX' in tests, instead define '%clangXXX' substitution in lit config. ***\n\n""") ) # Allow tests to be executed on a simulator or remotely. -config.substitutions.append( ('%run', config.emulator) ) +if config.emulator: + config.substitutions.append( ('%run', config.emulator) ) + config.substitutions.append( ('%env ', "env ") ) + config.compile_wrapper = "" +elif config.ios: + config.available_features.add('ios') + + device_id_env = "SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER" if config.iossim else "SANITIZER_IOS_TEST_DEVICE_IDENTIFIER" + if device_id_env in os.environ: config.environment[device_id_env] = os.environ[device_id_env] + ios_commands_dir = os.path.join(config.compiler_rt_src_root, "test", "sanitizer_common", "ios_commands") + run_wrapper = os.path.join(ios_commands_dir, "iossim_run.py" if config.iossim else "ios_run.py") + config.substitutions.append(('%run', run_wrapper)) + env_wrapper = os.path.join(ios_commands_dir, "iossim_env.py" if config.iossim else "ios_env.py") + config.substitutions.append(('%env ', env_wrapper + " ")) + compile_wrapper = os.path.join(ios_commands_dir, "iossim_compile.py" if config.iossim else "ios_compile.py") + config.compile_wrapper = compile_wrapper +else: + config.substitutions.append( ('%run', "") ) + config.substitutions.append( ('%env ', "env ") ) + config.compile_wrapper = "" # Define CHECK-%os to check for OS-dependent output. config.substitutions.append( ('CHECK-%os', ("CHECK-" + config.host_os))) # Define %arch to check for architecture-dependent output. config.substitutions.append( ('%arch', (config.host_arch))) if config.host_os == 'Windows': # FIXME: This isn't quite right. Specifically, it will succeed if the program # does not crash but exits with a non-zero exit code. We ought to merge # KillTheDoctor and not --crash to make the latter more useful and remove the # need for this substitution. 
config.expect_crash = "not KillTheDoctor " else: config.expect_crash = "not --crash " config.substitutions.append( ("%expect_crash ", config.expect_crash) ) target_arch = getattr(config, 'target_arch', None) if target_arch: config.available_features.add(target_arch + '-target-arch') if target_arch in ['x86_64', 'i386', 'i686']: config.available_features.add('x86-target-arch') config.available_features.add(target_arch + '-' + config.host_os.lower()) compiler_rt_debug = getattr(config, 'compiler_rt_debug', False) if not compiler_rt_debug: config.available_features.add('compiler-rt-optimized') sanitizer_can_use_cxxabi = getattr(config, 'sanitizer_can_use_cxxabi', True) if sanitizer_can_use_cxxabi: config.available_features.add('cxxabi') if config.has_lld: config.available_features.add('lld-available') if config.use_lld: config.available_features.add('lld') if config.can_symbolize: config.available_features.add('can-symbolize') lit.util.usePlatformSdkOnDarwin(config, lit_config) if config.host_os == 'Darwin': osx_version = (10, 0, 0) try: osx_version = subprocess.check_output(["sw_vers", "-productVersion"]) osx_version = tuple(int(x) for x in osx_version.split('.')) if len(osx_version) == 2: osx_version = (osx_version[0], osx_version[1], 0) if osx_version >= (10, 11): config.available_features.add('osx-autointerception') config.available_features.add('osx-ld64-live_support') else: # The ASAN initialization-bug.cc test should XFAIL on OS X systems # older than El Capitan. By marking the test as being unsupported with # this "feature", we can pass the test on newer OS X versions and other # platforms. config.available_features.add('osx-no-ld64-live_support') except: pass config.darwin_osx_version = osx_version # Detect x86_64h try: output = subprocess.check_output(["sysctl", "hw.cpusubtype"]) output_re = re.match("^hw.cpusubtype: ([0-9]+)$", output) if output_re: cpu_subtype = int(output_re.group(1)) if cpu_subtype == 8: # x86_64h config.available_features.add('x86_64h') except: pass config.substitutions.append( ("%macos_min_target_10_11", "-mmacosx-version-min=10.11") ) else: config.substitutions.append( ("%macos_min_target_10_11", "") ) sancovcc_path = os.path.join(llvm_tools_dir, "sancov") if os.path.exists(sancovcc_path): config.available_features.add("has_sancovcc") config.substitutions.append( ("%sancovcc ", sancovcc_path) ) def is_darwin_lto_supported(): return os.path.exists(os.path.join(config.llvm_shlib_dir, 'libLTO.dylib')) def is_linux_lto_supported(): if config.use_lld: return True if not os.path.exists(os.path.join(config.llvm_shlib_dir, 'LLVMgold.so')): return False ld_cmd = subprocess.Popen([config.gold_executable, '--help'], stdout = subprocess.PIPE, env={'LANG': 'C'}) ld_out = ld_cmd.stdout.read().decode() ld_cmd.wait() if not '-plugin' in ld_out: return False return True def is_windows_lto_supported(): return os.path.exists(os.path.join(config.llvm_tools_dir, 'lld-link.exe')) if config.host_os == 'Darwin' and is_darwin_lto_supported(): config.lto_supported = True config.lto_launch = ["env", "DYLD_LIBRARY_PATH=" + config.llvm_shlib_dir] config.lto_flags = [] elif config.host_os == 'Linux' and is_linux_lto_supported(): config.lto_supported = True config.lto_launch = [] if config.use_lld: config.lto_flags = ["-fuse-ld=lld"] else: config.lto_flags = ["-fuse-ld=gold"] elif config.host_os == 'Windows' and is_windows_lto_supported(): config.lto_supported = True config.lto_launch = [] # FIXME: Remove -nopdb when PDB writing is ready. 
config.lto_flags = ["-fuse-ld=lld -Wl,-nopdb"] else: config.lto_supported = False if config.lto_supported: config.available_features.add('lto') if config.use_thinlto: config.available_features.add('thinlto') config.lto_flags += ["-flto=thin"] else: config.lto_flags += ["-flto"] # Ask llvm-config about assertion mode. try: llvm_config_cmd = subprocess.Popen( [os.path.join(config.llvm_tools_dir, 'llvm-config'), '--assertion-mode'], stdout = subprocess.PIPE, env=config.environment) except OSError: print("Could not find llvm-config in " + llvm_tools_dir) exit(42) if re.search(r'ON', llvm_config_cmd.stdout.read().decode('ascii')): config.available_features.add('asserts') llvm_config_cmd.wait() # Sanitizer tests tend to be flaky on Windows due to PR24554, so add some # retries. We don't do this on otther platforms because it's slower. if platform.system() == 'Windows': config.test_retry_attempts = 2 # Only run up to 3 64-bit sanitized processes simultaneously on Darwin. # Using more scales badly and hogs the system due to inefficient handling # of large mmap'd regions (terabytes) by the kernel. if platform.system() == 'Darwin': lit_config.parallelism_groups["darwin-64bit-sanitizer"] = 3 Index: vendor/compiler-rt/dist/test/lit.common.configured.in =================================================================== --- vendor/compiler-rt/dist/test/lit.common.configured.in (revision 317686) +++ vendor/compiler-rt/dist/test/lit.common.configured.in (revision 317687) @@ -1,44 +1,46 @@ @LIT_SITE_CFG_IN_HEADER@ # Set attribute value if it is unset. def set_default(attr, value): if not getattr(config, attr, None): setattr(config, attr, value) # Generic config options for all compiler-rt lit tests. set_default("target_triple", "@COMPILER_RT_DEFAULT_TARGET_TRIPLE@") set_default("target_cflags", "@COMPILER_RT_TEST_COMPILER_CFLAGS@") set_default("host_arch", "@HOST_ARCH@") set_default("target_arch", "@COMPILER_RT_DEFAULT_TARGET_ARCH@") set_default("host_os", "@HOST_OS@") set_default("llvm_build_mode", "@LLVM_BUILD_MODE@") set_default("llvm_src_root", "@LLVM_MAIN_SRC_DIR@") set_default("llvm_obj_root", "@LLVM_BINARY_DIR@") set_default("compiler_rt_src_root", "@COMPILER_RT_SOURCE_DIR@") set_default("compiler_rt_obj_root", "@COMPILER_RT_BINARY_DIR@") set_default("llvm_tools_dir", "@LLVM_TOOLS_BINARY_DIR@") set_default("llvm_shlib_dir", "@LLVM_LIBRARY_OUTPUT_INTDIR@") set_default("gold_executable", "@GOLD_EXECUTABLE@") set_default("clang", "@COMPILER_RT_TEST_COMPILER@") set_default("compiler_id", "@COMPILER_RT_TEST_COMPILER_ID@") set_default("python_executable", "@PYTHON_EXECUTABLE@") set_default("compiler_rt_debug", @COMPILER_RT_DEBUG_PYBOOL@) set_default("compiler_rt_libdir", "@COMPILER_RT_LIBRARY_OUTPUT_DIR@") set_default("emulator", "@COMPILER_RT_EMULATOR@") +set_default("ios", False) +set_default("iossim", False) set_default("sanitizer_can_use_cxxabi", @SANITIZER_CAN_USE_CXXABI_PYBOOL@) set_default("has_lld", @COMPILER_RT_HAS_LLD_PYBOOL@) set_default("can_symbolize", @CAN_SYMBOLIZE@) set_default("use_lld", False) set_default("use_thinlto", False) config.available_features.add('target-is-%s' % config.target_arch) # LLVM tools dir can be passed in lit parameters, so try to # apply substitution. try: config.llvm_tools_dir = config.llvm_tools_dir % lit_config.params except KeyError as e: key, = e.args lit_config.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)) # Setup attributes common for all compiler-rt projects. 
lit_config.load_config(config, "@COMPILER_RT_SOURCE_DIR@/test/lit.common.cfg") Index: vendor/compiler-rt/dist/test/sanitizer_common/TestCases/sanitizer_coverage_symbolize.cc =================================================================== --- vendor/compiler-rt/dist/test/sanitizer_common/TestCases/sanitizer_coverage_symbolize.cc (revision 317686) +++ vendor/compiler-rt/dist/test/sanitizer_common/TestCases/sanitizer_coverage_symbolize.cc (revision 317687) @@ -1,34 +1,28 @@ // Tests trace pc guard coverage collection. // // REQUIRES: x86_64-linux // XFAIL: tsan // // RUN: DIR=%t_workdir // RUN: rm -rf $DIR // RUN: mkdir -p $DIR // RUN: cd $DIR // RUN: %clangxx -O0 -fsanitize-coverage=trace-pc-guard %s -ldl -o %t // RUN: %env_tool_opts=coverage=1 %t 2>&1 | FileCheck %s -// RUN: %env_tool_opts=coverage=1 SANCOV_OPTIONS=symbolize=0 %t 2>&1 | FileCheck %s --check-prefix=CHECK-NOSYM // RUN: rm -rf $DIR #include int foo() { fprintf(stderr, "foo\n"); return 1; } int main() { fprintf(stderr, "main\n"); foo(); foo(); } // CHECK: main // CHECK: SanitizerCoverage: ./sanitizer_coverage_symbolize.{{.*}}.sancov 2 PCs written -// CHECK: call sancov - -// CHECK-NOSYM: main -// CHECK-NOSYM: SanitizerCoverage: ./sanitizer_coverage_symbolize.{{.*}}.sancov 2 PCs written -// CHECK-NOSYM-NOT: call sancov Index: vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_compile.py =================================================================== --- vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_compile.py (nonexistent) +++ vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_compile.py (revision 317687) @@ -0,0 +1,32 @@ +#!/usr/bin/python + +import os, sys, subprocess + +output = None +output_type = 'executable' + +args = sys.argv[1:] +while args: + arg = args.pop(0) + if arg == '-shared': + output_type = 'shared' + elif arg == '-dynamiclib': + output_type = 'dylib' + elif arg == '-c': + output_type = 'object' + elif arg == '-S': + output_type = 'assembly' + elif arg == '-o': + output = args.pop(0) + +if output == None: + print "No output file name!" + sys.exit(1) + +ret = subprocess.call(sys.argv[1:]) +if ret != 0: + sys.exit(ret) + +# If we produce a dylib, ad-hoc sign it. 
+if output_type in ['shared', 'dylib']: + ret = subprocess.call(["codesign", "-s", "-", output]) Property changes on: vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_compile.py ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_env.py =================================================================== --- vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_env.py (nonexistent) +++ vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_env.py (revision 317687) @@ -0,0 +1,17 @@ +#!/usr/bin/python + +import os, sys, subprocess + + +idx = 1 +for arg in sys.argv[1:]: + if not "=" in arg: + break + idx += 1 + (argname, argval) = arg.split("=") + os.environ["SIMCTL_CHILD_" + argname] = argval + +exitcode = subprocess.call(sys.argv[idx:]) +if exitcode > 125: + exitcode = 126 +sys.exit(exitcode) Property changes on: vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_env.py ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_run.py =================================================================== --- vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_run.py (nonexistent) +++ vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_run.py (revision 317687) @@ -0,0 +1,17 @@ +#!/usr/bin/python + +import os, sys, subprocess + + +if not "SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER" in os.environ: + raise EnvironmentError("Specify SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER to select which simulator to use.") + +device_id = os.environ["SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER"] + +if "ASAN_OPTIONS" in os.environ: + os.environ["SIMCTL_CHILD_ASAN_OPTIONS"] = os.environ["ASAN_OPTIONS"] + +exitcode = subprocess.call(["xcrun", "simctl", "spawn", device_id] + sys.argv[1:]) +if exitcode > 125: + exitcode = 126 +sys.exit(exitcode) Property changes on: vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_run.py ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/compiler-rt/dist/test/tsan/Darwin/xpc-cancel.mm =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/xpc-cancel.mm (revision 317686) +++ vendor/compiler-rt/dist/test/tsan/Darwin/xpc-cancel.mm (revision 317687) @@ -1,37 +1,39 @@ // RUN: %clang_tsan %s -o %t -framework Foundation // RUN: %run %t 2>&1 | FileCheck %s +// XFAIL: ios + #import #import long global; int main(int argc, const char *argv[]) { fprintf(stderr, 
"Hello world.\n"); dispatch_queue_t server_q = dispatch_queue_create("server.queue", DISPATCH_QUEUE_CONCURRENT); xpc_connection_t server_conn = xpc_connection_create(NULL, server_q); xpc_connection_set_event_handler(server_conn, ^(xpc_object_t client) { if (client == XPC_ERROR_CONNECTION_INTERRUPTED || client == XPC_ERROR_CONNECTION_INVALID) { global = 43; dispatch_async(dispatch_get_main_queue(), ^{ CFRunLoopStop(CFRunLoopGetCurrent()); }); } }); xpc_connection_resume(server_conn); global = 42; xpc_connection_cancel(server_conn); CFRunLoopRun(); fprintf(stderr, "Done.\n"); } // CHECK: Hello world. // CHECK-NOT: WARNING: ThreadSanitizer // CHECK: Done. Index: vendor/compiler-rt/dist/test/tsan/Darwin/xpc-race.mm =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/xpc-race.mm (revision 317686) +++ vendor/compiler-rt/dist/test/tsan/Darwin/xpc-race.mm (revision 317687) @@ -1,81 +1,83 @@ // RUN: %clang_tsan %s -o %t -framework Foundation // RUN: %deflake %run %t 2>&1 | FileCheck %s +// XFAIL: ios + #import #import #import "../test.h" long global; long received_msgs; xpc_connection_t server_conn; xpc_connection_t client_conns[2]; int main(int argc, const char *argv[]) { @autoreleasepool { NSLog(@"Hello world."); barrier_init(&barrier, 2); dispatch_queue_t server_q = dispatch_queue_create("server.queue", DISPATCH_QUEUE_CONCURRENT); server_conn = xpc_connection_create(NULL, server_q); xpc_connection_set_event_handler(server_conn, ^(xpc_object_t client) { NSLog(@"server event handler, client = %@", client); if (client == XPC_ERROR_CONNECTION_INTERRUPTED || client == XPC_ERROR_CONNECTION_INVALID) { return; } xpc_connection_set_event_handler(client, ^(xpc_object_t object) { NSLog(@"received message: %@", object); barrier_wait(&barrier); global = 42; dispatch_sync(dispatch_get_main_queue(), ^{ received_msgs++; if (received_msgs >= 2) { xpc_connection_cancel(client_conns[0]); xpc_connection_cancel(client_conns[1]); xpc_connection_cancel(server_conn); CFRunLoopStop(CFRunLoopGetCurrent()); } }); }); xpc_connection_resume(client); }); xpc_connection_resume(server_conn); xpc_endpoint_t endpoint = xpc_endpoint_create(server_conn); for (int i = 0; i < 2; i++) { client_conns[i] = xpc_connection_create_from_endpoint(endpoint); xpc_connection_set_event_handler(client_conns[i], ^(xpc_object_t event) { NSLog(@"client event handler, event = %@", event); }); xpc_object_t msg = xpc_dictionary_create(NULL, NULL, 0); xpc_dictionary_set_string(msg, "hello", "world"); NSLog(@"sending message: %@", msg); xpc_connection_send_message(client_conns[i], msg); xpc_connection_resume(client_conns[i]); } CFRunLoopRun(); NSLog(@"Done."); } return 0; } // CHECK: Hello world. // CHECK: WARNING: ThreadSanitizer: data race // CHECK: Write of size 8 -// CHECK: #0 {{.*}}xpc-race.mm:34 +// CHECK: #0 {{.*}}xpc-race.mm:36 // CHECK: Previous write of size 8 -// CHECK: #0 {{.*}}xpc-race.mm:34 +// CHECK: #0 {{.*}}xpc-race.mm:36 // CHECK: Location is global 'global' // CHECK: Done. 
Index: vendor/compiler-rt/dist/test/tsan/Darwin/xpc.mm =================================================================== --- vendor/compiler-rt/dist/test/tsan/Darwin/xpc.mm (revision 317686) +++ vendor/compiler-rt/dist/test/tsan/Darwin/xpc.mm (revision 317687) @@ -1,74 +1,76 @@ // RUN: %clang_tsan %s -o %t -framework Foundation // RUN: %run %t 2>&1 | FileCheck %s +// XFAIL: ios + #import #import long global; int main(int argc, const char *argv[]) { @autoreleasepool { NSLog(@"Hello world."); dispatch_queue_t server_q = dispatch_queue_create("server.queue", DISPATCH_QUEUE_CONCURRENT); dispatch_queue_t client_q = dispatch_queue_create("client.queue", DISPATCH_QUEUE_CONCURRENT); xpc_connection_t server_conn = xpc_connection_create(NULL, server_q); global = 42; xpc_connection_set_event_handler(server_conn, ^(xpc_object_t client) { NSLog(@"global = %ld", global); NSLog(@"server event handler, client = %@", client); if (client == XPC_ERROR_CONNECTION_INTERRUPTED || client == XPC_ERROR_CONNECTION_INVALID) { return; } xpc_connection_set_event_handler(client, ^(xpc_object_t object) { NSLog(@"received message: %@", object); xpc_object_t reply = xpc_dictionary_create_reply(object); if (!reply) return; xpc_dictionary_set_string(reply, "reply", "value"); xpc_connection_t remote = xpc_dictionary_get_remote_connection(object); xpc_connection_send_message(remote, reply); }); xpc_connection_resume(client); }); xpc_connection_resume(server_conn); xpc_endpoint_t endpoint = xpc_endpoint_create(server_conn); xpc_connection_t client_conn = xpc_connection_create_from_endpoint(endpoint); xpc_connection_set_event_handler(client_conn, ^(xpc_object_t event) { NSLog(@"client event handler, event = %@", event); }); xpc_object_t msg = xpc_dictionary_create(NULL, NULL, 0); xpc_dictionary_set_string(msg, "hello", "world"); NSLog(@"sending message: %@", msg); xpc_connection_send_message_with_reply( client_conn, msg, client_q, ^(xpc_object_t object) { NSLog(@"received reply: %@", object); xpc_connection_cancel(client_conn); xpc_connection_cancel(server_conn); dispatch_sync(dispatch_get_main_queue(), ^{ CFRunLoopStop(CFRunLoopGetCurrent()); }); }); xpc_connection_resume(client_conn); CFRunLoopRun(); NSLog(@"Done."); } return 0; } // CHECK: Done. // CHECK-NOT: WARNING: ThreadSanitizer Index: vendor/compiler-rt/dist/test/tsan/ignore_lib1.cc =================================================================== --- vendor/compiler-rt/dist/test/tsan/ignore_lib1.cc (revision 317686) +++ vendor/compiler-rt/dist/test/tsan/ignore_lib1.cc (revision 317687) @@ -1,44 +1,47 @@ // RUN: %clangxx_tsan -O1 %s -DLIB -fPIC -fno-sanitize=thread -shared -o %T/libignore_lib1.so // RUN: %clangxx_tsan -O1 %s -o %t // RUN: echo running w/o suppressions: // RUN: %deflake %run %t | FileCheck %s --check-prefix=CHECK-NOSUPP // RUN: echo running with suppressions: // RUN: %env_tsan_opts=suppressions='%s.supp' %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-WITHSUPP // Tests that interceptors coming from a dynamically loaded library specified // in called_from_lib suppression are ignored. // REQUIRES: stable-runtime +// UNSUPPORTED: powerpc64le +// FIXME: This test regularly fails on powerpc64 LE possibly starting with +// r279664. Re-enable the test once the problem(s) have been fixed. 
#ifndef LIB #include #include #include #include #include #include int main(int argc, char **argv) { std::string lib = std::string(dirname(argv[0])) + "/libignore_lib1.so"; void *h = dlopen(lib.c_str(), RTLD_GLOBAL | RTLD_NOW); if (h == 0) exit(printf("failed to load the library (%d)\n", errno)); void (*f)() = (void(*)())dlsym(h, "libfunc"); if (f == 0) exit(printf("failed to find the func (%d)\n", errno)); f(); } #else // #ifdef LIB #include "ignore_lib_lib.h" #endif // #ifdef LIB // CHECK-NOSUPP: WARNING: ThreadSanitizer: data race // CHECK-NOSUPP: OK // CHECK-WITHSUPP-NOT: WARNING: ThreadSanitizer: data race // CHECK-WITHSUPP: OK Index: vendor/compiler-rt/dist/test/tsan/ignore_lib5.cc =================================================================== --- vendor/compiler-rt/dist/test/tsan/ignore_lib5.cc (revision 317686) +++ vendor/compiler-rt/dist/test/tsan/ignore_lib5.cc (revision 317687) @@ -1,75 +1,78 @@ // RUN: %clangxx_tsan -O1 %s -DLIB -fPIC -fno-sanitize=thread -shared -o %T/libignore_lib1.so // RUN: %clangxx_tsan -O1 %s -o %t // RUN: echo running w/o suppressions: // RUN: %deflake %run %t | FileCheck %s --check-prefix=CHECK-NOSUPP // RUN: echo running with suppressions: // RUN: %env_tsan_opts=suppressions='%s.supp' %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-WITHSUPP // REQUIRES: stable-runtime +// UNSUPPORTED: powerpc64le +// FIXME: This test occasionally fails on powerpc64 LE possibly starting with +// r279664. Re-enable the test once the problem(s) have been fixed. // Previously the test episodically failed with: // ThreadSanitizer: called_from_lib suppression '/libignore_lib1.so$' is // matched against 2 libraries: '/libignore_lib1.so' and '/libignore_lib1.so' // This was caused by non-atomicity of reading of /proc/self/maps. #ifndef LIB #include #include #include #include #include #include #include #include "test.h" #ifndef MAP_32BIT # define MAP_32BIT 0 #endif #ifdef __APPLE__ # define TSAN_MAP_ANON MAP_ANON #else # define TSAN_MAP_ANON MAP_ANONYMOUS #endif void *thr(void *arg) { // This thread creates lots of separate mappings in /proc/self/maps before // the ignored library. 
for (int i = 0; i < 10000; i++) { if (i == 5000) barrier_wait(&barrier); mmap(0, 4096, PROT_READ, TSAN_MAP_ANON | MAP_PRIVATE | MAP_32BIT, -1 , 0); mmap(0, 4096, PROT_WRITE, TSAN_MAP_ANON | MAP_PRIVATE | MAP_32BIT, -1 , 0); } return 0; } int main(int argc, char **argv) { barrier_init(&barrier, 2); pthread_t th; pthread_create(&th, 0, thr, 0); barrier_wait(&barrier); std::string lib = std::string(dirname(argv[0])) + "/libignore_lib1.so"; void *h = dlopen(lib.c_str(), RTLD_GLOBAL | RTLD_NOW); if (h == 0) exit(printf("failed to load the library (%d)\n", errno)); void (*f)() = (void(*)())dlsym(h, "libfunc"); if (f == 0) exit(printf("failed to find the func (%d)\n", errno)); pthread_join(th, 0); f(); } #else // #ifdef LIB #include "ignore_lib_lib.h" #endif // #ifdef LIB // CHECK-NOSUPP: WARNING: ThreadSanitizer: data race // CHECK-NOSUPP: OK // CHECK-WITHSUPP-NOT: WARNING: ThreadSanitizer: data race // CHECK-WITHSUPP: OK Index: vendor/compiler-rt/dist/test/ubsan/TestCases/Float/cast-overflow.cpp =================================================================== --- vendor/compiler-rt/dist/test/ubsan/TestCases/Float/cast-overflow.cpp (revision 317686) +++ vendor/compiler-rt/dist/test/ubsan/TestCases/Float/cast-overflow.cpp (revision 317687) @@ -1,151 +1,151 @@ // RUN: %clangxx -fsanitize=float-cast-overflow %s -o %t // RUN: %run %t _ // RUN: %env_ubsan_opts=print_summary=1:report_error_type=1 %run %t 0 2>&1 | FileCheck %s --check-prefix=CHECK-0 // RUN: %run %t 1 2>&1 | FileCheck %s --check-prefix=CHECK-1 // RUN: %run %t 2 2>&1 | FileCheck %s --check-prefix=CHECK-2 // RUN: %run %t 3 2>&1 | FileCheck %s --check-prefix=CHECK-3 // RUN: %run %t 4 2>&1 | FileCheck %s --check-prefix=CHECK-4 // RUN: %run %t 5 2>&1 | FileCheck %s --check-prefix=CHECK-5 // RUN: %run %t 6 2>&1 | FileCheck %s --check-prefix=CHECK-6 // FIXME: %run %t 7 2>&1 | FileCheck %s --check-prefix=CHECK-7 // FIXME: not %run %t 8 2>&1 | FileCheck %s --check-prefix=CHECK-8 // RUN: not %run %t 9 2>&1 | FileCheck %s --check-prefix=CHECK-9 // This test assumes float and double are IEEE-754 single- and double-precision. #if defined(__APPLE__) # include # define BYTE_ORDER __DARWIN_BYTE_ORDER # define BIG_ENDIAN __DARWIN_BIG_ENDIAN # define LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN #elif defined(__FreeBSD__) # include # define BYTE_ORDER _BYTE_ORDER # define BIG_ENDIAN _BIG_ENDIAN # define LITTLE_ENDIAN _LITTLE_ENDIAN #elif defined(_WIN32) # define BYTE_ORDER 0 # define BIG_ENDIAN 1 # define LITTLE_ENDIAN 0 #else # include # define BYTE_ORDER __BYTE_ORDER # define BIG_ENDIAN __BIG_ENDIAN # define LITTLE_ENDIAN __LITTLE_ENDIAN #endif // __APPLE__ #include #include #include float Inf; float NaN; int main(int argc, char **argv) { float MaxFloatRepresentableAsInt = 0x7fffff80; (int)MaxFloatRepresentableAsInt; // ok (int)-MaxFloatRepresentableAsInt; // ok float MinFloatRepresentableAsInt = -0x7fffffff - 1; (int)MinFloatRepresentableAsInt; // ok float MaxFloatRepresentableAsUInt = 0xffffff00u; (unsigned int)MaxFloatRepresentableAsUInt; // ok #ifdef __SIZEOF_INT128__ unsigned __int128 FloatMaxAsUInt128 = -((unsigned __int128)1 << 104); (void)(float)FloatMaxAsUInt128; // ok #endif float NearlyMinusOne = -0.99999; unsigned Zero = NearlyMinusOne; // ok // Build a '+Inf'. #if BYTE_ORDER == LITTLE_ENDIAN unsigned char InfVal[] = { 0x00, 0x00, 0x80, 0x7f }; #else unsigned char InfVal[] = { 0x7f, 0x80, 0x00, 0x00 }; #endif float Inf; memcpy(&Inf, InfVal, 4); // Build a 'NaN'. 
#if BYTE_ORDER == LITTLE_ENDIAN unsigned char NaNVal[] = { 0x01, 0x00, 0x80, 0x7f }; #else unsigned char NaNVal[] = { 0x7f, 0x80, 0x00, 0x01 }; #endif float NaN; memcpy(&NaN, NaNVal, 4); double DblInf = (double)Inf; // ok switch (argv[1][0]) { // FIXME: Produce a source location for these checks and test for it here. // Floating point -> integer overflow. case '0': { // Note that values between 0x7ffffe00 and 0x80000000 may or may not // successfully round-trip, depending on the rounding mode. - // CHECK-0: {{.*}}cast-overflow.cpp:[[@LINE+1]]:27: runtime error: value 2.14748{{.*}} is outside the range of representable values of type 'int' + // CHECK-0: {{.*}}cast-overflow.cpp:[[@LINE+1]]:27: runtime error: 2.14748{{.*}} is outside the range of representable values of type 'int' static int test_int = MaxFloatRepresentableAsInt + 0x80; // CHECK-0: SUMMARY: {{.*}}Sanitizer: float-cast-overflow {{.*}}cast-overflow.cpp:[[@LINE-1]] return 0; } case '1': { - // CHECK-1: {{.*}}cast-overflow.cpp:[[@LINE+1]]:27: runtime error: value -2.14748{{.*}} is outside the range of representable values of type 'int' + // CHECK-1: {{.*}}cast-overflow.cpp:[[@LINE+1]]:27: runtime error: -2.14748{{.*}} is outside the range of representable values of type 'int' static int test_int = MinFloatRepresentableAsInt - 0x100; return 0; } case '2': { - // CHECK-2: {{.*}}cast-overflow.cpp:[[@LINE+2]]:37: runtime error: value -1 is outside the range of representable values of type 'unsigned int' + // CHECK-2: {{.*}}cast-overflow.cpp:[[@LINE+2]]:37: runtime error: -1 is outside the range of representable values of type 'unsigned int' volatile float f = -1.0; volatile unsigned u = (unsigned)f; return 0; } case '3': { - // CHECK-3: {{.*}}cast-overflow.cpp:[[@LINE+1]]:37: runtime error: value 4.2949{{.*}} is outside the range of representable values of type 'unsigned int' + // CHECK-3: {{.*}}cast-overflow.cpp:[[@LINE+1]]:37: runtime error: 4.2949{{.*}} is outside the range of representable values of type 'unsigned int' static int test_int = (unsigned)(MaxFloatRepresentableAsUInt + 0x100); return 0; } case '4': { - // CHECK-4: {{.*}}cast-overflow.cpp:[[@LINE+1]]:27: runtime error: value {{.*}} is outside the range of representable values of type 'int' + // CHECK-4: {{.*}}cast-overflow.cpp:[[@LINE+1]]:27: runtime error: {{.*}} is outside the range of representable values of type 'int' static int test_int = Inf; return 0; } case '5': { - // CHECK-5: {{.*}}cast-overflow.cpp:[[@LINE+1]]:27: runtime error: value {{.*}} is outside the range of representable values of type 'int' + // CHECK-5: {{.*}}cast-overflow.cpp:[[@LINE+1]]:27: runtime error: {{.*}} is outside the range of representable values of type 'int' static int test_int = NaN; return 0; } // Integer -> floating point overflow. case '6': { - // CHECK-6: cast-overflow.cpp:[[@LINE+2]]:{{34: runtime error: value 0xffffff00000000000000000000000001 is outside the range of representable values of type 'float'| __int128 not supported}} + // CHECK-6: cast-overflow.cpp:[[@LINE+2]]:{{34: runtime error: 0xffffff00000000000000000000000001 is outside the range of representable values of type 'float'| __int128 not supported}} #if defined(__SIZEOF_INT128__) && !defined(_WIN32) static int test_int = (float)(FloatMaxAsUInt128 + 1); return 0; #else // Print the same line as the check above. That way the test is robust to // line changes around it printf("%s:%d: __int128 not supported", __FILE__, __LINE__ - 5); return 0; #endif } // FIXME: The backend cannot lower __fp16 operations on x86 yet. 
//case '7': // (__fp16)65504; // ok - // // CHECK-7: runtime error: value 65505 is outside the range of representable values of type '__fp16' + // // CHECK-7: runtime error: 65505 is outside the range of representable values of type '__fp16' // return (__fp16)65505; // Floating point -> floating point overflow. case '8': - // CHECK-8: {{.*}}cast-overflow.cpp:[[@LINE+1]]:19: runtime error: value 1e+39 is outside the range of representable values of type 'float' + // CHECK-8: {{.*}}cast-overflow.cpp:[[@LINE+1]]:19: runtime error: 1e+39 is outside the range of representable values of type 'float' return (float)1e39; case '9': volatile long double ld = 300.0; - // CHECK-9: {{.*}}cast-overflow.cpp:[[@LINE+1]]:14: runtime error: value 300 is outside the range of representable values of type 'char' + // CHECK-9: {{.*}}cast-overflow.cpp:[[@LINE+1]]:14: runtime error: 300 is outside the range of representable values of type 'char' char c = ld; return c; } } Index: vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/log-path_test.cc =================================================================== --- vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/log-path_test.cc (revision 317686) +++ vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/log-path_test.cc (revision 317687) @@ -1,36 +1,36 @@ // FIXME: https://code.google.com/p/address-sanitizer/issues/detail?id=316 // XFAIL: android // The globs below do not work in the lit shell. // REQUIRES: shell // RUN: %clangxx -fsanitize=undefined %s -O1 -o %t // Regular run. // RUN: %run %t -4 2> %t.out // RUN: FileCheck %s --check-prefix=CHECK-ERROR < %t.out // Good log_path. // RUN: rm -f %t.log.* // RUN: %env_ubsan_opts=log_path='"%t.log"' %run %t -4 2> %t.out // RUN: FileCheck %s --check-prefix=CHECK-ERROR < %t.log.* // Run w/o errors should not produce any log. // RUN: rm -f %t.log.* // RUN: %env_ubsan_opts=log_path='"%t.log"' %run %t 4 // RUN: not cat %t.log.* // FIXME: log_path is not supported on Windows yet. 
// XFAIL: win32 #include #include int main(int argc, char *argv[]) { double a = atof(argv[1]); unsigned int ai = (unsigned int) a; printf("%f %u\n", a, ai); return 0; } -// CHECK-ERROR: runtime error: value -4 is outside the range of representable values of type 'unsigned int' +// CHECK-ERROR: runtime error: -4 is outside the range of representable values of type 'unsigned int' Index: vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/missing_return.cpp =================================================================== --- vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/missing_return.cpp (revision 317686) +++ vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/missing_return.cpp (revision 317687) @@ -1,15 +1,12 @@ // RUN: %clangxx -fsanitize=return -g %s -O3 -o %t // RUN: not %run %t 2>&1 | FileCheck %s -// RUN: %env_ubsan_opts=print_stacktrace=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%os-STACKTRACE +// RUN: %env_ubsan_opts=print_stacktrace=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-STACKTRACE // CHECK: missing_return.cpp:[[@LINE+1]]:5: runtime error: execution reached the end of a value-returning function without returning a value int f() { -// Slow stack unwinding is not available on Darwin for now, see -// https://code.google.com/p/address-sanitizer/issues/detail?id=137 -// CHECK-Linux-STACKTRACE: #0 {{.*}}f(){{.*}}missing_return.cpp:[[@LINE-3]] -// CHECK-FreeBSD-STACKTRACE: #0 {{.*}}f(void){{.*}}missing_return.cpp:[[@LINE-4]] +// CHECK-STACKTRACE: #0 {{.*}}f{{.*}}missing_return.cpp:[[@LINE-1]] } int main(int, char **argv) { return f(); } Index: vendor/compiler-rt/dist/test/ubsan/TestCases/TypeCheck/misaligned.cpp =================================================================== --- vendor/compiler-rt/dist/test/ubsan/TestCases/TypeCheck/misaligned.cpp (revision 317686) +++ vendor/compiler-rt/dist/test/ubsan/TestCases/TypeCheck/misaligned.cpp (revision 317687) @@ -1,108 +1,104 @@ // FIXME: This test currently fails on Windows because we use the MSVC linker, // which throws away DWARF debug info. // XFAIL: win32 // // RUN: %clangxx -fsanitize=alignment -g %s -O3 -o %t // RUN: %run %t l0 && %run %t s0 && %run %t r0 && %run %t m0 && %run %t f0 && %run %t n0 && %run %t u0 // RUN: %run %t l1 2>&1 | FileCheck %s --check-prefix=CHECK-LOAD --strict-whitespace // RUN: %run %t s1 2>&1 | FileCheck %s --check-prefix=CHECK-STORE // RUN: %run %t r1 2>&1 | FileCheck %s --check-prefix=CHECK-REFERENCE // RUN: %run %t m1 2>&1 | FileCheck %s --check-prefix=CHECK-MEMBER // RUN: %run %t f1 2>&1 | FileCheck %s --check-prefix=CHECK-MEMFUN // RUN: %run %t n1 2>&1 | FileCheck %s --check-prefix=CHECK-NEW // RUN: %run %t u1 2>&1 | FileCheck %s --check-prefix=CHECK-UPCAST -// RUN: %env_ubsan_opts=print_stacktrace=1 %run %t l1 2>&1 | FileCheck %s --check-prefix=CHECK-LOAD --check-prefix=CHECK-%os-STACK-LOAD +// RUN: %env_ubsan_opts=print_stacktrace=1 %run %t l1 2>&1 | FileCheck %s --check-prefix=CHECK-LOAD --check-prefix=CHECK-STACK-LOAD // RUN: %clangxx -fsanitize=alignment -fno-sanitize-recover=alignment %s -O3 -o %t // RUN: not %run %t w1 2>&1 | FileCheck %s --check-prefix=CHECK-WILD #include struct S { S() {} int f() { return 0; } int k; }; struct T : S { int t; }; int main(int, char **argv) { char c[] __attribute__((aligned(8))) = { 0, 0, 0, 0, 1, 2, 3, 4, 5 }; // Pointer value may be unspecified here, but behavior is not undefined. int *p = (int*)&c[4 + argv[1][1] - '0']; S *s = (S*)p; T *t = (T*)p; void *wild = reinterpret_cast(0x123L); (void)*p; // ok! 
switch (argv[1][0]) { case 'l': // CHECK-LOAD: misaligned.cpp:[[@LINE+4]]{{(:12)?}}: runtime error: load of misaligned address [[PTR:0x[0-9a-f]*]] for type 'int', which requires 4 byte alignment // CHECK-LOAD-NEXT: [[PTR]]: note: pointer points here // CHECK-LOAD-NEXT: {{^ 00 00 00 01 02 03 04 05}} // CHECK-LOAD-NEXT: {{^ \^}} return *p && 0; - // Slow stack unwinding is disabled on Darwin for now, see - // https://code.google.com/p/address-sanitizer/issues/detail?id=137 - // CHECK-Linux-STACK-LOAD: #0 {{.*}}main{{.*}}misaligned.cpp - // Check for the already checked line to avoid lit error reports. - // CHECK-Darwin-STACK-LOAD: {{ }} + // CHECK-STACK-LOAD: #0 {{.*}}main{{.*}}misaligned.cpp case 's': // CHECK-STORE: misaligned.cpp:[[@LINE+4]]{{(:5)?}}: runtime error: store to misaligned address [[PTR:0x[0-9a-f]*]] for type 'int', which requires 4 byte alignment // CHECK-STORE-NEXT: [[PTR]]: note: pointer points here // CHECK-STORE-NEXT: {{^ 00 00 00 01 02 03 04 05}} // CHECK-STORE-NEXT: {{^ \^}} *p = 1; break; case 'r': // CHECK-REFERENCE: misaligned.cpp:[[@LINE+4]]{{(:(5|15))?}}: runtime error: reference binding to misaligned address [[PTR:0x[0-9a-f]*]] for type 'int', which requires 4 byte alignment // CHECK-REFERENCE-NEXT: [[PTR]]: note: pointer points here // CHECK-REFERENCE-NEXT: {{^ 00 00 00 01 02 03 04 05}} // CHECK-REFERENCE-NEXT: {{^ \^}} {int &r = *p;} break; case 'm': // CHECK-MEMBER: misaligned.cpp:[[@LINE+4]]{{(:15)?}}: runtime error: member access within misaligned address [[PTR:0x[0-9a-f]*]] for type 'S', which requires 4 byte alignment // CHECK-MEMBER-NEXT: [[PTR]]: note: pointer points here // CHECK-MEMBER-NEXT: {{^ 00 00 00 01 02 03 04 05}} // CHECK-MEMBER-NEXT: {{^ \^}} return s->k && 0; case 'f': // CHECK-MEMFUN: misaligned.cpp:[[@LINE+4]]{{(:15)?}}: runtime error: member call on misaligned address [[PTR:0x[0-9a-f]*]] for type 'S', which requires 4 byte alignment // CHECK-MEMFUN-NEXT: [[PTR]]: note: pointer points here // CHECK-MEMFUN-NEXT: {{^ 00 00 00 01 02 03 04 05}} // CHECK-MEMFUN-NEXT: {{^ \^}} return s->f() && 0; case 'n': // CHECK-NEW: misaligned.cpp:[[@LINE+4]]{{(:21)?}}: runtime error: constructor call on misaligned address [[PTR:0x[0-9a-f]*]] for type 'S', which requires 4 byte alignment // CHECK-NEW-NEXT: [[PTR]]: note: pointer points here // CHECK-NEW-NEXT: {{^ 00 00 00 01 02 03 04 05}} // CHECK-NEW-NEXT: {{^ \^}} return (new (s) S)->k && 0; case 'u': { // CHECK-UPCAST: misaligned.cpp:[[@LINE+4]]{{(:17)?}}: runtime error: upcast of misaligned address [[PTR:0x[0-9a-f]*]] for type 'T', which requires 4 byte alignment // CHECK-UPCAST-NEXT: [[PTR]]: note: pointer points here // CHECK-UPCAST-NEXT: {{^ 00 00 00 01 02 03 04 05}} // CHECK-UPCAST-NEXT: {{^ \^}} S *s2 = (S*)t; return s2->f(); } case 'w': // CHECK-WILD: misaligned.cpp:[[@LINE+3]]{{(:35)?}}: runtime error: member access within misaligned address 0x{{0+}}123 for type 'S', which requires 4 byte alignment // CHECK-WILD-NEXT: 0x{{0+}}123: note: pointer points here // CHECK-WILD-NEXT: return static_cast(wild)->k; } }
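Many RUN lines touched by this patch are built out of lit substitutions such as %env_asan_opts=, %clangxx_asan, %run, and %t, which the configuration files earlier in this diff register as (pattern, replacement) pairs. The following is a minimal sketch of how one such RUN line expands on a Darwin host without an emulator or iOS wrapper; expand_run_line is a hypothetical helper, the concrete replacement strings are illustrative, and lit's real substitution machinery applies regexes in a defined order rather than this simplified loop.

import re

# Illustrative (pattern, replacement) pairs; the real values are assembled by
# test/lit.common.cfg and test/asan/lit.cfg in this patch.
substitutions = [
    ('%env_asan_opts=', 'env ASAN_OPTIONS=abort_on_error=0:log_to_syslog=0:'),
    ('%clangxx_asan ', 'clang++ -fsanitize=address '),
    ('%run ', ''),   # no emulator and no iOS wrapper configured
    ('%t', '/tmp/small_memcpy_test.cc.tmp'),
]

def expand_run_line(line):
    for pattern, replacement in substitutions:
        line = re.sub(re.escape(pattern), replacement, line)
    return line

print(expand_run_line('%env_asan_opts=verbosity=1 %run %t 8 24 2>&1'))
# env ASAN_OPTIONS=abort_on_error=0:log_to_syslog=0:verbosity=1 /tmp/small_memcpy_test.cc.tmp 8 24 2>&1

On an iOS configuration the same %run would instead expand to iossim_run.py or ios_run.py, which is why the new wrappers and the config.ios/config.iossim flags are threaded through lit.site.cfg.in and lit.common.configured.in above.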