Index: vendor/llvm/dist-release_80/CMakeLists.txt
===================================================================
--- vendor/llvm/dist-release_80/CMakeLists.txt (revision 344165)
+++ vendor/llvm/dist-release_80/CMakeLists.txt (revision 344166)
@@ -1,1079 +1,1082 @@
# See docs/CMake.html for instructions about how to build LLVM with CMake.
cmake_minimum_required(VERSION 3.4.3)
if(POLICY CMP0068)
cmake_policy(SET CMP0068 NEW)
set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
endif()
if(POLICY CMP0075)
cmake_policy(SET CMP0075 NEW)
endif()
if(NOT DEFINED LLVM_VERSION_MAJOR)
set(LLVM_VERSION_MAJOR 8)
endif()
if(NOT DEFINED LLVM_VERSION_MINOR)
set(LLVM_VERSION_MINOR 0)
endif()
if(NOT DEFINED LLVM_VERSION_PATCH)
set(LLVM_VERSION_PATCH 0)
endif()
if(NOT DEFINED LLVM_VERSION_SUFFIX)
set(LLVM_VERSION_SUFFIX "")
endif()
if (NOT PACKAGE_VERSION)
set(PACKAGE_VERSION
"${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}${LLVM_VERSION_SUFFIX}")
endif()
if ((CMAKE_GENERATOR MATCHES "Visual Studio") AND (CMAKE_GENERATOR_TOOLSET STREQUAL ""))
message(WARNING "Visual Studio generators use the x86 host compiler by "
"default, even for 64-bit targets. This can result in linker "
"instability and out of memory errors. To use the 64-bit "
"host compiler, pass -Thost=x64 on the CMake command line.")
endif()
project(LLVM
VERSION ${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}
LANGUAGES C CXX ASM)
if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
message(STATUS "No build type selected, default to Debug")
set(CMAKE_BUILD_TYPE "Debug" CACHE STRING "Build type (default Debug)" FORCE)
endif()
# This should only apply if you are both on an Apple host, and targeting Apple.
if(CMAKE_HOST_APPLE AND APPLE)
# if CMAKE_LIBTOOL is not set, try and find it with xcrun or find_program
if(NOT CMAKE_LIBTOOL)
if(NOT CMAKE_XCRUN)
find_program(CMAKE_XCRUN NAMES xcrun)
endif()
if(CMAKE_XCRUN)
execute_process(COMMAND ${CMAKE_XCRUN} -find libtool
OUTPUT_VARIABLE CMAKE_LIBTOOL
OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
if(NOT CMAKE_LIBTOOL OR NOT EXISTS CMAKE_LIBTOOL)
find_program(CMAKE_LIBTOOL NAMES libtool)
endif()
endif()
get_property(languages GLOBAL PROPERTY ENABLED_LANGUAGES)
if(CMAKE_LIBTOOL)
set(CMAKE_LIBTOOL ${CMAKE_LIBTOOL} CACHE PATH "libtool executable")
message(STATUS "Found libtool - ${CMAKE_LIBTOOL}")
execute_process(COMMAND ${CMAKE_LIBTOOL} -V
OUTPUT_VARIABLE LIBTOOL_V_OUTPUT
OUTPUT_STRIP_TRAILING_WHITESPACE)
if("${LIBTOOL_V_OUTPUT}" MATCHES ".*cctools-([0-9.]+).*")
string(REGEX REPLACE ".*cctools-([0-9.]+).*" "\\1" LIBTOOL_VERSION
${LIBTOOL_V_OUTPUT})
if(NOT LIBTOOL_VERSION VERSION_LESS "862")
set(LIBTOOL_NO_WARNING_FLAG "-no_warning_for_no_symbols")
endif()
endif()
foreach(lang ${languages})
set(CMAKE_${lang}_CREATE_STATIC_LIBRARY
"\"${CMAKE_LIBTOOL}\" -static ${LIBTOOL_NO_WARNING_FLAG} -o <TARGET> \
<LINK_FLAGS> <OBJECTS> ")
endforeach()
endif()
# If DYLD_LIBRARY_PATH is set we need to set it on archiver commands
if(DYLD_LIBRARY_PATH)
set(dyld_envar "DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}")
foreach(lang ${languages})
foreach(cmd ${CMAKE_${lang}_CREATE_STATIC_LIBRARY})
list(APPEND CMAKE_${lang}_CREATE_STATIC_LIBRARY_NEW
"${dyld_envar} ${cmd}")
endforeach()
set(CMAKE_${lang}_CREATE_STATIC_LIBRARY
${CMAKE_${lang}_CREATE_STATIC_LIBRARY_NEW})
endforeach()
endif()
endif()
# Side-by-side subprojects layout: automatically set the
# LLVM_EXTERNAL_${project}_SOURCE_DIR using LLVM_ALL_PROJECTS
# This allows an easy way of setting up a build directory for llvm and another
# one for llvm+clang+... using the same sources.
set(LLVM_ALL_PROJECTS "clang;libcxx;libcxxabi;libunwind;lldb;compiler-rt;lld;polly;debuginfo-tests")
set(LLVM_ENABLE_PROJECTS "" CACHE STRING
"Semicolon-separated list of projects to build (${LLVM_ALL_PROJECTS}), or \"all\".")
if( LLVM_ENABLE_PROJECTS STREQUAL "all" )
set( LLVM_ENABLE_PROJECTS ${LLVM_ALL_PROJECTS})
endif()
foreach(proj ${LLVM_ENABLE_PROJECTS})
set(PROJ_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../${proj}")
if(NOT EXISTS "${PROJ_DIR}" OR NOT IS_DIRECTORY "${PROJ_DIR}")
message(FATAL_ERROR "LLVM_ENABLE_PROJECTS requests ${proj} but directory not found: ${PROJ_DIR}")
endif()
string(TOUPPER "${proj}" upper_proj)
STRING(REGEX REPLACE "-" "_" upper_proj ${upper_proj})
set(LLVM_EXTERNAL_${upper_proj}_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../${proj}")
# There is a widespread opinion that clang-tools-extra should be merged
# into clang. The following simulates it by always enabling clang-tools-extra
# when enabling clang.
if (proj STREQUAL "clang")
set(LLVM_EXTERNAL_CLANG_TOOLS_EXTRA_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../clang-tools-extra")
endif()
endforeach()
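# A minimal sketch of the name mapping above, assuming a hypothetical
# configure line (illustrative only, not taken from this file):
#   cmake -DLLVM_ENABLE_PROJECTS="clang;compiler-rt" <path-to>/llvm
# For "compiler-rt" the loop runs, in effect:
#   string(TOUPPER "compiler-rt" upper_proj)                 # -> COMPILER-RT
#   string(REGEX REPLACE "-" "_" upper_proj ${upper_proj})   # -> COMPILER_RT
# and ends up setting LLVM_EXTERNAL_COMPILER_RT_SOURCE_DIR to ../compiler-rt.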
# Build llvm with ccache if the package is present
set(LLVM_CCACHE_BUILD OFF CACHE BOOL "Set to ON for a ccache enabled build")
if(LLVM_CCACHE_BUILD)
find_program(CCACHE_PROGRAM ccache)
if(CCACHE_PROGRAM)
set(LLVM_CCACHE_MAXSIZE "" CACHE STRING "Size of ccache")
set(LLVM_CCACHE_DIR "" CACHE STRING "Directory to keep ccached data")
set(LLVM_CCACHE_PARAMS "CCACHE_CPP2=yes CCACHE_HASHDIR=yes"
CACHE STRING "Parameters to pass through to ccache")
set(CCACHE_PROGRAM "${LLVM_CCACHE_PARAMS} ${CCACHE_PROGRAM}")
if (LLVM_CCACHE_MAXSIZE)
set(CCACHE_PROGRAM "CCACHE_MAXSIZE=${LLVM_CCACHE_MAXSIZE} ${CCACHE_PROGRAM}")
endif()
if (LLVM_CCACHE_DIR)
set(CCACHE_PROGRAM "CCACHE_DIR=${LLVM_CCACHE_DIR} ${CCACHE_PROGRAM}")
endif()
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_PROGRAM})
else()
message(FATAL_ERROR "Unable to find the program ccache. Set LLVM_CCACHE_BUILD to OFF")
endif()
endif()
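# A minimal sketch of the launcher assembled above, assuming hypothetical
# cache settings (illustrative only, not taken from this file):
#   cmake -DLLVM_CCACHE_BUILD=ON -DLLVM_CCACHE_MAXSIZE=20G -DLLVM_CCACHE_DIR=/tmp/ccache ...
# sets RULE_LAUNCH_COMPILE to the equivalent of:
#   CCACHE_DIR=/tmp/ccache CCACHE_MAXSIZE=20G CCACHE_CPP2=yes CCACHE_HASHDIR=yes <path-to-ccache>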
option(LLVM_DEPENDENCY_DEBUGGING "Dependency debugging mode to verify correctly expressed library dependencies (Darwin only)" OFF)
# Some features of the LLVM build may be disallowed when dependency debugging is
# enabled. In particular you cannot use ccache because we want to force compile
# operations to always happen.
if(LLVM_DEPENDENCY_DEBUGGING)
if(NOT CMAKE_HOST_APPLE)
message(FATAL_ERROR "Dependency debugging is only currently supported on Darwin hosts.")
endif()
if(LLVM_CCACHE_BUILD)
message(FATAL_ERROR "Cannot enable dependency debugging while using ccache.")
endif()
endif()
option(LLVM_ENABLE_DAGISEL_COV "Debug: Prints tablegen patterns that were used for selecting" OFF)
option(LLVM_ENABLE_GISEL_COV "Enable collection of GlobalISel rule coverage" OFF)
if(LLVM_ENABLE_GISEL_COV)
set(LLVM_GISEL_COV_PREFIX "${CMAKE_BINARY_DIR}/gisel-coverage-" CACHE STRING "Provide a filename prefix to collect the GlobalISel rule coverage")
endif()
# Add path for custom modules
set(CMAKE_MODULE_PATH
${CMAKE_MODULE_PATH}
"${CMAKE_CURRENT_SOURCE_DIR}/cmake"
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules"
)
# Generate a CompilationDatabase (compile_commands.json file) for our build,
# for use by clang_complete, YouCompleteMe, etc.
set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
option(LLVM_INSTALL_BINUTILS_SYMLINKS
"Install symlinks from the binutils tool names to the corresponding LLVM tools." OFF)
option(LLVM_INSTALL_UTILS "Include utility binaries in the 'install' target." OFF)
option(LLVM_INSTALL_TOOLCHAIN_ONLY "Only include toolchain files in the 'install' target." OFF)
# Unfortunately, Clang is too eager to search directories for module maps, which can cause the
# installed version of the maps to be found when building LLVM from source. Therefore we turn off
# the installation by default. See llvm.org/PR31905.
option(LLVM_INSTALL_MODULEMAPS "Install the modulemap files in the 'install' target." OFF)
option(LLVM_USE_FOLDERS "Enable solution folders in Visual Studio. Disable for Express versions." ON)
if ( LLVM_USE_FOLDERS )
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
endif()
include(VersionFromVCS)
option(LLVM_APPEND_VC_REV
"Embed the version control system revision id in LLVM" ON)
set(PACKAGE_NAME LLVM)
set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
set(PACKAGE_BUGREPORT "https://bugs.llvm.org/")
set(BUG_REPORT_URL "${PACKAGE_BUGREPORT}" CACHE STRING
"Default URL where bug reports are to be submitted.")
# Configure CPack.
set(CPACK_PACKAGE_INSTALL_DIRECTORY "LLVM")
set(CPACK_PACKAGE_VENDOR "LLVM")
set(CPACK_PACKAGE_VERSION_MAJOR ${LLVM_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${LLVM_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${LLVM_VERSION_PATCH})
set(CPACK_PACKAGE_VERSION ${PACKAGE_VERSION})
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.TXT")
set(CPACK_NSIS_COMPRESSOR "/SOLID lzma \r\n SetCompressorDictSize 32")
if(WIN32 AND NOT UNIX)
set(CPACK_PACKAGE_INSTALL_REGISTRY_KEY "LLVM")
set(CPACK_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}\\\\cmake\\\\nsis_logo.bmp")
set(CPACK_NSIS_MUI_ICON "${CMAKE_CURRENT_SOURCE_DIR}\\\\cmake\\\\nsis_icon.ico")
set(CPACK_NSIS_MUI_UNIICON "${CMAKE_CURRENT_SOURCE_DIR}\\\\cmake\\\\nsis_icon.ico")
set(CPACK_NSIS_MODIFY_PATH "ON")
set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL "ON")
if( CMAKE_CL_64 )
set(CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES64")
endif()
endif()
include(CPack)
# Sanity check our source directory to make sure that we are not trying to
# generate an in-source build (unless on MSVC_IDE, where it is ok), and to make
# sure that we don't have any stray generated files lying around in the tree
# (which would end up getting picked up by header search, instead of the correct
# versions).
if( CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR AND NOT MSVC_IDE )
message(FATAL_ERROR "In-source builds are not allowed.
Please create a directory and run cmake from there, passing the path
to this source directory as the last argument.
This process created the file `CMakeCache.txt' and the directory `CMakeFiles'.
Please delete them.")
endif()
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
if (CMAKE_BUILD_TYPE AND
NOT uppercase_CMAKE_BUILD_TYPE MATCHES "^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL)$")
message(FATAL_ERROR "Invalid value for CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
endif()
set(LLVM_LIBDIR_SUFFIX "" CACHE STRING "Define suffix of library directory name (32/64)" )
set(LLVM_TOOLS_INSTALL_DIR "bin" CACHE STRING "Path for binary subdirectory (defaults to 'bin')")
mark_as_advanced(LLVM_TOOLS_INSTALL_DIR)
set(LLVM_UTILS_INSTALL_DIR "${LLVM_TOOLS_INSTALL_DIR}" CACHE STRING
"Path to install LLVM utilities (enabled by LLVM_INSTALL_UTILS=ON) (defaults to LLVM_TOOLS_INSTALL_DIR)")
mark_as_advanced(LLVM_UTILS_INSTALL_DIR)
# They are used as destinations by the target generators.
set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/bin)
set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/lib${LLVM_LIBDIR_SUFFIX})
if(WIN32 OR CYGWIN)
# DLL platform -- put DLLs into bin.
set(LLVM_SHLIB_OUTPUT_INTDIR ${LLVM_RUNTIME_OUTPUT_INTDIR})
else()
set(LLVM_SHLIB_OUTPUT_INTDIR ${LLVM_LIBRARY_OUTPUT_INTDIR})
endif()
# Each of these corresponds to an llvm-config option.
set(LLVM_TOOLS_BINARY_DIR ${LLVM_RUNTIME_OUTPUT_INTDIR}) # --bindir
set(LLVM_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}) # --libdir
set(LLVM_MAIN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR} ) # --src-root
set(LLVM_MAIN_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/include ) # --includedir
set(LLVM_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} ) # --prefix
# Note: LLVM_CMAKE_PATH does not include generated files
set(LLVM_CMAKE_PATH ${LLVM_MAIN_SRC_DIR}/cmake/modules)
set(LLVM_EXAMPLES_BINARY_DIR ${LLVM_BINARY_DIR}/examples)
set(LLVM_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/include)
# List of all targets to be built by default:
set(LLVM_ALL_TARGETS
AArch64
AMDGPU
ARM
BPF
Hexagon
Lanai
Mips
MSP430
NVPTX
PowerPC
Sparc
SystemZ
WebAssembly
X86
XCore
)
# List of targets with JIT support:
set(LLVM_TARGETS_WITH_JIT X86 PowerPC AArch64 ARM Mips SystemZ)
set(LLVM_TARGETS_TO_BUILD "all"
CACHE STRING "Semicolon-separated list of targets to build, or \"all\".")
set(LLVM_EXPERIMENTAL_TARGETS_TO_BUILD ""
CACHE STRING "Semicolon-separated list of experimental targets to build.")
option(BUILD_SHARED_LIBS
"Build all libraries as shared libraries instead of static" OFF)
option(LLVM_ENABLE_BACKTRACES "Enable embedding backtraces on crash." ON)
if(LLVM_ENABLE_BACKTRACES)
set(ENABLE_BACKTRACES 1)
endif()
option(LLVM_ENABLE_CRASH_OVERRIDES "Enable crash overrides." ON)
if(LLVM_ENABLE_CRASH_OVERRIDES)
set(ENABLE_CRASH_OVERRIDES 1)
endif()
option(LLVM_ENABLE_CRASH_DUMPS "Turn on memory dumps on crashes. Currently only implemented on Windows." OFF)
option(LLVM_ENABLE_FFI "Use libffi to call external functions from the interpreter" OFF)
set(FFI_LIBRARY_DIR "" CACHE PATH "Additional directory, where CMake should search for libffi.so")
set(FFI_INCLUDE_DIR "" CACHE PATH "Additional directory, where CMake should search for ffi.h or ffi/ffi.h")
set(LLVM_TARGET_ARCH "host"
CACHE STRING "Set target to use for LLVM JIT or use \"host\" for automatic detection.")
option(LLVM_ENABLE_TERMINFO "Use terminfo database if available." ON)
set(LLVM_ENABLE_LIBXML2 "ON" CACHE STRING "Use libxml2 if available. Can be ON, OFF, or FORCE_ON")
option(LLVM_ENABLE_LIBEDIT "Use libedit if available." ON)
option(LLVM_ENABLE_LIBPFM "Use libpfm for performance counters if available." ON)
option(LLVM_ENABLE_THREADS "Use threads if available." ON)
option(LLVM_ENABLE_ZLIB "Use zlib for compression/decompression if available." ON)
if( LLVM_TARGETS_TO_BUILD STREQUAL "all" )
set( LLVM_TARGETS_TO_BUILD ${LLVM_ALL_TARGETS} )
endif()
set(LLVM_TARGETS_TO_BUILD
${LLVM_TARGETS_TO_BUILD}
${LLVM_EXPERIMENTAL_TARGETS_TO_BUILD})
list(REMOVE_DUPLICATES LLVM_TARGETS_TO_BUILD)
option(LLVM_ENABLE_PIC "Build Position-Independent Code" ON)
option(LLVM_ENABLE_WARNINGS "Enable compiler warnings." ON)
option(LLVM_ENABLE_MODULES "Compile with C++ modules enabled." OFF)
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
option(LLVM_ENABLE_MODULE_DEBUGGING "Compile with -gmodules." ON)
option(LLVM_ENABLE_LOCAL_SUBMODULE_VISIBILITY "Compile with -fmodules-local-submodule-visibility." OFF)
else()
option(LLVM_ENABLE_MODULE_DEBUGGING "Compile with -gmodules." OFF)
option(LLVM_ENABLE_LOCAL_SUBMODULE_VISIBILITY "Compile with -fmodules-local-submodule-visibility." ON)
endif()
option(LLVM_ENABLE_CXX1Y "Compile with C++1y enabled." OFF)
option(LLVM_ENABLE_CXX1Z "Compile with C++1z enabled." OFF)
option(LLVM_ENABLE_LIBCXX "Use libc++ if available." OFF)
option(LLVM_ENABLE_LLD "Use lld as C and C++ linker." OFF)
option(LLVM_ENABLE_PEDANTIC "Compile with pedantic enabled." ON)
option(LLVM_ENABLE_WERROR "Fail and stop if a warning is triggered." OFF)
option(LLVM_ENABLE_DUMP "Enable dump functions even when assertions are disabled" OFF)
if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
option(LLVM_ENABLE_ASSERTIONS "Enable assertions" OFF)
else()
option(LLVM_ENABLE_ASSERTIONS "Enable assertions" ON)
endif()
option(LLVM_ENABLE_EXPENSIVE_CHECKS "Enable expensive checks" OFF)
set(LLVM_ABI_BREAKING_CHECKS "WITH_ASSERTS" CACHE STRING
"Enable abi-breaking checks. Can be WITH_ASSERTS, FORCE_ON or FORCE_OFF.")
-option(LLVM_FORCE_USE_OLD_HOST_TOOLCHAIN
+option(LLVM_FORCE_USE_OLD_TOOLCHAIN
"Set to ON to force using an old, unsupported host toolchain." OFF)
+
+option(LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN
+ "Set to ON to only warn when using a toolchain which is about to be deprecated, instead of emitting an error." OFF)
option(LLVM_USE_INTEL_JITEVENTS
"Use Intel JIT API to inform Intel(R) VTune(TM) Amplifier XE 2011 about JIT code"
OFF)
if( LLVM_USE_INTEL_JITEVENTS )
# Verify we are on a supported platform
if( NOT CMAKE_SYSTEM_NAME MATCHES "Windows" AND NOT CMAKE_SYSTEM_NAME MATCHES "Linux" )
message(FATAL_ERROR
"Intel JIT API support is available on Linux and Windows only.")
endif()
endif( LLVM_USE_INTEL_JITEVENTS )
option(LLVM_USE_OPROFILE
"Use opagent JIT interface to inform OProfile about JIT code" OFF)
option(LLVM_EXTERNALIZE_DEBUGINFO
"Generate dSYM files and strip executables and libraries (Darwin Only)" OFF)
set(LLVM_CODESIGNING_IDENTITY "" CACHE STRING
"Sign executables and dylibs with the given identity or skip if empty (Darwin Only)")
# If enabled, verify we are on a platform that supports oprofile.
if( LLVM_USE_OPROFILE )
if( NOT CMAKE_SYSTEM_NAME MATCHES "Linux" )
message(FATAL_ERROR "OProfile support is available on Linux only.")
endif( NOT CMAKE_SYSTEM_NAME MATCHES "Linux" )
endif( LLVM_USE_OPROFILE )
option(LLVM_USE_PERF
"Use perf JIT interface to inform perf about JIT code" OFF)
# If enabled, verify we are on a platform that supports perf.
if( LLVM_USE_PERF )
if( NOT CMAKE_SYSTEM_NAME MATCHES "Linux" )
message(FATAL_ERROR "perf support is available on Linux only.")
endif( NOT CMAKE_SYSTEM_NAME MATCHES "Linux" )
endif( LLVM_USE_PERF )
set(LLVM_USE_SANITIZER "" CACHE STRING
"Define the sanitizer used to build binaries and tests.")
option(LLVM_OPTIMIZE_SANITIZED_BUILDS "Pass -O1 on debug sanitizer builds" ON)
set(LLVM_LIB_FUZZING_ENGINE "" CACHE PATH
"Path to fuzzing library for linking with fuzz targets")
option(LLVM_USE_SPLIT_DWARF
"Use -gsplit-dwarf when compiling llvm." OFF)
option(LLVM_POLLY_LINK_INTO_TOOLS "Statically link Polly into tools (if available)" ON)
option(LLVM_POLLY_BUILD "Build LLVM with Polly" ON)
if (EXISTS ${LLVM_MAIN_SRC_DIR}/tools/polly/CMakeLists.txt)
set(POLLY_IN_TREE TRUE)
elseif(LLVM_EXTERNAL_POLLY_SOURCE_DIR)
set(POLLY_IN_TREE TRUE)
else()
set(POLLY_IN_TREE FALSE)
endif()
if (LLVM_POLLY_BUILD AND POLLY_IN_TREE)
set(WITH_POLLY ON)
else()
set(WITH_POLLY OFF)
endif()
if (LLVM_POLLY_LINK_INTO_TOOLS AND WITH_POLLY)
set(LINK_POLLY_INTO_TOOLS ON)
else()
set(LINK_POLLY_INTO_TOOLS OFF)
endif()
# Define an option controlling whether we should build for 32-bit on 64-bit
# platforms, where supported.
if( CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32 )
# TODO: support other platforms and toolchains.
option(LLVM_BUILD_32_BITS "Build 32 bits executables and libraries." OFF)
endif()
# Define the default arguments to use with 'lit', and an option for the user to
# override.
set(LIT_ARGS_DEFAULT "-sv")
if (MSVC_IDE OR XCODE)
set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
endif()
set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
# On Win32 hosts, provide an option to specify the path to the GnuWin32 tools.
if( WIN32 AND NOT CYGWIN )
set(LLVM_LIT_TOOLS_DIR "" CACHE PATH "Path to GnuWin32 tools")
endif()
# Define options to control the inclusion and default build behavior for
# components which may not strictly be necessary (tools, examples, and tests).
#
# This is primarily to support building smaller or faster project files.
option(LLVM_INCLUDE_TOOLS "Generate build targets for the LLVM tools." ON)
option(LLVM_BUILD_TOOLS
"Build the LLVM tools. If OFF, just generate build targets." ON)
option(LLVM_INCLUDE_UTILS "Generate build targets for the LLVM utils." ON)
option(LLVM_BUILD_UTILS
"Build LLVM utility binaries. If OFF, just generate build targets." ON)
option(LLVM_INCLUDE_RUNTIMES "Generate build targets for the LLVM runtimes." ON)
option(LLVM_BUILD_RUNTIMES
"Build the LLVM runtimes. If OFF, just generate build targets." ON)
option(LLVM_BUILD_RUNTIME
"Build the LLVM runtime libraries." ON)
option(LLVM_BUILD_EXAMPLES
"Build the LLVM example programs. If OFF, just generate build targets." OFF)
option(LLVM_INCLUDE_EXAMPLES "Generate build targets for the LLVM examples" ON)
option(LLVM_BUILD_TESTS
"Build LLVM unit tests. If OFF, just generate build targets." OFF)
option(LLVM_INCLUDE_TESTS "Generate build targets for the LLVM unit tests." ON)
option(LLVM_INCLUDE_GO_TESTS "Include the Go bindings tests in test build targets." ON)
option(LLVM_BUILD_BENCHMARKS "Add LLVM benchmark targets to the list of default
targets. If OFF, benchmarks still could be built using Benchmarks target." OFF)
option(LLVM_INCLUDE_BENCHMARKS "Generate benchmark targets. If OFF, benchmarks can't be built." ON)
option (LLVM_BUILD_DOCS "Build the llvm documentation." OFF)
option (LLVM_INCLUDE_DOCS "Generate build targets for llvm documentation." ON)
option (LLVM_ENABLE_DOXYGEN "Use doxygen to generate llvm API documentation." OFF)
option (LLVM_ENABLE_SPHINX "Use Sphinx to generate llvm documentation." OFF)
option (LLVM_ENABLE_OCAMLDOC "Build OCaml bindings documentation." ON)
option (LLVM_ENABLE_BINDINGS "Build bindings." ON)
set(LLVM_INSTALL_DOXYGEN_HTML_DIR "share/doc/llvm/doxygen-html"
CACHE STRING "Doxygen-generated HTML documentation install directory")
set(LLVM_INSTALL_OCAMLDOC_HTML_DIR "share/doc/llvm/ocaml-html"
CACHE STRING "OCamldoc-generated HTML documentation install directory")
option (LLVM_BUILD_EXTERNAL_COMPILER_RT
"Build compiler-rt as an external project." OFF)
option (LLVM_VERSION_PRINTER_SHOW_HOST_TARGET_INFO
"Show target and host info when tools are invoked with --version." ON)
# You can configure which libraries from LLVM you want to include in the
# shared library by setting LLVM_DYLIB_COMPONENTS to a semi-colon delimited
# list of LLVM components. All component names handled by llvm-config are valid.
if(NOT DEFINED LLVM_DYLIB_COMPONENTS)
set(LLVM_DYLIB_COMPONENTS "all" CACHE STRING
"Semicolon-separated list of components to include in libLLVM, or \"all\".")
endif()
option(LLVM_LINK_LLVM_DYLIB "Link tools against the libllvm dynamic library" OFF)
if(MSVC)
option(LLVM_BUILD_LLVM_C_DYLIB "Build LLVM-C.dll (Windows only)" OFF)
else()
option(LLVM_BUILD_LLVM_C_DYLIB "Build libllvm-c re-export library (Darwin only)" OFF)
endif()
set(LLVM_BUILD_LLVM_DYLIB_default OFF)
if(LLVM_LINK_LLVM_DYLIB OR (LLVM_BUILD_LLVM_C_DYLIB AND NOT MSVC))
set(LLVM_BUILD_LLVM_DYLIB_default ON)
endif()
option(LLVM_BUILD_LLVM_DYLIB "Build libllvm dynamic library" ${LLVM_BUILD_LLVM_DYLIB_default})
option(LLVM_OPTIMIZED_TABLEGEN "Force TableGen to be built with optimization" OFF)
if(CMAKE_CROSSCOMPILING OR (LLVM_OPTIMIZED_TABLEGEN AND (LLVM_ENABLE_ASSERTIONS OR CMAKE_CONFIGURATION_TYPES)))
set(LLVM_USE_HOST_TOOLS ON)
endif()
if (MSVC_IDE AND NOT (MSVC_VERSION LESS 1900))
option(LLVM_ADD_NATIVE_VISUALIZERS_TO_SOLUTION "Configure project to use Visual Studio native visualizers" TRUE)
else()
set(LLVM_ADD_NATIVE_VISUALIZERS_TO_SOLUTION FALSE CACHE INTERNAL "For Visual Studio 2013, manually copy natvis files to Documents\\Visual Studio 2013\\Visualizers" FORCE)
endif()
if (LLVM_BUILD_INSTRUMENTED OR LLVM_BUILD_INSTRUMENTED_COVERAGE OR
LLVM_ENABLE_IR_PGO)
if(NOT LLVM_PROFILE_MERGE_POOL_SIZE)
# A pool size of 1-2 is probably sufficient on an SSD. 3-4 should be fine
# for spinning disks. Anything higher may only help on slower media.
set(LLVM_PROFILE_MERGE_POOL_SIZE "4")
endif()
if(NOT LLVM_PROFILE_FILE_PATTERN)
if(NOT LLVM_PROFILE_DATA_DIR)
file(TO_NATIVE_PATH "${LLVM_BINARY_DIR}/profiles" LLVM_PROFILE_DATA_DIR)
endif()
file(TO_NATIVE_PATH "${LLVM_PROFILE_DATA_DIR}/%${LLVM_PROFILE_MERGE_POOL_SIZE}m.profraw" LLVM_PROFILE_FILE_PATTERN)
endif()
endif()
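# A minimal sketch of the defaults produced above, assuming a hypothetical
# build directory (illustrative only, not taken from this file):
#   LLVM_PROFILE_MERGE_POOL_SIZE -> 4
#   LLVM_PROFILE_DATA_DIR        -> <build-dir>/profiles
#   LLVM_PROFILE_FILE_PATTERN    -> <build-dir>/profiles/%4m.profraw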
if (LLVM_BUILD_STATIC)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static")
endif()
# Override the default target with an environment variable named by LLVM_TARGET_TRIPLE_ENV.
set(LLVM_TARGET_TRIPLE_ENV CACHE STRING "The name of environment variable to override default target. Disabled by blank.")
mark_as_advanced(LLVM_TARGET_TRIPLE_ENV)
set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR OFF CACHE BOOL
"Enable per-target runtimes directory")
# All options referred to from HandleLLVMOptions have to be specified
# BEFORE this include, otherwise options will not be correctly set on
# first cmake run
include(config-ix)
string(REPLACE "Native" ${LLVM_NATIVE_ARCH}
LLVM_TARGETS_TO_BUILD "${LLVM_TARGETS_TO_BUILD}")
list(REMOVE_DUPLICATES LLVM_TARGETS_TO_BUILD)
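# A minimal sketch of the substitution above, assuming a hypothetical x86-64
# host where config-ix sets LLVM_NATIVE_ARCH to X86 (illustrative only):
#   -DLLVM_TARGETS_TO_BUILD="Native;AArch64" expands here to "X86;AArch64".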
# By default, we target the host, but this can be overridden at CMake
# invocation time.
set(LLVM_DEFAULT_TARGET_TRIPLE "${LLVM_HOST_TRIPLE}" CACHE STRING
"Default target for which LLVM will generate code." )
set(TARGET_TRIPLE "${LLVM_DEFAULT_TARGET_TRIPLE}")
message(STATUS "LLVM host triple: ${LLVM_HOST_TRIPLE}")
message(STATUS "LLVM default target triple: ${LLVM_DEFAULT_TARGET_TRIPLE}")
include(HandleLLVMOptions)
# Verify that we can find a Python 2 interpreter. Python 3 is unsupported.
# FIXME: We should support systems with only Python 3, but that requires work
# on LLDB.
set(Python_ADDITIONAL_VERSIONS 2.7)
include(FindPythonInterp)
if( NOT PYTHONINTERP_FOUND )
message(FATAL_ERROR
"Unable to find Python interpreter, required for builds and testing.
Please install Python or specify the PYTHON_EXECUTABLE CMake variable.")
endif()
if( ${PYTHON_VERSION_STRING} VERSION_LESS 2.7 )
message(FATAL_ERROR "Python 2.7 or newer is required")
endif()
######
# LLVMBuild Integration
#
# We use llvm-build to generate all the data required by the CMake based
# build system in one swoop:
#
# - We generate a file (a CMake fragment) in the object root which contains
# all the definitions that are required by CMake.
#
# - We generate the library table used by llvm-config.
#
# - We generate the dependencies for the CMake fragment, so that we will
# automatically reconfigure ourselves.
set(LLVMBUILDTOOL "${LLVM_MAIN_SRC_DIR}/utils/llvm-build/llvm-build")
set(LLVMCONFIGLIBRARYDEPENDENCIESINC
"${LLVM_BINARY_DIR}/tools/llvm-config/LibraryDependencies.inc")
set(LLVMBUILDCMAKEFRAG
"${LLVM_BINARY_DIR}/LLVMBuild.cmake")
# Create the list of optional components that are enabled
if (LLVM_USE_INTEL_JITEVENTS)
set(LLVMOPTIONALCOMPONENTS IntelJITEvents)
endif (LLVM_USE_INTEL_JITEVENTS)
if (LLVM_USE_OPROFILE)
set(LLVMOPTIONALCOMPONENTS ${LLVMOPTIONALCOMPONENTS} OProfileJIT)
endif (LLVM_USE_OPROFILE)
if (LLVM_USE_PERF)
set(LLVMOPTIONALCOMPONENTS ${LLVMOPTIONALCOMPONENTS} PerfJITEvents)
endif (LLVM_USE_PERF)
message(STATUS "Constructing LLVMBuild project information")
execute_process(
COMMAND ${PYTHON_EXECUTABLE} -B ${LLVMBUILDTOOL}
--native-target "${LLVM_NATIVE_ARCH}"
--enable-targets "${LLVM_TARGETS_TO_BUILD}"
--enable-optional-components "${LLVMOPTIONALCOMPONENTS}"
--write-library-table ${LLVMCONFIGLIBRARYDEPENDENCIESINC}
--write-cmake-fragment ${LLVMBUILDCMAKEFRAG}
OUTPUT_VARIABLE LLVMBUILDOUTPUT
ERROR_VARIABLE LLVMBUILDERRORS
OUTPUT_STRIP_TRAILING_WHITESPACE
ERROR_STRIP_TRAILING_WHITESPACE
RESULT_VARIABLE LLVMBUILDRESULT)
# On Win32, CMake doesn't properly handle piping the default output/error
# streams into the GUI console. So, we explicitly catch and report them.
if( NOT "${LLVMBUILDOUTPUT}" STREQUAL "")
message(STATUS "llvm-build output: ${LLVMBUILDOUTPUT}")
endif()
if( NOT "${LLVMBUILDRESULT}" STREQUAL "0" )
message(FATAL_ERROR
"Unexpected failure executing llvm-build: ${LLVMBUILDERRORS}")
endif()
# Include the generated CMake fragment. This will define properties from the
# LLVMBuild files in a format which is easy to consume from CMake, and will add
# the dependencies so that CMake will reconfigure properly when the LLVMBuild
# files change.
include(${LLVMBUILDCMAKEFRAG})
######
# Configure all of the various header file fragments LLVM uses which depend on
# configuration variables.
set(LLVM_ENUM_TARGETS "")
set(LLVM_ENUM_ASM_PRINTERS "")
set(LLVM_ENUM_ASM_PARSERS "")
set(LLVM_ENUM_DISASSEMBLERS "")
foreach(t ${LLVM_TARGETS_TO_BUILD})
set( td ${LLVM_MAIN_SRC_DIR}/lib/Target/${t} )
list(FIND LLVM_ALL_TARGETS ${t} idx)
list(FIND LLVM_EXPERIMENTAL_TARGETS_TO_BUILD ${t} idy)
# At this point, LLVMBUILDTOOL already checked all the targets passed in
# LLVM_TARGETS_TO_BUILD and LLVM_EXPERIMENTAL_TARGETS_TO_BUILD, so
# this test just makes sure that any experimental targets were passed via
# LLVM_EXPERIMENTAL_TARGETS_TO_BUILD, not LLVM_TARGETS_TO_BUILD.
if( idx LESS 0 AND idy LESS 0 )
message(FATAL_ERROR "The target `${t}' is experimental and must be passed "
"via LLVM_EXPERIMENTAL_TARGETS_TO_BUILD.")
else()
set(LLVM_ENUM_TARGETS "${LLVM_ENUM_TARGETS}LLVM_TARGET(${t})\n")
endif()
file(GLOB asmp_file "${td}/*AsmPrinter.cpp")
if( asmp_file )
set(LLVM_ENUM_ASM_PRINTERS
"${LLVM_ENUM_ASM_PRINTERS}LLVM_ASM_PRINTER(${t})\n")
endif()
if( EXISTS ${td}/AsmParser/CMakeLists.txt )
set(LLVM_ENUM_ASM_PARSERS
"${LLVM_ENUM_ASM_PARSERS}LLVM_ASM_PARSER(${t})\n")
endif()
if( EXISTS ${td}/Disassembler/CMakeLists.txt )
set(LLVM_ENUM_DISASSEMBLERS
"${LLVM_ENUM_DISASSEMBLERS}LLVM_DISASSEMBLER(${t})\n")
endif()
endforeach(t)
# Produce the target definition files, which provide a way for clients to easily
# include various classes of targets.
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/AsmPrinters.def.in
${LLVM_INCLUDE_DIR}/llvm/Config/AsmPrinters.def
)
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/AsmParsers.def.in
${LLVM_INCLUDE_DIR}/llvm/Config/AsmParsers.def
)
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/Disassemblers.def.in
${LLVM_INCLUDE_DIR}/llvm/Config/Disassemblers.def
)
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/Targets.def.in
${LLVM_INCLUDE_DIR}/llvm/Config/Targets.def
)
# Configure the three LLVM configuration header files.
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/config.h.cmake
${LLVM_INCLUDE_DIR}/llvm/Config/config.h)
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/llvm-config.h.cmake
${LLVM_INCLUDE_DIR}/llvm/Config/llvm-config.h)
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/abi-breaking.h.cmake
${LLVM_INCLUDE_DIR}/llvm/Config/abi-breaking.h)
# Add target for generating source rpm package.
set(LLVM_SRPM_USER_BINARY_SPECFILE ${CMAKE_CURRENT_SOURCE_DIR}/llvm.spec.in
CACHE FILEPATH ".spec file to use for srpm generation")
set(LLVM_SRPM_BINARY_SPECFILE ${CMAKE_CURRENT_BINARY_DIR}/llvm.spec)
set(LLVM_SRPM_DIR "${CMAKE_CURRENT_BINARY_DIR}/srpm")
# SVN_REVISION and GIT_COMMIT get set by the call to add_version_info_from_vcs.
# DUMMY_VAR contains a version string which we don't care about.
add_version_info_from_vcs(DUMMY_VAR)
if ( SVN_REVISION )
set(LLVM_RPM_SPEC_REVISION "r${SVN_REVISION}")
elseif ( GIT_COMMIT )
set (LLVM_RPM_SPEC_REVISION "g${GIT_COMMIT}")
endif()
configure_file(
${LLVM_SRPM_USER_BINARY_SPECFILE}
${LLVM_SRPM_BINARY_SPECFILE} @ONLY)
add_custom_target(srpm
COMMAND cpack -G TGZ --config CPackSourceConfig.cmake -B ${LLVM_SRPM_DIR}/SOURCES
COMMAND rpmbuild -bs --define '_topdir ${LLVM_SRPM_DIR}' ${LLVM_SRPM_BINARY_SPECFILE})
set_target_properties(srpm PROPERTIES FOLDER "Misc")
# They are not referenced. See set_output_directory().
set( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/bin )
set( CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX} )
set( CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX} )
if(APPLE AND DARWIN_LTO_LIBRARY)
set(CMAKE_EXE_LINKER_FLAGS
"${CMAKE_EXE_LINKER_FLAGS} -Wl,-lto_library -Wl,${DARWIN_LTO_LIBRARY}")
set(CMAKE_SHARED_LINKER_FLAGS
"${CMAKE_SHARED_LINKER_FLAGS} -Wl,-lto_library -Wl,${DARWIN_LTO_LIBRARY}")
set(CMAKE_MODULE_LINKER_FLAGS
"${CMAKE_MODULE_LINKER_FLAGS} -Wl,-lto_library -Wl,${DARWIN_LTO_LIBRARY}")
endif()
# Work around a broken bfd ld behavior. When linking a binary with a
# foo.so library, it will try to find any library that foo.so uses and
# check its symbols. This is wasteful (the check was done when foo.so
# was created) and can fail since it is not the dynamic linker and
# doesn't know how to handle search paths correctly.
if (UNIX AND NOT APPLE AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "SunOS|AIX")
set(CMAKE_EXE_LINKER_FLAGS
"${CMAKE_EXE_LINKER_FLAGS} -Wl,-allow-shlib-undefined")
endif()
set(CMAKE_INCLUDE_CURRENT_DIR ON)
include_directories( ${LLVM_INCLUDE_DIR} ${LLVM_MAIN_INCLUDE_DIR})
# when crosscompiling import the executable targets from a file
if(LLVM_USE_HOST_TOOLS)
include(CrossCompile)
endif(LLVM_USE_HOST_TOOLS)
if(LLVM_TARGET_IS_CROSSCOMPILE_HOST)
# Dummy use to avoid CMake Warning: Manually-specified variables were not used
# (this is a variable that CrossCompile sets on recursive invocations)
endif()
if(${CMAKE_SYSTEM_NAME} MATCHES "(FreeBSD|DragonFly)")
# On FreeBSD, /usr/local/* is not used by default. In order to build LLVM
# with libxml2, iconv.h, etc., we must add /usr/local paths.
include_directories(SYSTEM "/usr/local/include")
link_directories("/usr/local/lib")
endif(${CMAKE_SYSTEM_NAME} MATCHES "(FreeBSD|DragonFly)")
if( ${CMAKE_SYSTEM_NAME} MATCHES SunOS )
# special hack for Solaris to handle crazy system sys/regset.h
include_directories("${LLVM_MAIN_INCLUDE_DIR}/llvm/Support/Solaris")
endif( ${CMAKE_SYSTEM_NAME} MATCHES SunOS )
# Make sure we don't get -rdynamic in every binary. For those that need it,
# use export_executable_symbols(target).
set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
set(LLVM_PROFDATA_FILE "" CACHE FILEPATH
"Profiling data file to use when compiling in order to improve runtime performance.")
if(LLVM_PROFDATA_FILE AND EXISTS ${LLVM_PROFDATA_FILE})
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" )
add_definitions("-fprofile-instr-use=${LLVM_PROFDATA_FILE}")
else()
message(FATAL_ERROR "LLVM_PROFDATA_FILE can only be specified when compiling with clang")
endif()
endif()
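# A minimal sketch of the effect above, assuming a hypothetical profile file
# (illustrative only, not taken from this file): configuring with Clang and
#   -DLLVM_PROFDATA_FILE=/tmp/llvm.profdata
# adds -fprofile-instr-use=/tmp/llvm.profdata to every compile command.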
include(AddLLVM)
include(TableGen)
if( MINGW AND NOT "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" )
# People report that -O3 is unreliable on MinGW. The traditional
# build also uses -O2 for that reason:
llvm_replace_compiler_option(CMAKE_CXX_FLAGS_RELEASE "-O3" "-O2")
endif()
# Put this before tblgen. Otherwise we have a circular dependency.
add_subdirectory(lib/Demangle)
add_subdirectory(lib/Support)
add_subdirectory(lib/TableGen)
add_subdirectory(utils/TableGen)
add_subdirectory(include/llvm)
add_subdirectory(lib)
if( LLVM_INCLUDE_UTILS )
add_subdirectory(utils/FileCheck)
add_subdirectory(utils/PerfectShuffle)
add_subdirectory(utils/count)
add_subdirectory(utils/not)
add_subdirectory(utils/yaml-bench)
else()
if ( LLVM_INCLUDE_TESTS )
message(FATAL_ERROR "Including tests when not building utils will not work.
Either set LLVM_INCLUDE_UTILS to On, or set LLVM_INCLUDE_TESTS to Off.")
endif()
endif()
# Use LLVM_ADD_NATIVE_VISUALIZERS_TO_SOLUTION instead of LLVM_INCLUDE_UTILS because it is not really a util
if (LLVM_ADD_NATIVE_VISUALIZERS_TO_SOLUTION)
add_subdirectory(utils/LLVMVisualizers)
endif()
foreach( binding ${LLVM_BINDINGS_LIST} )
if( EXISTS "${LLVM_MAIN_SRC_DIR}/bindings/${binding}/CMakeLists.txt" )
add_subdirectory(bindings/${binding})
endif()
endforeach()
add_subdirectory(projects)
if( LLVM_INCLUDE_TOOLS )
add_subdirectory(tools)
endif()
if( LLVM_INCLUDE_RUNTIMES )
add_subdirectory(runtimes)
endif()
if( LLVM_INCLUDE_EXAMPLES )
add_subdirectory(examples)
endif()
if( LLVM_INCLUDE_TESTS )
if(EXISTS ${LLVM_MAIN_SRC_DIR}/projects/test-suite AND TARGET clang)
include(LLVMExternalProjectUtils)
llvm_ExternalProject_Add(test-suite ${LLVM_MAIN_SRC_DIR}/projects/test-suite
USE_TOOLCHAIN
EXCLUDE_FROM_ALL
NO_INSTALL
ALWAYS_CLEAN)
endif()
add_subdirectory(utils/lit)
add_subdirectory(test)
add_subdirectory(unittests)
if( LLVM_INCLUDE_UTILS )
add_subdirectory(utils/unittest)
endif()
if (WIN32)
# This utility is used to prevent crashing tests from calling Dr. Watson on
# Windows.
add_subdirectory(utils/KillTheDoctor)
endif()
# Add a global check rule now that all subdirectories have been traversed
# and we know the total set of lit testsuites.
get_property(LLVM_LIT_TESTSUITES GLOBAL PROPERTY LLVM_LIT_TESTSUITES)
get_property(LLVM_LIT_PARAMS GLOBAL PROPERTY LLVM_LIT_PARAMS)
get_property(LLVM_LIT_DEPENDS GLOBAL PROPERTY LLVM_LIT_DEPENDS)
get_property(LLVM_LIT_EXTRA_ARGS GLOBAL PROPERTY LLVM_LIT_EXTRA_ARGS)
get_property(LLVM_ADDITIONAL_TEST_TARGETS
GLOBAL PROPERTY LLVM_ADDITIONAL_TEST_TARGETS)
get_property(LLVM_ADDITIONAL_TEST_DEPENDS
GLOBAL PROPERTY LLVM_ADDITIONAL_TEST_DEPENDS)
add_lit_target(check-all
"Running all regression tests"
${LLVM_LIT_TESTSUITES}
PARAMS ${LLVM_LIT_PARAMS}
DEPENDS ${LLVM_LIT_DEPENDS} ${LLVM_ADDITIONAL_TEST_TARGETS}
ARGS ${LLVM_LIT_EXTRA_ARGS}
)
if(TARGET check-runtimes)
add_dependencies(check-all check-runtimes)
endif()
add_custom_target(test-depends
DEPENDS ${LLVM_LIT_DEPENDS} ${LLVM_ADDITIONAL_TEST_DEPENDS})
set_target_properties(test-depends PROPERTIES FOLDER "Tests")
endif()
if (LLVM_INCLUDE_DOCS)
add_subdirectory(docs)
endif()
add_subdirectory(cmake/modules)
# Do this last so that all lit targets have already been created.
if (LLVM_INCLUDE_UTILS)
add_subdirectory(utils/llvm-lit)
endif()
if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
install(DIRECTORY include/llvm include/llvm-c
DESTINATION include
COMPONENT llvm-headers
FILES_MATCHING
PATTERN "*.def"
PATTERN "*.h"
PATTERN "*.td"
PATTERN "*.inc"
PATTERN "LICENSE.TXT"
PATTERN ".svn" EXCLUDE
)
install(DIRECTORY ${LLVM_INCLUDE_DIR}/llvm ${LLVM_INCLUDE_DIR}/llvm-c
DESTINATION include
COMPONENT llvm-headers
FILES_MATCHING
PATTERN "*.def"
PATTERN "*.h"
PATTERN "*.gen"
PATTERN "*.inc"
# Exclude include/llvm/CMakeFiles/intrinsics_gen.dir, matched by "*.def"
PATTERN "CMakeFiles" EXCLUDE
PATTERN "config.h" EXCLUDE
PATTERN ".svn" EXCLUDE
)
if (LLVM_INSTALL_MODULEMAPS)
install(DIRECTORY include/llvm include/llvm-c
DESTINATION include
COMPONENT llvm-headers
FILES_MATCHING
PATTERN "module.modulemap"
)
install(FILES include/llvm/module.install.modulemap
DESTINATION include/llvm
COMPONENT llvm-headers
RENAME "module.extern.modulemap"
)
endif(LLVM_INSTALL_MODULEMAPS)
# Installing the headers needs to depend on generating any public
# tablegen'd headers.
add_custom_target(llvm-headers DEPENDS intrinsics_gen)
set_target_properties(llvm-headers PROPERTIES FOLDER "Misc")
if (NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-llvm-headers
DEPENDS llvm-headers
COMPONENT llvm-headers)
endif()
# Custom target to install all libraries.
add_custom_target(llvm-libraries)
set_target_properties(llvm-libraries PROPERTIES FOLDER "Misc")
if (NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-llvm-libraries
DEPENDS llvm-libraries
COMPONENT llvm-libraries)
endif()
get_property(LLVM_LIBS GLOBAL PROPERTY LLVM_LIBS)
if(LLVM_LIBS)
list(REMOVE_DUPLICATES LLVM_LIBS)
foreach(lib ${LLVM_LIBS})
add_dependencies(llvm-libraries ${lib})
if (NOT LLVM_ENABLE_IDE)
add_dependencies(install-llvm-libraries install-${lib})
endif()
endforeach()
endif()
endif()
# This must be at the end of the LLVM root CMakeLists file because it must run
# after all targets are created.
if(LLVM_DISTRIBUTION_COMPONENTS)
if(LLVM_ENABLE_IDE)
message(FATAL_ERROR "LLVM_DISTRIBUTION_COMPONENTS cannot be specified with multi-configuration generators (i.e. Xcode or Visual Studio)")
endif()
add_custom_target(distribution)
add_custom_target(install-distribution)
add_custom_target(install-distribution-stripped)
foreach(target ${LLVM_DISTRIBUTION_COMPONENTS} ${LLVM_RUNTIME_DISTRIBUTION_COMPONENTS})
if(TARGET ${target})
add_dependencies(distribution ${target})
else()
message(SEND_ERROR "Specified distribution component '${target}' doesn't have a target")
endif()
if(TARGET install-${target})
add_dependencies(install-distribution install-${target})
else()
message(SEND_ERROR "Specified distribution component '${target}' doesn't have an install target")
endif()
if(TARGET install-${target}-stripped)
add_dependencies(install-distribution-stripped install-${target}-stripped)
else()
message(SEND_ERROR "Specified distribution component '${target}' doesn't have an install-stripped target."
" Its installation target creation should be changed to use add_llvm_install_targets,"
" or you should manually create the 'install-${target}-stripped' target.")
endif()
endforeach()
endif()
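# A minimal sketch of what the loop above wires up, assuming a hypothetical
# component list (illustrative only, not taken from this file):
#   -DLLVM_DISTRIBUTION_COMPONENTS="llvm-ar;LTO"
# makes `distribution` depend on the llvm-ar and LTO targets, and makes
# `install-distribution` / `install-distribution-stripped` depend on
# install-llvm-ar, install-LTO and their -stripped variants.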
# This allows us to deploy the Universal CRT DLLs by passing -DCMAKE_INSTALL_UCRT_LIBRARIES=ON to CMake
if (MSVC AND CMAKE_HOST_SYSTEM_NAME STREQUAL "Windows" AND CMAKE_INSTALL_UCRT_LIBRARIES)
include(InstallRequiredSystemLibraries)
endif()
if (LLVM_INCLUDE_BENCHMARKS)
# Override benchmark defaults so that when the library itself is updated these
# modifications are not lost.
set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "Disable benchmark testing" FORCE)
set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "Disable benchmark exceptions" FORCE)
set(BENCHMARK_ENABLE_INSTALL OFF CACHE BOOL "Don't install benchmark" FORCE)
set(BENCHMARK_DOWNLOAD_DEPENDENCIES OFF CACHE BOOL "Don't download dependencies" FORCE)
set(BENCHMARK_ENABLE_GTEST_TESTS OFF CACHE BOOL "Disable Google Test in benchmark" FORCE)
# Since LLVM requires C++11 it is safe to assume that std::regex is available.
set(HAVE_STD_REGEX ON CACHE BOOL "OK" FORCE)
add_subdirectory(utils/benchmark)
add_subdirectory(benchmarks)
endif()
Index: vendor/llvm/dist-release_80/bindings/go/llvm/ir_test.go
===================================================================
--- vendor/llvm/dist-release_80/bindings/go/llvm/ir_test.go (revision 344165)
+++ vendor/llvm/dist-release_80/bindings/go/llvm/ir_test.go (revision 344166)
@@ -1,167 +1,167 @@
//===- ir_test.go - Tests for ir ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file tests bindings for the ir component.
//
//===----------------------------------------------------------------------===//
package llvm
import (
"strings"
"testing"
)
func testAttribute(t *testing.T, name string) {
mod := NewModule("")
defer mod.Dispose()
ftyp := FunctionType(VoidType(), nil, false)
fn := AddFunction(mod, "foo", ftyp)
kind := AttributeKindID(name)
attr := mod.Context().CreateEnumAttribute(kind, 0)
fn.AddFunctionAttr(attr)
newattr := fn.GetEnumFunctionAttribute(kind)
if attr != newattr {
- t.Errorf("got attribute mask %d, want %d", newattr, attr)
+ t.Errorf("got attribute %p, want %p", newattr.C, attr.C)
}
text := mod.String()
if !strings.Contains(text, " "+name+" ") {
t.Errorf("expected attribute '%s', got:\n%s", name, text)
}
fn.RemoveEnumFunctionAttribute(kind)
newattr = fn.GetEnumFunctionAttribute(kind)
if !newattr.IsNil() {
- t.Errorf("got attribute mask %d, want 0", newattr)
+ t.Errorf("got attribute %p, want 0", newattr.C)
}
}
func TestAttributes(t *testing.T) {
// Tests that our attribute constants haven't drifted from LLVM's.
attrTests := []string{
"sanitize_address",
"alwaysinline",
"builtin",
"byval",
"convergent",
"inalloca",
"inlinehint",
"inreg",
"jumptable",
"minsize",
"naked",
"nest",
"noalias",
"nobuiltin",
"nocapture",
"noduplicate",
"noimplicitfloat",
"noinline",
"nonlazybind",
"nonnull",
"noredzone",
"noreturn",
"nounwind",
"optnone",
"optsize",
"readnone",
"readonly",
"returned",
"returns_twice",
"signext",
"safestack",
"ssp",
"sspreq",
"sspstrong",
"sret",
"sanitize_thread",
"sanitize_memory",
"uwtable",
"zeroext",
"cold",
"nocf_check",
}
for _, name := range attrTests {
testAttribute(t, name)
}
}
func TestDebugLoc(t *testing.T) {
mod := NewModule("")
defer mod.Dispose()
ctx := mod.Context()
b := ctx.NewBuilder()
defer b.Dispose()
d := NewDIBuilder(mod)
defer func() {
d.Destroy()
}()
file := d.CreateFile("dummy_file", "dummy_dir")
voidInfo := d.CreateBasicType(DIBasicType{Name: "void"})
typeInfo := d.CreateSubroutineType(DISubroutineType{
File: file,
Parameters: []Metadata{voidInfo},
Flags: 0,
})
scope := d.CreateFunction(file, DIFunction{
Name: "foo",
LinkageName: "foo",
Line: 10,
ScopeLine: 10,
Type: typeInfo,
File: file,
IsDefinition: true,
})
b.SetCurrentDebugLocation(10, 20, scope, Metadata{})
loc := b.GetCurrentDebugLocation()
if loc.Line != 10 {
t.Errorf("Got line %d, though wanted 10", loc.Line)
}
if loc.Col != 20 {
t.Errorf("Got column %d, though wanted 20", loc.Col)
}
if loc.Scope.C != scope.C {
t.Errorf("Got metadata %v as scope, though wanted %v", loc.Scope.C, scope.C)
}
}
func TestSubtypes(t *testing.T) {
cont := NewContext()
defer cont.Dispose()
int_pointer := PointerType(cont.Int32Type(), 0)
int_inner := int_pointer.Subtypes()
if len(int_inner) != 1 {
t.Errorf("Got size %d, though wanted 1", len(int_inner))
}
if int_inner[0] != cont.Int32Type() {
t.Errorf("Expected int32 type")
}
st_pointer := cont.StructType([]Type{cont.Int32Type(), cont.Int8Type()}, false)
st_inner := st_pointer.Subtypes()
if len(st_inner) != 2 {
t.Errorf("Got size %d, though wanted 2", len(int_inner))
}
if st_inner[0] != cont.Int32Type() {
t.Errorf("Expected first struct field to be int32")
}
if st_inner[1] != cont.Int8Type() {
t.Errorf("Expected second struct field to be int8")
}
}
Index: vendor/llvm/dist-release_80/cmake/modules/CheckCompilerVersion.cmake
===================================================================
--- vendor/llvm/dist-release_80/cmake/modules/CheckCompilerVersion.cmake (revision 344165)
+++ vendor/llvm/dist-release_80/cmake/modules/CheckCompilerVersion.cmake (revision 344166)
@@ -1,52 +1,94 @@
-# Check if the host compiler is new enough. LLVM requires at least GCC 4.8,
-# MSVC 2015 (Update 3), or Clang 3.1.
+# Check if the host compiler is new enough.
+# These versions are updated based on the following policy:
+# llvm.org/docs/DeveloperPolicy.html#toolchain
include(CheckCXXSourceCompiles)
-if(NOT DEFINED LLVM_COMPILER_CHECKED)
- set(LLVM_COMPILER_CHECKED ON)
+set(GCC_MIN 4.8)
+set(GCC_SOFT_ERROR 5.1)
+set(CLANG_MIN 3.1)
+set(CLANG_SOFT_ERROR 3.5)
+set(APPLECLANG_MIN 3.1)
+set(APPLECLANG_SOFT_ERROR 6.0)
+set(MSVC_MIN 19.00.24213.1)
+set(MSVC_SOFT_ERROR 19.1)
- if(NOT LLVM_FORCE_USE_OLD_TOOLCHAIN)
- if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
- message(FATAL_ERROR "Host GCC version must be at least 4.8!")
- endif()
- elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
- if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.1)
- message(FATAL_ERROR "Host Clang version must be at least 3.1!")
- endif()
+# Map the above GCC versions to dates: https://gcc.gnu.org/develop.html#timeline
+set(GCC_MIN_DATE 20130322)
+set(GCC_SOFT_ERROR_DATE 20150422)
- if (CMAKE_CXX_SIMULATE_ID MATCHES "MSVC")
- if (CMAKE_CXX_SIMULATE_VERSION VERSION_LESS 19.0)
- message(FATAL_ERROR "Host Clang must have at least -fms-compatibility-version=19.0")
- endif()
- set(CLANG_CL 1)
- elseif(NOT LLVM_ENABLE_LIBCXX)
- # Otherwise, test that we aren't using too old of a version of libstdc++
- # with the Clang compiler. This is tricky as there is no real way to
- # check the version of libstdc++ directly. Instead we test for a known
- # bug in libstdc++4.6 that is fixed in libstdc++4.7.
- set(OLD_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
- set(OLD_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
- set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=c++0x")
- check_cxx_source_compiles("
-#include <atomic>
-std::atomic<float> x(0.0f);
-int main() { return (float)x; }"
- LLVM_NO_OLD_LIBSTDCXX)
- if(NOT LLVM_NO_OLD_LIBSTDCXX)
- message(FATAL_ERROR "Host Clang must be able to find libstdc++4.8 or newer!")
- endif()
- set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})
- set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQUIRED_LIBRARIES})
+
+if(DEFINED LLVM_COMPILER_CHECKED)
+ return()
+endif()
+set(LLVM_COMPILER_CHECKED ON)
+
+if(LLVM_FORCE_USE_OLD_TOOLCHAIN)
+ return()
+endif()
+
+function(check_compiler_version NAME NICE_NAME MINIMUM_VERSION SOFT_ERROR_VERSION)
+ if(NOT CMAKE_CXX_COMPILER_ID STREQUAL NAME)
+ return()
+ endif()
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS MINIMUM_VERSION)
+ message(FATAL_ERROR "Host ${NICE_NAME} version must be at least ${MINIMUM_VERSION}, your version is ${CMAKE_CXX_COMPILER_VERSION}.")
+ elseif(CMAKE_CXX_COMPILER_VERSION VERSION_LESS SOFT_ERROR_VERSION)
+ if(LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN)
+ message(WARNING "Host ${NICE_NAME} version should be at least ${SOFT_ERROR_VERSION} because LLVM will soon use new C++ features which your toolchain version doesn't support. Your version is ${CMAKE_CXX_COMPILER_VERSION}. Ignoring because you've set LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN, but very soon your toolchain won't be supported.")
+ else()
+ message(FATAL_ERROR "Host ${NICE_NAME} version should be at least ${SOFT_ERROR_VERSION} because LLVM will soon use new C++ features which your toolchain version doesn't support. Your version is ${CMAKE_CXX_COMPILER_VERSION}. You can temporarily opt out using LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN, but very soon your toolchain won't be supported.")
+ endif()
+ endif()
+endfunction(check_compiler_version)
+
+check_compiler_version("GNU" "GCC" ${GCC_MIN} ${GCC_SOFT_ERROR})
+check_compiler_version("Clang" "Clang" ${CLANG_MIN} ${CLANG_SOFT_ERROR})
+check_compiler_version("AppleClang" "Apple Clang" ${APPLECLANG_MIN} ${APPLECLANG_SOFT_ERROR})
+check_compiler_version("MSVC" "Visual Studio" ${MSVC_MIN} ${MSVC_SOFT_ERROR})
+
+if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+ if (CMAKE_CXX_SIMULATE_ID MATCHES "MSVC")
+ if (CMAKE_CXX_SIMULATE_VERSION VERSION_LESS MSVC_MIN)
+ message(FATAL_ERROR "Host Clang must have at least -fms-compatibility-version=${MSVC_MIN}, your version is ${CMAKE_CXX_COMPILER_VERSION}.")
+ endif()
+ set(CLANG_CL 1)
+ elseif(NOT LLVM_ENABLE_LIBCXX)
+ # Test that we aren't using too old of a version of libstdc++.
+ set(OLD_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+ set(OLD_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=c++0x")
+ check_cxx_source_compiles("
+#include <iosfwd>
+#if defined(__GLIBCXX__)
+#if __GLIBCXX__ < ${GCC_MIN_DATE}
+#error Unsupported libstdc++ version
+#endif
+#endif
+int main() { return 0; }
+"
+ LLVM_LIBSTDCXX_MIN)
+ if(NOT LLVM_LIBSTDCXX_MIN)
+ message(FATAL_ERROR "libstdc++ version must be at least ${GCC_MIN}.")
+ endif()
+ check_cxx_source_compiles("
+#include <iosfwd>
+#if defined(__GLIBCXX__)
+#if __GLIBCXX__ < ${GCC_SOFT_ERROR_DATE}
+#error Unsupported libstdc++ version
+#endif
+#endif
+int main() { return 0; }
+"
+ LLVM_LIBSTDCXX_SOFT_ERROR)
+ if(NOT LLVM_LIBSTDCXX_SOFT_ERROR)
+ if(LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN)
+ message(WARNING "libstdc++ version should be at least ${GCC_SOFT_ERROR} because LLVM will soon use new C++ features which your toolchain version doesn't support. Ignoring because you've set LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN, but very soon your toolchain won't be supported.")
+ else()
+ message(FATAL_ERROR "libstdc++ version should be at least ${GCC_SOFT_ERROR} because LLVM will soon use new C++ features which your toolchain version doesn't support. You can temporarily opt out using LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN, but very soon your toolchain won't be supported.")
endif()
- elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
- if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.0)
- message(FATAL_ERROR "Host Visual Studio must be at least 2015")
- elseif(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.00.24213.1)
- message(WARNING "Host Visual Studio should at least be 2015 Update 3 (MSVC 19.00.24213.1)"
- " due to miscompiles from earlier versions")
- endif()
endif()
+ set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})
+ set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQUIRED_LIBRARIES})
endif()
endif()
Index: vendor/llvm/dist-release_80/cmake/modules/CrossCompile.cmake
===================================================================
--- vendor/llvm/dist-release_80/cmake/modules/CrossCompile.cmake (revision 344165)
+++ vendor/llvm/dist-release_80/cmake/modules/CrossCompile.cmake (revision 344166)
@@ -1,69 +1,70 @@
function(llvm_create_cross_target_internal target_name toolchain buildtype)
if(NOT DEFINED LLVM_${target_name}_BUILD)
set(LLVM_${target_name}_BUILD "${CMAKE_BINARY_DIR}/${target_name}")
set(LLVM_${target_name}_BUILD ${LLVM_${target_name}_BUILD} PARENT_SCOPE)
message(STATUS "Setting native build dir to " ${LLVM_${target_name}_BUILD})
endif(NOT DEFINED LLVM_${target_name}_BUILD)
if (EXISTS ${LLVM_MAIN_SRC_DIR}/cmake/platforms/${toolchain}.cmake)
set(CROSS_TOOLCHAIN_FLAGS_INIT
-DCMAKE_TOOLCHAIN_FILE=\"${LLVM_MAIN_SRC_DIR}/cmake/platforms/${toolchain}.cmake\")
else()
set(CROSS_TOOLCHAIN_FLAGS_INIT
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
)
endif()
set(CROSS_TOOLCHAIN_FLAGS_${target_name} ${CROSS_TOOLCHAIN_FLAGS_INIT}
CACHE STRING "Toolchain configuration for ${target_name}")
if (buildtype)
set(build_type_flags "-DCMAKE_BUILD_TYPE=${buildtype}")
endif()
if (LLVM_USE_LINKER AND NOT CMAKE_CROSSCOMPILING)
set(linker_flag "-DLLVM_USE_LINKER=${LLVM_USE_LINKER}")
endif()
if (LLVM_EXTERNAL_CLANG_SOURCE_DIR)
# Propagate LLVM_EXTERNAL_CLANG_SOURCE_DIR so that clang-tblgen can be built
set(external_clang_dir "-DLLVM_EXTERNAL_CLANG_SOURCE_DIR=${LLVM_EXTERNAL_CLANG_SOURCE_DIR}")
endif()
add_custom_command(OUTPUT ${LLVM_${target_name}_BUILD}
COMMAND ${CMAKE_COMMAND} -E make_directory ${LLVM_${target_name}_BUILD}
COMMENT "Creating ${LLVM_${target_name}_BUILD}...")
add_custom_target(CREATE_LLVM_${target_name}
DEPENDS ${LLVM_${target_name}_BUILD})
# Escape semicolons in the targets list so that cmake doesn't expand
# them to spaces.
string(REPLACE ";" "$<SEMICOLON>" targets_to_build_arg
"${LLVM_TARGETS_TO_BUILD}")
string(REPLACE ";" "$<SEMICOLON>" experimental_targets_to_build_arg
"${LLVM_EXPERIMENTAL_TARGETS_TO_BUILD}")
add_custom_command(OUTPUT ${LLVM_${target_name}_BUILD}/CMakeCache.txt
COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}"
-DCMAKE_MAKE_PROGRAM="${CMAKE_MAKE_PROGRAM}"
${CROSS_TOOLCHAIN_FLAGS_${target_name}} ${CMAKE_SOURCE_DIR}
-DLLVM_TARGET_IS_CROSSCOMPILE_HOST=TRUE
-DLLVM_TARGETS_TO_BUILD="${targets_to_build_arg}"
-DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD="${experimental_targets_to_build_arg}"
-DLLVM_DEFAULT_TARGET_TRIPLE="${TARGET_TRIPLE}"
-DLLVM_TARGET_ARCH="${LLVM_TARGET_ARCH}"
+ -DLLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN="${LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN}"
${build_type_flags} ${linker_flag} ${external_clang_dir}
WORKING_DIRECTORY ${LLVM_${target_name}_BUILD}
DEPENDS CREATE_LLVM_${target_name}
COMMENT "Configuring ${target_name} LLVM...")
add_custom_target(CONFIGURE_LLVM_${target_name}
DEPENDS ${LLVM_${target_name}_BUILD}/CMakeCache.txt)
endfunction()
function(llvm_create_cross_target target_name sysroot)
llvm_create_cross_target_internal(${target_name} ${sysroot} ${CMAKE_BUILD_TYPE})
endfunction()
llvm_create_cross_target_internal(NATIVE "" Release)
Index: vendor/llvm/dist-release_80/docs/CMake.rst
===================================================================
--- vendor/llvm/dist-release_80/docs/CMake.rst (revision 344165)
+++ vendor/llvm/dist-release_80/docs/CMake.rst (revision 344166)
@@ -1,815 +1,824 @@
========================
Building LLVM with CMake
========================
.. contents::
:local:
Introduction
============
`CMake <http://www.cmake.org/>`_ is a cross-platform build-generator tool. CMake
does not build the project, it generates the files needed by your build tool
(GNU make, Visual Studio, etc.) for building LLVM.
If **you are a new contributor**, please start with the :doc:`GettingStarted`
page. This page is geared for existing contributors moving from the
legacy configure/make system.
If you are really anxious about getting a functional LLVM build, go to the
`Quick start`_ section. If you are a CMake novice, start with `Basic CMake usage`_
and then go back to the `Quick start`_ section once you know what you are doing. The
`Options and variables`_ section is a reference for customizing your build. If
you already have experience with CMake, this is the recommended starting point.
This page is geared towards users of the LLVM CMake build. If you're looking for
information about modifying the LLVM CMake build system you may want to see the
:doc:`CMakePrimer` page. It has a basic overview of the CMake language.
.. _Quick start:
Quick start
===========
Here we use the command-line, non-interactive CMake interface.
#. `Download <http://www.cmake.org/cmake/resources/software.html>`_ and install
CMake. Version 3.4.3 is the minimum required.
#. Open a shell. Your development tools must be reachable from this shell
through the PATH environment variable.
#. Create a build directory. Building LLVM in the source
directory is not supported. cd to this directory:
.. code-block:: console
$ mkdir mybuilddir
$ cd mybuilddir
#. Execute this command in the shell replacing `path/to/llvm/source/root` with
the path to the root of your LLVM source tree:
.. code-block:: console
$ cmake path/to/llvm/source/root
CMake will detect your development environment, perform a series of tests, and
generate the files required for building LLVM. CMake will use default values
for all build parameters. See the `Options and variables`_ section for
a list of build parameters that you can modify.
This can fail if CMake can't detect your toolset, or if it thinks that the
environment is not sane enough. In this case, make sure that the toolset that
you intend to use is the only one reachable from the shell, and that the shell
itself is the correct one for your development environment. CMake will refuse
to build MinGW makefiles if you have a POSIX shell reachable through the PATH
environment variable, for instance. You can force CMake to use a given build
tool; for instructions, see the `Usage`_ section, below.
#. After CMake has finished running, proceed to use IDE project files, or start
the build from the build directory:
.. code-block:: console
$ cmake --build .
The ``--build`` option tells ``cmake`` to invoke the underlying build
tool (``make``, ``ninja``, ``xcodebuild``, ``msbuild``, etc.)
The underlying build tool can be invoked directly, of course, but
the ``--build`` option is portable.
#. After LLVM has finished building, install it from the build directory:
.. code-block:: console
$ cmake --build . --target install
The ``--target`` option with the ``install`` parameter, in addition to
the ``--build`` option, tells ``cmake`` to build the ``install`` target.
It is possible to set a different install prefix at installation time
by invoking the ``cmake_install.cmake`` script generated in the
build directory:
.. code-block:: console
$ cmake -DCMAKE_INSTALL_PREFIX=/tmp/llvm -P cmake_install.cmake
.. _Basic CMake usage:
.. _Usage:
Basic CMake usage
=================
This section explains basic aspects of CMake
which you may need in your day-to-day usage.
CMake comes with extensive documentation, in the form of html files, and as
online help accessible via the ``cmake`` executable itself. Execute ``cmake
--help`` for further help options.
CMake allows you to specify a build tool (e.g., GNU make, Visual Studio,
or Xcode). If not specified on the command line, CMake tries to guess which
build tool to use, based on your environment. Once it has identified your
build tool, CMake uses the corresponding *Generator* to create files for your
build tool (e.g., Makefiles or Visual Studio or Xcode project files). You can
explicitly specify the generator with the command line option ``-G "Name of the
generator"``. To see a list of the available generators on your system, execute
.. code-block:: console
$ cmake --help
This will list the generator names at the end of the help text.
Generators' names are case-sensitive, and may contain spaces. For this reason,
you should enter them exactly as they are listed in the ``cmake --help``
output, in quotes. For example, to generate project files specifically for
Visual Studio 12, you can execute:
.. code-block:: console
$ cmake -G "Visual Studio 12" path/to/llvm/source/root
For a given development platform there can be more than one adequate
generator. If you use Visual Studio, "NMake Makefiles" is a generator you can use
for building with NMake. By default, CMake chooses the most specific generator
supported by your development environment. If you want an alternative generator,
you must tell this to CMake with the ``-G`` option.
.. todo::
Explain variables and cache. Move explanation here from #options section.
.. _Options and variables:
Options and variables
=====================
Variables customize how the build will be generated. Options are boolean
variables, with possible values ON/OFF. Options and variables are defined on the
CMake command line like this:
.. code-block:: console
$ cmake -DVARIABLE=value path/to/llvm/source
You can set a variable after the initial CMake invocation to change its
value. You can also undefine a variable:
.. code-block:: console
$ cmake -UVARIABLE path/to/llvm/source
Variables are stored in the CMake cache. This is a file named ``CMakeCache.txt``
stored at the root of your build directory that is generated by ``cmake``.
Editing it yourself is not recommended.
Variables are listed in the CMake cache and later in this document with
the variable name and type separated by a colon. You can also specify the
variable and type on the CMake command line:
.. code-block:: console
$ cmake -DVARIABLE:TYPE=value path/to/llvm/source
Frequently-used CMake variables
-------------------------------
Here are some of the CMake variables that are used often, along with a
brief explanation and LLVM-specific notes. For full documentation, consult the
CMake manual, or execute ``cmake --help-variable VARIABLE_NAME``.
**CMAKE_BUILD_TYPE**:STRING
Sets the build type for ``make``-based generators. Possible values are
Release, Debug, RelWithDebInfo and MinSizeRel. If you are using an IDE such as
Visual Studio, you should use the IDE settings to set the build type.
Be aware that Release and RelWithDebInfo use different optimization levels on
most platforms.
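For example, a Release build could be requested on the command line like this
(the source path placeholder matches the Quick start section):
.. code-block:: console
$ cmake -DCMAKE_BUILD_TYPE=Release path/to/llvm/source/root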
**CMAKE_INSTALL_PREFIX**:PATH
Path where LLVM will be installed if "make install" is invoked or the
"install" target is built.
**LLVM_LIBDIR_SUFFIX**:STRING
Extra suffix to append to the directory where libraries are to be
installed. On a 64-bit architecture, one could use ``-DLLVM_LIBDIR_SUFFIX=64``
to install libraries to ``/usr/lib64``.
**CMAKE_C_FLAGS**:STRING
Extra flags to use when compiling C source files.
**CMAKE_CXX_FLAGS**:STRING
Extra flags to use when compiling C++ source files.
.. _LLVM-specific variables:
LLVM-specific variables
-----------------------
**LLVM_TARGETS_TO_BUILD**:STRING
Semicolon-separated list of targets to build, or *all* for building all
targets. Case-sensitive. Defaults to *all*. Example:
``-DLLVM_TARGETS_TO_BUILD="X86;PowerPC"``.
**LLVM_BUILD_TOOLS**:BOOL
Build LLVM tools. Defaults to ON. Targets for building each tool are generated
in any case. You can build a tool separately by invoking its target. For
example, you can build *llvm-as* with a Makefile-based system by executing *make
llvm-as* at the root of your build directory.
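For instance, with a Makefile-based build this might look like:
.. code-block:: console
$ make llvm-as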
**LLVM_INCLUDE_TOOLS**:BOOL
Generate build targets for the LLVM tools. Defaults to ON. You can use this
option to disable the generation of build targets for the LLVM tools.
**LLVM_INSTALL_BINUTILS_SYMLINKS**:BOOL
Install symlinks from the binutils tool names to the corresponding LLVM tools.
For example, ar will be symlinked to llvm-ar.
**LLVM_BUILD_EXAMPLES**:BOOL
Build LLVM examples. Defaults to OFF. Targets for building each example are
generated in any case. See documentation for *LLVM_BUILD_TOOLS* above for more
details.
**LLVM_INCLUDE_EXAMPLES**:BOOL
Generate build targets for the LLVM examples. Defaults to ON. You can use this
option to disable the generation of build targets for the LLVM examples.
**LLVM_BUILD_TESTS**:BOOL
Build LLVM unit tests. Defaults to OFF. Targets for building each unit test
are generated in any case. You can build a specific unit test using the
targets defined under *unittests*, such as ADTTests, IRTests, SupportTests,
etc. (Search for ``add_llvm_unittest`` in the subdirectories of *unittests*
for a complete list of unit tests.) It is possible to build all unit tests
with the target *UnitTests*.
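As a sketch with a Makefile-based build, the following commands would build one
specific unit test suite and then all of them (target names as listed above):
.. code-block:: console
$ make ADTTests
$ make UnitTests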
**LLVM_INCLUDE_TESTS**:BOOL
Generate build targets for the LLVM unit tests. Defaults to ON. You can use
this option to disable the generation of build targets for the LLVM unit
tests.
**LLVM_BUILD_BENCHMARKS**:BOOL
Adds benchmarks to the list of default targets. Defaults to OFF.
**LLVM_INCLUDE_BENCHMARKS**:BOOL
Generate build targets for the LLVM benchmarks. Defaults to ON.
**LLVM_APPEND_VC_REV**:BOOL
Embed version control revision info (svn revision number or Git revision id).
The version info is provided by the ``LLVM_REVISION`` macro in
``llvm/include/llvm/Support/VCSRevision.h``. Developers using git who don't
need revision info can disable this option to avoid re-linking most binaries
after a branch switch. Defaults to ON.
**LLVM_ENABLE_THREADS**:BOOL
Build with threads support, if available. Defaults to ON.
**LLVM_ENABLE_CXX1Y**:BOOL
Build in C++1y mode, if available. Defaults to OFF.
**LLVM_ENABLE_ASSERTIONS**:BOOL
Enables code assertions. Defaults to ON if and only if ``CMAKE_BUILD_TYPE``
is *Debug*.
**LLVM_ENABLE_EH**:BOOL
Build LLVM with exception-handling support. This is necessary if you wish to
link against LLVM libraries and make use of C++ exceptions in your own code
that need to propagate through LLVM code. Defaults to OFF.
**LLVM_ENABLE_EXPENSIVE_CHECKS**:BOOL
Enable additional time/memory expensive checking. Defaults to OFF.
**LLVM_ENABLE_PIC**:BOOL
Add the ``-fPIC`` flag to the compiler command-line, if the compiler supports
this flag. Some systems, like Windows, do not need this flag. Defaults to ON.
**LLVM_ENABLE_RTTI**:BOOL
Build LLVM with run-time type information. Defaults to OFF.
**LLVM_ENABLE_WARNINGS**:BOOL
Enable all compiler warnings. Defaults to ON.
**LLVM_ENABLE_PEDANTIC**:BOOL
Enable pedantic mode. This disables compiler-specific extensions, if
possible. Defaults to ON.
**LLVM_ENABLE_WERROR**:BOOL
Stop and fail the build, if a compiler warning is triggered. Defaults to OFF.
**LLVM_ABI_BREAKING_CHECKS**:STRING
Used to decide if LLVM should be built with ABI breaking checks or
not. Allowed values are `WITH_ASSERTS` (default), `FORCE_ON` and
`FORCE_OFF`. `WITH_ASSERTS` turns on ABI breaking checks in an
assertion enabled build. `FORCE_ON` (`FORCE_OFF`) turns them on
(off) irrespective of whether normal (`NDEBUG`-based) assertions are
enabled or not. A version of LLVM built with ABI breaking checks
is not ABI compatible with a version built without it.
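For example, to force the ABI breaking checks on regardless of the assertion
setting:
.. code-block:: console
$ cmake -DLLVM_ABI_BREAKING_CHECKS=FORCE_ON path/to/llvm/source/root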
**LLVM_BUILD_32_BITS**:BOOL
Build 32-bit executables and libraries on 64-bit systems. This option is
available only on some 64-bit Unix systems. Defaults to OFF.
**LLVM_TARGET_ARCH**:STRING
LLVM target to use for native code generation. This is required for JIT
generation. It defaults to "host", meaning that it shall pick the architecture
of the machine where LLVM is being built. If you are cross-compiling, set it
to the target architecture name.
**LLVM_TABLEGEN**:STRING
Full path to a native TableGen executable (usually named ``llvm-tblgen``). This is
intended for cross-compiling: if the user sets this variable, no native
TableGen will be created.
**LLVM_LIT_ARGS**:STRING
Arguments given to lit. ``make check`` and ``make clang-test`` are affected.
By default, ``'-sv --no-progress-bar'`` on Visual C++ and Xcode, ``'-sv'`` on
others.
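For example, to pass an explicit argument string to lit (this particular string
mirrors the default mentioned above):
.. code-block:: console
$ cmake -DLLVM_LIT_ARGS="-sv --no-progress-bar" path/to/llvm/source/root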
**LLVM_LIT_TOOLS_DIR**:PATH
The path to GnuWin32 tools for tests. Valid on Windows host. Defaults to
the empty string, in which case lit will look for tools needed for tests
(e.g. ``grep``, ``sort``, etc.) in your %PATH%. If GnuWin32 is not in your
%PATH%, then you can set this variable to the GnuWin32 directory so that
lit can find tools needed for tests in that directory.
**LLVM_ENABLE_FFI**:BOOL
Indicates whether the LLVM Interpreter will be linked with the Foreign Function
Interface library (libffi) in order to enable calling external functions.
If the library or its headers are installed in a custom
location, you can also set the variables FFI_INCLUDE_DIR and
FFI_LIBRARY_DIR to the directories where ffi.h and libffi.so can be found,
respectively. Defaults to OFF.
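A possible invocation pointing at a custom libffi location (the paths here are
purely illustrative):
.. code-block:: console
$ cmake -DLLVM_ENABLE_FFI=ON -DFFI_INCLUDE_DIR=/opt/libffi/include -DFFI_LIBRARY_DIR=/opt/libffi/lib path/to/llvm/source/root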
**LLVM_EXTERNAL_{CLANG,LLD,POLLY}_SOURCE_DIR**:PATH
These variables specify the path to the source directory for the external
LLVM projects Clang, lld, and Polly, respectively, relative to the top-level
source directory. If the in-tree subdirectory for an external project
exists (e.g., llvm/tools/clang for Clang), then the corresponding variable
will not be used. If the variable for an external project does not point
to a valid path, then that project will not be built.
**LLVM_ENABLE_PROJECTS**:STRING
Semicolon-separated list of projects to build, or *all* for building all
(clang, libcxx, libcxxabi, lldb, compiler-rt, lld, polly) projects.
This flag assumes that projects are checked out side-by-side and not nested,
i.e. clang needs to sit alongside llvm instead of nested in `llvm/tools`.
This feature allows you to have one build for LLVM alone and another for clang+llvm
using the same source checkout.
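For example, to build LLVM together with clang and lld from a side-by-side
checkout:
.. code-block:: console
$ cmake -DLLVM_ENABLE_PROJECTS="clang;lld" path/to/llvm/source/root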
**LLVM_EXTERNAL_PROJECTS**:STRING
Semicolon-separated list of additional external projects to build as part of
llvm. For each project, LLVM_EXTERNAL_<NAME>_SOURCE_DIR must be specified
with the path to the project's source code. Example:
``-DLLVM_EXTERNAL_PROJECTS="Foo;Bar"
-DLLVM_EXTERNAL_FOO_SOURCE_DIR=/src/foo
-DLLVM_EXTERNAL_BAR_SOURCE_DIR=/src/bar``.
**LLVM_USE_OPROFILE**:BOOL
Enable building OProfile JIT support. Defaults to OFF.
**LLVM_PROFDATA_FILE**:PATH
Path to a profdata file to pass into clang's -fprofile-instr-use flag. This
can only be specified if you're building with clang.
**LLVM_USE_INTEL_JITEVENTS**:BOOL
Enable building support for Intel JIT Events API. Defaults to OFF.
**LLVM_ENABLE_LIBPFM**:BOOL
Enable building with libpfm to support hardware counter measurements in LLVM
tools.
Defaults to ON.
**LLVM_USE_PERF**:BOOL
Enable building JIT support for Perf (the Linux profiling tool). Defaults to OFF.
**LLVM_ENABLE_ZLIB**:BOOL
Enable building with zlib to support compression/decompression in LLVM tools.
Defaults to ON.
**LLVM_ENABLE_DIA_SDK**:BOOL
Enable building with MSVC DIA SDK for PDB debugging support. Available
only with MSVC. Defaults to ON.
**LLVM_USE_SANITIZER**:STRING
Define the sanitizer used to build LLVM binaries and tests. Possible values
are ``Address``, ``Memory``, ``MemoryWithOrigins``, ``Undefined``, ``Thread``,
and ``Address;Undefined``. Defaults to empty string.
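For example, to build LLVM and its tests under AddressSanitizer (usually
together with a host compiler that supports the sanitizers):
.. code-block:: console
$ cmake -DLLVM_USE_SANITIZER=Address path/to/llvm/source/root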
**LLVM_ENABLE_LTO**:STRING
Add ``-flto`` or ``-flto=`` flags to the compile and link command
lines, enabling link-time optimization. Possible values are ``Off``,
``On``, ``Thin`` and ``Full``. Defaults to OFF.
**LLVM_USE_LINKER**:STRING
Add ``-fuse-ld={name}`` to the link invocation. The possible values depend on
your compiler. For clang, the value can be an absolute path to your custom
linker; otherwise clang will prefix the name with ``ld.`` and apply its usual
search. For example, to link LLVM with the Gold linker, cmake can be invoked
with ``-DLLVM_USE_LINKER=gold``.
**LLVM_ENABLE_LLD**:BOOL
This option is equivalent to `-DLLVM_USE_LINKER=lld`, except during a 2-stage
build where a dependency is added from the first stage to the second ensuring
that lld is built before stage2 begins.
**LLVM_PARALLEL_COMPILE_JOBS**:STRING
Define the maximum number of concurrent compilation jobs.
**LLVM_PARALLEL_LINK_JOBS**:STRING
Define the maximum number of concurrent link jobs.
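For example, to allow many parallel compile jobs while throttling the more
memory-hungry link jobs (the numbers are illustrative):
.. code-block:: console
$ cmake -DLLVM_PARALLEL_COMPILE_JOBS=8 -DLLVM_PARALLEL_LINK_JOBS=2 path/to/llvm/source/root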
**LLVM_BUILD_DOCS**:BOOL
Adds all *enabled* documentation targets (i.e. Doxygen and Sphinx targets) as
dependencies of the default build targets. This results in all of the (enabled)
documentation targets being built as part of a normal build. If the ``install``
target is run then this also enables all built documentation targets to be
installed. Defaults to OFF. To enable a particular documentation target, see
LLVM_ENABLE_SPHINX and LLVM_ENABLE_DOXYGEN.
**LLVM_ENABLE_DOXYGEN**:BOOL
Enables the generation of browsable HTML documentation using doxygen.
Defaults to OFF.
**LLVM_ENABLE_DOXYGEN_QT_HELP**:BOOL
Enables the generation of a Qt Compressed Help file. Defaults to OFF.
This affects the make target ``doxygen-llvm``. When enabled, apart from
the normal HTML output generated by doxygen, this will produce a QCH file
named ``org.llvm.qch``. You can then load this file into Qt Creator.
This option is only useful in combination with ``-DLLVM_ENABLE_DOXYGEN=ON``;
otherwise this has no effect.
**LLVM_DOXYGEN_QCH_FILENAME**:STRING
The filename of the Qt Compressed Help file that will be generated when
``-DLLVM_ENABLE_DOXYGEN=ON`` and
``-DLLVM_ENABLE_DOXYGEN_QT_HELP=ON`` are given. Defaults to
``org.llvm.qch``.
This option is only useful in combination with
``-DLLVM_ENABLE_DOXYGEN_QT_HELP=ON``;
otherwise it has no effect.
**LLVM_DOXYGEN_QHP_NAMESPACE**:STRING
Namespace under which the intermediate Qt Help Project file lives. See `Qt
Help Project`_
for more information. Defaults to "org.llvm". This option is only useful in
combination with ``-DLLVM_ENABLE_DOXYGEN_QT_HELP=ON``; otherwise
it has no effect.
**LLVM_DOXYGEN_QHP_CUST_FILTER_NAME**:STRING
See `Qt Help Project`_ for
more information. Defaults to the CMake variable ``${PACKAGE_STRING}`` which
is a combination of the package name and version string. This filter can then
be used in Qt Creator to select only documentation from LLVM when browsing
through all the help files that you might have loaded. This option is only
useful in combination with ``-DLLVM_ENABLE_DOXYGEN_QT_HELP=ON``;
otherwise it has no effect.
.. _Qt Help Project: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-filters
**LLVM_DOXYGEN_QHELPGENERATOR_PATH**:STRING
The path to the ``qhelpgenerator`` executable. Defaults to whatever CMake's
``find_program()`` can find. This option is only useful in combination with
``-DLLVM_ENABLE_DOXYGEN_QT_HELP=ON``; otherwise it has no
effect.
**LLVM_DOXYGEN_SVG**:BOOL
Uses .svg files instead of .png files for graphs in the Doxygen output.
Defaults to OFF.
**LLVM_INSTALL_DOXYGEN_HTML_DIR**:STRING
The path to install Doxygen-generated HTML documentation to. This path can
either be absolute or relative to the CMAKE_INSTALL_PREFIX. Defaults to
`share/doc/llvm/doxygen-html`.
**LLVM_ENABLE_SPHINX**:BOOL
If specified, CMake will search for the ``sphinx-build`` executable and will make
the ``SPHINX_OUTPUT_HTML`` and ``SPHINX_OUTPUT_MAN`` CMake options available.
Defaults to OFF.
**SPHINX_EXECUTABLE**:STRING
The path to the ``sphinx-build`` executable detected by CMake.
For installation instructions, see
http://www.sphinx-doc.org/en/latest/install.html
**SPHINX_OUTPUT_HTML**:BOOL
If enabled (and ``LLVM_ENABLE_SPHINX`` is enabled) then the targets for
building the documentation as html are added (but not built by default unless
``LLVM_BUILD_DOCS`` is enabled). There is a target for each project in the
source tree that uses sphinx (e.g. ``docs-llvm-html``, ``docs-clang-html``
and ``docs-lld-html``). Defaults to ON.
**SPHINX_OUTPUT_MAN**:BOOL
If enabled (and ``LLVM_ENABLE_SPHINX`` is enabled) the targets for building
the man pages are added (but not built by default unless ``LLVM_BUILD_DOCS``
is enabled). Currently the only target added is ``docs-llvm-man``. Defaults
to ON.
**SPHINX_WARNINGS_AS_ERRORS**:BOOL
If enabled then sphinx documentation warnings will be treated as
errors. Defaults to ON.
**LLVM_INSTALL_SPHINX_HTML_DIR**:STRING
The path to install Sphinx-generated HTML documentation to. This path can
either be absolute or relative to the CMAKE_INSTALL_PREFIX. Defaults to
`share/doc/llvm/html`.
**LLVM_INSTALL_OCAMLDOC_HTML_DIR**:STRING
The path to install OCamldoc-generated HTML documentation to. This path can
either be absolute or relative to the CMAKE_INSTALL_PREFIX. Defaults to
`share/doc/llvm/ocaml-html`.
**LLVM_CREATE_XCODE_TOOLCHAIN**:BOOL
OS X Only: If enabled CMake will generate a target named
'install-xcode-toolchain'. This target will create a directory at
$CMAKE_INSTALL_PREFIX/Toolchains containing an xctoolchain directory which can
be used to override the default system tools.
**LLVM_BUILD_LLVM_DYLIB**:BOOL
If enabled, the target for building the libLLVM shared library is added.
This library contains all of LLVM's components in a single shared library.
Defaults to OFF. This cannot be used in conjunction with BUILD_SHARED_LIBS.
Tools will only be linked to the libLLVM shared library if LLVM_LINK_LLVM_DYLIB
is also ON.
The components in the library can be customised by setting LLVM_DYLIB_COMPONENTS
to a list of the desired components.
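For example, to build the shared library and also link the LLVM tools against
it (LLVM_LINK_LLVM_DYLIB is described next):
.. code-block:: console
$ cmake -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_LINK_LLVM_DYLIB=ON path/to/llvm/source/root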
**LLVM_LINK_LLVM_DYLIB**:BOOL
If enabled, tools will be linked with the libLLVM shared library. Defaults
to OFF. Setting LLVM_LINK_LLVM_DYLIB to ON also sets LLVM_BUILD_LLVM_DYLIB
to ON.
**BUILD_SHARED_LIBS**:BOOL
Flag indicating if each LLVM component (e.g. Support) is built as a shared
library (ON) or as a static library (OFF). Its default value is OFF. On
Windows, shared libraries may be used when building with MinGW, including
mingw-w64, but not when building with the Microsoft toolchain.
.. note:: BUILD_SHARED_LIBS is only recommended for use by LLVM developers.
If you want to build LLVM as a shared library, you should use the
``LLVM_BUILD_LLVM_DYLIB`` option.
**LLVM_OPTIMIZED_TABLEGEN**:BOOL
If enabled and building a debug or asserts build, the CMake build system will
generate a Release build tree to build a fully optimized tablegen for use
during the build. Enabling this option can significantly speed up build times,
especially when building LLVM in Debug configurations.
**LLVM_REVERSE_ITERATION**:BOOL
If enabled, all supported unordered llvm containers will be iterated in
reverse order. This is useful for uncovering non-determinism caused by
iteration of unordered containers.
**LLVM_BUILD_INSTRUMENTED_COVERAGE**:BOOL
If enabled, `source-based code coverage
<http://clang.llvm.org/docs/SourceBasedCodeCoverage.html>`_ instrumentation
is enabled while building llvm.
**LLVM_CCACHE_BUILD**:BOOL
If enabled and the ``ccache`` program is available, then LLVM will be
built using ``ccache`` to speed up rebuilds of LLVM and its components.
Defaults to OFF. The size and location of the cache maintained
by ``ccache`` can be adjusted via the LLVM_CCACHE_MAXSIZE and LLVM_CCACHE_DIR
options, which are passed to the CCACHE_MAXSIZE and CCACHE_DIR environment
variables, respectively.
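A possible invocation that also caps the cache size (the size value is
illustrative):
.. code-block:: console
$ cmake -DLLVM_CCACHE_BUILD=ON -DLLVM_CCACHE_MAXSIZE=20G path/to/llvm/source/root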
+**LLVM_FORCE_USE_OLD_TOOLCHAIN**:BOOL
+ If enabled, the compiler and standard library versions won't be checked. LLVM
+ may not compile at all, or might fail at runtime due to known bugs in these
+ toolchains.
+
+**LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN**:BOOL
+ If enabled, the compiler version check will only warn when using a toolchain
+ which is about to be deprecated, instead of emitting an error.
+
CMake Caches
============
Recently LLVM and Clang have been adding some more complicated build system
features. Utilizing these new features often involves a complicated chain of
CMake variables passed on the command line. Clang provides a collection of CMake
cache scripts to make these features more approachable.
CMake cache files are utilized using CMake's -C flag:
.. code-block:: console
$ cmake -C <path to cache file> <path to sources>
CMake cache scripts are processed in an isolated scope; only cached variables
remain set when the main configuration runs. CMake cached variables do not reset
variables that are already set unless the FORCE option is specified.
A few notes about CMake Caches:
- Order of command line arguments is important
- -D arguments specified before -C are set before the cache is processed and
can be read inside the cache file
- -D arguments specified after -C are set after the cache is processed and
are unset inside the cache file
- All -D arguments will override cache file settings
- CMAKE_TOOLCHAIN_FILE is evaluated after both the cache file and the command
line arguments
- It is recommended that all -D options be specified *before* -C, as in the example below
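For example, setting the build type with -D before processing a cache file with
-C (the cache file path is a placeholder):
.. code-block:: console
$ cmake -DCMAKE_BUILD_TYPE=Release -C path/to/cache/file.cmake path/to/llvm/source/root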
For more information about some of the advanced build configurations supported
via Cache files see :doc:`AdvancedBuilds`.
Executing the Tests
===================
Testing is performed when the *check-all* target is built. For instance, if you are
using Makefiles, execute this command in the root of your build directory:
.. code-block:: console
$ make check-all
On Visual Studio, you may run tests by building the project "check-all".
For more information about testing, see the :doc:`TestingGuide`.
Cross compiling
===============
See `this wiki page <http://www.vtk.org/Wiki/CMake_Cross_Compiling>`_ for
generic instructions on how to cross-compile with CMake. It goes into detailed
explanations and may seem daunting, but it is not. On the wiki page there are
several examples including toolchain files. Go directly to `this section
<http://www.vtk.org/Wiki/CMake_Cross_Compiling#Information_how_to_set_up_various_cross_compiling_toolchains>`_
for a quick solution.
Also see the `LLVM-specific variables`_ section for variables used when
cross-compiling.
Embedding LLVM in your project
==============================
From LLVM 3.5 onwards both the CMake and autoconf/Makefile build systems export
LLVM libraries as importable CMake targets. This means that clients of LLVM can
now reliably use CMake to develop their own LLVM-based projects against an
installed version of LLVM regardless of how it was built.
Here is a simple example of a CMakeLists.txt file that imports the LLVM libraries
and uses them to build a simple application ``simple-tool``.
.. code-block:: cmake
cmake_minimum_required(VERSION 3.4.3)
project(SimpleProject)
find_package(LLVM REQUIRED CONFIG)
message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}")
message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
# Set your project compile flags.
# E.g. if using the C++ header files
# you will need to enable C++11 support
# for your compiler.
include_directories(${LLVM_INCLUDE_DIRS})
add_definitions(${LLVM_DEFINITIONS})
# Now build our tools
add_executable(simple-tool tool.cpp)
# Find the libraries that correspond to the LLVM components
# that we wish to use
llvm_map_components_to_libnames(llvm_libs support core irreader)
# Link against LLVM libraries
target_link_libraries(simple-tool ${llvm_libs})
The ``find_package(...)`` directive when used in CONFIG mode (as in the above
example) will look for the ``LLVMConfig.cmake`` file in various locations (see
cmake manual for details). It creates a ``LLVM_DIR`` cache entry to save the
directory where ``LLVMConfig.cmake`` is found or allows the user to specify the
directory (e.g. by passing ``-DLLVM_DIR=/usr/lib/cmake/llvm`` to
the ``cmake`` command or by setting it directly in ``ccmake`` or ``cmake-gui``).
This file is available in two different locations.
* ``<INSTALL_PREFIX>/lib/cmake/llvm/LLVMConfig.cmake`` where
``<INSTALL_PREFIX>`` is the install prefix of an installed version of LLVM.
On Linux typically this is ``/usr/lib/cmake/llvm/LLVMConfig.cmake``.
* ``<LLVM_BUILD_ROOT>/lib/cmake/llvm/LLVMConfig.cmake`` where
``<LLVM_BUILD_ROOT>`` is the root of the LLVM build tree. **Note: this is only
available when building LLVM with CMake.**
If LLVM is installed in your operating system's normal installation prefix (e.g.
on Linux this is usually ``/usr/``) ``find_package(LLVM ...)`` will
automatically find LLVM if it is installed correctly. If LLVM is not installed
or you wish to build directly against the LLVM build tree you can use
``LLVM_DIR`` as previously mentioned.
The ``LLVMConfig.cmake`` file sets various useful variables. Notable variables
include
``LLVM_CMAKE_DIR``
The path to the LLVM CMake directory (i.e. the directory containing
LLVMConfig.cmake).
``LLVM_DEFINITIONS``
A list of preprocessor defines that should be used when building against LLVM.
``LLVM_ENABLE_ASSERTIONS``
This is set to ON if LLVM was built with assertions, otherwise OFF.
``LLVM_ENABLE_EH``
This is set to ON if LLVM was built with exception handling (EH) enabled,
otherwise OFF.
``LLVM_ENABLE_RTTI``
This is set to ON if LLVM was built with run time type information (RTTI),
otherwise OFF.
``LLVM_INCLUDE_DIRS``
A list of include paths to directories containing LLVM header files.
``LLVM_PACKAGE_VERSION``
The LLVM version. This string can be used with CMake conditionals, e.g., ``if
(${LLVM_PACKAGE_VERSION} VERSION_LESS "3.5")``.
``LLVM_TOOLS_BINARY_DIR``
The path to the directory containing the LLVM tools (e.g. ``llvm-as``).
Notice that in the above example we link ``simple-tool`` against several LLVM
libraries. The list of libraries is determined by using the
``llvm_map_components_to_libnames()`` CMake function. For a list of available
components look at the output of running ``llvm-config --components``.
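For instance, the available component names can be listed from an installed
LLVM like so:
.. code-block:: console
$ llvm-config --components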
Note that for LLVM < 3.5 ``llvm_map_components_to_libraries()`` was
used instead of ``llvm_map_components_to_libnames()``. This is now deprecated
and will be removed in a future version of LLVM.
.. _cmake-out-of-source-pass:
Developing LLVM passes out of source
------------------------------------
It is possible to develop LLVM passes out of LLVM's source tree (i.e. against an
installed or built LLVM). An example of a project layout is provided below.
.. code-block:: none
<project dir>/
|
CMakeLists.txt
<pass name>/
|
CMakeLists.txt
Pass.cpp
...
Contents of ``<project dir>/CMakeLists.txt``:
.. code-block:: cmake
find_package(LLVM REQUIRED CONFIG)
add_definitions(${LLVM_DEFINITIONS})
include_directories(${LLVM_INCLUDE_DIRS})
add_subdirectory(<pass name>)
Contents of ``<project dir>/<pass name>/CMakeLists.txt``:
.. code-block:: cmake
add_library(LLVMPassname MODULE Pass.cpp)
Note that if you intend for this pass to be merged into the LLVM source tree at some
point in the future, it might make more sense to use LLVM's internal
``add_llvm_library`` function with the MODULE argument instead. To do so, add
the following to ``<project dir>/CMakeLists.txt`` (after
``find_package(LLVM ...)``):
.. code-block:: cmake
list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
include(AddLLVM)
Then change ``<project dir>/<pass name>/CMakeLists.txt`` to:
.. code-block:: cmake
add_llvm_library(LLVMPassname MODULE
Pass.cpp
)
When you are done developing your pass, you may wish to integrate it
into the LLVM source tree. You can achieve this in two easy steps:
#. Copy the ``<pass name>`` folder into the ``<LLVM root>/lib/Transforms`` directory.
#. Add an ``add_subdirectory(<pass name>)`` line to
``<LLVM root>/lib/Transforms/CMakeLists.txt``.
Compiler/Platform-specific topics
=================================
Notes for specific compilers and/or platforms.
Microsoft Visual C++
--------------------
**LLVM_COMPILER_JOBS**:STRING
Specifies the maximum number of parallel compiler jobs to use per project
when building with msbuild or Visual Studio. Only supported for the Visual
Studio 2010 CMake generator. 0 means use all processors. Default is 0.
Index: vendor/llvm/dist-release_80/docs/DeveloperPolicy.rst
===================================================================
--- vendor/llvm/dist-release_80/docs/DeveloperPolicy.rst (revision 344165)
+++ vendor/llvm/dist-release_80/docs/DeveloperPolicy.rst (revision 344166)
@@ -1,744 +1,785 @@
=====================
LLVM Developer Policy
=====================
.. contents::
:local:
Introduction
============
This document contains the LLVM Developer Policy which defines the project's
policy towards developers and their contributions. The intent of this policy is
to eliminate miscommunication, rework, and confusion that might arise from the
distributed nature of LLVM's development. By stating the policy in clear terms,
we hope each developer can know ahead of time what to expect when making LLVM
contributions. This policy covers all llvm.org subprojects, including Clang,
LLDB, libc++, etc.
This policy is also designed to accomplish the following objectives:
#. Attract both users and developers to the LLVM project.
#. Make life as simple and easy for contributors as possible.
-#. Keep the tip of tree as stable as possible.
+#. Keep the top of tree as stable as possible.
#. Establish awareness of the project's :ref:`copyright, license, and patent
policies <copyright-license-patents>` with contributors to the project.
This policy is aimed at frequent contributors to LLVM. People interested in
contributing one-off patches can do so in an informal way by sending them to the
`llvm-commits mailing list
<http://lists.llvm.org/mailman/listinfo/llvm-commits>`_ and engaging another
developer to see it through the process.
Developer Policies
==================
This section contains policies that pertain to frequent LLVM developers. We
always welcome `one-off patches`_ from people who do not routinely contribute to
LLVM, but we expect more from frequent contributors to keep the system as
efficient as possible for everyone. Frequent LLVM contributors are expected to
meet the following requirements in order for LLVM to maintain a high standard of
quality.
Stay Informed
-------------
Developers should stay informed by reading at least the "dev" mailing list for
the projects you are interested in, such as `llvm-dev
<http://lists.llvm.org/mailman/listinfo/llvm-dev>`_ for LLVM, `cfe-dev
<http://lists.llvm.org/mailman/listinfo/cfe-dev>`_ for Clang, or `lldb-dev
<http://lists.llvm.org/mailman/listinfo/lldb-dev>`_ for LLDB. If you are
doing anything more than just casual work on LLVM, it is suggested that you also
subscribe to the "commits" mailing list for the subproject you're interested in,
such as `llvm-commits
<http://lists.llvm.org/mailman/listinfo/llvm-commits>`_, `cfe-commits
<http://lists.llvm.org/mailman/listinfo/cfe-commits>`_, or `lldb-commits
<http://lists.llvm.org/mailman/listinfo/lldb-commits>`_. Reading the
"commits" list and paying attention to changes being made by others is a good
way to see what other people are interested in and to watch the flow of the
project as a whole.
We recommend that active developers register an email account with `LLVM
Bugzilla <https://bugs.llvm.org/>`_ and preferably subscribe to the `llvm-bugs
<http://lists.llvm.org/mailman/listinfo/llvm-bugs>`_ email list to keep track
of bugs and enhancements occurring in LLVM. We really appreciate people who are
proactive at catching incoming bugs in their components and dealing with them
promptly.
Please be aware that all LLVM mailing lists are public and archived, and
that notices of confidentiality or non-disclosure cannot be respected.
.. _patch:
.. _one-off patches:
Making and Submitting a Patch
-----------------------------
When making a patch for review, the goal is to make it as easy for the reviewer
to read it as possible. As such, we recommend that you:
#. Make your patch against git master, not a branch, and not an old version
of LLVM. This makes it easy to apply the patch. For information on how to
clone from git, please see the :ref:`Getting Started Guide
<checkout>`.
#. Similarly, patches should be submitted soon after they are generated. Old
patches may not apply correctly if the underlying code changes between the
time the patch was created and the time it is applied.
#. Patches should be made with ``git format-patch``, or similar. If you use a
different tool, make sure it uses the ``diff -u`` format and that it
doesn't contain clutter which makes it hard to read.
Once your patch is ready, submit it by emailing it to the appropriate project's
commit mailing list (or commit it directly if applicable). Alternatively, some
patches get sent to the project's development list or component of the LLVM bug
tracker, but the commit list is the primary place for reviews and should
generally be preferred.
When sending a patch to a mailing list, it is a good idea to send it as an
*attachment* to the message, not embedded into the text of the message. This
ensures that your mailer will not mangle the patch when it sends it (e.g. by
making whitespace changes or by wrapping lines).
*For Thunderbird users:* Before submitting a patch, please open *Preferences >
Advanced > General > Config Editor*, find the key
``mail.content_disposition_type``, and set its value to ``1``. Without this
setting, Thunderbird sends your attachment using ``Content-Disposition: inline``
rather than ``Content-Disposition: attachment``. Apple Mail gamely displays such
a file inline, making it difficult to work with for reviewers using that
program.
When submitting patches, please do not add confidentiality or non-disclosure
notices to the patches themselves. These notices conflict with the `LLVM
License`_ and may result in your contribution being excluded.
.. _code review:
Code Reviews
------------
LLVM has a code review policy. Code review is one way to increase the quality of
software. We generally follow these policies:
#. All developers are required to have significant changes reviewed before they
are committed to the repository.
#. Code reviews are conducted by email on the relevant project's commit mailing
list, or alternatively on the project's development list or bug tracker.
#. Code can be reviewed either before it is committed or after. We expect major
changes to be reviewed before being committed, but smaller changes (or
changes where the developer owns the component) can be reviewed after commit.
#. The developer responsible for a code change is also responsible for making
all necessary review-related changes.
#. Code review can be an iterative process, which continues until the patch is
ready to be committed. Specifically, once a patch is sent out for review, it
needs an explicit "looks good" before it is submitted. Do not assume silent
approval, or request active objections to the patch with a deadline.
Sometimes code reviews will take longer than you would hope for, especially for
larger features. Accepted ways to speed up review times for your patches are:
* Review other people's patches. If you help out, everybody will be more
willing to do the same for you; goodwill is our currency.
* Ping the patch. If it is urgent, provide reasons why it is important to you to
get this patch landed and ping it every couple of days. If it is
not urgent, the common courtesy ping rate is one week. Remember that you're
asking for valuable time from other professional developers.
* Ask for help on IRC. Developers on IRC will be able to either help you
directly, or tell you who might be a good reviewer.
* Split your patch into multiple smaller patches that build on each other. The
smaller your patch, the higher the probability that somebody will take a quick
look at it.
Developers should participate in code reviews as both reviewers and
reviewees. If someone is kind enough to review your code, you should return the
favor for someone else. Note that anyone is welcome to review and give feedback
on a patch, but only people with Subversion write access can approve it.
There is a web based code review tool that can optionally be used
for code reviews. See :doc:`Phabricator`.
.. _code owners:
Code Owners
-----------
The LLVM Project relies on two features of its process to maintain rapid
development in addition to the high quality of its source base: the combination
of code review plus post-commit review for trusted maintainers. Having both is
a great way for the project to take advantage of the fact that most people do
the right thing most of the time, and only commit patches without pre-commit
review when they are confident they are right.
The trick to this is that the project has to guarantee that all patches that are
committed are reviewed after they go in: you don't want everyone to assume
someone else will review it, allowing the patch to go unreviewed. To solve this
problem, we have a notion of an 'owner' for a piece of the code. The sole
responsibility of a code owner is to ensure that a commit to their area of the
code is appropriately reviewed, either by themself or by someone else. The list
of current code owners can be found in the file `CODE_OWNERS.TXT
<https://github.com/llvm/llvm-project/blob/master/llvm/CODE_OWNERS.TXT>`_ in the
root of the LLVM source tree.
Note that code ownership is completely different from code review: anyone can
review a piece of code, and we welcome code review from anyone who is
interested. Code owners are the "last line of defense" to guarantee that all
patches that are committed are actually reviewed.
Being a code owner is a somewhat unglamorous position, but it is incredibly
important for the ongoing success of the project. Because people get busy,
interests change, and unexpected things happen, code ownership is purely opt-in,
and anyone can choose to resign their "title" at any time. For now, we do not
have an official policy on how one gets elected to be a code owner.
.. _include a testcase:
Test Cases
----------
Developers are required to create test cases for any bugs fixed and any new
features added. Some tips for getting your testcase approved:
* All feature and regression test cases are added to the ``llvm/test``
directory. The appropriate sub-directory should be selected (see the
:doc:`Testing Guide <TestingGuide>` for details).
* Test cases should be written in :doc:`LLVM assembly language <LangRef>`.
* Test cases, especially for regressions, should be reduced as much as possible,
by :doc:`bugpoint <Bugpoint>` or manually. It is unacceptable to place an
entire failing program into ``llvm/test`` as this creates a *time-to-test*
burden on all developers. Please keep them short.
Note that llvm/test and clang/test are designed for regression and small feature
tests only. More extensive test cases (e.g., entire applications, benchmarks,
etc) should be added to the ``llvm-test`` test suite. The llvm-test suite is
for coverage (correctness, performance, etc) testing, not feature or regression
testing.
Quality
-------
The minimum quality standards that any change must satisfy before being
committed to the main development branch are:
#. Code must adhere to the `LLVM Coding Standards <CodingStandards.html>`_.
#. Code must compile cleanly (no errors, no warnings) on at least one platform.
#. Bug fixes and new features should `include a testcase`_ so we know if the
fix/feature ever regresses in the future.
#. Code must pass the ``llvm/test`` test suite.
#. The code must not cause regressions on a reasonable subset of llvm-test,
where "reasonable" depends on the contributor's judgement and the scope of
the change (more invasive changes require more testing). A reasonable subset
might be something like "``llvm-test/MultiSource/Benchmarks``".
Additionally, the committer is responsible for addressing any problems found in
the future that the change is responsible for. For example:
* The code should compile cleanly on all supported platforms.
* The changes should not cause any correctness regressions in the ``llvm-test``
suite and must not cause any major performance regressions.
* The change set should not cause performance or correctness regressions for the
LLVM tools.
* The changes should not cause performance or correctness regressions in code
compiled by LLVM on all applicable targets.
* You are expected to address any `Bugzilla bugs <https://bugs.llvm.org/>`_ that
result from your change.
We prefer for this to be handled before submission but understand that it isn't
possible to test all of this for every submission. Our build bots and nightly
testing infrastructure normally finds these problems. A good rule of thumb is
to check the nightly testers for regressions the day after your change. Build
bots will directly email you if a group of commits that included yours caused a
failure. You are expected to check the build bot messages to see if they are
your fault and, if so, fix the breakage.
Commits that violate these quality standards (e.g. are very broken) may be
reverted. This is necessary when the change blocks other developers from making
progress. The developer is welcome to re-commit the change after the problem has
been fixed.
.. _commit messages:
Commit messages
---------------
Although we don't enforce the format of commit messages, we prefer that
you follow these guidelines to help review, search in logs, email formatting
and so on. These guidelines are very similar to rules used by other open source
projects.
Most importantly, the contents of the message should be carefully written to
convey the rationale of the change (without delving too much in detail). It
also should avoid being vague or overly specific. For example, "bits were not
set right" will leave the reviewer wondering about which bits, and why they
weren't right, while "Correctly set overflow bits in TargetInfo" conveys almost
all there is to the change.
Below are some guidelines about the format of the message itself:
* Separate the commit message into title, body and, if you're not the original
author, a "Patch by" attribution line (see below).
* The title should be concise. Because all commits are emailed to the list with
the first line as the subject, long titles are frowned upon. Short titles
also look better in `git log`.
* When the changes are restricted to a specific part of the code (e.g. a
back-end or optimization pass), it is customary to add a tag to the
beginning of the line in square brackets. For example, "[SCEV] ..."
or "[OpenMP] ...". This helps email filters and searches for post-commit
reviews.
* The body, if it exists, should be separated from the title by an empty line.
* The body should be concise, but explanatory, including a complete
reasoning. Unless it is required to understand the change, examples,
code snippets and gory details should be left to bug comments, web
review or the mailing list.
* If the patch fixes a bug in bugzilla, please include the PR# in the message.
* `Attribution of Changes`_ should be in a separate line, after the end of
the body, as simple as "Patch by John Doe.". This is how we officially
handle attribution, and there are automated processes that rely on this
format.
* Text formatting and spelling should follow the same rules as documentation
and in-code comments, e.g. capitalization, full stop, etc.
* If the commit is a bug fix on top of another recently committed patch, or a
revert or reapply of a patch, include the svn revision number of the prior
related commit. This could be as simple as "Revert rNNNN because it caused
PR#".
For minor violations of these recommendations, the community normally favors
reminding the contributor of this policy over reverting. Minor corrections and
omissions can be handled by sending a reply to the commits mailing list.
Obtaining Commit Access
-----------------------
We grant commit access to contributors with a track record of submitting high
quality patches. If you would like commit access, please send an email to
`Chris <mailto:clattner@llvm.org>`_ with the following information:
#. The user name you want to commit with, e.g. "hacker".
#. The full name and email address you want messages to llvm-commits to come
from, e.g. "J. Random Hacker <hacker@yoyodyne.com>".
#. A "password hash" of the password you want to use, e.g. "``2ACR96qjUqsyM``".
Note that you don't ever tell us what your password is; you just give it to
us in an encrypted form. To get this, run "``htpasswd``" (a utility that
comes with apache) in *crypt* mode (often enabled with "``-d``"), or find a web
page that will do it for you. Note that our system does not work with MD5
hashes. These are significantly longer than a crypt hash - e.g.
"``$apr1$vea6bBV2$Z8IFx.AfeD8LhqlZFqJer0``"; we only accept the shorter crypt hash,
which can be produced as shown below.
Once you've been granted commit access, you should be able to check out an LLVM
tree with an SVN URL of "https://username@llvm.org/..." instead of the normal
anonymous URL of "http://llvm.org/...". The first time you commit you'll have
to type in your password. Note that you may get a warning from SVN about an
untrusted key; you can ignore this. To verify that your commit access works,
please do a test commit (e.g. change a comment or add a blank line). Your first
commit to a repository may require the autogenerated email to be approved by a
moderator of the mailing list.
This is normal and will be done when the mailing list owner has time.
If you have recently been granted commit access, these policies apply:
#. You are granted *commit-after-approval* to all parts of LLVM. To get
approval, submit a `patch`_ to `llvm-commits
<http://lists.llvm.org/mailman/listinfo/llvm-commits>`_. When approved,
you may commit it yourself.
#. You are allowed to commit patches without approval which you think are
obvious. This is clearly a subjective decision --- we simply expect you to
use good judgement. Examples include: fixing build breakage, reverting
obviously broken patches, documentation/comment changes, any other minor
changes. Avoid committing formatting- or whitespace-only changes outside of
code you plan to make subsequent changes to. Also, try to separate
formatting or whitespace changes from functional changes, either by
correcting the format first (ideally) or afterward. Such changes should be
highly localized and the commit message should clearly state that the commit
is not intended to change functionality, usually by stating it is
:ref:`NFC <nfc>`.
#. You are allowed to commit patches without approval to those portions of LLVM
that you have contributed or maintain (i.e., have been assigned
responsibility for), with the proviso that such commits must not break the
build. This is a "trust but verify" policy, and commits of this nature are
reviewed after they are committed.
#. Multiple violations of these policies or a single egregious violation may
cause commit access to be revoked.
In any case, your changes are still subject to `code review`_ (either before or
after they are committed, depending on the nature of the change). You are
encouraged to review other peoples' patches as well, but you aren't required
to do so.
.. _discuss the change/gather consensus:
Making a Major Change
---------------------
When a developer begins a major new project with the aim of contributing it back
to LLVM, they should inform the community with an email to the `llvm-dev
<http://lists.llvm.org/mailman/listinfo/llvm-dev>`_ email list, to the extent
possible. The reason for this is to:
#. keep the community informed about future changes to LLVM,
#. avoid duplication of effort by preventing multiple parties working on the
same thing and not knowing about it, and
#. ensure that any technical issues around the proposed work are discussed and
resolved before any significant work is done.
The design of LLVM is carefully controlled to ensure that all the pieces fit
together well and are as consistent as possible. If you plan to make a major
change to the way LLVM works or want to add a major new extension, it is a good
idea to get consensus with the development community before you start working on
it.
Once the design of the new feature is finalized, the work itself should be done
as a series of `incremental changes`_, not as a long-term development branch.
.. _incremental changes:
Incremental Development
-----------------------
In the LLVM project, we do all significant changes as a series of incremental
patches. We have a strong dislike for huge changes or long-term development
branches. Long-term development branches have a number of drawbacks:
#. Branches must have mainline merged into them periodically. If the branch
development and mainline development occur in the same pieces of code,
resolving merge conflicts can take a lot of time.
#. Other people in the community tend to ignore work on branches.
#. Huge changes (produced when a branch is merged back onto mainline) are
extremely difficult to `code review`_.
#. Branches are not routinely tested by our nightly tester infrastructure.
#. Changes developed as monolithic large changes often don't work until the
entire set of changes is done. Breaking it down into a set of smaller
changes increases the odds that any of the work will be committed to the main
repository.
To address these problems, LLVM uses an incremental development style and we
require contributors to follow this practice when making a large/invasive
change. Some tips:
* Large/invasive changes usually have a number of secondary changes that are
required before the big change can be made (e.g. API cleanup, etc). These
sorts of changes can often be done before the major change is done,
independently of that work.
* The remaining inter-related work should be decomposed into unrelated sets of
changes if possible. Once this is done, define the first increment and get
consensus on what the end goal of the change is.
* Each change in the set can be stand alone (e.g. to fix a bug), or part of a
planned series of changes that works towards the development goal.
* Each change should be kept as small as possible. This simplifies your work
(into a logical progression), simplifies code review and reduces the chance
that you will get negative feedback on the change. Small increments also
facilitate the maintenance of a high quality code base.
* Often, an independent precursor to a big change is to add a new API and slowly
migrate clients to use the new API. Each change to use the new API is often
"obvious" and can be committed without review. Once the new API is in place
and used, it is much easier to replace the underlying implementation of the
API. This implementation change is logically separate from the API
change.
If you are interested in making a large change, and this scares you, please make
sure to first `discuss the change/gather consensus`_ then ask about the best way
to go about making the change.
Attribution of Changes
----------------------
When contributors submit a patch to an LLVM project, other developers with
commit access may commit it for the author once appropriate (based on the
progression of code review, etc.). When doing so, it is important to retain
correct attribution of contributions to their contributors. However, we do not
want the source code to be littered with random attributions "this code written
by J. Random Hacker" (this is noisy and distracting). In practice, the revision
control system keeps a perfect history of who changed what, and the CREDITS.txt
file describes higher-level contributions. If you commit a patch for someone
else, please follow the attribution of changes in the simple manner as outlined
by the `commit messages`_ section. Overall, please do not add contributor names
to the source code.
Also, don't commit patches authored by others unless they have submitted the
patch to the project or you have been authorized to submit them on their behalf
(you work together and your company authorized you to contribute the patches,
etc.). The author should first submit them to the relevant project's commit
list, development list, or LLVM bug tracker component. If someone sends you
a patch privately, encourage them to submit it to the appropriate list first.
.. _IR backwards compatibility:
IR Backwards Compatibility
--------------------------
When the IR format has to be changed, keep in mind that we try to maintain some
backwards compatibility. The rules are intended as a balance between convenience
for llvm users and not imposing a big burden on llvm developers:
* The textual format is not backwards compatible. We don't change it too often,
but there are no specific promises.
* Additions and changes to the IR should be reflected in
``test/Bitcode/compatibility.ll``.
* The current LLVM version supports loading any bitcode since version 3.0.
* After each X.Y release, ``compatibility.ll`` must be copied to
``compatibility-X.Y.ll``. The corresponding bitcode file should be assembled
using the X.Y build and committed as ``compatibility-X.Y.ll.bc`` (a sketch of
this step follows this list).
* Newer releases can ignore features from older releases, but they cannot
miscompile them. For example, if nsw is ever replaced with something else,
dropping it would be a valid way to upgrade the IR.
* Debug metadata is special in that it is currently dropped during upgrades.
* Non-debug metadata is defined to be safe to drop, so a valid way to upgrade
it is to drop it. That is not very user friendly and a bit more effort is
expected, but no promises are made.
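A minimal sketch of that release step, assuming 3.9 as the example version and
``llvm-as`` taken from the 3.9 release build:
.. code-block:: console
$ cp test/Bitcode/compatibility.ll test/Bitcode/compatibility-3.9.ll
$ llvm-as test/Bitcode/compatibility-3.9.ll -o test/Bitcode/compatibility-3.9.ll.bc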
C API Changes
----------------
* Stability Guarantees: The C API is, in general, a "best effort" for stability.
This means that we make every attempt to keep the C API stable, but that
stability will be limited by the abstractness of the interface and the
stability of the C++ API that it wraps. In practice, this means that things
like "create debug info" or "create this type of instruction" are likely to be
less stable than "take this IR file and JIT it for my current machine".
* Release stability: We won't break the C API on the release branch with patches
that go on that branch, with the exception that we will fix an unintentional
C API break that will keep the release consistent with both the previous and
next release.
* Testing: Patches to the C API are expected to come with tests just like any
other patch.
* Including new things into the API: If an LLVM subcomponent has a C API already
included, then expanding that C API is acceptable. Adding C API for
subcomponents that don't currently have one needs to be discussed on the
mailing list for design and maintainability feedback prior to implementation.
* Documentation: Any changes to the C API are required to be documented in the
release notes so that it's clear to external users who do not follow the
project how the C API is changing and evolving.
New Targets
-----------
LLVM is very receptive to new targets, even experimental ones, but a number of
problems can appear when adding new large portions of code, and back-ends are
normally added in bulk. We have found that landing large pieces of new code
and then trying to fix emergent problems in-tree is problematic for a variety
of reasons.
For these reasons, new targets are *always* added as *experimental* until
they can be proven stable, and later moved to non-experimental. The difference
between the two classes is that experimental targets are not built by default
(they need to be added to ``-DLLVM_TARGETS_TO_BUILD`` at CMake time).
The basic rules for a back-end to be upstreamed in **experimental** mode are:
* Every target must have a :ref:`code owner<code owners>`. The `CODE_OWNERS.TXT`
file has to be updated as part of the first merge. The code owner makes sure
that changes to the target get reviewed and steers the overall effort.
* There must be an active community behind the target. This community
will help maintain the target by providing buildbots, fixing
bugs, answering the LLVM community's questions and making sure the new
target doesn't break any of the other targets, or generic code. This
behavior is expected to continue throughout the lifetime of the
target's code.
* The code must be free of contentious issues, for example, large
changes in how the IR behaves or should be formed by the front-ends,
unless agreed by the majority of the community via refactoring of the
(:doc:`IR standard<LangRef>`) **before** the merge of the new target changes,
following the :ref:`IR backwards compatibility`.
* The code conforms to all of the policies laid out in this developer policy
document, including license, patent, and coding standards.
* The target should have either reasonable documentation on how it
works (ISA, ABI, etc.) or a publicly available simulator/hardware
(either free or cheap enough) - preferably both. This allows
developers to validate assumptions, understand constraints and review code
that can affect the target.
In addition, the rules for a back-end to be promoted to **official** are:
* The target must have addressed every other minimum requirement and
have been stable in tree for at least 3 months. This cool down
period is to make sure that the back-end and the target community can
endure continuous upstream development for the foreseeable future.
* The target's code must have been completely adapted to this policy
as well as the :doc:`coding standards<CodingStandards>`. Any exceptions that
were made to move into experimental mode must have been fixed **before**
becoming official.
* The test coverage needs to be broad and well written (small tests,
well documented). The build target ``check-all`` must pass with the
new target built, and where applicable, the ``test-suite`` must also
pass without errors, in at least one configuration (publicly
demonstrated, for example, via buildbots).
* Public buildbots need to be created and actively maintained, unless
the target requires no additional buildbots (ex. ``check-all`` covers
all tests). The more relevant and public the new target's CI infrastructure
is, the more the LLVM community will embrace it.
To **continue** as a supported and official target:
* The maintainer(s) must continue following these rules throughout the lifetime
of the target. Continuous violations of aforementioned rules and policies
could lead to complete removal of the target from the code base.
* Degradation in support, documentation or test coverage will make the target a
nuisance to other targets and a candidate for deprecation and, ultimately,
removal.
In essence, these rules are necessary for targets to gain and retain their
status; they also serve as markers of bit-rot, and will be used to clean
unmaintained targets out of the tree.
+
+.. _toolchain:
+
+Updating Toolchain Requirements
+-------------------------------
+
+We intend to require newer toolchains as time goes by. This means LLVM's
+codebase can use newer versions of C++ as they get standardized. Requiring newer
+toolchains to build LLVM can be painful for those building LLVM; therefore, it
+will only be done through the following process:
+
+ * Generally, try to support LLVM and GCC versions from the last 3 years at a
+ minimum. This time-based guideline is not strict: we may support much older
+ compilers, or decide to support fewer versions.
+
+ * An RFC is sent to the `llvm-dev mailing list <http://lists.llvm.org/mailman/listinfo/llvm-dev>`_
+
+ - Detail upsides of the version increase (e.g. which newer C++ language or
+ library features LLVM should use; avoid miscompiles in particular compiler
+ versions, etc).
+ - Detail downsides on important platforms (e.g. Ubuntu LTS status).
+
+ * Once the RFC reaches consensus, update the CMake toolchain version checks as
+ well as the :doc:`getting started<GettingStarted>` guide. We want to
+ soft-error when developers compile LLVM. We say "soft-error" because the
+ error can be turned into a warning using a CMake flag. This is an important
+ step: LLVM still doesn't have code which requires the new toolchains, but it
+ soon will. If you compile LLVM but don't read the mailing list, we should
+ tell you!
+
+ * Ensure that at least one LLVM release has had this soft-error. Not all
+ developers compile LLVM top-of-tree. These release-bound developers should
+ also be told about upcoming changes.
+
+ * Turn the soft-error into a hard-error after said LLVM release has branched.
+
+ * Update the :doc:`coding standards<CodingStandards>` to allow the new
+ features we've explicitly approved in the RFC.
+
+ * Start using the new features in LLVM's codebase.
+
.. _copyright-license-patents:
Copyright, License, and Patents
===============================
.. note::
This section deals with legal matters but does not provide legal advice. We
are not lawyers --- please seek legal counsel from an attorney.
This section addresses the issues of copyright, license and patents for the LLVM
project. The copyright for the code is held by the individual contributors of
the code and the terms of its license to LLVM users and developers is the
`University of Illinois/NCSA Open Source License
<http://www.opensource.org/licenses/UoI-NCSA.php>`_ (with portions dual licensed
under the `MIT License <http://www.opensource.org/licenses/mit-license.php>`_,
see below). As a contributor to the LLVM project, you agree to allow any
contributions to the project to be licensed under these terms.
Copyright
---------
The LLVM project does not require copyright assignments, which means that the
copyright for the code in the project is held by its respective contributors who
have each agreed to release their contributed code under the terms of the `LLVM
License`_.
An implication of this is that the LLVM license is unlikely to ever change:
changing it would require tracking down all the contributors to LLVM and getting
them to agree that a license change is acceptable for their contribution. Since
there are no plans to change the license, this is not a cause for concern.
As a contributor to the project, this means that you (or your company) retain
ownership of the code you contribute, that it cannot be used in a way that
contradicts the license (which is a liberal BSD-style license), and that the
license for your contributions won't change without your approval in the
future.
.. _LLVM License:
License
-------
We intend to keep LLVM perpetually open source and to use a liberal open source
license. **As a contributor to the project, you agree that any contributions be
licensed under the terms of the corresponding subproject.** All of the code in
LLVM is available under the `University of Illinois/NCSA Open Source License
<http://www.opensource.org/licenses/UoI-NCSA.php>`_, which boils down to
this:
* You can freely distribute LLVM.
* You must retain the copyright notice if you redistribute LLVM.
* Binaries derived from LLVM must reproduce the copyright notice (e.g. in an
included readme file).
* You can't use our names to promote your LLVM derived products.
* There's no warranty on LLVM at all.
We believe this fosters the widest adoption of LLVM because it **allows
commercial products to be derived from LLVM** with few restrictions and without
a requirement for making any derived works also open source (i.e. LLVM's
license is not a "copyleft" license like the GPL). We suggest that you read the
`License <http://www.opensource.org/licenses/UoI-NCSA.php>`_ if further
clarification is needed.
In addition to the UIUC license, the runtime library components of LLVM
(**compiler_rt, libc++, and libclc**) are also licensed under the `MIT License
<http://www.opensource.org/licenses/mit-license.php>`_, which does not contain
the binary redistribution clause. As a user of these runtime libraries, it
means that you can choose to use the code under either license (and thus don't
need the binary redistribution clause), and as a contributor to the code that
you agree that any contributions to these libraries be licensed under both
licenses. We feel that this is important for runtime libraries, because they
are implicitly linked into applications and therefore should not subject those
applications to the binary redistribution clause. This also means that it is ok
to move code from (e.g.) libc++ to the LLVM core without concern, but that code
cannot be moved from the LLVM core to libc++ without the copyright owner's
permission.
Note that the LLVM Project does distribute dragonegg, **which is
GPL.** This means that anything "linked" into dragonegg must itself be compatible
with the GPL, and must be releasable under the terms of the GPL. This implies
that **any code linked into dragonegg and distributed to others may be subject to
the viral aspects of the GPL** (for example, a proprietary code generator linked
into dragonegg must be made available under the GPL). This is not a problem for
code already distributed under a more liberal license (like the UIUC license),
and GPL-containing subprojects are kept in separate SVN repositories whose
LICENSE.txt files specifically indicate that they contain GPL code.
Patents
-------
To the best of our knowledge, LLVM does not infringe on any patents (we have
actually removed code from LLVM in the past that was found to infringe). Having
code in LLVM that infringes on patents would violate an important goal of the
project by making it hard or impossible to reuse the code for arbitrary purposes
(including commercial use).
When contributing code, we expect contributors to notify us of any potential for
patent-related trouble with their changes (including from third parties). If
you or your employer own the rights to a patent and would like to contribute
code to LLVM that relies on it, we require that the copyright owner sign an
agreement that allows any other user of LLVM to freely use your patent. Please
contact the `LLVM Foundation Board of Directors <mailto:board@llvm.org>`_ for more
details.
Index: vendor/llvm/dist-release_80/docs/GettingStarted.rst
===================================================================
--- vendor/llvm/dist-release_80/docs/GettingStarted.rst (revision 344165)
+++ vendor/llvm/dist-release_80/docs/GettingStarted.rst (revision 344166)
@@ -1,1103 +1,1116 @@
====================================
Getting Started with the LLVM System
====================================
.. contents::
:local:
Overview
========
Welcome to the LLVM project! In order to get started, you first need to know
some basic information.
First, the LLVM project has multiple components. The core of the project is
itself called "LLVM". This contains all of the tools, libraries, and header
files needed to process an intermediate representation and convert it into
object files. It contains an assembler, disassembler, bitcode analyzer and
bitcode optimizer. It also contains basic regression tests.
Another piece is the `Clang <http://clang.llvm.org/>`_ front end. This
component compiles C, C++, Objective C, and Objective C++ code into LLVM bitcode
-- and from there into object files, using LLVM.
There are other components as well:
the `libc++ C++ standard library <https://libcxx.llvm.org>`_,
the `LLD linker <https://lld.llvm.org>`_, and more.
Getting Started Quickly (A Summary)
===================================
The LLVM Getting Started documentation may be out of date. So, the `Clang
Getting Started <http://clang.llvm.org/get_started.html>`_ page might also be a
good place to start.
Here's the short story for getting up and running quickly with LLVM:
#. Read the documentation.
#. Read the documentation.
#. Remember that you were warned twice about reading the documentation.
#. Checkout LLVM (including related subprojects like Clang):
* ``git clone https://github.com/llvm/llvm-project.git``
* Or, on windows, ``git clone --config core.autocrlf=false
https://github.com/llvm/llvm-project.git``
#. Configure and build LLVM and Clang:
* ``cd llvm-project``
* ``mkdir build``
* ``cd build``
* ``cmake -G <generator> [options] ../llvm``
Some common generators are:
* ``Ninja`` --- for generating `Ninja <https://ninja-build.org>`_
build files. Most llvm developers use Ninja.
* ``Unix Makefiles`` --- for generating make-compatible parallel makefiles.
* ``Visual Studio`` --- for generating Visual Studio projects and
solutions.
* ``Xcode`` --- for generating Xcode projects.
Some Common options:
* ``-DLLVM_ENABLE_PROJECTS='...'`` --- semicolon-separated list of the LLVM
subprojects you'd like to additionally build. Can include any of: clang,
libcxx, libcxxabi, libunwind, lldb, compiler-rt, lld, polly, or
debuginfo-tests.
For example, to build LLVM, Clang, libcxx, and libcxxabi, use
``-DLLVM_ENABLE_PROJECTS="clang;libcxx;libcxxabi"``.
* ``-DCMAKE_INSTALL_PREFIX=directory`` --- Specify for *directory* the full
pathname of where you want the LLVM tools and libraries to be installed
(default ``/usr/local``).
* ``-DCMAKE_BUILD_TYPE=type`` --- Valid options for *type* are Debug,
Release, RelWithDebInfo, and MinSizeRel. Default is Debug.
* ``-DLLVM_ENABLE_ASSERTIONS=On`` --- Compile with assertion checks enabled
(default is Yes for Debug builds, No for all other build types).
* Run your build tool of choice!
* The default target (i.e. ``ninja`` or ``make``) will build all of LLVM.
* The ``check-all`` target (i.e. ``ninja check-all``) will run the
regression tests to ensure everything is in working order.
* CMake will generate build targets for each tool and library, and most
LLVM sub-projects generate their own ``check-<project>`` target.
* Running a serial build will be *slow*. Make sure you run a parallel
build; that's already done by default in Ninja. For ``make``, use
``make -j NNN`` (with an appropriate value of NNN, e.g. the number of CPUs
you have). A consolidated example follows this list.
* For more information see `CMake <CMake.html>`_
* If you get an "internal compiler error (ICE)" or test failures, see
`below`_.
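Putting the options above together, a typical first configure-and-build session might look like the following sketch (adjust the generator and options to taste):
.. code-block:: console
% cd llvm-project && mkdir build && cd build
% cmake -G Ninja -DLLVM_ENABLE_PROJECTS="clang" -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=On ../llvm
% ninja
% ninja check-all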
Consult the `Getting Started with LLVM`_ section for detailed information on
configuring and compiling LLVM. Go to `Directory Layout`_ to learn about the
layout of the source code tree.
Requirements
============
Before you begin to use the LLVM system, review the requirements given below.
This may save you some trouble by knowing ahead of time what hardware and
software you will need.
Hardware
--------
LLVM is known to work on the following host platforms:
================== ===================== =============
OS                 Arch                  Compilers
================== ===================== =============
Linux              x86\ :sup:`1`         GCC, Clang
Linux              amd64                 GCC, Clang
Linux              ARM                   GCC, Clang
Linux              PowerPC               GCC, Clang
Solaris            V9 (Ultrasparc)       GCC
FreeBSD            x86\ :sup:`1`         GCC, Clang
FreeBSD            amd64                 GCC, Clang
NetBSD             x86\ :sup:`1`         GCC, Clang
NetBSD             amd64                 GCC, Clang
MacOS X\ :sup:`2`  PowerPC               GCC
MacOS X            x86                   GCC, Clang
Cygwin/Win32       x86\ :sup:`1, 3`      GCC
Windows            x86\ :sup:`1`         Visual Studio
Windows x64        x86-64                Visual Studio
================== ===================== =============
.. note::
#. Code generation supported for Pentium processors and up
#. Code generation supported for 32-bit ABI only
#. To use LLVM modules on a Win32-based system, you may configure LLVM
with ``-DBUILD_SHARED_LIBS=On``.
Note that Debug builds require a lot of time and disk space. An LLVM-only build
will need about 1-3 GB of space. A full build of LLVM and Clang will need around
15-20 GB of disk space. The exact space requirements will vary by system. (It
is so large because of all the debugging information and the fact that the
libraries are statically linked into multiple tools).
If you are space-constrained, you can build only selected tools or only
selected targets. The Release build requires considerably less space.
The LLVM suite *may* compile on other platforms, but it is not guaranteed to do
so. If compilation is successful, the LLVM utilities should be able to
assemble, disassemble, analyze, and optimize LLVM bitcode. Code generation
should work as well, although the generated native code may not work on your
platform.
Software
--------
Compiling LLVM requires that you have several software packages installed. The
table below lists those required packages. The Package column is the usual name
for the software package that LLVM depends on. The Version column provides
"known to work" versions of the package. The Notes column describes how LLVM
uses the package and provides other details.
=========================================================== ============ ==========================================
Package Version Notes
=========================================================== ============ ==========================================
`GNU Make <http://savannah.gnu.org/projects/make>`_ 3.79, 3.79.1 Makefile/build processor
-`GCC <http://gcc.gnu.org/>`_ >=4.8.0 C/C++ compiler\ :sup:`1`
+`GCC <http://gcc.gnu.org/>`_ >=5.1.0 C/C++ compiler\ :sup:`1`
`python <http://www.python.org/>`_ >=2.7 Automated test suite\ :sup:`2`
`zlib <http://zlib.net>`_ >=1.2.3.4 Compression library\ :sup:`3`
=========================================================== ============ ==========================================
.. note::
#. Only the C and C++ languages are needed so there's no need to build the
other languages for LLVM's purposes. See `below` for specific version
info.
#. Only needed if you want to run the automated test suite in the
``llvm/test`` directory.
#. Optional, adds compression / uncompression capabilities to selected LLVM
tools.
Additionally, your compilation host is expected to have the usual plethora of
Unix utilities. Specifically:
* **ar** --- archive library builder
* **bzip2** --- bzip2 command for distribution generation
* **bunzip2** --- bunzip2 command for distribution checking
* **chmod** --- change permissions on a file
* **cat** --- output concatenation utility
* **cp** --- copy files
* **date** --- print the current date/time
* **echo** --- print to standard output
* **egrep** --- extended regular expression search utility
* **find** --- find files/dirs in a file system
* **grep** --- regular expression search utility
* **gzip** --- gzip command for distribution generation
* **gunzip** --- gunzip command for distribution checking
* **install** --- install directories/files
* **mkdir** --- create a directory
* **mv** --- move (rename) files
* **ranlib** --- symbol table builder for archive libraries
* **rm** --- remove (delete) files and directories
* **sed** --- stream editor for transforming output
* **sh** --- Bourne shell for make build scripts
* **tar** --- tape archive for distribution generation
* **test** --- test things in file system
* **unzip** --- unzip command for distribution checking
* **zip** --- zip command for distribution generation
.. _below:
.. _check here:
Host C++ Toolchain, both Compiler and Standard Library
------------------------------------------------------
LLVM is very demanding of the host C++ compiler, and as such tends to expose
-bugs in the compiler. We are also planning to follow improvements and
-developments in the C++ language and library reasonably closely. As such, we
-require a modern host C++ toolchain, both compiler and standard library, in
-order to build LLVM.
+bugs in the compiler. We also attempt to follow improvements and developments in
+the C++ language and library reasonably closely. As such, we require a modern
+host C++ toolchain, both compiler and standard library, in order to build LLVM.
-For the most popular host toolchains we check for specific minimum versions in
-our build systems:
+LLVM is written using the subset of C++ documented in :doc:`coding
+standards<CodingStandards>`. To enforce this language version, we check the most
+popular host toolchains for specific minimum versions in our build systems:
+* Clang 3.5
+* Apple Clang 6.0
+* GCC 5.1
+* Visual Studio 2017
+
+The below versions currently soft-error as we transition to the new compiler
+versions listed above. The LLVM codebase is currently known to compile correctly
+with the following compilers, though this will change in the near future:
+
* Clang 3.1
+* Apple Clang 3.1
* GCC 4.8
* Visual Studio 2015 (Update 3)
Anything older than these toolchains *may* work, but will require forcing the
build system with a special option and is not really a supported host platform.
Also note that older versions of these compilers have often crashed or
miscompiled LLVM.
For less widely used host toolchains such as ICC or xlC, be aware that a very
recent version may be required to support all of the C++ features used in LLVM.
We track certain versions of software that are *known* to fail when used as
part of the host toolchain. These even include linkers at times.
**GNU ld 2.16.X**. Some 2.16.X versions of the ld linker will produce very long
warning messages complaining that some "``.gnu.linkonce.t.*``" symbol was
defined in a discarded section. You can safely ignore these messages as they are
erroneous and the linkage is correct. These messages disappear using ld 2.17.
**GNU binutils 2.17**: Binutils 2.17 contains `a bug
<http://sourceware.org/bugzilla/show_bug.cgi?id=3111>`__ which causes huge link
times (minutes instead of seconds) when building LLVM. We recommend upgrading
to a newer version (2.17.50.0.4 or later).
**GNU Binutils 2.19.1 Gold**: This version of Gold contained `a bug
<http://sourceware.org/bugzilla/show_bug.cgi?id=9836>`__ which causes
intermittent failures when building LLVM with position independent code. The
symptom is an error about cyclic dependencies. We recommend upgrading to a
newer version of Gold.
Getting a Modern Host C++ Toolchain
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This section mostly applies to Linux and older BSDs. On Mac OS X, you should
have a sufficiently modern Xcode, or you will likely need to upgrade until you
do. Windows does not have a "system compiler", so you must install either Visual
Studio 2015 or a recent version of mingw64. FreeBSD 10.0 and newer have a modern
Clang as the system compiler.
However, some Linux distributions and some other or older BSDs sometimes have
extremely old versions of GCC. These steps attempt to help you upgrade your
compiler even on such a system. However, if at all possible, we encourage you
to use a recent version of a distribution with a modern system compiler that
meets these requirements. Note that it is tempting to install a prior
version of Clang and libc++ to be the host compiler, however libc++ was not
well tested or set up to build on Linux until relatively recently. As
a consequence, this guide suggests just using libstdc++ and a modern GCC as the
initial host in a bootstrap, and then using Clang (and potentially libc++).
The first step is to get a recent GCC toolchain installed. The most common
distribution on which users have struggled with the version requirements is
Ubuntu Precise, 12.04 LTS. For this distribution, one easy option is to install
the `toolchain testing PPA`_ and use it to install a modern GCC. There is
-a really nice discussions of this on the `ask ubuntu stack exchange`_. However,
-not all users can use PPAs and there are many other distributions, so it may be
-necessary (or just useful, if you're here you *are* doing compiler development
-after all) to build and install GCC from source. It is also quite easy to do
-these days.
+a really nice discussion of this on the `ask ubuntu stack exchange`_ and a
+`github gist`_ with updated commands. However, not all users can use PPAs and
+there are many other distributions, so it may be necessary (or just useful, if
+you're here you *are* doing compiler development after all) to build and install
+GCC from source. It is also quite easy to do these days.
.. _toolchain testing PPA:
https://launchpad.net/~ubuntu-toolchain-r/+archive/test
.. _ask ubuntu stack exchange:
- http://askubuntu.com/questions/271388/how-to-install-gcc-4-8-in-ubuntu-12-04-from-the-terminal
+ https://askubuntu.com/questions/466651/how-do-i-use-the-latest-gcc-on-ubuntu/581497#58149
+.. _github gist:
+ https://gist.github.com/application2000/73fd6f4bf1be6600a2cf9f56315a2d91
-Easy steps for installing GCC 4.8.2:
+Easy steps for installing GCC 5.1.0:
.. code-block:: console
- % wget https://ftp.gnu.org/gnu/gcc/gcc-4.8.2/gcc-4.8.2.tar.bz2
- % wget https://ftp.gnu.org/gnu/gcc/gcc-4.8.2/gcc-4.8.2.tar.bz2.sig
+ % gcc_version=5.1.0
+ % wget https://ftp.gnu.org/gnu/gcc/gcc-${gcc_version}/gcc-${gcc_version}.tar.bz2
+ % wget https://ftp.gnu.org/gnu/gcc/gcc-${gcc_version}/gcc-${gcc_version}.tar.bz2.sig
% wget https://ftp.gnu.org/gnu/gnu-keyring.gpg
- % signature_invalid=`gpg --verify --no-default-keyring --keyring ./gnu-keyring.gpg gcc-4.8.2.tar.bz2.sig`
+ % signature_invalid=`gpg --verify --no-default-keyring --keyring ./gnu-keyring.gpg gcc-${gcc_version}.tar.bz2.sig`
% if [ $signature_invalid ]; then echo "Invalid signature" ; exit 1 ; fi
- % tar -xvjf gcc-4.8.2.tar.bz2
- % cd gcc-4.8.2
+ % tar -xvjf gcc-${gcc_version}.tar.bz2
+ % cd gcc-${gcc_version}
% ./contrib/download_prerequisites
% cd ..
- % mkdir gcc-4.8.2-build
- % cd gcc-4.8.2-build
- % $PWD/../gcc-4.8.2/configure --prefix=$HOME/toolchains --enable-languages=c,c++
+ % mkdir gcc-${gcc_version}-build
+ % cd gcc-${gcc_version}-build
+ % $PWD/../gcc-${gcc_version}/configure --prefix=$HOME/toolchains --enable-languages=c,c++
% make -j$(nproc)
% make install
For more details, check out the excellent `GCC wiki entry`_, from which most
of this information was taken.
.. _GCC wiki entry:
- http://gcc.gnu.org/wiki/InstallingGCC
+ https://gcc.gnu.org/wiki/InstallingGCC
Once you have a GCC toolchain, configure your build of LLVM to use the new
toolchain for your host compiler and C++ standard library. Because the new
version of libstdc++ is not on the system library search path, you need to pass
extra linker flags so that it can be found at link time (``-L``) and at runtime
(``-rpath``). If you are using CMake, this invocation should produce working
binaries:
.. code-block:: console
% mkdir build
% cd build
% CC=$HOME/toolchains/bin/gcc CXX=$HOME/toolchains/bin/g++ \
cmake .. -DCMAKE_CXX_LINK_FLAGS="-Wl,-rpath,$HOME/toolchains/lib64 -L$HOME/toolchains/lib64"
If you fail to set rpath, most LLVM binaries will fail on startup with a message
from the loader similar to ``libstdc++.so.6: version `GLIBCXX_3.4.20' not
found``. This means you need to tweak the -rpath linker flag.
-When you build Clang, you will need to give *it* access to modern C++11
+When you build Clang, you will need to give *it* access to modern C++
standard library in order to use it as your new host in part of a bootstrap.
There are two easy ways to do this, either build (and install) libc++ along
with Clang and then use it with the ``-stdlib=libc++`` compile and link flag,
or install Clang into the same prefix (``$HOME/toolchains`` above) as GCC.
Clang will look within its own prefix for libstdc++ and use it if found. You
can also add an explicit prefix for Clang to look in for a GCC toolchain with
the ``--gcc-toolchain=/opt/my/gcc/prefix`` flag, passing it to both compile and
link commands when using your just-built-Clang to bootstrap.
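As a hedged sketch of the last approach, reusing the ``$HOME/toolchains`` prefix from above (flags and paths are illustrative):
.. code-block:: console
% CC=$HOME/toolchains/bin/clang CXX=$HOME/toolchains/bin/clang++ \
cmake .. -DCMAKE_CXX_FLAGS="--gcc-toolchain=$HOME/toolchains" \
-DCMAKE_CXX_LINK_FLAGS="--gcc-toolchain=$HOME/toolchains -Wl,-rpath,$HOME/toolchains/lib64 -L$HOME/toolchains/lib64"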
.. _Getting Started with LLVM:
Getting Started with LLVM
=========================
The remainder of this guide is meant to get you up and running with LLVM and to
give you some basic information about the LLVM environment.
The later sections of this guide describe the `general layout`_ of the LLVM
source tree, a `simple example`_ using the LLVM tool chain, and `links`_ to find
more information about LLVM or to get help via e-mail.
Terminology and Notation
------------------------
Throughout this manual, the following names are used to denote paths specific to
the local system and working environment. *These are not environment variables
you need to set but just strings used in the rest of this document below*. In
any of the examples below, simply replace each of these names with the
appropriate pathname on your local system. All these paths are absolute:
``SRC_ROOT``
This is the top level directory of the LLVM source tree.
``OBJ_ROOT``
This is the top level directory of the LLVM object tree (i.e. the tree where
object files and compiled programs will be placed. It can be the same as
SRC_ROOT).
Unpacking the LLVM Archives
---------------------------
If you have the LLVM distribution, you will need to unpack it before you can
begin to compile it. LLVM is distributed as a number of different
subprojects. Each one has its own download which is a TAR archive that is
compressed with the gzip program.
The files are as follows, with *x.y* marking the version number (a sample
unpacking command follows the list):
``llvm-x.y.tar.gz``
Source release for the LLVM libraries and tools.
``cfe-x.y.tar.gz``
Source release for the Clang frontend.
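For example, to unpack the archives with ``tar`` (substitute the actual version for *x.y*):
.. code-block:: console
% tar -xzf llvm-x.y.tar.gz
% tar -xzf cfe-x.y.tar.gz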
.. _checkout:
Checkout LLVM from Git
----------------------
You can also check out the source code for LLVM from Git. While the LLVM
project's official source-code repository is Subversion, we are in the process
of migrating to Git. We currently recommend that all developers use Git for
day-to-day development.
.. note::
Passing ``--config core.autocrlf=false`` should not be required in
the future after we adjust the .gitattribute settings correctly, but
is required for Windows users at the time of this writing.
Simply run:
.. code-block:: console
% git clone https://github.com/llvm/llvm-project.git
or on Windows,
.. code-block:: console
% git clone --config core.autocrlf=false https://github.com/llvm/llvm-project.git
This will create an '``llvm-project``' directory in the current directory and
fully populate it with all of the source code, test directories, and local
copies of documentation files for LLVM and all the related subprojects. Note
that unlike the tarballs, which contain each subproject in a separate file, the
git repository contains all of the projects together.
If you want to get a specific release (as opposed to the most recent revision),
you can check out a tag after cloning the repository. E.g., `git checkout
llvmorg-6.0.1` inside the ``llvm-project`` directory created by the above
command. Use `git tag -l` to list all of them.
Sending patches
^^^^^^^^^^^^^^^
Please read `Developer Policy <DeveloperPolicy.html#one-off-patches>`_, too.
We don't currently accept github pull requests, so you'll need to send patches
either via emailing to llvm-commits, or, preferably, via :ref:`Phabricator
<phabricator-reviews>`.
You'll generally want to make sure your branch has a single commit,
corresponding to the review you wish to send, up-to-date with the upstream
``origin/master`` branch, and doesn't contain merges. Once you have that, you
can use ``git show`` or ``git format-patch`` to output the diff, and attach it
to a Phabricator review (or to an email message).
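A hedged sketch of that preparation (the upstream remote is assumed to be named ``origin``):
.. code-block:: console
% git fetch origin
% git rebase origin/master
% git format-patch -1 HEAD -o /tmp
The last command writes the patch file for the top commit, which you can then attach to the review.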
However, using the "Arcanist" tool is often easier. After `installing
arcanist`_, you can upload the latest commit using:
.. code-block:: console
% arc diff HEAD~1
Additionally, before sending a patch for review, please also try to ensure it's
formatted properly. We use ``clang-format`` for this, which has git integration
through the ``git-clang-format`` script. On some systems, it may already be
installed (or be installable via your package manager). If so, you can simply
run it -- the following command will format only the code changed in the most
recent commit:
.. code-block:: console
% git clang-format HEAD~1
Note that this modifies the files, but doesn't commit them -- you'll likely want
to run
.. code-block:: console
% git commit --amend -a
in order to update the last commit with all pending changes.
.. note::
If you don't already have ``clang-format`` or ``git clang-format`` installed
on your system, the ``clang-format`` binary will be built alongside clang, and
the git integration can be run from
``clang/tools/clang-format/git-clang-format``.
.. _commit_from_git:
For developers to commit changes from Git
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A helper script is provided in ``llvm/utils/git-svn/git-llvm``. After you add it
to your path, you can push committed changes upstream with ``git llvm
push``. While this creates a Subversion checkout and patches it under the hood,
it does not require you to interact with Subversion directly.
.. code-block:: console
% export PATH=$PATH:$TOP_LEVEL_DIR/llvm-project/llvm/utils/git-svn/
% git llvm push
Within a couple minutes after pushing to subversion, the svn commit will have
been converted back to a Git commit, and made its way into the official Git
repository. At that point, ``git pull`` should get back the changes as they were
committed.
You'll likely want to ``git pull --rebase`` to get the official git commit
downloaded back to your repository. The SVN revision numbers of each commit can
be found at the end of the commit message, e.g. ``llvm-svn: 350914``.
You may also find the ``-n`` flag useful, like ``git llvm push -n``. This runs
through all the steps of committing *without* actually doing the commit, and
tells you what it would have done. That can be useful if you're unsure whether
the right thing will happen.
Checkout via SVN (deprecated)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Until we have fully migrated to Git, you may also get a fresh copy of
the code from the official Subversion repository.
* ``cd where-you-want-llvm-to-live``
* Read-Only: ``svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm``
* Read-Write: ``svn co https://user@llvm.org/svn/llvm-project/llvm/trunk llvm``
This will create an '``llvm``' directory in the current directory and fully
populate it with the LLVM source code, Makefiles, test directories, and local
copies of documentation files.
If you want to get a specific release (as opposed to the most recent revision),
you can check it out from the '``tags``' directory (instead of '``trunk``'). The
following releases are located in the following subdirectories of the '``tags``'
directory (an example checkout follows the list):
* Release 3.5.0 and later: **RELEASE_350/final** and so on
* Release 2.9 through 3.4: **RELEASE_29/final** and so on
* Release 1.1 through 2.8: **RELEASE_11** and so on
* Release 1.0: **RELEASE_1**
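For example, a read-only checkout of the 3.5.0 release (the destination directory name is arbitrary):
.. code-block:: console
% svn co http://llvm.org/svn/llvm-project/llvm/tags/RELEASE_350/final llvm-3.5.0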
Local LLVM Configuration
------------------------
Once you have checked out the repository, the LLVM suite source code must be
configured before being built. This process uses CMake. Unlike the normal
``configure`` script, CMake generates the build files in whatever format you
request as well as various ``*.inc`` files, and ``llvm/include/Config/config.h``.
Variables are passed to ``cmake`` on the command line using the format
``-D<variable name>=<value>``. The following variables are some common options
used by people developing LLVM.
+-------------------------+----------------------------------------------------+
| Variable | Purpose |
+=========================+====================================================+
| CMAKE_C_COMPILER | Tells ``cmake`` which C compiler to use. By |
| | default, this will be /usr/bin/cc. |
+-------------------------+----------------------------------------------------+
| CMAKE_CXX_COMPILER | Tells ``cmake`` which C++ compiler to use. By |
| | default, this will be /usr/bin/c++. |
+-------------------------+----------------------------------------------------+
| CMAKE_BUILD_TYPE | Tells ``cmake`` what type of build you are trying |
| | to generate files for. Valid options are Debug, |
| | Release, RelWithDebInfo, and MinSizeRel. Default |
| | is Debug. |
+-------------------------+----------------------------------------------------+
| CMAKE_INSTALL_PREFIX | Specifies the install directory to target when |
| | running the install action of the build files. |
+-------------------------+----------------------------------------------------+
| LLVM_TARGETS_TO_BUILD | A semicolon delimited list controlling which |
| | targets will be built and linked into llvm. |
| | The default list is defined as |
| | ``LLVM_ALL_TARGETS``, and can be set to include |
| | out-of-tree targets. The default value includes: |
| | ``AArch64, AMDGPU, ARM, BPF, Hexagon, Mips, |
| | MSP430, NVPTX, PowerPC, Sparc, SystemZ, X86, |
| | XCore``. |
| | |
+-------------------------+----------------------------------------------------+
| LLVM_ENABLE_DOXYGEN | Build doxygen-based documentation from the source |
| | code. This is disabled by default because it is |
| | slow and generates a lot of output. |
+-------------------------+----------------------------------------------------+
| LLVM_ENABLE_PROJECTS | A semicolon-delimited list selecting which of the |
| | other LLVM subprojects to additionally build. (Only|
| | effective when using a side-by-side project layout |
| | e.g. via git). The default list is empty. Can |
| | include: clang, libcxx, libcxxabi, libunwind, lldb,|
| | compiler-rt, lld, polly, or debuginfo-tests. |
+-------------------------+----------------------------------------------------+
| LLVM_ENABLE_SPHINX | Build sphinx-based documentation from the source |
| | code. This is disabled by default because it is |
| | slow and generates a lot of output. Sphinx version |
| | 1.5 or later recommended. |
+-------------------------+----------------------------------------------------+
| LLVM_BUILD_LLVM_DYLIB | Generate libLLVM.so. This library contains a |
| | default set of LLVM components that can be |
| | overridden with ``LLVM_DYLIB_COMPONENTS``. The |
| | default contains most of LLVM and is defined in |
| | ``tools/llvm-shlib/CMakelists.txt``. |
+-------------------------+----------------------------------------------------+
| LLVM_OPTIMIZED_TABLEGEN | Builds a release tablegen that gets used during |
| | the LLVM build. This can dramatically speed up |
| | debug builds. |
+-------------------------+----------------------------------------------------+
To configure LLVM, follow these steps:
#. Change directory into the object root directory:
.. code-block:: console
% cd OBJ_ROOT
#. Run ``cmake``:
.. code-block:: console
% cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/install/path
[other options] SRC_ROOT
Compiling the LLVM Suite Source Code
------------------------------------
Unlike with autotools, with CMake your build type is defined at configuration.
If you want to change your build type, you can re-run cmake with the following
invocation:
.. code-block:: console
% cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=type SRC_ROOT
Between runs, CMake preserves the values set for all options. CMake has the
following build types defined:
Debug
These builds are the default. The build system will compile the tools and
libraries unoptimized, with debugging information, and asserts enabled.
Release
For these builds, the build system will compile the tools and libraries
with optimizations enabled and not generate debug info. CMake's default
optimization level is -O3. This can be configured by setting the
``CMAKE_CXX_FLAGS_RELEASE`` variable on the CMake command line (see the
example after this list).
RelWithDebInfo
These builds are useful when debugging. They generate optimized binaries with
debug information. CMake's default optimization level is -O2. This can be
configured by setting the ``CMAKE_CXX_FLAGS_RELWITHDEBINFO`` variable on the
CMake command line.
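For instance, a hedged example of overriding the Release optimization level mentioned above:
.. code-block:: console
% cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS_RELEASE="-O2" SRC_ROOT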
Once you have LLVM configured, you can build it by entering the *OBJ_ROOT*
directory and issuing the following command:
.. code-block:: console
% make
If the build fails, please `check here`_ to see if you are using a version of
GCC that is known not to compile LLVM.
If you have multiple processors in your machine, you may wish to use some of the
parallel build options provided by GNU Make. For example, you could use the
command:
.. code-block:: console
% make -j2
There are several special targets which are useful when working with the LLVM
source code:
``make clean``
Removes all files generated by the build. This includes object files,
generated C/C++ files, libraries, and executables.
``make install``
Installs LLVM header files, libraries, tools, and documentation in a hierarchy
under ``$PREFIX``, specified with ``CMAKE_INSTALL_PREFIX``, which
defaults to ``/usr/local``.
``make docs-llvm-html``
If configured with ``-DLLVM_ENABLE_SPHINX=On``, this will generate a directory
at ``OBJ_ROOT/docs/html`` which contains the HTML formatted documentation.
Cross-Compiling LLVM
--------------------
It is possible to cross-compile LLVM itself. That is, you can create LLVM
executables and libraries to be hosted on a platform different from the platform
where they are built (a Canadian Cross build). To generate build files for
cross-compiling CMake provides a variable ``CMAKE_TOOLCHAIN_FILE`` which can
define compiler flags and variables used during the CMake test operations.
The result of such a build is executables that are not runnable on the build
host but can be executed on the target. As an example the following CMake
invocation can generate build files targeting iOS. This will work on Mac OS X
with the latest Xcode:
.. code-block:: console
% cmake -G "Ninja" -DCMAKE_OSX_ARCHITECTURES="armv7;armv7s;arm64"
-DCMAKE_TOOLCHAIN_FILE=<PATH_TO_LLVM>/cmake/platforms/iOS.cmake
-DCMAKE_BUILD_TYPE=Release -DLLVM_BUILD_RUNTIME=Off -DLLVM_INCLUDE_TESTS=Off
-DLLVM_INCLUDE_EXAMPLES=Off -DLLVM_ENABLE_BACKTRACES=Off [options]
<PATH_TO_LLVM>
Note: There are some additional flags that need to be passed when building for
iOS due to limitations in the iOS SDK.
Check :doc:`HowToCrossCompileLLVM` and `Clang docs on how to cross-compile in general
<http://clang.llvm.org/docs/CrossCompilation.html>`_ for more information
about cross-compiling.
The Location of LLVM Object Files
---------------------------------
The LLVM build system is capable of sharing a single LLVM source tree among
several LLVM builds. Hence, it is possible to build LLVM for several different
platforms or configurations using the same source tree.
* Change directory to where the LLVM object files should live:
.. code-block:: console
% cd OBJ_ROOT
* Run ``cmake``:
.. code-block:: console
% cmake -G "Unix Makefiles" SRC_ROOT
The LLVM build will create a structure underneath *OBJ_ROOT* that matches the
LLVM source tree. At each level where source files are present in the source
tree there will be a corresponding ``CMakeFiles`` directory in the *OBJ_ROOT*.
Underneath that directory there is another directory with a name ending in
``.dir`` under which you'll find object files for each source.
For example:
.. code-block:: console
% cd llvm_build_dir
% find lib/Support/ -name APFloat*
lib/Support/CMakeFiles/LLVMSupport.dir/APFloat.cpp.o
Optional Configuration Items
----------------------------
If you're running on a Linux system that supports the `binfmt_misc
<http://en.wikipedia.org/wiki/binfmt_misc>`_
module, and you have root access on the system, you can set your system up to
execute LLVM bitcode files directly. To do this, use commands like this (the
first command may not be required if you are already using the module):
.. code-block:: console
% mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
% echo ':llvm:M::BC::/path/to/lli:' > /proc/sys/fs/binfmt_misc/register
% chmod u+x hello.bc (if needed)
% ./hello.bc
This allows you to execute LLVM bitcode files directly. On Debian, you can also
use this command instead of the 'echo' command above:
.. code-block:: console
% sudo update-binfmts --install llvm /path/to/lli --magic 'BC'
.. _Program Layout:
.. _general layout:
Directory Layout
================
One useful source of information about the LLVM source base is the LLVM `doxygen
<http://www.doxygen.org/>`_ documentation available at
`<http://llvm.org/doxygen/>`_. The following is a brief introduction to code
layout:
``llvm/examples``
-----------------
Simple examples using the LLVM IR and JIT.
``llvm/include``
----------------
Public header files exported from the LLVM library. The three main subdirectories:
``llvm/include/llvm``
All LLVM-specific header files, and subdirectories for different portions of
LLVM: ``Analysis``, ``CodeGen``, ``Target``, ``Transforms``, etc...
``llvm/include/llvm/Support``
Generic support libraries provided with LLVM but not necessarily specific to
LLVM. For example, some C++ STL utilities and a Command Line option processing
library store header files here.
``llvm/include/llvm/Config``
Header files configured by ``cmake``. They wrap "standard" UNIX and
C header files. Source code can include these header files which
automatically take care of the conditional #includes that ``cmake``
generates.
``llvm/lib``
------------
Most source files are here. By putting code in libraries, LLVM makes it easy to
share code among the `tools`_.
``llvm/lib/IR/``
Core LLVM source files that implement core classes like Instruction and
BasicBlock.
``llvm/lib/AsmParser/``
Source code for the LLVM assembly language parser library.
``llvm/lib/Bitcode/``
Code for reading and writing bitcode.
``llvm/lib/Analysis/``
A variety of program analyses, such as Call Graphs, Induction Variables,
Natural Loop Identification, etc.
``llvm/lib/Transforms/``
IR-to-IR program transformations, such as Aggressive Dead Code Elimination,
Sparse Conditional Constant Propagation, Inlining, Loop Invariant Code Motion,
Dead Global Elimination, and many others.
``llvm/lib/Target/``
Files describing target architectures for code generation. For example,
``llvm/lib/Target/X86`` holds the X86 machine description.
``llvm/lib/CodeGen/``
The major parts of the code generator: Instruction Selector, Instruction
Scheduling, and Register Allocation.
``llvm/lib/MC/``
Libraries for the machine-code (MC) layer, used for assembling, disassembling,
and emitting object files.
``llvm/lib/ExecutionEngine/``
Libraries for directly executing bitcode at runtime in interpreted and
JIT-compiled scenarios.
``llvm/lib/Support/``
Source code corresponding to the header files in ``llvm/include/ADT/``
and ``llvm/include/Support/``.
``llvm/projects``
-----------------
Projects not strictly part of LLVM but shipped with LLVM. This is also the
directory for creating your own LLVM-based projects which leverage the LLVM
build system.
``llvm/test``
-------------
Feature and regression tests and other sanity checks on LLVM infrastructure. These
are intended to run quickly and cover a lot of territory without being exhaustive.
``test-suite``
--------------
A comprehensive correctness, performance, and benchmarking test suite
for LLVM. This comes in a `separate git repository
<https://github.com/llvm/llvm-test-suite>`_, because it contains a
large amount of third-party code under a variety of licenses. For
details see the :doc:`Testing Guide <TestingGuide>` document.
.. _tools:
``llvm/tools``
--------------
Executables built out of the libraries
above, which form the main part of the user interface. You can always get help
for a tool by typing ``tool_name -help``. The following is a brief introduction
to the most important tools. More detailed information is in
the `Command Guide <CommandGuide/index.html>`_.
``bugpoint``
``bugpoint`` is used to debug optimization passes or code generation backends
by narrowing down the given test case to the minimum number of passes and/or
instructions that still cause a problem, whether it is a crash or
miscompilation. See `<HowToSubmitABug.html>`_ for more information on using
``bugpoint``.
``llvm-ar``
The archiver produces an archive containing the given LLVM bitcode files,
optionally with an index for faster lookup.
``llvm-as``
The assembler transforms the human readable LLVM assembly to LLVM bitcode.
``llvm-dis``
The disassembler transforms the LLVM bitcode to human readable LLVM assembly.
``llvm-link``
``llvm-link``, not surprisingly, links multiple LLVM modules into a single
program.
``lli``
``lli`` is the LLVM interpreter, which can directly execute LLVM bitcode
(although very slowly...). For architectures that support it (currently x86,
Sparc, and PowerPC), by default, ``lli`` will function as a Just-In-Time
compiler (if the functionality was compiled in), and will execute the code
*much* faster than the interpreter.
``llc``
``llc`` is the LLVM backend compiler, which translates LLVM bitcode to a
native code assembly file.
``opt``
``opt`` reads LLVM bitcode, applies a series of LLVM to LLVM transformations
(which are specified on the command line), and outputs the resultant
bitcode. '``opt -help``' is a good way to get a list of the
program transformations available in LLVM.
``opt`` can also run a specific analysis on an input LLVM bitcode
file and print the results. This is primarily useful for debugging
analyses, or for familiarizing yourself with what an analysis does;
a brief example follows this list.
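A hedged example of both uses of ``opt`` (the pass and file names are illustrative):
.. code-block:: console
% opt -mem2reg -instcombine hello.bc -o hello.opt.bc
% opt -analyze -loops hello.bc
The first command runs two transformation passes and writes the resulting bitcode; the second prints the results of the natural-loop analysis.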
``llvm/utils``
--------------
Utilities for working with LLVM source code; some are part of the build process
because they are code generators for parts of the infrastructure.
``codegen-diff``
``codegen-diff`` finds differences between code that LLC
generates and code that LLI generates. This is useful if you are
debugging one of them, assuming that the other generates correct output. For
the full user manual, run ``perldoc codegen-diff``.
``emacs/``
Emacs and XEmacs syntax highlighting for LLVM assembly files and TableGen
description files. See the ``README`` for information on using them.
``getsrcs.sh``
Finds and outputs all non-generated source files,
useful if one wishes to do a lot of development across directories
and does not want to find each file. One way to use it is to run,
for example: ``xemacs `utils/getsrcs.sh``` from the top of the LLVM source
tree.
``llvmgrep``
Performs an ``egrep -H -n`` on each source file in LLVM and
passes to it a regular expression provided on ``llvmgrep``'s command
line. This is an efficient way of searching the source base for a
particular regular expression.
``TableGen/``
Contains the tool used to generate register
descriptions, instruction set descriptions, and even assemblers from common
TableGen description files.
``vim/``
vim syntax-highlighting for LLVM assembly files
and TableGen description files. See the ``README`` for how to use them.
.. _simple example:
An Example Using the LLVM Tool Chain
====================================
This section gives an example of using LLVM with the Clang front end.
Example with clang
------------------
#. First, create a simple C file, name it 'hello.c':
.. code-block:: c
#include <stdio.h>
int main() {
printf("hello world\n");
return 0;
}
#. Next, compile the C file into a native executable:
.. code-block:: console
% clang hello.c -o hello
.. note::
Clang works just like GCC by default. The standard -S and -c arguments
work as usual (producing a native .s or .o file, respectively).
#. Next, compile the C file into an LLVM bitcode file:
.. code-block:: console
% clang -O3 -emit-llvm hello.c -c -o hello.bc
The -emit-llvm option can be used with the -S or -c options to emit an LLVM
``.ll`` or ``.bc`` file (respectively) for the code. This allows you to use
the `standard LLVM tools <CommandGuide/index.html>`_ on the bitcode file.
#. Run the program in both forms. To run the program, use:
.. code-block:: console
% ./hello
and
.. code-block:: console
% lli hello.bc
The second example shows how to invoke the LLVM JIT, :doc:`lli
<CommandGuide/lli>`.
#. Use the ``llvm-dis`` utility to take a look at the LLVM assembly code:
.. code-block:: console
% llvm-dis < hello.bc | less
#. Compile the program to native assembly using the LLC code generator:
.. code-block:: console
% llc hello.bc -o hello.s
#. Assemble the native assembly language file into a program:
.. code-block:: console
% /opt/SUNWspro/bin/cc -xarch=v9 hello.s -o hello.native # On Solaris
% gcc hello.s -o hello.native # On others
#. Execute the native code program:
.. code-block:: console
% ./hello.native
Note that using clang to compile directly to native code (i.e. when the
``-emit-llvm`` option is not present) does steps 6/7/8 for you.
Common Problems
===============
If you are having problems building or using LLVM, or if you have any other
general questions about LLVM, please consult the `Frequently Asked
Questions <FAQ.html>`_ page.
.. _links:
Links
=====
This document is just an **introduction** on how to use LLVM to do some simple
things... there are many more interesting and complicated things that you can do
that aren't documented here (but we'll gladly accept a patch if you want to
write something up!). For more information about LLVM, check out:
* `LLVM Homepage <http://llvm.org/>`_
* `LLVM Doxygen Tree <http://llvm.org/doxygen/>`_
* `Starting a Project that Uses LLVM <http://llvm.org/docs/Projects.html>`_
.. _installing arcanist: https://secure.phabricator.com/book/phabricator/article/arcanist_quick_start/
Index: vendor/llvm/dist-release_80/docs/LibFuzzer.rst
===================================================================
--- vendor/llvm/dist-release_80/docs/LibFuzzer.rst (revision 344165)
+++ vendor/llvm/dist-release_80/docs/LibFuzzer.rst (revision 344166)
@@ -1,753 +1,763 @@
=======================================================
libFuzzer – a library for coverage-guided fuzz testing.
=======================================================
.. contents::
:local:
:depth: 1
Introduction
============
LibFuzzer is an in-process, coverage-guided, evolutionary fuzzing engine.
LibFuzzer is linked with the library under test, and feeds fuzzed inputs to the
library via a specific fuzzing entrypoint (aka "target function"); the fuzzer
then tracks which areas of the code are reached, and generates mutations on the
corpus of input data in order to maximize the code coverage.
The code coverage
information for libFuzzer is provided by LLVM's SanitizerCoverage_
instrumentation.
Contact: libfuzzer(#)googlegroups.com
Versions
========
LibFuzzer is under active development, so you will need the current
(or at least a very recent) version of the Clang compiler (see `building Clang from trunk`_).
Refer to https://releases.llvm.org/5.0.0/docs/LibFuzzer.html for documentation on the older version.
Getting Started
===============
.. contents::
:local:
:depth: 1
Fuzz Target
-----------
The first step in using libFuzzer on a library is to implement a
*fuzz target* -- a function that accepts an array of bytes and
does something interesting with these bytes using the API under test.
Like this:
.. code-block:: c++
// fuzz_target.cc
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
DoSomethingInterestingWithMyAPI(Data, Size);
return 0; // Non-zero return values are reserved for future use.
}
Note that this fuzz target does not depend on libFuzzer in any way
and so it is possible and even desirable to use it with other fuzzing engines
e.g. AFL_ and/or Radamsa_.
Some important things to remember about fuzz targets:
* The fuzzing engine will execute the fuzz target many times with different inputs in the same process.
* It must tolerate any kind of input (empty, huge, malformed, etc).
* It must not `exit()` on any input.
* It may use threads but ideally all threads should be joined at the end of the function.
* It must be as deterministic as possible. Non-determinism (e.g. random decisions not based on the input bytes) will make fuzzing inefficient.
* It must be fast. Try avoiding cubic or greater complexity, logging, or excessive memory consumption.
* Ideally, it should not modify any global state (although that's not strict).
* Usually, the narrower the target the better. E.g. if your target can parse several data formats, split it into several targets, one per format.
Fuzzer Usage
------------
Recent versions of Clang (starting from 6.0) include libFuzzer, and no extra installation is necessary.
In order to build your fuzzer binary, use the `-fsanitize=fuzzer` flag during the
compilation and linking. In most cases you may want to combine libFuzzer with
AddressSanitizer_ (ASAN), UndefinedBehaviorSanitizer_ (UBSAN), or both. You can
also build with MemorySanitizer_ (MSAN), but support is experimental::
clang -g -O1 -fsanitize=fuzzer mytarget.c # Builds the fuzz target w/o sanitizers
clang -g -O1 -fsanitize=fuzzer,address mytarget.c # Builds the fuzz target with ASAN
clang -g -O1 -fsanitize=fuzzer,signed-integer-overflow mytarget.c # Builds the fuzz target with a part of UBSAN
clang -g -O1 -fsanitize=fuzzer,memory mytarget.c # Builds the fuzz target with MSAN
This will perform the necessary instrumentation, as well as linking with the libFuzzer library.
Note that ``-fsanitize=fuzzer`` links in the libFuzzer's ``main()`` symbol.
If modifying ``CFLAGS`` of a large project, which also compiles executables
requiring their own ``main`` symbol, it may be desirable to request just the
instrumentation without linking::
clang -fsanitize=fuzzer-no-link mytarget.c
Then libFuzzer can be linked to the desired driver by passing in
``-fsanitize=fuzzer`` during the linking stage.
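A hedged sketch of that two-step flow (file names are illustrative)::
clang -c -g -O1 -fsanitize=fuzzer-no-link mytarget.c -o mytarget.o
clang -g -O1 -fsanitize=fuzzer mytarget.o -o my_fuzzer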
.. _libfuzzer-corpus:
Corpus
------
Coverage-guided fuzzers like libFuzzer rely on a corpus of sample inputs for the
code under test. This corpus should ideally be seeded with a varied collection
of valid and invalid inputs for the code under test; for example, for a graphics
library the initial corpus might hold a variety of different small PNG/JPG/GIF
files. The fuzzer generates random mutations based around the sample inputs in
the current corpus. If a mutation triggers execution of a previously-uncovered
path in the code under test, then that mutation is saved to the corpus for
future variations.
LibFuzzer will work without any initial seeds, but will be less
efficient if the library under test accepts complex,
structured inputs.
The corpus can also act as a sanity/regression check, to confirm that the
fuzzing entrypoint still works and that all of the sample inputs run through
the code under test without problems.
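One way to use the corpus as such a regression check is to pass the corpus files
(rather than the directory) to the fuzzer binary, which then re-runs them once
without fuzzing; this sketch assumes a shell that expands the glob:
.. code-block:: console
./my_fuzzer CORPUS_DIR/*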
If you have a large corpus (either generated by fuzzing or acquired by other means)
you may want to minimize it while still preserving the full coverage. One way to do that
is to use the `-merge=1` flag:
.. code-block:: console
mkdir NEW_CORPUS_DIR # Store minimized corpus here.
./my_fuzzer -merge=1 NEW_CORPUS_DIR FULL_CORPUS_DIR
You may use the same flag to add more interesting items to an existing corpus.
Only the inputs that trigger new coverage will be added to the first corpus.
.. code-block:: console
./my_fuzzer -merge=1 CURRENT_CORPUS_DIR NEW_POTENTIALLY_INTERESTING_INPUTS_DIR
Running
-------
To run the fuzzer, first create a Corpus_ directory that holds the
initial "seed" sample inputs:
.. code-block:: console
mkdir CORPUS_DIR
cp /some/input/samples/* CORPUS_DIR
Then run the fuzzer on the corpus directory:
.. code-block:: console
./my_fuzzer CORPUS_DIR # -max_len=1000 -jobs=20 ...
As the fuzzer discovers new interesting test cases (i.e. test cases that
trigger coverage of new paths through the code under test), those test cases
will be added to the corpus directory.
By default, the fuzzing process will continue indefinitely – at least until
a bug is found. Any crashes or sanitizer failures will be reported as usual,
stopping the fuzzing process, and the particular input that triggered the bug
will be written to disk (typically as ``crash-<sha1>``, ``leak-<sha1>``,
or ``timeout-<sha1>``).
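A saved artifact can later be passed back to the fuzzer binary, which re-executes
it once without fuzzing; this is a convenient way to reproduce a reported failure:
.. code-block:: console
./my_fuzzer ./crash-<sha1>   # substitute the actual artifact file name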
Parallel Fuzzing
----------------
Each libFuzzer process is single-threaded, unless the library under test starts
its own threads. However, it is possible to run multiple libFuzzer processes in
parallel with a shared corpus directory; this has the advantage that any new
inputs found by one fuzzer process will be available to the other fuzzer
processes (unless you disable this with the ``-reload=0`` option).
This is primarily controlled by the ``-jobs=N`` option, which indicates that
`N` fuzzing jobs should be run to completion (i.e. until a bug is found or
time/iteration limits are reached). These jobs will be run across a set of
worker processes, by default using half of the available CPU cores; the count of
worker processes can be overridden by the ``-workers=N`` option. For example,
running with ``-jobs=30`` on a 12-core machine would run 6 workers by default,
with each worker averaging 5 bugs by completion of the entire process.
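For instance, the scenario above could be launched explicitly as follows (the
corpus directory name and flag values are illustrative):
.. code-block:: console
./my_fuzzer CORPUS_DIR -jobs=30 -workers=6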
Resuming merge
--------------
Merging large corpora may be time consuming, and it is often desirable to do it
on preemptable VMs, where the process may be killed at any time.
In order to seamlessly resume the merge, use the ``-merge_control_file`` flag
and use ``killall -SIGUSR1 /path/to/fuzzer/binary`` to stop the merge gracefully. Example:
.. code-block:: console
% rm -f SomeLocalPath
% ./my_fuzzer CORPUS1 CORPUS2 -merge=1 -merge_control_file=SomeLocalPath
...
MERGE-INNER: using the control file 'SomeLocalPath'
...
# While this is running, do `killall -SIGUSR1 my_fuzzer` in another console
==9015== INFO: libFuzzer: exiting as requested
# This will leave the file SomeLocalPath with the partial state of the merge.
# Now, you can continue the merge by executing the same command. The merge
# will continue from where it has been interrupted.
% ./my_fuzzer CORPUS1 CORPUS2 -merge=1 -merge_control_file=SomeLocalPath
...
MERGE-OUTER: non-empty control file provided: 'SomeLocalPath'
MERGE-OUTER: control file ok, 32 files total, first not processed file 20
...
Options
=======
To run the fuzzer, pass zero or more corpus directories as command line
arguments. The fuzzer will read test inputs from each of these corpus
directories, and any new test inputs that are generated will be written
back to the first corpus directory:
.. code-block:: console
./fuzzer [-flag1=val1 [-flag2=val2 ...] ] [dir1 [dir2 ...] ]
If a list of files (rather than directories) is passed to the fuzzer program,
then it will re-run those files as test inputs but will not perform any fuzzing.
In this mode the fuzzer binary can be used as a regression test (e.g. on a
continuous integration system) to check that the target function and saved inputs
still work.
The most important command line options are:
``-help``
Print help message.
``-seed``
Random seed. If 0 (the default), the seed is generated.
``-runs``
Number of individual test runs, -1 (the default) to run indefinitely.
``-max_len``
Maximum length of a test input. If 0 (the default), libFuzzer tries to guess
a good value based on the corpus (and reports it).
``-timeout``
Timeout in seconds, default 1200. If an input takes longer than this timeout,
the process is treated as a failure case.
``-rss_limit_mb``
Memory usage limit in Mb, default 2048. Use 0 to disable the limit.
If an input requires more than this amount of RSS memory to execute,
the process is treated as a failure case.
The limit is checked in a separate thread every second.
If running w/o ASAN/MSAN, you may use 'ulimit -v' instead.
``-malloc_limit_mb``
If non-zero, the fuzzer will exit if the target tries to allocate this
number of Mb with one malloc call.
If zero (the default), the same limit as rss_limit_mb is applied.
``-timeout_exitcode``
Exit code (default 77) used if libFuzzer reports a timeout.
``-error_exitcode``
Exit code (default 77) used if libFuzzer itself (not a sanitizer) reports a bug (leak, OOM, etc).
``-max_total_time``
If positive, indicates the maximum total time in seconds to run the fuzzer.
If 0 (the default), run indefinitely.
``-merge``
If set to 1, any corpus inputs from the 2nd, 3rd etc. corpus directories
that trigger new code coverage will be merged into the first corpus
directory. Defaults to 0. This flag can be used to minimize a corpus.
``-merge_control_file``
Specify a control file used for the merge process.
If a merge process gets killed it tries to leave this file in a state
suitable for resuming the merge. By default a temporary file will be used.
``-minimize_crash``
If 1, minimizes the provided crash input.
Use with -runs=N or -max_total_time=N to limit the number of attempts.
``-reload``
If set to 1 (the default), the corpus directory is re-read periodically to
check for new inputs; this allows detection of new inputs that were discovered
by other fuzzing processes.
``-jobs``
Number of fuzzing jobs to run to completion. Default value is 0, which runs a
single fuzzing process until completion. If the value is >= 1, then this
number of jobs performing fuzzing are run, in a collection of parallel
separate worker processes; each such worker process has its
``stdout``/``stderr`` redirected to ``fuzz-<JOB>.log``.
``-workers``
Number of simultaneous worker processes to run the fuzzing jobs to completion
in. If 0 (the default), ``min(jobs, NumberOfCpuCores()/2)`` is used.
``-dict``
Provide a dictionary of input keywords; see Dictionaries_.
``-use_counters``
Use `coverage counters`_ to generate approximate counts of how often code
blocks are hit; defaults to 1.
``-reduce_inputs``
Try to reduce the size of inputs while preserving their full feature sets;
defaults to 1.
``-use_value_profile``
Use `value profile`_ to guide corpus expansion; defaults to 0.
``-only_ascii``
If 1, generate only ASCII (``isprint``+``isspace``) inputs. Defaults to 0.
``-artifact_prefix``
Provide a prefix to use when saving fuzzing artifacts (crash, timeout, or
slow inputs) as ``$(artifact_prefix)file``. Defaults to empty.
``-exact_artifact_path``
Ignored if empty (the default). If non-empty, write the single artifact on
failure (crash, timeout) as ``$(exact_artifact_path)``. This overrides
``-artifact_prefix`` and will not use a checksum in the file name. Do not use
the same path for several parallel processes.
``-print_pcs``
If 1, print out newly covered PCs. Defaults to 0.
``-print_final_stats``
If 1, print statistics at exit. Defaults to 0.
``-detect_leaks``
If 1 (default) and if LeakSanitizer is enabled
try to detect memory leaks during fuzzing (i.e. not only at shut down).
``-close_fd_mask``
Indicate output streams to close at startup. Be careful, this will
remove diagnostic output from target code (e.g. messages on assert failure).
- 0 (default): close neither ``stdout`` nor ``stderr``
- 1 : close ``stdout``
- 2 : close ``stderr``
- 3 : close both ``stdout`` and ``stderr``.
For the full list of flags run the fuzzer binary with ``-help=1``.
Output
======
During operation the fuzzer prints information to ``stderr``, for example::
INFO: Seed: 1523017872
INFO: Loaded 1 modules (16 guards): [0x744e60, 0x744ea0),
INFO: -max_len is not provided, using 64
INFO: A corpus is not provided, starting from an empty corpus
#0 READ units: 1
#1 INITED cov: 3 ft: 2 corp: 1/1b exec/s: 0 rss: 24Mb
#3811 NEW cov: 4 ft: 3 corp: 2/2b exec/s: 0 rss: 25Mb L: 1 MS: 5 ChangeBit-ChangeByte-ChangeBit-ShuffleBytes-ChangeByte-
#3827 NEW cov: 5 ft: 4 corp: 3/4b exec/s: 0 rss: 25Mb L: 2 MS: 1 CopyPart-
#3963 NEW cov: 6 ft: 5 corp: 4/6b exec/s: 0 rss: 25Mb L: 2 MS: 2 ShuffleBytes-ChangeBit-
#4167 NEW cov: 7 ft: 6 corp: 5/9b exec/s: 0 rss: 25Mb L: 3 MS: 1 InsertByte-
...
The early parts of the output include information about the fuzzer options and
configuration, including the current random seed (in the ``Seed:`` line; this
can be overridden with the ``-seed=N`` flag).
Further output lines have the form of an event code and statistics. The
possible event codes are:
``READ``
The fuzzer has read in all of the provided input samples from the corpus
directories.
``INITED``
The fuzzer has completed initialization, which includes running each of
the initial input samples through the code under test.
``NEW``
The fuzzer has created a test input that covers new areas of the code
under test. This input will be saved to the primary corpus directory.
``REDUCE``
The fuzzer has found a better (smaller) input that triggers previously
discovered features (set ``-reduce_inputs=0`` to disable).
``pulse``
The fuzzer has generated 2\ :sup:`n` inputs (generated periodically to reassure
the user that the fuzzer is still working).
``DONE``
The fuzzer has completed operation because it has reached the specified
iteration limit (``-runs``) or time limit (``-max_total_time``).
``RELOAD``
The fuzzer is performing a periodic reload of inputs from the corpus
directory; this allows it to discover any inputs discovered by other
fuzzer processes (see `Parallel Fuzzing`_).
Each output line also reports the following statistics (when non-zero):
``cov:``
Total number of code blocks or edges covered by executing the current corpus.
``ft:``
libFuzzer uses different signals to evaluate the code coverage:
edge coverage, edge counters, value profiles, indirect caller/callee pairs, etc.
These signals combined are called *features* (`ft:`).
``corp:``
Number of entries in the current in-memory test corpus and its size in bytes.
``lim:``
Current limit on the length of new entries in the corpus. Increases over time
until the max length (``-max_len``) is reached.
``exec/s:``
Number of fuzzer iterations per second.
``rss:``
Current memory consumption.
For ``NEW`` events, the output line also includes information about the mutation
operation that produced the new input:
``L:``
Size of the new input in bytes.
``MS: <n> <operations>``
Count and list of the mutation operations used to generate the input.
Examples
========
.. contents::
:local:
:depth: 1
Toy example
-----------
A simple function that does something interesting if it receives the input
"HI!"::
cat << EOF > test_fuzzer.cc
#include <stdint.h>
#include <stddef.h>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
if (size > 0 && data[0] == 'H')
if (size > 1 && data[1] == 'I')
if (size > 2 && data[2] == '!')
__builtin_trap();
return 0;
}
EOF
# Build test_fuzzer.cc with asan and link against libFuzzer.a
clang++ -fsanitize=address -fsanitize-coverage=trace-pc-guard test_fuzzer.cc libFuzzer.a
# Run the fuzzer with no corpus.
./a.out
You should get an error pretty quickly::
INFO: Seed: 1523017872
INFO: Loaded 1 modules (16 guards): [0x744e60, 0x744ea0),
INFO: -max_len is not provided, using 64
INFO: A corpus is not provided, starting from an empty corpus
#0 READ units: 1
#1 INITED cov: 3 ft: 2 corp: 1/1b exec/s: 0 rss: 24Mb
#3811 NEW cov: 4 ft: 3 corp: 2/2b exec/s: 0 rss: 25Mb L: 1 MS: 5 ChangeBit-ChangeByte-ChangeBit-ShuffleBytes-ChangeByte-
#3827 NEW cov: 5 ft: 4 corp: 3/4b exec/s: 0 rss: 25Mb L: 2 MS: 1 CopyPart-
#3963 NEW cov: 6 ft: 5 corp: 4/6b exec/s: 0 rss: 25Mb L: 2 MS: 2 ShuffleBytes-ChangeBit-
#4167 NEW cov: 7 ft: 6 corp: 5/9b exec/s: 0 rss: 25Mb L: 3 MS: 1 InsertByte-
==31511== ERROR: libFuzzer: deadly signal
...
artifact_prefix='./'; Test unit written to ./crash-b13e8756b13a00cf168300179061fb4b91fefbed
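With Clang 6.0 or newer the same toy example can also be built with the combined
driver flags from the Fuzzer Usage section above, without a separately built
``libFuzzer.a``:
.. code-block:: console
clang++ -g -O1 -fsanitize=fuzzer,address test_fuzzer.cc -o toy_fuzzer
./toy_fuzzer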
More examples
-------------
Examples of real-life fuzz targets and the bugs they find can be found
at http://tutorial.libfuzzer.info. Among other things you can learn how
to detect Heartbleed_ in one second.
Advanced features
=================
.. contents::
:local:
:depth: 1
Dictionaries
------------
LibFuzzer supports user-supplied dictionaries with input language keywords
or other interesting byte sequences (e.g. multi-byte magic values).
Use ``-dict=DICTIONARY_FILE``. For some input languages using a dictionary
may significantly improve the search speed.
The dictionary syntax is similar to that used by AFL_ for its ``-x`` option::
# Lines starting with '#' and empty lines are ignored.
# Adds "blah" (w/o quotes) to the dictionary.
kw1="blah"
# Use \\ for backslash and \" for quotes.
kw2="\"ac\\dc\""
# Use \xAB for hex values
kw3="\xF7\xF8"
# the name of the keyword followed by '=' may be omitted:
"foo\x0Abar"
Tracing CMP instructions
------------------------
With an additional compiler flag ``-fsanitize-coverage=trace-cmp``
(on by default as part of ``-fsanitize=fuzzer``, see SanitizerCoverageTraceDataFlow_)
libFuzzer will intercept CMP instructions and guide mutations based
on the arguments of intercepted CMP instructions. This may slow down
the fuzzing but is very likely to improve the results.
Value Profile
-------------
With ``-fsanitize-coverage=trace-cmp``
and extra run-time flag ``-use_value_profile=1`` the fuzzer will
collect value profiles for the parameters of compare instructions
and treat some new values as new coverage.
The current implementation does roughly the following:
* The compiler instruments all CMP instructions with a callback that receives both CMP arguments.
* The callback computes `(caller_pc&4095) | (popcnt(Arg1 ^ Arg2) << 12)` and uses this value to set a bit in a bitset.
* Every new observed bit in the bitset is treated as new coverage.
This feature has the potential to discover many interesting inputs,
but there are two downsides.
First, the extra instrumentation may cause up to a 2x additional slowdown.
Second, the corpus may grow several times larger.
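The following is a rough C++ sketch of the feature computation described above,
not the actual libFuzzer implementation; the bitset size and function name are
illustrative:
.. code-block:: c++
#include <bitset>
#include <cstdint>
#include <cstddef>
// One bit per observed (caller PC, popcount of Arg1 ^ Arg2) combination.
static std::bitset<1 << 19> ValueProfileBits;
// Called (conceptually) for every executed CMP instruction.
bool OnCmp(uintptr_t CallerPC, uint64_t Arg1, uint64_t Arg2) {
  size_t Feature = (CallerPC & 4095) |
                   (static_cast<size_t>(__builtin_popcountll(Arg1 ^ Arg2)) << 12);
  bool IsNew = !ValueProfileBits.test(Feature);
  ValueProfileBits.set(Feature);   // A newly set bit is treated as new coverage.
  return IsNew;
}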
Fuzzer-friendly build mode
---------------------------
Sometimes the code under test is not fuzzing-friendly. Examples:
- The target code uses a PRNG seeded e.g. by system time, so
two consecutive invocations may execute different code paths
even if the end result is the same. This causes the fuzzer to treat
two similar inputs as significantly different and blows up the test corpus.
E.g. libxml uses ``rand()`` inside its hash table.
- The target code uses checksums to protect from invalid inputs.
E.g. png checks CRC for every chunk.
In many cases it makes sense to create a special fuzzing-friendly build
with certain fuzzing-unfriendly features disabled. For consistency, we propose
using a common build macro for all such cases: ``FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION``.
.. code-block:: c++
void MyInitPRNG() {
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
// In fuzzing mode the behavior of the code should be deterministic.
srand(0);
#else
srand(time(0));
#endif
}
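The same macro can guard checksum verification, as in the PNG CRC example above;
this is a hypothetical sketch (``ComputeCRC32`` stands in for the library's real
checksum routine):
.. code-block:: c++
#include <stdint.h>
#include <stddef.h>
uint32_t ComputeCRC32(const uint8_t *Data, size_t Size);  // provided elsewhere
bool ChunkChecksumIsValid(const uint8_t *Data, size_t Size, uint32_t Expected) {
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
  // Accept any checksum so the fuzzer can reach the code behind the check.
  (void)Data; (void)Size; (void)Expected;
  return true;
#else
  return ComputeCRC32(Data, Size) == Expected;
#endif
}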
AFL compatibility
-----------------
LibFuzzer can be used together with AFL_ on the same test corpus.
Both fuzzers expect the test corpus to reside in a directory, one file per input.
You can run both fuzzers on the same corpus, one after another:
.. code-block:: console
./afl-fuzz -i testcase_dir -o findings_dir /path/to/program @@
./llvm-fuzz testcase_dir findings_dir # Will write new tests to testcase_dir
Periodically restart both fuzzers so that they can use each other's findings.
Currently, there is no simple way to run both fuzzing engines in parallel while sharing the same corpus dir.
You may also use AFL on your target function ``LLVMFuzzerTestOneInput``:
see an example `here <https://github.com/llvm-mirror/compiler-rt/tree/master/lib/fuzzer/afl>`__.
How good is my fuzzer?
----------------------
Once you implement your target function ``LLVMFuzzerTestOneInput`` and fuzz it to death,
you will want to know whether the function or the corpus can be improved further.
One easy-to-use metric is, of course, code coverage.
We recommend using
`Clang Coverage <http://clang.llvm.org/docs/SourceBasedCodeCoverage.html>`_
to visualize and study your code coverage
(`example <https://github.com/google/fuzzer-test-suite/blob/master/tutorial/libFuzzerTutorial.md#visualizing-coverage>`_).
User-supplied mutators
----------------------
LibFuzzer allows the use of custom (user-supplied) mutators;
see FuzzerInterface.h_.
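FuzzerInterface.h_ declares an optional ``LLVMFuzzerCustomMutator`` hook; a
minimal sketch that simply delegates to the built-in mutator might look like this
(consult the header for the authoritative declarations):
.. code-block:: c++
#include <stdint.h>
#include <stddef.h>
// Provided by libFuzzer; applies one of its built-in mutations in place.
extern "C" size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize);
// Optional hook: if defined, libFuzzer calls it instead of its default mutator.
extern "C" size_t LLVMFuzzerCustomMutator(uint8_t *Data, size_t Size,
                                          size_t MaxSize, unsigned int Seed) {
  (void)Seed;  // A real mutator would use the seed for reproducible randomness.
  return LLVMFuzzerMutate(Data, Size, MaxSize);
}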
Startup initialization
----------------------
If the library being tested needs to be initialized, there are several options.
The simplest way is to have a statically initialized global object inside
`LLVMFuzzerTestOneInput` (or in global scope if that works for you):
.. code-block:: c++
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
static bool Initialized = DoInitialization();
...
Alternatively, you may define an optional init function and it will receive
the program arguments that you can read and modify. Do this **only** if you
really need to access ``argv``/``argc``.
.. code-block:: c++
extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv) {
ReadAndMaybeModify(argc, argv);
return 0;
}
Leaks
-----
Binaries built with AddressSanitizer_ or LeakSanitizer_ will try to detect
memory leaks at process shutdown.
For in-process fuzzing this is inconvenient
since the fuzzer needs to report a leak with a reproducer as soon as the leaky
mutation is found. However, running full leak detection after every mutation
is expensive.
By default (``-detect_leaks=1``) libFuzzer will count the number of
``malloc`` and ``free`` calls when executing every mutation.
If the numbers don't match (which by itself doesn't mean there is a leak),
libFuzzer will invoke the more expensive LeakSanitizer_
pass, and if an actual leak is found, it will be reported with the reproducer
and the process will exit.
If your target has massive leaks and the leak detection is disabled
you will eventually run out of RAM (see the ``-rss_limit_mb`` flag).
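If leak detection makes fuzzing a known-leaky target impractical, it can be
disabled for a run while keeping the RSS limit as a backstop (values are
illustrative):
.. code-block:: console
./my_fuzzer -detect_leaks=0 -rss_limit_mb=2048 CORPUS_DIR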
Developing libFuzzer
====================
LibFuzzer is built as part of the LLVM project by default on macOS and Linux.
Users of other operating systems can explicitly request compilation using the
``-DLIBFUZZER_ENABLE=YES`` flag.
Tests are run using the ``check-fuzzer`` target from a build directory
configured with the ``-DLIBFUZZER_ENABLE_TESTS=ON`` flag.
.. code-block:: console
ninja check-fuzzer
FAQ
=========================
Q. Why doesn't libFuzzer use any of the LLVM support?
-----------------------------------------------------
There are two reasons.
First, we want this library to be usable outside of LLVM without users having to
build the rest of LLVM. This may sound unconvincing to many LLVM folks,
but in practice the need to build the whole of LLVM frightens away many potential
users -- and we want more users to use this code.
Second, there is a subtle technical reason not to rely on the rest of LLVM, or
any other large body of code (maybe not even STL). When coverage instrumentation
is enabled, it will also instrument the LLVM support code which will blow up the
coverage set of the process (since the fuzzer is in-process). In other words, by
using more external dependencies we will slow down the fuzzer while the main
reason for it to exist is extreme speed.
-Q. What about Windows then? The fuzzer contains code that does not build on Windows.
+Q. Does libFuzzer Support Windows?
------------------------------------------------------------------------------------
-Volunteers are welcome.
+Yes, libFuzzer now supports Windows. Initial support was added in r341082.
+You can download a build of Clang for Windows
+that has libFuzzer from
+`LLVM Snapshot Builds <https://llvm.org/builds/>`_.
+
+Using libFuzzer on Windows without ASAN is unsupported. Building fuzzers with the
+``/MD`` (dynamic runtime library) compile option is unsupported. Support for these
+may be added in the future. Linking fuzzers with the ``/INCREMENTAL`` link option
+(or the ``/DEBUG`` option which implies it) is also unsupported.
+
+Send any questions or comments to the mailing list: libfuzzer(#)googlegroups.com
Q. When is libFuzzer not a good solution for a problem?
---------------------------------------------------------
* If the test inputs are validated by the target library and the validator
asserts/crashes on invalid inputs, in-process fuzzing is not applicable.
* Bugs in the target library may accumulate without being detected. E.g. a memory
corruption that goes undetected at first and then leads to a crash while
testing another input. This is why it is highly recommended to run this
in-process fuzzer with all sanitizers to detect most bugs on the spot.
* It is harder to protect the in-process fuzzer from excessive memory
consumption and infinite loops in the target library (although still possible).
* The target library should not have significant global state that is not
reset between the runs.
* Many interesting target libraries are not designed in a way that supports
the in-process fuzzer interface (e.g. require a file path instead of a
byte array).
* If a single test run takes a considerable fraction of a second (or
more) the speed benefit from the in-process fuzzer is negligible.
* If the target library runs persistent threads (that outlive
execution of one test) the fuzzing results will be unreliable.
Q. So, what exactly is this fuzzer good for?
--------------------------------------------
This fuzzer might be a good choice for testing libraries that have relatively
small inputs, where each input takes less than 10 ms to run, and where the library
code is not expected to crash on invalid inputs.
Examples: regular expression matchers, text or binary format parsers, compression,
network, crypto.
Trophies
========
* Thousands of bugs found on OSS-Fuzz: https://opensource.googleblog.com/2017/05/oss-fuzz-five-months-later-and.html
* GLIBC: https://sourceware.org/glibc/wiki/FuzzingLibc
* MUSL LIBC: `[1] <http://git.musl-libc.org/cgit/musl/commit/?id=39dfd58417ef642307d90306e1c7e50aaec5a35c>`__ `[2] <http://www.openwall.com/lists/oss-security/2015/03/30/3>`__
* `pugixml <https://github.com/zeux/pugixml/issues/39>`_
* PCRE: Search for "LLVM fuzzer" in http://vcs.pcre.org/pcre2/code/trunk/ChangeLog?view=markup;
also in `bugzilla <https://bugs.exim.org/buglist.cgi?bug_status=__all__&content=libfuzzer&no_redirect=1&order=Importance&product=PCRE&query_format=specific>`_
* `ICU <http://bugs.icu-project.org/trac/ticket/11838>`_
* `Freetype <https://savannah.nongnu.org/search/?words=LibFuzzer&type_of_search=bugs&Search=Search&exact=1#options>`_
* `Harfbuzz <https://github.com/behdad/harfbuzz/issues/139>`_
* `SQLite <http://www3.sqlite.org/cgi/src/info/088009efdd56160b>`_
* `Python <http://bugs.python.org/issue25388>`_
* OpenSSL/BoringSSL: `[1] <https://boringssl.googlesource.com/boringssl/+/cb852981cd61733a7a1ae4fd8755b7ff950e857d>`_ `[2] <https://openssl.org/news/secadv/20160301.txt>`_ `[3] <https://boringssl.googlesource.com/boringssl/+/2b07fa4b22198ac02e0cee8f37f3337c3dba91bc>`_ `[4] <https://boringssl.googlesource.com/boringssl/+/6b6e0b20893e2be0e68af605a60ffa2cbb0ffa64>`_ `[5] <https://github.com/openssl/openssl/pull/931/commits/dd5ac557f052cc2b7f718ac44a8cb7ac6f77dca8>`_ `[6] <https://github.com/openssl/openssl/pull/931/commits/19b5b9194071d1d84e38ac9a952e715afbc85a81>`_
* `Libxml2
<https://bugzilla.gnome.org/buglist.cgi?bug_status=__all__&content=libFuzzer&list_id=68957&order=Importance&product=libxml2&query_format=specific>`_ and `[HT206167] <https://support.apple.com/en-gb/HT206167>`_ (CVE-2015-5312, CVE-2015-7500, CVE-2015-7942)
* `Linux Kernel's BPF verifier <https://github.com/iovisor/bpf-fuzzer>`_
* `Linux Kernel's Crypto code <https://www.spinics.net/lists/stable/msg199712.html>`_
* Capstone: `[1] <https://github.com/aquynh/capstone/issues/600>`__ `[2] <https://github.com/aquynh/capstone/commit/6b88d1d51eadf7175a8f8a11b690684443b11359>`__
* file:`[1] <http://bugs.gw.com/view.php?id=550>`__ `[2] <http://bugs.gw.com/view.php?id=551>`__ `[3] <http://bugs.gw.com/view.php?id=553>`__ `[4] <http://bugs.gw.com/view.php?id=554>`__
* Radare2: `[1] <https://github.com/revskills?tab=contributions&from=2016-04-09>`__
* gRPC: `[1] <https://github.com/grpc/grpc/pull/6071/commits/df04c1f7f6aec6e95722ec0b023a6b29b6ea871c>`__ `[2] <https://github.com/grpc/grpc/pull/6071/commits/22a3dfd95468daa0db7245a4e8e6679a52847579>`__ `[3] <https://github.com/grpc/grpc/pull/6071/commits/9cac2a12d9e181d130841092e9d40fa3309d7aa7>`__ `[4] <https://github.com/grpc/grpc/pull/6012/commits/82a91c91d01ce9b999c8821ed13515883468e203>`__ `[5] <https://github.com/grpc/grpc/pull/6202/commits/2e3e0039b30edaf89fb93bfb2c1d0909098519fa>`__ `[6] <https://github.com/grpc/grpc/pull/6106/files>`__
* WOFF2: `[1] <https://github.com/google/woff2/commit/a15a8ab>`__
* LLVM: `Clang <https://llvm.org/bugs/show_bug.cgi?id=23057>`_, `Clang-format <https://llvm.org/bugs/show_bug.cgi?id=23052>`_, `libc++ <https://llvm.org/bugs/show_bug.cgi?id=24411>`_, `llvm-as <https://llvm.org/bugs/show_bug.cgi?id=24639>`_, `Demangler <https://bugs.chromium.org/p/chromium/issues/detail?id=606626>`_, Disassembler: http://reviews.llvm.org/rL247405, http://reviews.llvm.org/rL247414, http://reviews.llvm.org/rL247416, http://reviews.llvm.org/rL247417, http://reviews.llvm.org/rL247420, http://reviews.llvm.org/rL247422.
* Tensorflow: `[1] <https://da-data.blogspot.com/2017/01/finding-bugs-in-tensorflow-with.html>`__
* Ffmpeg: `[1] <https://github.com/FFmpeg/FFmpeg/commit/c92f55847a3d9cd12db60bfcd0831ff7f089c37c>`__ `[2] <https://github.com/FFmpeg/FFmpeg/commit/25ab1a65f3acb5ec67b53fb7a2463a7368f1ad16>`__ `[3] <https://github.com/FFmpeg/FFmpeg/commit/85d23e5cbc9ad6835eef870a5b4247de78febe56>`__ `[4] <https://github.com/FFmpeg/FFmpeg/commit/04bd1b38ee6b8df410d0ab8d4949546b6c4af26a>`__
* `Wireshark <https://bugs.wireshark.org/bugzilla/buglist.cgi?bug_status=UNCONFIRMED&bug_status=CONFIRMED&bug_status=IN_PROGRESS&bug_status=INCOMPLETE&bug_status=RESOLVED&bug_status=VERIFIED&f0=OP&f1=OP&f2=product&f3=component&f4=alias&f5=short_desc&f7=content&f8=CP&f9=CP&j1=OR&o2=substring&o3=substring&o4=substring&o5=substring&o6=substring&o7=matches&order=bug_id%20DESC&query_format=advanced&v2=libfuzzer&v3=libfuzzer&v4=libfuzzer&v5=libfuzzer&v6=libfuzzer&v7=%22libfuzzer%22>`_
* `QEMU <https://researchcenter.paloaltonetworks.com/2017/09/unit42-palo-alto-networks-discovers-new-qemu-vulnerability/>`_
.. _pcre2: http://www.pcre.org/
.. _AFL: http://lcamtuf.coredump.cx/afl/
.. _Radamsa: https://github.com/aoh/radamsa
.. _SanitizerCoverage: http://clang.llvm.org/docs/SanitizerCoverage.html
.. _SanitizerCoverageTraceDataFlow: http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-data-flow
.. _AddressSanitizer: http://clang.llvm.org/docs/AddressSanitizer.html
.. _LeakSanitizer: http://clang.llvm.org/docs/LeakSanitizer.html
.. _Heartbleed: http://en.wikipedia.org/wiki/Heartbleed
.. _FuzzerInterface.h: https://github.com/llvm-mirror/compiler-rt/blob/master/lib/fuzzer/FuzzerInterface.h
.. _3.7.0: http://llvm.org/releases/3.7.0/docs/LibFuzzer.html
.. _building Clang from trunk: http://clang.llvm.org/get_started.html
.. _MemorySanitizer: http://clang.llvm.org/docs/MemorySanitizer.html
.. _UndefinedBehaviorSanitizer: http://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
.. _`coverage counters`: http://clang.llvm.org/docs/SanitizerCoverage.html#coverage-counters
.. _`value profile`: #value-profile
.. _`caller-callee pairs`: http://clang.llvm.org/docs/SanitizerCoverage.html#caller-callee-coverage
.. _BoringSSL: https://boringssl.googlesource.com/boringssl/
Index: vendor/llvm/dist-release_80/docs/ReleaseNotes.rst
===================================================================
--- vendor/llvm/dist-release_80/docs/ReleaseNotes.rst (revision 344165)
+++ vendor/llvm/dist-release_80/docs/ReleaseNotes.rst (revision 344166)
@@ -1,182 +1,213 @@
========================
LLVM 8.0.0 Release Notes
========================
.. contents::
:local:
.. warning::
These are in-progress notes for the upcoming LLVM 8 release.
Release notes for previous releases can be found on
`the Download Page <https://releases.llvm.org/download.html>`_.
Introduction
============
This document contains the release notes for the LLVM Compiler Infrastructure,
release 8.0.0. Here we describe the status of LLVM, including major improvements
from the previous release, improvements in various subprojects of LLVM, and
some of the current users of the code. All LLVM releases may be downloaded
from the `LLVM releases web site <https://llvm.org/releases/>`_.
For more information about LLVM, including information about the latest
release, please check out the `main LLVM web site <https://llvm.org/>`_. If you
have questions or comments, the `LLVM Developer's Mailing List
<https://lists.llvm.org/mailman/listinfo/llvm-dev>`_ is a good place to send
them.
Note that if you are reading this file from a Subversion checkout or the main
LLVM web page, this document applies to the *next* release, not the current
one. To see the release notes for a specific release, please see the `releases
page <https://llvm.org/releases/>`_.
Non-comprehensive list of changes in this release
=================================================
.. NOTE
For small 1-3 sentence descriptions, just add an entry at the end of
this list. If your description won't fit comfortably in one bullet
point (e.g. maybe you would like to give an example of the
functionality, or simply have a lot to talk about), see the `NOTE` below
for adding a new subsection.
+* As `discussed on the mailing list
+ <https://lists.llvm.org/pipermail/llvm-dev/2019-January/129452.html>`_,
+ building LLVM will soon require more recent toolchains as follows:
+
+ ============= ====
+ Clang 3.5
+ Apple Clang 6.0
+ GCC 5.1
+ Visual Studio 2017
+ ============= ====
+
+ A new CMake check when configuring LLVM provides a soft-error if your
+ toolchain will become unsupported soon. You can opt out of the soft-error by
+ setting the ``LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN`` CMake variable to
+ ``ON``.
+
* The **llvm-cov** tool can now export lcov trace files using the
`-format=lcov` option of the `export` command.
* The add_llvm_loadable_module CMake macro has been removed. The
add_llvm_library macro with the MODULE argument now provides the same
functionality. See `Writing an LLVM Pass
<WritingAnLLVMPass.html#setting-up-the-build-environment>`_.
* For MinGW, references to data variables that might need to be imported
from a dll are accessed via a stub, to allow the linker to convert it to
a dllimport if needed.
* Added support for labels as offsets in ``.reloc`` directive.
.. NOTE
If you would like to document a larger change, then you can add a
subsection about it right here. You can copy the following boilerplate
and un-indent it (the indentation causes it to be inside this comment).
Special New Feature
-------------------
Makes programs 10x faster by doing Special New Thing.
Changes to the LLVM IR
----------------------
Changes to the AArch64 Target
-----------------------------
* Added support for the ``.arch_extension`` assembler directive, just like
on ARM.
Changes to the ARM Backend
--------------------------
During this release ...
Changes to the Hexagon Target
---------------------------
+-----------------------------
* Added support for Hexagon/HVX V66 ISA.
Changes to the MIPS Target
--------------------------
* Improved support of GlobalISel instruction selection framework.
* Implemented emission of ``R_MIPS_JALR`` and ``R_MICROMIPS_JALR``
relocations. These relocations provide hints to a linker for optimization
of jumps to protected symbols.
* ORC JIT is now supported for MIPS and MIPS64 architectures.
* Assembler now suggests alternative MIPS instruction mnemonics when
an invalid one is specified.
* Improved support for MIPS N32 ABI.
* Added new instructions (``pll.ps``, ``plu.ps``, ``cvt.s.pu``,
``cvt.s.pl``, ``cvt.ps``, ``sigrie``).
* Numerous bug fixes and code cleanups.
Changes to the PowerPC Target
-----------------------------
During this release ...
Changes to the X86 Target
-------------------------
* Machine model for AMD bdver2 (Piledriver) CPU was added. It is used to support
instruction scheduling and other instruction cost heuristics.
Changes to the AMDGPU Target
-----------------------------
During this release ...
Changes to the AVR Target
-----------------------------
During this release ...
Changes to the WebAssembly Target
---------------------------------
The WebAssembly target is no longer "experimental"! It's now built by default,
rather than needing to be enabled with LLVM_EXPERIMENTAL_TARGETS_TO_BUILD.
The object file format and core C ABI are now considered stable. That said,
the object file format has an ABI versioning capability, and one anticipated
use for it will be to add support for returning small structs as multiple
return values, once the underlying WebAssembly platform itself supports it.
Additionally, multithreading support is not yet included in the stable ABI.
Changes to the OCaml bindings
-----------------------------
Changes to the C API
--------------------
Changes to the DAG infrastructure
---------------------------------
External Open Source Projects Using LLVM 8
==========================================
+
+LDC - the LLVM-based D compiler
+-------------------------------
+
+`D <http://dlang.org>`_ is a language with C-like syntax and static typing. It
+pragmatically combines efficiency, control, and modeling power, with safety and
+programmer productivity. D supports powerful concepts like Compile-Time Function
+Execution (CTFE) and Template Meta-Programming, provides an innovative approach
+to concurrency and offers many classical paradigms.
+
+`LDC <http://wiki.dlang.org/LDC>`_ uses the frontend from the reference compiler
+combined with LLVM as backend to produce efficient native code. LDC targets
+x86/x86_64 systems like Linux, OS X, FreeBSD and Windows and also Linux on ARM
+and PowerPC (32/64 bit). Ports to other architectures like AArch64 and MIPS64
+are underway.
Zig Programming Language
------------------------
`Zig <https://ziglang.org>`_ is a system programming language intended to be
an alternative to C. It provides high level features such as generics, compile
time function execution, and partial evaluation, while exposing low level LLVM
IR features such as aliases and intrinsics. Zig uses Clang to provide automatic
import of .h symbols, including inline functions and simple macros. Zig uses
LLD combined with lazily building compiler-rt to provide out-of-the-box
cross-compiling for all supported targets.
Additional Information
======================
A wide variety of additional information is available on the `LLVM web page
<https://llvm.org/>`_, in particular in the `documentation
<https://llvm.org/docs/>`_ section. The web page also contains versions of the
API documentation which is up-to-date with the Subversion version of the source
code. You can access versions of these documents specific to this release by
going into the ``llvm/docs/`` directory in the LLVM tree.
If you have any questions or comments about LLVM, please feel free to contact
us via the `mailing lists <https://llvm.org/docs/#mailing-lists>`_.
Index: vendor/llvm/dist-release_80/include/llvm/BinaryFormat/Wasm.h
===================================================================
--- vendor/llvm/dist-release_80/include/llvm/BinaryFormat/Wasm.h (revision 344165)
+++ vendor/llvm/dist-release_80/include/llvm/BinaryFormat/Wasm.h (revision 344166)
@@ -1,342 +1,344 @@
//===- Wasm.h - Wasm object file format -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines manifest constants for the wasm object file format.
// See: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_BINARYFORMAT_WASM_H
#define LLVM_BINARYFORMAT_WASM_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
namespace llvm {
namespace wasm {
// Object file magic string.
const char WasmMagic[] = {'\0', 'a', 's', 'm'};
// Wasm binary format version
const uint32_t WasmVersion = 0x1;
// Wasm linking metadata version
const uint32_t WasmMetadataVersion = 0x2;
// Wasm uses a 64k page size
const uint32_t WasmPageSize = 65536;
struct WasmObjectHeader {
StringRef Magic;
uint32_t Version;
};
struct WasmDylinkInfo {
uint32_t MemorySize; // Memory size in bytes
uint32_t MemoryAlignment; // P2 alignment of memory
uint32_t TableSize; // Table size in elements
uint32_t TableAlignment; // P2 alignment of table
std::vector<StringRef> Needed; // Shared library dependencies
};
struct WasmExport {
StringRef Name;
uint8_t Kind;
uint32_t Index;
};
struct WasmLimits {
uint8_t Flags;
uint32_t Initial;
uint32_t Maximum;
};
struct WasmTable {
uint8_t ElemType;
WasmLimits Limits;
};
struct WasmInitExpr {
uint8_t Opcode;
union {
int32_t Int32;
int64_t Int64;
int32_t Float32;
int64_t Float64;
uint32_t Global;
} Value;
};
struct WasmGlobalType {
uint8_t Type;
bool Mutable;
};
struct WasmGlobal {
uint32_t Index;
WasmGlobalType Type;
WasmInitExpr InitExpr;
StringRef SymbolName; // from the "linking" section
};
struct WasmEventType {
// Kind of event. Currently only WASM_EVENT_ATTRIBUTE_EXCEPTION is possible.
uint32_t Attribute;
uint32_t SigIndex;
};
struct WasmEvent {
uint32_t Index;
WasmEventType Type;
StringRef SymbolName; // from the "linking" section
};
struct WasmImport {
StringRef Module;
StringRef Field;
uint8_t Kind;
union {
uint32_t SigIndex;
WasmGlobalType Global;
WasmTable Table;
WasmLimits Memory;
WasmEventType Event;
};
};
struct WasmLocalDecl {
uint8_t Type;
uint32_t Count;
};
struct WasmFunction {
uint32_t Index;
std::vector<WasmLocalDecl> Locals;
ArrayRef<uint8_t> Body;
uint32_t CodeSectionOffset;
uint32_t Size;
uint32_t CodeOffset; // start of Locals and Body
StringRef SymbolName; // from the "linking" section
StringRef DebugName; // from the "name" section
uint32_t Comdat; // from the "comdat info" section
};
struct WasmDataSegment {
uint32_t MemoryIndex;
WasmInitExpr Offset;
ArrayRef<uint8_t> Content;
StringRef Name; // from the "segment info" section
uint32_t Alignment;
uint32_t Flags;
uint32_t Comdat; // from the "comdat info" section
};
struct WasmElemSegment {
uint32_t TableIndex;
WasmInitExpr Offset;
std::vector<uint32_t> Functions;
};
// Represents the location of a Wasm data symbol within a WasmDataSegment, as
// the index of the segment, and the offset and size within the segment.
struct WasmDataReference {
uint32_t Segment;
uint32_t Offset;
uint32_t Size;
};
struct WasmRelocation {
uint8_t Type; // The type of the relocation.
uint32_t Index; // Index into either symbol or type index space.
uint64_t Offset; // Offset from the start of the section.
int64_t Addend; // A value to add to the symbol.
};
struct WasmInitFunc {
uint32_t Priority;
uint32_t Symbol;
};
struct WasmSymbolInfo {
StringRef Name;
uint8_t Kind;
uint32_t Flags;
- StringRef Module; // For undefined symbols the module name of the import
+ StringRef ImportModule; // For undefined symbols the module of the import
+ StringRef ImportName; // For undefined symbols the name of the import
union {
// For function or global symbols, the index in function or global index
// space.
uint32_t ElementIndex;
// For data symbols, the address of the data relative to the segment.
WasmDataReference DataRef;
};
};
struct WasmFunctionName {
uint32_t Index;
StringRef Name;
};
struct WasmLinkingData {
uint32_t Version;
std::vector<WasmInitFunc> InitFunctions;
std::vector<StringRef> Comdats;
std::vector<WasmSymbolInfo> SymbolTable;
};
enum : unsigned {
WASM_SEC_CUSTOM = 0, // Custom / User-defined section
WASM_SEC_TYPE = 1, // Function signature declarations
WASM_SEC_IMPORT = 2, // Import declarations
WASM_SEC_FUNCTION = 3, // Function declarations
WASM_SEC_TABLE = 4, // Indirect function table and other tables
WASM_SEC_MEMORY = 5, // Memory attributes
WASM_SEC_GLOBAL = 6, // Global declarations
WASM_SEC_EXPORT = 7, // Exports
WASM_SEC_START = 8, // Start function declaration
WASM_SEC_ELEM = 9, // Elements section
WASM_SEC_CODE = 10, // Function bodies (code)
WASM_SEC_DATA = 11, // Data segments
WASM_SEC_DATACOUNT = 12, // Data segment count
WASM_SEC_EVENT = 13 // Event declarations
};
// Type immediate encodings used in various contexts.
enum : unsigned {
WASM_TYPE_I32 = 0x7F,
WASM_TYPE_I64 = 0x7E,
WASM_TYPE_F32 = 0x7D,
WASM_TYPE_F64 = 0x7C,
WASM_TYPE_V128 = 0x7B,
WASM_TYPE_FUNCREF = 0x70,
WASM_TYPE_EXCEPT_REF = 0x68,
WASM_TYPE_FUNC = 0x60,
WASM_TYPE_NORESULT = 0x40, // for blocks with no result values
};
// Kinds of externals (for imports and exports).
enum : unsigned {
WASM_EXTERNAL_FUNCTION = 0x0,
WASM_EXTERNAL_TABLE = 0x1,
WASM_EXTERNAL_MEMORY = 0x2,
WASM_EXTERNAL_GLOBAL = 0x3,
WASM_EXTERNAL_EVENT = 0x4,
};
// Opcodes used in initializer expressions.
enum : unsigned {
WASM_OPCODE_END = 0x0b,
WASM_OPCODE_GLOBAL_GET = 0x23,
WASM_OPCODE_I32_CONST = 0x41,
WASM_OPCODE_I64_CONST = 0x42,
WASM_OPCODE_F32_CONST = 0x43,
WASM_OPCODE_F64_CONST = 0x44,
};
enum : unsigned {
WASM_LIMITS_FLAG_HAS_MAX = 0x1,
WASM_LIMITS_FLAG_IS_SHARED = 0x2,
};
// Kind codes used in the custom "name" section
enum : unsigned {
WASM_NAMES_FUNCTION = 0x1,
WASM_NAMES_LOCAL = 0x2,
};
// Kind codes used in the custom "linking" section
enum : unsigned {
WASM_SEGMENT_INFO = 0x5,
WASM_INIT_FUNCS = 0x6,
WASM_COMDAT_INFO = 0x7,
WASM_SYMBOL_TABLE = 0x8,
};
// Kind codes used in the custom "linking" section in the WASM_COMDAT_INFO
enum : unsigned {
WASM_COMDAT_DATA = 0x0,
WASM_COMDAT_FUNCTION = 0x1,
};
// Kind codes used in the custom "linking" section in the WASM_SYMBOL_TABLE
enum WasmSymbolType : unsigned {
WASM_SYMBOL_TYPE_FUNCTION = 0x0,
WASM_SYMBOL_TYPE_DATA = 0x1,
WASM_SYMBOL_TYPE_GLOBAL = 0x2,
WASM_SYMBOL_TYPE_SECTION = 0x3,
WASM_SYMBOL_TYPE_EVENT = 0x4,
};
// Kinds of event attributes.
enum WasmEventAttribute : unsigned {
WASM_EVENT_ATTRIBUTE_EXCEPTION = 0x0,
};
const unsigned WASM_SYMBOL_BINDING_MASK = 0x3;
const unsigned WASM_SYMBOL_VISIBILITY_MASK = 0xc;
const unsigned WASM_SYMBOL_BINDING_GLOBAL = 0x0;
const unsigned WASM_SYMBOL_BINDING_WEAK = 0x1;
const unsigned WASM_SYMBOL_BINDING_LOCAL = 0x2;
const unsigned WASM_SYMBOL_VISIBILITY_DEFAULT = 0x0;
const unsigned WASM_SYMBOL_VISIBILITY_HIDDEN = 0x4;
const unsigned WASM_SYMBOL_UNDEFINED = 0x10;
+const unsigned WASM_SYMBOL_EXPLICIT_NAME = 0x40;
#define WASM_RELOC(name, value) name = value,
enum : unsigned {
#include "WasmRelocs.def"
};
#undef WASM_RELOC
// Subset of types that a value can have
enum class ValType {
I32 = WASM_TYPE_I32,
I64 = WASM_TYPE_I64,
F32 = WASM_TYPE_F32,
F64 = WASM_TYPE_F64,
V128 = WASM_TYPE_V128,
EXCEPT_REF = WASM_TYPE_EXCEPT_REF,
};
struct WasmSignature {
SmallVector<wasm::ValType, 1> Returns;
SmallVector<wasm::ValType, 4> Params;
// Support empty and tombstone instances, needed by DenseMap.
enum { Plain, Empty, Tombstone } State = Plain;
WasmSignature(SmallVector<wasm::ValType, 1> &&InReturns,
SmallVector<wasm::ValType, 4> &&InParams)
: Returns(InReturns), Params(InParams) {}
WasmSignature() = default;
};
// Useful comparison operators
inline bool operator==(const WasmSignature &LHS, const WasmSignature &RHS) {
return LHS.State == RHS.State && LHS.Returns == RHS.Returns &&
LHS.Params == RHS.Params;
}
inline bool operator!=(const WasmSignature &LHS, const WasmSignature &RHS) {
return !(LHS == RHS);
}
inline bool operator==(const WasmGlobalType &LHS, const WasmGlobalType &RHS) {
return LHS.Type == RHS.Type && LHS.Mutable == RHS.Mutable;
}
inline bool operator!=(const WasmGlobalType &LHS, const WasmGlobalType &RHS) {
return !(LHS == RHS);
}
std::string toString(wasm::WasmSymbolType type);
std::string relocTypetoString(uint32_t type);
} // end namespace wasm
} // end namespace llvm
#endif
Index: vendor/llvm/dist-release_80/include/llvm/MC/MCSymbolWasm.h
===================================================================
--- vendor/llvm/dist-release_80/include/llvm/MC/MCSymbolWasm.h (revision 344165)
+++ vendor/llvm/dist-release_80/include/llvm/MC/MCSymbolWasm.h (revision 344166)
@@ -1,79 +1,93 @@
//===- MCSymbolWasm.h - ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCSYMBOLWASM_H
#define LLVM_MC_MCSYMBOLWASM_H
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/MC/MCSymbol.h"
namespace llvm {
class MCSymbolWasm : public MCSymbol {
wasm::WasmSymbolType Type = wasm::WASM_SYMBOL_TYPE_DATA;
bool IsWeak = false;
bool IsHidden = false;
bool IsComdat = false;
- std::string ModuleName;
+ Optional<std::string> ImportModule;
+ Optional<std::string> ImportName;
wasm::WasmSignature *Signature = nullptr;
Optional<wasm::WasmGlobalType> GlobalType;
Optional<wasm::WasmEventType> EventType;
/// An expression describing how to calculate the size of a symbol. If a
/// symbol has no size this field will be NULL.
const MCExpr *SymbolSize = nullptr;
public:
// Use a module name of "env" for now, for compatibility with existing tools.
// This is temporary, and may change, as the ABI is not yet stable.
MCSymbolWasm(const StringMapEntry<bool> *Name, bool isTemporary)
- : MCSymbol(SymbolKindWasm, Name, isTemporary), ModuleName("env") {}
+ : MCSymbol(SymbolKindWasm, Name, isTemporary) {}
static bool classof(const MCSymbol *S) { return S->isWasm(); }
const MCExpr *getSize() const { return SymbolSize; }
void setSize(const MCExpr *SS) { SymbolSize = SS; }
bool isFunction() const { return Type == wasm::WASM_SYMBOL_TYPE_FUNCTION; }
bool isData() const { return Type == wasm::WASM_SYMBOL_TYPE_DATA; }
bool isGlobal() const { return Type == wasm::WASM_SYMBOL_TYPE_GLOBAL; }
bool isSection() const { return Type == wasm::WASM_SYMBOL_TYPE_SECTION; }
bool isEvent() const { return Type == wasm::WASM_SYMBOL_TYPE_EVENT; }
wasm::WasmSymbolType getType() const { return Type; }
void setType(wasm::WasmSymbolType type) { Type = type; }
bool isWeak() const { return IsWeak; }
void setWeak(bool isWeak) { IsWeak = isWeak; }
bool isHidden() const { return IsHidden; }
void setHidden(bool isHidden) { IsHidden = isHidden; }
bool isComdat() const { return IsComdat; }
void setComdat(bool isComdat) { IsComdat = isComdat; }
- const StringRef getModuleName() const { return ModuleName; }
- void setModuleName(StringRef Name) { ModuleName = Name; }
+ const StringRef getImportModule() const {
+ if (ImportModule.hasValue()) {
+ return ImportModule.getValue();
+ }
+ return "env";
+ }
+ void setImportModule(StringRef Name) { ImportModule = Name; }
+
+ const StringRef getImportName() const {
+ if (ImportName.hasValue()) {
+ return ImportName.getValue();
+ }
+ return getName();
+ }
+ void setImportName(StringRef Name) { ImportName = Name; }
const wasm::WasmSignature *getSignature() const { return Signature; }
void setSignature(wasm::WasmSignature *Sig) { Signature = Sig; }
const wasm::WasmGlobalType &getGlobalType() const {
assert(GlobalType.hasValue());
return GlobalType.getValue();
}
void setGlobalType(wasm::WasmGlobalType GT) { GlobalType = GT; }
const wasm::WasmEventType &getEventType() const {
assert(EventType.hasValue());
return EventType.getValue();
}
void setEventType(wasm::WasmEventType ET) { EventType = ET; }
};
} // end namespace llvm
#endif // LLVM_MC_MCSYMBOLWASM_H
Index: vendor/llvm/dist-release_80/lib/Analysis/TargetLibraryInfo.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Analysis/TargetLibraryInfo.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Analysis/TargetLibraryInfo.cpp (revision 344166)
@@ -1,1683 +1,1703 @@
//===-- TargetLibraryInfo.cpp - Runtime library information ----------------==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetLibraryInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
static cl::opt<TargetLibraryInfoImpl::VectorLibrary> ClVectorLibrary(
"vector-library", cl::Hidden, cl::desc("Vector functions library"),
cl::init(TargetLibraryInfoImpl::NoLibrary),
cl::values(clEnumValN(TargetLibraryInfoImpl::NoLibrary, "none",
"No vector functions library"),
clEnumValN(TargetLibraryInfoImpl::Accelerate, "Accelerate",
"Accelerate framework"),
clEnumValN(TargetLibraryInfoImpl::SVML, "SVML",
"Intel SVML library")));
StringRef const TargetLibraryInfoImpl::StandardNames[LibFunc::NumLibFuncs] = {
#define TLI_DEFINE_STRING
#include "llvm/Analysis/TargetLibraryInfo.def"
};
static bool hasSinCosPiStret(const Triple &T) {
// Only Darwin variants have _stret versions of combined trig functions.
if (!T.isOSDarwin())
return false;
// The ABI is rather complicated on x86, so don't do anything special there.
if (T.getArch() == Triple::x86)
return false;
if (T.isMacOSX() && T.isMacOSXVersionLT(10, 9))
return false;
if (T.isiOS() && T.isOSVersionLT(7, 0))
return false;
return true;
}
/// Initialize the set of available library functions based on the specified
/// target triple. This should be carefully written so that a missing target
/// triple gets a sane set of defaults.
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T,
ArrayRef<StringRef> StandardNames) {
// Verify that the StandardNames array is in alphabetical order.
assert(std::is_sorted(StandardNames.begin(), StandardNames.end(),
[](StringRef LHS, StringRef RHS) {
return LHS < RHS;
}) &&
"TargetLibraryInfoImpl function names must be sorted");
// Set IO unlocked variants as unavailable
// Set them as available per system below
TLI.setUnavailable(LibFunc_getchar_unlocked);
TLI.setUnavailable(LibFunc_putc_unlocked);
TLI.setUnavailable(LibFunc_putchar_unlocked);
TLI.setUnavailable(LibFunc_fputc_unlocked);
TLI.setUnavailable(LibFunc_fgetc_unlocked);
TLI.setUnavailable(LibFunc_fread_unlocked);
TLI.setUnavailable(LibFunc_fwrite_unlocked);
TLI.setUnavailable(LibFunc_fputs_unlocked);
TLI.setUnavailable(LibFunc_fgets_unlocked);
bool ShouldExtI32Param = false, ShouldExtI32Return = false,
ShouldSignExtI32Param = false;
// PowerPC64, Sparc64, SystemZ need signext/zeroext on i32 parameters and
// returns corresponding to C-level ints and unsigned ints.
if (T.getArch() == Triple::ppc64 || T.getArch() == Triple::ppc64le ||
T.getArch() == Triple::sparcv9 || T.getArch() == Triple::systemz) {
ShouldExtI32Param = true;
ShouldExtI32Return = true;
}
// Mips, on the other hand, needs signext on i32 parameters corresponding
// to both signed and unsigned ints.
if (T.isMIPS()) {
ShouldSignExtI32Param = true;
}
TLI.setShouldExtI32Param(ShouldExtI32Param);
TLI.setShouldExtI32Return(ShouldExtI32Return);
TLI.setShouldSignExtI32Param(ShouldSignExtI32Param);
if (T.getArch() == Triple::r600 ||
T.getArch() == Triple::amdgcn) {
TLI.setUnavailable(LibFunc_ldexp);
TLI.setUnavailable(LibFunc_ldexpf);
TLI.setUnavailable(LibFunc_ldexpl);
TLI.setUnavailable(LibFunc_exp10);
TLI.setUnavailable(LibFunc_exp10f);
TLI.setUnavailable(LibFunc_exp10l);
TLI.setUnavailable(LibFunc_log10);
TLI.setUnavailable(LibFunc_log10f);
TLI.setUnavailable(LibFunc_log10l);
}
// There are no library implementations of memcpy and memset for AMD GPUs and
// these can be difficult to lower in the backend.
if (T.getArch() == Triple::r600 ||
T.getArch() == Triple::amdgcn) {
TLI.setUnavailable(LibFunc_memcpy);
TLI.setUnavailable(LibFunc_memset);
TLI.setUnavailable(LibFunc_memset_pattern16);
return;
}
// memset_pattern16 is only available on iOS 3.0 and Mac OS X 10.5 and later.
// All versions of watchOS support it.
if (T.isMacOSX()) {
// available IO unlocked variants on Mac OS X
TLI.setAvailable(LibFunc_getc_unlocked);
TLI.setAvailable(LibFunc_getchar_unlocked);
TLI.setAvailable(LibFunc_putc_unlocked);
TLI.setAvailable(LibFunc_putchar_unlocked);
if (T.isMacOSXVersionLT(10, 5))
TLI.setUnavailable(LibFunc_memset_pattern16);
} else if (T.isiOS()) {
if (T.isOSVersionLT(3, 0))
TLI.setUnavailable(LibFunc_memset_pattern16);
} else if (!T.isWatchOS()) {
TLI.setUnavailable(LibFunc_memset_pattern16);
}
if (!hasSinCosPiStret(T)) {
TLI.setUnavailable(LibFunc_sinpi);
TLI.setUnavailable(LibFunc_sinpif);
TLI.setUnavailable(LibFunc_cospi);
TLI.setUnavailable(LibFunc_cospif);
TLI.setUnavailable(LibFunc_sincospi_stret);
TLI.setUnavailable(LibFunc_sincospif_stret);
}
if (T.isMacOSX() && T.getArch() == Triple::x86 &&
!T.isMacOSXVersionLT(10, 7)) {
// x86-32 OSX has a scheme where fwrite and fputs (and some other functions
// we don't care about) have two versions; on recent OSX, the one we want
// has a $UNIX2003 suffix. The two implementations are identical except
// for the return value in some edge cases. However, we don't want to
// generate code that depends on the old symbols.
TLI.setAvailableWithName(LibFunc_fwrite, "fwrite$UNIX2003");
TLI.setAvailableWithName(LibFunc_fputs, "fputs$UNIX2003");
}
// iprintf and friends are only available on XCore and TCE.
if (T.getArch() != Triple::xcore && T.getArch() != Triple::tce) {
TLI.setUnavailable(LibFunc_iprintf);
TLI.setUnavailable(LibFunc_siprintf);
TLI.setUnavailable(LibFunc_fiprintf);
}
if (T.isOSWindows() && !T.isOSCygMing()) {
- // Win32 does not support long double
+ // XXX: The earliest documentation available at the moment is for VS2015/VC19:
+ // https://docs.microsoft.com/en-us/cpp/c-runtime-library/floating-point-support?view=vs-2015
+ // XXX: In order to use an MSVCRT older than VC19,
+ // the specific library version must be explicit in the target triple,
+ // e.g., x86_64-pc-windows-msvc18.
+ bool hasPartialC99 = true;
+ if (T.isKnownWindowsMSVCEnvironment()) {
+ unsigned Major, Minor, Micro;
+ T.getEnvironmentVersion(Major, Minor, Micro);
+ hasPartialC99 = (Major == 0 || Major >= 19);
+ }
+
+ // Latest targets support C89 math functions, in part.
+ bool isARM = (T.getArch() == Triple::aarch64 ||
+ T.getArch() == Triple::arm);
+ bool hasPartialFloat = (isARM ||
+ T.getArch() == Triple::x86_64);
+
+ // Win32 does not support float C89 math functions, in general.
+ if (!hasPartialFloat) {
+ TLI.setUnavailable(LibFunc_acosf);
+ TLI.setUnavailable(LibFunc_asinf);
+ TLI.setUnavailable(LibFunc_atan2f);
+ TLI.setUnavailable(LibFunc_atanf);
+ TLI.setUnavailable(LibFunc_ceilf);
+ TLI.setUnavailable(LibFunc_cosf);
+ TLI.setUnavailable(LibFunc_coshf);
+ TLI.setUnavailable(LibFunc_expf);
+ TLI.setUnavailable(LibFunc_floorf);
+ TLI.setUnavailable(LibFunc_fmodf);
+ TLI.setUnavailable(LibFunc_log10f);
+ TLI.setUnavailable(LibFunc_logf);
+ TLI.setUnavailable(LibFunc_modff);
+ TLI.setUnavailable(LibFunc_powf);
+ TLI.setUnavailable(LibFunc_sinf);
+ TLI.setUnavailable(LibFunc_sinhf);
+ TLI.setUnavailable(LibFunc_sqrtf);
+ TLI.setUnavailable(LibFunc_tanf);
+ TLI.setUnavailable(LibFunc_tanhf);
+ }
+ if (!isARM)
+ TLI.setUnavailable(LibFunc_fabsf);
+ TLI.setUnavailable(LibFunc_frexpf);
+ TLI.setUnavailable(LibFunc_ldexpf);
+
+ // Win32 does not support long double C89 math functions.
TLI.setUnavailable(LibFunc_acosl);
TLI.setUnavailable(LibFunc_asinl);
- TLI.setUnavailable(LibFunc_atanl);
TLI.setUnavailable(LibFunc_atan2l);
+ TLI.setUnavailable(LibFunc_atanl);
TLI.setUnavailable(LibFunc_ceill);
- TLI.setUnavailable(LibFunc_copysignl);
TLI.setUnavailable(LibFunc_cosl);
TLI.setUnavailable(LibFunc_coshl);
TLI.setUnavailable(LibFunc_expl);
- TLI.setUnavailable(LibFunc_fabsf); // Win32 and Win64 both lack fabsf
TLI.setUnavailable(LibFunc_fabsl);
TLI.setUnavailable(LibFunc_floorl);
- TLI.setUnavailable(LibFunc_fmaxl);
- TLI.setUnavailable(LibFunc_fminl);
TLI.setUnavailable(LibFunc_fmodl);
TLI.setUnavailable(LibFunc_frexpl);
- TLI.setUnavailable(LibFunc_ldexpf);
TLI.setUnavailable(LibFunc_ldexpl);
+ TLI.setUnavailable(LibFunc_log10l);
TLI.setUnavailable(LibFunc_logl);
TLI.setUnavailable(LibFunc_modfl);
TLI.setUnavailable(LibFunc_powl);
TLI.setUnavailable(LibFunc_sinl);
TLI.setUnavailable(LibFunc_sinhl);
TLI.setUnavailable(LibFunc_sqrtl);
TLI.setUnavailable(LibFunc_tanl);
TLI.setUnavailable(LibFunc_tanhl);
- // Win32 only has C89 math
- TLI.setUnavailable(LibFunc_acosh);
- TLI.setUnavailable(LibFunc_acoshf);
+ // Win32 does not fully support C99 math functions.
+ if (!hasPartialC99) {
+ TLI.setUnavailable(LibFunc_acosh);
+ TLI.setUnavailable(LibFunc_acoshf);
+ TLI.setUnavailable(LibFunc_asinh);
+ TLI.setUnavailable(LibFunc_asinhf);
+ TLI.setUnavailable(LibFunc_atanh);
+ TLI.setUnavailable(LibFunc_atanhf);
+ TLI.setAvailableWithName(LibFunc_cabs, "_cabs");
+ TLI.setUnavailable(LibFunc_cabsf);
+ TLI.setUnavailable(LibFunc_cbrt);
+ TLI.setUnavailable(LibFunc_cbrtf);
+ TLI.setAvailableWithName(LibFunc_copysign, "_copysign");
+ TLI.setAvailableWithName(LibFunc_copysignf, "_copysignf");
+ TLI.setUnavailable(LibFunc_exp2);
+ TLI.setUnavailable(LibFunc_exp2f);
+ TLI.setUnavailable(LibFunc_expm1);
+ TLI.setUnavailable(LibFunc_expm1f);
+ TLI.setUnavailable(LibFunc_fmax);
+ TLI.setUnavailable(LibFunc_fmaxf);
+ TLI.setUnavailable(LibFunc_fmin);
+ TLI.setUnavailable(LibFunc_fminf);
+ TLI.setUnavailable(LibFunc_log1p);
+ TLI.setUnavailable(LibFunc_log1pf);
+ TLI.setUnavailable(LibFunc_log2);
+ TLI.setUnavailable(LibFunc_log2f);
+ TLI.setAvailableWithName(LibFunc_logb, "_logb");
+ if (hasPartialFloat)
+ TLI.setAvailableWithName(LibFunc_logbf, "_logbf");
+ else
+ TLI.setUnavailable(LibFunc_logbf);
+ TLI.setUnavailable(LibFunc_rint);
+ TLI.setUnavailable(LibFunc_rintf);
+ TLI.setUnavailable(LibFunc_round);
+ TLI.setUnavailable(LibFunc_roundf);
+ TLI.setUnavailable(LibFunc_trunc);
+ TLI.setUnavailable(LibFunc_truncf);
+ }
+
+ // Win32 does not support long double C99 math functions.
TLI.setUnavailable(LibFunc_acoshl);
- TLI.setUnavailable(LibFunc_asinh);
- TLI.setUnavailable(LibFunc_asinhf);
TLI.setUnavailable(LibFunc_asinhl);
- TLI.setUnavailable(LibFunc_atanh);
- TLI.setUnavailable(LibFunc_atanhf);
TLI.setUnavailable(LibFunc_atanhl);
- TLI.setUnavailable(LibFunc_cabs);
- TLI.setUnavailable(LibFunc_cabsf);
TLI.setUnavailable(LibFunc_cabsl);
- TLI.setUnavailable(LibFunc_cbrt);
- TLI.setUnavailable(LibFunc_cbrtf);
TLI.setUnavailable(LibFunc_cbrtl);
- TLI.setUnavailable(LibFunc_exp2);
- TLI.setUnavailable(LibFunc_exp2f);
+ TLI.setUnavailable(LibFunc_copysignl);
TLI.setUnavailable(LibFunc_exp2l);
- TLI.setUnavailable(LibFunc_expm1);
- TLI.setUnavailable(LibFunc_expm1f);
TLI.setUnavailable(LibFunc_expm1l);
- TLI.setUnavailable(LibFunc_log2);
- TLI.setUnavailable(LibFunc_log2f);
- TLI.setUnavailable(LibFunc_log2l);
- TLI.setUnavailable(LibFunc_log1p);
- TLI.setUnavailable(LibFunc_log1pf);
+ TLI.setUnavailable(LibFunc_fmaxl);
+ TLI.setUnavailable(LibFunc_fminl);
TLI.setUnavailable(LibFunc_log1pl);
- TLI.setUnavailable(LibFunc_logb);
- TLI.setUnavailable(LibFunc_logbf);
+ TLI.setUnavailable(LibFunc_log2l);
TLI.setUnavailable(LibFunc_logbl);
- TLI.setUnavailable(LibFunc_nearbyint);
- TLI.setUnavailable(LibFunc_nearbyintf);
TLI.setUnavailable(LibFunc_nearbyintl);
- TLI.setUnavailable(LibFunc_rint);
- TLI.setUnavailable(LibFunc_rintf);
TLI.setUnavailable(LibFunc_rintl);
- TLI.setUnavailable(LibFunc_round);
- TLI.setUnavailable(LibFunc_roundf);
TLI.setUnavailable(LibFunc_roundl);
- TLI.setUnavailable(LibFunc_trunc);
- TLI.setUnavailable(LibFunc_truncf);
TLI.setUnavailable(LibFunc_truncl);
- // Win32 provides some C99 math with mangled names
- TLI.setAvailableWithName(LibFunc_copysign, "_copysign");
-
- if (T.getArch() == Triple::x86) {
- // Win32 on x86 implements single-precision math functions as macros
- TLI.setUnavailable(LibFunc_acosf);
- TLI.setUnavailable(LibFunc_asinf);
- TLI.setUnavailable(LibFunc_atanf);
- TLI.setUnavailable(LibFunc_atan2f);
- TLI.setUnavailable(LibFunc_ceilf);
- TLI.setUnavailable(LibFunc_copysignf);
- TLI.setUnavailable(LibFunc_cosf);
- TLI.setUnavailable(LibFunc_coshf);
- TLI.setUnavailable(LibFunc_expf);
- TLI.setUnavailable(LibFunc_floorf);
- TLI.setUnavailable(LibFunc_fminf);
- TLI.setUnavailable(LibFunc_fmaxf);
- TLI.setUnavailable(LibFunc_fmodf);
- TLI.setUnavailable(LibFunc_logf);
- TLI.setUnavailable(LibFunc_log10f);
- TLI.setUnavailable(LibFunc_modff);
- TLI.setUnavailable(LibFunc_powf);
- TLI.setUnavailable(LibFunc_sinf);
- TLI.setUnavailable(LibFunc_sinhf);
- TLI.setUnavailable(LibFunc_sqrtf);
- TLI.setUnavailable(LibFunc_tanf);
- TLI.setUnavailable(LibFunc_tanhf);
- }
-
- // Win32 does *not* provide these functions, but they are
- // generally available on POSIX-compliant systems:
+ // Win32 does not support these functions, but
+ // they are generally available on POSIX-compliant systems.
TLI.setUnavailable(LibFunc_access);
TLI.setUnavailable(LibFunc_bcmp);
TLI.setUnavailable(LibFunc_bcopy);
TLI.setUnavailable(LibFunc_bzero);
TLI.setUnavailable(LibFunc_chmod);
TLI.setUnavailable(LibFunc_chown);
TLI.setUnavailable(LibFunc_closedir);
TLI.setUnavailable(LibFunc_ctermid);
TLI.setUnavailable(LibFunc_fdopen);
TLI.setUnavailable(LibFunc_ffs);
TLI.setUnavailable(LibFunc_fileno);
TLI.setUnavailable(LibFunc_flockfile);
TLI.setUnavailable(LibFunc_fseeko);
TLI.setUnavailable(LibFunc_fstat);
TLI.setUnavailable(LibFunc_fstatvfs);
TLI.setUnavailable(LibFunc_ftello);
TLI.setUnavailable(LibFunc_ftrylockfile);
TLI.setUnavailable(LibFunc_funlockfile);
TLI.setUnavailable(LibFunc_getitimer);
TLI.setUnavailable(LibFunc_getlogin_r);
TLI.setUnavailable(LibFunc_getpwnam);
TLI.setUnavailable(LibFunc_gettimeofday);
TLI.setUnavailable(LibFunc_htonl);
TLI.setUnavailable(LibFunc_htons);
TLI.setUnavailable(LibFunc_lchown);
TLI.setUnavailable(LibFunc_lstat);
TLI.setUnavailable(LibFunc_memccpy);
TLI.setUnavailable(LibFunc_mkdir);
TLI.setUnavailable(LibFunc_ntohl);
TLI.setUnavailable(LibFunc_ntohs);
TLI.setUnavailable(LibFunc_open);
TLI.setUnavailable(LibFunc_opendir);
TLI.setUnavailable(LibFunc_pclose);
TLI.setUnavailable(LibFunc_popen);
TLI.setUnavailable(LibFunc_pread);
TLI.setUnavailable(LibFunc_pwrite);
TLI.setUnavailable(LibFunc_read);
TLI.setUnavailable(LibFunc_readlink);
TLI.setUnavailable(LibFunc_realpath);
TLI.setUnavailable(LibFunc_rmdir);
TLI.setUnavailable(LibFunc_setitimer);
TLI.setUnavailable(LibFunc_stat);
TLI.setUnavailable(LibFunc_statvfs);
TLI.setUnavailable(LibFunc_stpcpy);
TLI.setUnavailable(LibFunc_stpncpy);
TLI.setUnavailable(LibFunc_strcasecmp);
TLI.setUnavailable(LibFunc_strncasecmp);
TLI.setUnavailable(LibFunc_times);
TLI.setUnavailable(LibFunc_uname);
TLI.setUnavailable(LibFunc_unlink);
TLI.setUnavailable(LibFunc_unsetenv);
TLI.setUnavailable(LibFunc_utime);
TLI.setUnavailable(LibFunc_utimes);
TLI.setUnavailable(LibFunc_write);
-
- // Win32 does *not* provide these functions, but they are
- // specified by C99:
- TLI.setUnavailable(LibFunc_atoll);
- TLI.setUnavailable(LibFunc_frexpf);
- TLI.setUnavailable(LibFunc_llabs);
}
switch (T.getOS()) {
case Triple::MacOSX:
// exp10 and exp10f are not available on OS X until 10.9 and iOS until 7.0
// and their names are __exp10 and __exp10f. exp10l is not available on
// OS X or iOS.
TLI.setUnavailable(LibFunc_exp10l);
if (T.isMacOSXVersionLT(10, 9)) {
TLI.setUnavailable(LibFunc_exp10);
TLI.setUnavailable(LibFunc_exp10f);
} else {
TLI.setAvailableWithName(LibFunc_exp10, "__exp10");
TLI.setAvailableWithName(LibFunc_exp10f, "__exp10f");
}
break;
case Triple::IOS:
case Triple::TvOS:
case Triple::WatchOS:
TLI.setUnavailable(LibFunc_exp10l);
if (!T.isWatchOS() && (T.isOSVersionLT(7, 0) ||
(T.isOSVersionLT(9, 0) &&
(T.getArch() == Triple::x86 ||
T.getArch() == Triple::x86_64)))) {
TLI.setUnavailable(LibFunc_exp10);
TLI.setUnavailable(LibFunc_exp10f);
} else {
TLI.setAvailableWithName(LibFunc_exp10, "__exp10");
TLI.setAvailableWithName(LibFunc_exp10f, "__exp10f");
}
break;
case Triple::Linux:
// exp10, exp10f, and exp10l are available on Linux (GLIBC) but are extremely
// buggy prior to glibc version 2.18. Until this version is widely deployed
// or we have a reasonable detection strategy, we cannot use exp10 reliably
// on Linux.
//
// Fall through to disable all of them.
LLVM_FALLTHROUGH;
default:
TLI.setUnavailable(LibFunc_exp10);
TLI.setUnavailable(LibFunc_exp10f);
TLI.setUnavailable(LibFunc_exp10l);
}
// ffsl is available on at least Darwin, Mac OS X, iOS, FreeBSD, and
// Linux (GLIBC):
// http://developer.apple.com/library/mac/#documentation/Darwin/Reference/ManPages/man3/ffsl.3.html
// http://svn.freebsd.org/base/head/lib/libc/string/ffsl.c
// http://www.gnu.org/software/gnulib/manual/html_node/ffsl.html
switch (T.getOS()) {
case Triple::Darwin:
case Triple::MacOSX:
case Triple::IOS:
case Triple::TvOS:
case Triple::WatchOS:
case Triple::FreeBSD:
case Triple::Linux:
break;
default:
TLI.setUnavailable(LibFunc_ffsl);
}
// ffsll is available on at least FreeBSD and Linux (GLIBC):
// http://svn.freebsd.org/base/head/lib/libc/string/ffsll.c
// http://www.gnu.org/software/gnulib/manual/html_node/ffsll.html
switch (T.getOS()) {
case Triple::Darwin:
case Triple::MacOSX:
case Triple::IOS:
case Triple::TvOS:
case Triple::WatchOS:
case Triple::FreeBSD:
case Triple::Linux:
break;
default:
TLI.setUnavailable(LibFunc_ffsll);
}
// The following functions are available on at least FreeBSD:
// http://svn.freebsd.org/base/head/lib/libc/string/fls.c
// http://svn.freebsd.org/base/head/lib/libc/string/flsl.c
// http://svn.freebsd.org/base/head/lib/libc/string/flsll.c
if (!T.isOSFreeBSD()) {
TLI.setUnavailable(LibFunc_fls);
TLI.setUnavailable(LibFunc_flsl);
TLI.setUnavailable(LibFunc_flsll);
}
// The following functions are only available on GNU/Linux (using glibc).
// Linux variants without glibc (e.g., bionic, musl) may have some subset.
if (!T.isOSLinux() || !T.isGNUEnvironment()) {
TLI.setUnavailable(LibFunc_dunder_strdup);
TLI.setUnavailable(LibFunc_dunder_strtok_r);
TLI.setUnavailable(LibFunc_dunder_isoc99_scanf);
TLI.setUnavailable(LibFunc_dunder_isoc99_sscanf);
TLI.setUnavailable(LibFunc_under_IO_getc);
TLI.setUnavailable(LibFunc_under_IO_putc);
// But, Android and musl have memalign.
if (!T.isAndroid() && !T.isMusl())
TLI.setUnavailable(LibFunc_memalign);
TLI.setUnavailable(LibFunc_fopen64);
TLI.setUnavailable(LibFunc_fseeko64);
TLI.setUnavailable(LibFunc_fstat64);
TLI.setUnavailable(LibFunc_fstatvfs64);
TLI.setUnavailable(LibFunc_ftello64);
TLI.setUnavailable(LibFunc_lstat64);
TLI.setUnavailable(LibFunc_open64);
TLI.setUnavailable(LibFunc_stat64);
TLI.setUnavailable(LibFunc_statvfs64);
TLI.setUnavailable(LibFunc_tmpfile64);
// Relaxed math functions are included in math-finite.h on Linux (GLIBC).
TLI.setUnavailable(LibFunc_acos_finite);
TLI.setUnavailable(LibFunc_acosf_finite);
TLI.setUnavailable(LibFunc_acosl_finite);
TLI.setUnavailable(LibFunc_acosh_finite);
TLI.setUnavailable(LibFunc_acoshf_finite);
TLI.setUnavailable(LibFunc_acoshl_finite);
TLI.setUnavailable(LibFunc_asin_finite);
TLI.setUnavailable(LibFunc_asinf_finite);
TLI.setUnavailable(LibFunc_asinl_finite);
TLI.setUnavailable(LibFunc_atan2_finite);
TLI.setUnavailable(LibFunc_atan2f_finite);
TLI.setUnavailable(LibFunc_atan2l_finite);
TLI.setUnavailable(LibFunc_atanh_finite);
TLI.setUnavailable(LibFunc_atanhf_finite);
TLI.setUnavailable(LibFunc_atanhl_finite);
TLI.setUnavailable(LibFunc_cosh_finite);
TLI.setUnavailable(LibFunc_coshf_finite);
TLI.setUnavailable(LibFunc_coshl_finite);
TLI.setUnavailable(LibFunc_exp10_finite);
TLI.setUnavailable(LibFunc_exp10f_finite);
TLI.setUnavailable(LibFunc_exp10l_finite);
TLI.setUnavailable(LibFunc_exp2_finite);
TLI.setUnavailable(LibFunc_exp2f_finite);
TLI.setUnavailable(LibFunc_exp2l_finite);
TLI.setUnavailable(LibFunc_exp_finite);
TLI.setUnavailable(LibFunc_expf_finite);
TLI.setUnavailable(LibFunc_expl_finite);
TLI.setUnavailable(LibFunc_log10_finite);
TLI.setUnavailable(LibFunc_log10f_finite);
TLI.setUnavailable(LibFunc_log10l_finite);
TLI.setUnavailable(LibFunc_log2_finite);
TLI.setUnavailable(LibFunc_log2f_finite);
TLI.setUnavailable(LibFunc_log2l_finite);
TLI.setUnavailable(LibFunc_log_finite);
TLI.setUnavailable(LibFunc_logf_finite);
TLI.setUnavailable(LibFunc_logl_finite);
TLI.setUnavailable(LibFunc_pow_finite);
TLI.setUnavailable(LibFunc_powf_finite);
TLI.setUnavailable(LibFunc_powl_finite);
TLI.setUnavailable(LibFunc_sinh_finite);
TLI.setUnavailable(LibFunc_sinhf_finite);
TLI.setUnavailable(LibFunc_sinhl_finite);
}
if ((T.isOSLinux() && T.isGNUEnvironment()) ||
(T.isAndroid() && !T.isAndroidVersionLT(28))) {
// Unlocked stdio variants are available on GNU/Linux and Android P or later.
TLI.setAvailable(LibFunc_getc_unlocked);
TLI.setAvailable(LibFunc_getchar_unlocked);
TLI.setAvailable(LibFunc_putc_unlocked);
TLI.setAvailable(LibFunc_putchar_unlocked);
TLI.setAvailable(LibFunc_fputc_unlocked);
TLI.setAvailable(LibFunc_fgetc_unlocked);
TLI.setAvailable(LibFunc_fread_unlocked);
TLI.setAvailable(LibFunc_fwrite_unlocked);
TLI.setAvailable(LibFunc_fputs_unlocked);
TLI.setAvailable(LibFunc_fgets_unlocked);
}
// As currently implemented in clang, NVPTX code has no standard library to
// speak of. Headers provide a standard-ish library implementation, but many
// of the signatures are wrong -- for example, many libm functions are not
// extern "C".
//
// libdevice, an IR library provided by nvidia, is linked in by the front-end,
// but only used functions are provided to llvm. Moreover, most of the
// functions in libdevice don't map precisely to standard library functions.
//
// FIXME: Having no standard library prevents e.g. many fastmath
// optimizations, so this situation should be fixed.
if (T.isNVPTX()) {
TLI.disableAllFunctions();
TLI.setAvailable(LibFunc_nvvm_reflect);
} else {
TLI.setUnavailable(LibFunc_nvvm_reflect);
}
TLI.addVectorizableFunctionsFromVecLib(ClVectorLibrary);
}
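// ----------------------------------------------------------------------------
// Illustrative sketch (standalone example, compiled on its own; not part of
// this file or of the upstream change): how a client observes the Windows
// availability rules that initialize() above applies. Assumes the LLVM 8.0
// C++ API; the triples are arbitrary test values.
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  // 32-bit MSVC: the float C89 math functions (sqrtf, sinf, ...) are marked
  // unavailable above, while the double versions stay available.
  TargetLibraryInfoImpl X86Impl(Triple("i686-pc-windows-msvc"));
  TargetLibraryInfo X86TLI(X86Impl);
  outs() << "sqrtf: " << (X86TLI.has(LibFunc_sqrtf) ? "yes" : "no") << "\n"; // no
  outs() << "sqrt:  " << (X86TLI.has(LibFunc_sqrt) ? "yes" : "no") << "\n";  // yes

  // An explicitly pre-VC19 environment also loses the partial C99 set.
  TargetLibraryInfoImpl OldImpl(Triple("x86_64-pc-windows-msvc18"));
  TargetLibraryInfo OldTLI(OldImpl);
  outs() << "cbrt:  " << (OldTLI.has(LibFunc_cbrt) ? "yes" : "no") << "\n";  // no
  return 0;
}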
TargetLibraryInfoImpl::TargetLibraryInfoImpl() {
// Default to everything being available.
memset(AvailableArray, -1, sizeof(AvailableArray));
initialize(*this, Triple(), StandardNames);
}
TargetLibraryInfoImpl::TargetLibraryInfoImpl(const Triple &T) {
// Default to everything being available.
memset(AvailableArray, -1, sizeof(AvailableArray));
initialize(*this, T, StandardNames);
}
TargetLibraryInfoImpl::TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI)
: CustomNames(TLI.CustomNames), ShouldExtI32Param(TLI.ShouldExtI32Param),
ShouldExtI32Return(TLI.ShouldExtI32Return),
ShouldSignExtI32Param(TLI.ShouldSignExtI32Param) {
memcpy(AvailableArray, TLI.AvailableArray, sizeof(AvailableArray));
VectorDescs = TLI.VectorDescs;
ScalarDescs = TLI.ScalarDescs;
}
TargetLibraryInfoImpl::TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI)
: CustomNames(std::move(TLI.CustomNames)),
ShouldExtI32Param(TLI.ShouldExtI32Param),
ShouldExtI32Return(TLI.ShouldExtI32Return),
ShouldSignExtI32Param(TLI.ShouldSignExtI32Param) {
std::move(std::begin(TLI.AvailableArray), std::end(TLI.AvailableArray),
AvailableArray);
VectorDescs = TLI.VectorDescs;
ScalarDescs = TLI.ScalarDescs;
}
TargetLibraryInfoImpl &TargetLibraryInfoImpl::operator=(const TargetLibraryInfoImpl &TLI) {
CustomNames = TLI.CustomNames;
ShouldExtI32Param = TLI.ShouldExtI32Param;
ShouldExtI32Return = TLI.ShouldExtI32Return;
ShouldSignExtI32Param = TLI.ShouldSignExtI32Param;
memcpy(AvailableArray, TLI.AvailableArray, sizeof(AvailableArray));
return *this;
}
TargetLibraryInfoImpl &TargetLibraryInfoImpl::operator=(TargetLibraryInfoImpl &&TLI) {
CustomNames = std::move(TLI.CustomNames);
ShouldExtI32Param = TLI.ShouldExtI32Param;
ShouldExtI32Return = TLI.ShouldExtI32Return;
ShouldSignExtI32Param = TLI.ShouldSignExtI32Param;
std::move(std::begin(TLI.AvailableArray), std::end(TLI.AvailableArray),
AvailableArray);
return *this;
}
static StringRef sanitizeFunctionName(StringRef funcName) {
// Filter out empty names and names containing null bytes; those can't be in
// our table.
if (funcName.empty() || funcName.find('\0') != StringRef::npos)
return StringRef();
// Check for \01 prefix that is used to mangle __asm declarations and
// strip it if present.
return GlobalValue::dropLLVMManglingEscape(funcName);
}
bool TargetLibraryInfoImpl::getLibFunc(StringRef funcName,
LibFunc &F) const {
StringRef const *Start = &StandardNames[0];
StringRef const *End = &StandardNames[NumLibFuncs];
funcName = sanitizeFunctionName(funcName);
if (funcName.empty())
return false;
StringRef const *I = std::lower_bound(
Start, End, funcName, [](StringRef LHS, StringRef RHS) {
return LHS < RHS;
});
if (I != End && *I == funcName) {
F = (LibFunc)(I - Start);
return true;
}
return false;
}
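// ----------------------------------------------------------------------------
// Illustrative sketch (standalone example, not part of this file): the
// name-only lookup above is a binary search over StandardNames and does not
// validate prototypes; the "\01" mangling escape is stripped by
// sanitizeFunctionName(). Assumes the LLVM 8.0 C++ API.
#include "llvm/Analysis/TargetLibraryInfo.h"
using namespace llvm;

int main() {
  TargetLibraryInfoImpl Impl; // default-constructed: generic triple
  LibFunc LF;
  bool A = Impl.getLibFunc("memcpy", LF);        // true, LF == LibFunc_memcpy
  bool B = Impl.getLibFunc("\01memcpy", LF);     // true, escape prefix dropped
  bool C = Impl.getLibFunc("not_a_libc_fn", LF); // false, unknown name
  return (A && B && !C) ? 0 : 1;
}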
bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
LibFunc F,
const DataLayout *DL) const {
LLVMContext &Ctx = FTy.getContext();
Type *PCharTy = Type::getInt8PtrTy(Ctx);
Type *SizeTTy = DL ? DL->getIntPtrType(Ctx, /*AS=*/0) : nullptr;
auto IsSizeTTy = [SizeTTy](Type *Ty) {
return SizeTTy ? Ty == SizeTTy : Ty->isIntegerTy();
};
unsigned NumParams = FTy.getNumParams();
switch (F) {
case LibFunc_execl:
case LibFunc_execlp:
case LibFunc_execle:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy(32));
case LibFunc_execv:
case LibFunc_execvp:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy(32));
case LibFunc_execvP:
case LibFunc_execvpe:
case LibFunc_execve:
return (NumParams == 3 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy() &&
FTy.getParamType(2)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy(32));
case LibFunc_strlen:
return (NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy());
case LibFunc_strchr:
case LibFunc_strrchr:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0) == FTy.getReturnType() &&
FTy.getParamType(1)->isIntegerTy());
case LibFunc_strtol:
case LibFunc_strtod:
case LibFunc_strtof:
case LibFunc_strtoul:
case LibFunc_strtoll:
case LibFunc_strtold:
case LibFunc_strtoull:
return ((NumParams == 2 || NumParams == 3) &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_strcat:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0) == FTy.getReturnType() &&
FTy.getParamType(1) == FTy.getReturnType());
case LibFunc_strncat:
return (NumParams == 3 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0) == FTy.getReturnType() &&
FTy.getParamType(1) == FTy.getReturnType() &&
IsSizeTTy(FTy.getParamType(2)));
case LibFunc_strcpy_chk:
case LibFunc_stpcpy_chk:
--NumParams;
if (!IsSizeTTy(FTy.getParamType(NumParams)))
return false;
LLVM_FALLTHROUGH;
case LibFunc_strcpy:
case LibFunc_stpcpy:
return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(0) == FTy.getParamType(1) &&
FTy.getParamType(0) == PCharTy);
case LibFunc_strncpy_chk:
case LibFunc_stpncpy_chk:
--NumParams;
if (!IsSizeTTy(FTy.getParamType(NumParams)))
return false;
LLVM_FALLTHROUGH;
case LibFunc_strncpy:
case LibFunc_stpncpy:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(0) == FTy.getParamType(1) &&
FTy.getParamType(0) == PCharTy &&
IsSizeTTy(FTy.getParamType(2)));
case LibFunc_strxfrm:
return (NumParams == 3 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_strcmp:
return (NumParams == 2 && FTy.getReturnType()->isIntegerTy(32) &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(0) == FTy.getParamType(1));
case LibFunc_strncmp:
return (NumParams == 3 && FTy.getReturnType()->isIntegerTy(32) &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(0) == FTy.getParamType(1) &&
IsSizeTTy(FTy.getParamType(2)));
case LibFunc_strspn:
case LibFunc_strcspn:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(0) == FTy.getParamType(1) &&
FTy.getReturnType()->isIntegerTy());
case LibFunc_strcoll:
case LibFunc_strcasecmp:
case LibFunc_strncasecmp:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_strstr:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_strpbrk:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(0) == FTy.getParamType(1));
case LibFunc_strtok:
case LibFunc_strtok_r:
return (NumParams >= 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_scanf:
case LibFunc_setbuf:
case LibFunc_setvbuf:
return (NumParams >= 1 && FTy.getParamType(0)->isPointerTy());
case LibFunc_strdup:
case LibFunc_strndup:
return (NumParams >= 1 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0)->isPointerTy());
case LibFunc_sscanf:
case LibFunc_stat:
case LibFunc_statvfs:
case LibFunc_siprintf:
case LibFunc_sprintf:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy(32));
case LibFunc_snprintf:
return (NumParams == 3 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(2)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy(32));
case LibFunc_setitimer:
return (NumParams == 3 && FTy.getParamType(1)->isPointerTy() &&
FTy.getParamType(2)->isPointerTy());
case LibFunc_system:
return (NumParams == 1 && FTy.getParamType(0)->isPointerTy());
case LibFunc_malloc:
return (NumParams == 1 && FTy.getReturnType()->isPointerTy());
case LibFunc_memcmp:
return (NumParams == 3 && FTy.getReturnType()->isIntegerTy(32) &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_memchr:
case LibFunc_memrchr:
return (NumParams == 3 && FTy.getReturnType()->isPointerTy() &&
FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(1)->isIntegerTy(32) &&
IsSizeTTy(FTy.getParamType(2)));
case LibFunc_modf:
case LibFunc_modff:
case LibFunc_modfl:
return (NumParams >= 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_memcpy_chk:
case LibFunc_memmove_chk:
--NumParams;
if (!IsSizeTTy(FTy.getParamType(NumParams)))
return false;
LLVM_FALLTHROUGH;
case LibFunc_memcpy:
case LibFunc_mempcpy:
case LibFunc_memmove:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy() &&
IsSizeTTy(FTy.getParamType(2)));
case LibFunc_memset_chk:
--NumParams;
if (!IsSizeTTy(FTy.getParamType(NumParams)))
return false;
LLVM_FALLTHROUGH;
case LibFunc_memset:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isIntegerTy() &&
IsSizeTTy(FTy.getParamType(2)));
case LibFunc_memccpy:
return (NumParams >= 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_memalign:
return (FTy.getReturnType()->isPointerTy());
case LibFunc_realloc:
case LibFunc_reallocf:
return (NumParams == 2 && FTy.getReturnType() == PCharTy &&
FTy.getParamType(0) == FTy.getReturnType() &&
IsSizeTTy(FTy.getParamType(1)));
case LibFunc_read:
return (NumParams == 3 && FTy.getParamType(1)->isPointerTy());
case LibFunc_rewind:
case LibFunc_rmdir:
case LibFunc_remove:
case LibFunc_realpath:
return (NumParams >= 1 && FTy.getParamType(0)->isPointerTy());
case LibFunc_rename:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_readlink:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_write:
return (NumParams == 3 && FTy.getParamType(1)->isPointerTy());
case LibFunc_bcopy:
case LibFunc_bcmp:
return (NumParams == 3 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_bzero:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy());
case LibFunc_calloc:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy());
case LibFunc_atof:
case LibFunc_atoi:
case LibFunc_atol:
case LibFunc_atoll:
case LibFunc_ferror:
case LibFunc_getenv:
case LibFunc_getpwnam:
case LibFunc_iprintf:
case LibFunc_pclose:
case LibFunc_perror:
case LibFunc_printf:
case LibFunc_puts:
case LibFunc_uname:
case LibFunc_under_IO_getc:
case LibFunc_unlink:
case LibFunc_unsetenv:
return (NumParams == 1 && FTy.getParamType(0)->isPointerTy());
case LibFunc_access:
case LibFunc_chmod:
case LibFunc_chown:
case LibFunc_clearerr:
case LibFunc_closedir:
case LibFunc_ctermid:
case LibFunc_fclose:
case LibFunc_feof:
case LibFunc_fflush:
case LibFunc_fgetc:
case LibFunc_fgetc_unlocked:
case LibFunc_fileno:
case LibFunc_flockfile:
case LibFunc_free:
case LibFunc_fseek:
case LibFunc_fseeko64:
case LibFunc_fseeko:
case LibFunc_fsetpos:
case LibFunc_ftell:
case LibFunc_ftello64:
case LibFunc_ftello:
case LibFunc_ftrylockfile:
case LibFunc_funlockfile:
case LibFunc_getc:
case LibFunc_getc_unlocked:
case LibFunc_getlogin_r:
case LibFunc_mkdir:
case LibFunc_mktime:
case LibFunc_times:
return (NumParams != 0 && FTy.getParamType(0)->isPointerTy());
case LibFunc_fopen:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_fork:
return (NumParams == 0 && FTy.getReturnType()->isIntegerTy(32));
case LibFunc_fdopen:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_fputc:
case LibFunc_fputc_unlocked:
case LibFunc_fstat:
case LibFunc_frexp:
case LibFunc_frexpf:
case LibFunc_frexpl:
case LibFunc_fstatvfs:
return (NumParams == 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_fgets:
case LibFunc_fgets_unlocked:
return (NumParams == 3 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(2)->isPointerTy());
case LibFunc_fread:
case LibFunc_fread_unlocked:
return (NumParams == 4 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(3)->isPointerTy());
case LibFunc_fwrite:
case LibFunc_fwrite_unlocked:
return (NumParams == 4 && FTy.getReturnType()->isIntegerTy() &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isIntegerTy() &&
FTy.getParamType(2)->isIntegerTy() &&
FTy.getParamType(3)->isPointerTy());
case LibFunc_fputs:
case LibFunc_fputs_unlocked:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_fscanf:
case LibFunc_fiprintf:
case LibFunc_fprintf:
return (NumParams >= 2 && FTy.getReturnType()->isIntegerTy() &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_fgetpos:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_getchar:
case LibFunc_getchar_unlocked:
return (NumParams == 0 && FTy.getReturnType()->isIntegerTy());
case LibFunc_gets:
return (NumParams == 1 && FTy.getParamType(0) == PCharTy);
case LibFunc_getitimer:
return (NumParams == 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_ungetc:
return (NumParams == 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_utime:
case LibFunc_utimes:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_putc:
case LibFunc_putc_unlocked:
return (NumParams == 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_pread:
case LibFunc_pwrite:
return (NumParams == 4 && FTy.getParamType(1)->isPointerTy());
case LibFunc_popen:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_vscanf:
return (NumParams == 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_vsscanf:
return (NumParams == 3 && FTy.getParamType(1)->isPointerTy() &&
FTy.getParamType(2)->isPointerTy());
case LibFunc_vfscanf:
return (NumParams == 3 && FTy.getParamType(1)->isPointerTy() &&
FTy.getParamType(2)->isPointerTy());
case LibFunc_valloc:
return (FTy.getReturnType()->isPointerTy());
case LibFunc_vprintf:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy());
case LibFunc_vfprintf:
case LibFunc_vsprintf:
return (NumParams == 3 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_vsnprintf:
return (NumParams == 4 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(2)->isPointerTy());
case LibFunc_open:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy());
case LibFunc_opendir:
return (NumParams == 1 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0)->isPointerTy());
case LibFunc_tmpfile:
return (FTy.getReturnType()->isPointerTy());
case LibFunc_htonl:
case LibFunc_ntohl:
return (NumParams == 1 && FTy.getReturnType()->isIntegerTy(32) &&
FTy.getReturnType() == FTy.getParamType(0));
case LibFunc_htons:
case LibFunc_ntohs:
return (NumParams == 1 && FTy.getReturnType()->isIntegerTy(16) &&
FTy.getReturnType() == FTy.getParamType(0));
case LibFunc_lstat:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_lchown:
return (NumParams == 3 && FTy.getParamType(0)->isPointerTy());
case LibFunc_qsort:
return (NumParams == 4 && FTy.getParamType(3)->isPointerTy());
case LibFunc_dunder_strdup:
case LibFunc_dunder_strndup:
return (NumParams >= 1 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0)->isPointerTy());
case LibFunc_dunder_strtok_r:
return (NumParams == 3 && FTy.getParamType(1)->isPointerTy());
case LibFunc_under_IO_putc:
return (NumParams == 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_dunder_isoc99_scanf:
return (NumParams >= 1 && FTy.getParamType(0)->isPointerTy());
case LibFunc_stat64:
case LibFunc_lstat64:
case LibFunc_statvfs64:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_dunder_isoc99_sscanf:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_fopen64:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
case LibFunc_tmpfile64:
return (FTy.getReturnType()->isPointerTy());
case LibFunc_fstat64:
case LibFunc_fstatvfs64:
return (NumParams == 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_open64:
return (NumParams >= 2 && FTy.getParamType(0)->isPointerTy());
case LibFunc_gettimeofday:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy());
// new(unsigned int);
case LibFunc_Znwj:
// new(unsigned long);
case LibFunc_Znwm:
// new[](unsigned int);
case LibFunc_Znaj:
// new[](unsigned long);
case LibFunc_Znam:
// new(unsigned int);
case LibFunc_msvc_new_int:
// new(unsigned long long);
case LibFunc_msvc_new_longlong:
// new[](unsigned int);
case LibFunc_msvc_new_array_int:
// new[](unsigned long long);
case LibFunc_msvc_new_array_longlong:
return (NumParams == 1 && FTy.getReturnType()->isPointerTy());
// new(unsigned int, nothrow);
case LibFunc_ZnwjRKSt9nothrow_t:
// new(unsigned long, nothrow);
case LibFunc_ZnwmRKSt9nothrow_t:
// new[](unsigned int, nothrow);
case LibFunc_ZnajRKSt9nothrow_t:
// new[](unsigned long, nothrow);
case LibFunc_ZnamRKSt9nothrow_t:
// new(unsigned int, nothrow);
case LibFunc_msvc_new_int_nothrow:
// new(unsigned long long, nothrow);
case LibFunc_msvc_new_longlong_nothrow:
// new[](unsigned int, nothrow);
case LibFunc_msvc_new_array_int_nothrow:
// new[](unsigned long long, nothrow);
case LibFunc_msvc_new_array_longlong_nothrow:
// new(unsigned int, align_val_t)
case LibFunc_ZnwjSt11align_val_t:
// new(unsigned long, align_val_t)
case LibFunc_ZnwmSt11align_val_t:
// new[](unsigned int, align_val_t)
case LibFunc_ZnajSt11align_val_t:
// new[](unsigned long, align_val_t)
case LibFunc_ZnamSt11align_val_t:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy());
// new(unsigned int, align_val_t, nothrow)
case LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t:
// new(unsigned long, align_val_t, nothrow)
case LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t:
// new[](unsigned int, align_val_t, nothrow)
case LibFunc_ZnajSt11align_val_tRKSt9nothrow_t:
// new[](unsigned long, align_val_t, nothrow)
case LibFunc_ZnamSt11align_val_tRKSt9nothrow_t:
return (NumParams == 3 && FTy.getReturnType()->isPointerTy());
// void operator delete[](void*);
case LibFunc_ZdaPv:
// void operator delete(void*);
case LibFunc_ZdlPv:
// void operator delete[](void*);
case LibFunc_msvc_delete_array_ptr32:
// void operator delete[](void*);
case LibFunc_msvc_delete_array_ptr64:
// void operator delete(void*);
case LibFunc_msvc_delete_ptr32:
// void operator delete(void*);
case LibFunc_msvc_delete_ptr64:
return (NumParams == 1 && FTy.getParamType(0)->isPointerTy());
// void operator delete[](void*, nothrow);
case LibFunc_ZdaPvRKSt9nothrow_t:
// void operator delete[](void*, unsigned int);
case LibFunc_ZdaPvj:
// void operator delete[](void*, unsigned long);
case LibFunc_ZdaPvm:
// void operator delete(void*, nothrow);
case LibFunc_ZdlPvRKSt9nothrow_t:
// void operator delete(void*, unsigned int);
case LibFunc_ZdlPvj:
// void operator delete(void*, unsigned long);
case LibFunc_ZdlPvm:
// void operator delete(void*, align_val_t)
case LibFunc_ZdlPvSt11align_val_t:
// void operator delete[](void*, align_val_t)
case LibFunc_ZdaPvSt11align_val_t:
// void operator delete[](void*, unsigned int);
case LibFunc_msvc_delete_array_ptr32_int:
// void operator delete[](void*, nothrow);
case LibFunc_msvc_delete_array_ptr32_nothrow:
// void operator delete[](void*, unsigned long long);
case LibFunc_msvc_delete_array_ptr64_longlong:
// void operator delete[](void*, nothrow);
case LibFunc_msvc_delete_array_ptr64_nothrow:
// void operator delete(void*, unsigned int);
case LibFunc_msvc_delete_ptr32_int:
// void operator delete(void*, nothrow);
case LibFunc_msvc_delete_ptr32_nothrow:
// void operator delete(void*, unsigned long long);
case LibFunc_msvc_delete_ptr64_longlong:
// void operator delete(void*, nothrow);
case LibFunc_msvc_delete_ptr64_nothrow:
return (NumParams == 2 && FTy.getParamType(0)->isPointerTy());
// void operator delete(void*, align_val_t, nothrow)
case LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t:
// void operator delete[](void*, align_val_t, nothrow)
case LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t:
return (NumParams == 3 && FTy.getParamType(0)->isPointerTy());
case LibFunc_memset_pattern16:
return (!FTy.isVarArg() && NumParams == 3 &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy() &&
FTy.getParamType(2)->isIntegerTy());
case LibFunc_cxa_guard_abort:
case LibFunc_cxa_guard_acquire:
case LibFunc_cxa_guard_release:
case LibFunc_nvvm_reflect:
return (NumParams == 1 && FTy.getParamType(0)->isPointerTy());
case LibFunc_sincospi_stret:
case LibFunc_sincospif_stret:
return (NumParams == 1 && FTy.getParamType(0)->isFloatingPointTy());
case LibFunc_acos:
case LibFunc_acos_finite:
case LibFunc_acosf:
case LibFunc_acosf_finite:
case LibFunc_acosh:
case LibFunc_acosh_finite:
case LibFunc_acoshf:
case LibFunc_acoshf_finite:
case LibFunc_acoshl:
case LibFunc_acoshl_finite:
case LibFunc_acosl:
case LibFunc_acosl_finite:
case LibFunc_asin:
case LibFunc_asin_finite:
case LibFunc_asinf:
case LibFunc_asinf_finite:
case LibFunc_asinh:
case LibFunc_asinhf:
case LibFunc_asinhl:
case LibFunc_asinl:
case LibFunc_asinl_finite:
case LibFunc_atan:
case LibFunc_atanf:
case LibFunc_atanh:
case LibFunc_atanh_finite:
case LibFunc_atanhf:
case LibFunc_atanhf_finite:
case LibFunc_atanhl:
case LibFunc_atanhl_finite:
case LibFunc_atanl:
case LibFunc_cbrt:
case LibFunc_cbrtf:
case LibFunc_cbrtl:
case LibFunc_ceil:
case LibFunc_ceilf:
case LibFunc_ceill:
case LibFunc_cos:
case LibFunc_cosf:
case LibFunc_cosh:
case LibFunc_cosh_finite:
case LibFunc_coshf:
case LibFunc_coshf_finite:
case LibFunc_coshl:
case LibFunc_coshl_finite:
case LibFunc_cosl:
case LibFunc_exp10:
case LibFunc_exp10_finite:
case LibFunc_exp10f:
case LibFunc_exp10f_finite:
case LibFunc_exp10l:
case LibFunc_exp10l_finite:
case LibFunc_exp2:
case LibFunc_exp2_finite:
case LibFunc_exp2f:
case LibFunc_exp2f_finite:
case LibFunc_exp2l:
case LibFunc_exp2l_finite:
case LibFunc_exp:
case LibFunc_exp_finite:
case LibFunc_expf:
case LibFunc_expf_finite:
case LibFunc_expl:
case LibFunc_expl_finite:
case LibFunc_expm1:
case LibFunc_expm1f:
case LibFunc_expm1l:
case LibFunc_fabs:
case LibFunc_fabsf:
case LibFunc_fabsl:
case LibFunc_floor:
case LibFunc_floorf:
case LibFunc_floorl:
case LibFunc_log10:
case LibFunc_log10_finite:
case LibFunc_log10f:
case LibFunc_log10f_finite:
case LibFunc_log10l:
case LibFunc_log10l_finite:
case LibFunc_log1p:
case LibFunc_log1pf:
case LibFunc_log1pl:
case LibFunc_log2:
case LibFunc_log2_finite:
case LibFunc_log2f:
case LibFunc_log2f_finite:
case LibFunc_log2l:
case LibFunc_log2l_finite:
case LibFunc_log:
case LibFunc_log_finite:
case LibFunc_logb:
case LibFunc_logbf:
case LibFunc_logbl:
case LibFunc_logf:
case LibFunc_logf_finite:
case LibFunc_logl:
case LibFunc_logl_finite:
case LibFunc_nearbyint:
case LibFunc_nearbyintf:
case LibFunc_nearbyintl:
case LibFunc_rint:
case LibFunc_rintf:
case LibFunc_rintl:
case LibFunc_round:
case LibFunc_roundf:
case LibFunc_roundl:
case LibFunc_sin:
case LibFunc_sinf:
case LibFunc_sinh:
case LibFunc_sinh_finite:
case LibFunc_sinhf:
case LibFunc_sinhf_finite:
case LibFunc_sinhl:
case LibFunc_sinhl_finite:
case LibFunc_sinl:
case LibFunc_sqrt:
case LibFunc_sqrt_finite:
case LibFunc_sqrtf:
case LibFunc_sqrtf_finite:
case LibFunc_sqrtl:
case LibFunc_sqrtl_finite:
case LibFunc_tan:
case LibFunc_tanf:
case LibFunc_tanh:
case LibFunc_tanhf:
case LibFunc_tanhl:
case LibFunc_tanl:
case LibFunc_trunc:
case LibFunc_truncf:
case LibFunc_truncl:
return (NumParams == 1 && FTy.getReturnType()->isFloatingPointTy() &&
FTy.getReturnType() == FTy.getParamType(0));
case LibFunc_atan2:
case LibFunc_atan2_finite:
case LibFunc_atan2f:
case LibFunc_atan2f_finite:
case LibFunc_atan2l:
case LibFunc_atan2l_finite:
case LibFunc_fmin:
case LibFunc_fminf:
case LibFunc_fminl:
case LibFunc_fmax:
case LibFunc_fmaxf:
case LibFunc_fmaxl:
case LibFunc_fmod:
case LibFunc_fmodf:
case LibFunc_fmodl:
case LibFunc_copysign:
case LibFunc_copysignf:
case LibFunc_copysignl:
case LibFunc_pow:
case LibFunc_pow_finite:
case LibFunc_powf:
case LibFunc_powf_finite:
case LibFunc_powl:
case LibFunc_powl_finite:
return (NumParams == 2 && FTy.getReturnType()->isFloatingPointTy() &&
FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getReturnType() == FTy.getParamType(1));
case LibFunc_ldexp:
case LibFunc_ldexpf:
case LibFunc_ldexpl:
return (NumParams == 2 && FTy.getReturnType()->isFloatingPointTy() &&
FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(1)->isIntegerTy(32));
case LibFunc_ffs:
case LibFunc_ffsl:
case LibFunc_ffsll:
case LibFunc_fls:
case LibFunc_flsl:
case LibFunc_flsll:
return (NumParams == 1 && FTy.getReturnType()->isIntegerTy(32) &&
FTy.getParamType(0)->isIntegerTy());
case LibFunc_isdigit:
case LibFunc_isascii:
case LibFunc_toascii:
case LibFunc_putchar:
case LibFunc_putchar_unlocked:
return (NumParams == 1 && FTy.getReturnType()->isIntegerTy(32) &&
FTy.getReturnType() == FTy.getParamType(0));
case LibFunc_abs:
case LibFunc_labs:
case LibFunc_llabs:
return (NumParams == 1 && FTy.getReturnType()->isIntegerTy() &&
FTy.getReturnType() == FTy.getParamType(0));
case LibFunc_cxa_atexit:
return (NumParams == 3 && FTy.getReturnType()->isIntegerTy() &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1)->isPointerTy() &&
FTy.getParamType(2)->isPointerTy());
case LibFunc_sinpi:
case LibFunc_cospi:
return (NumParams == 1 && FTy.getReturnType()->isDoubleTy() &&
FTy.getReturnType() == FTy.getParamType(0));
case LibFunc_sinpif:
case LibFunc_cospif:
return (NumParams == 1 && FTy.getReturnType()->isFloatTy() &&
FTy.getReturnType() == FTy.getParamType(0));
case LibFunc_strnlen:
return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(1) &&
FTy.getParamType(0) == PCharTy &&
FTy.getParamType(1) == SizeTTy);
case LibFunc_posix_memalign:
return (NumParams == 3 && FTy.getReturnType()->isIntegerTy(32) &&
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1) == SizeTTy && FTy.getParamType(2) == SizeTTy);
case LibFunc_wcslen:
return (NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy());
case LibFunc_cabs:
case LibFunc_cabsf:
case LibFunc_cabsl: {
Type* RetTy = FTy.getReturnType();
if (!RetTy->isFloatingPointTy())
return false;
// NOTE: These prototypes are target specific and currently support
// "complex" passed as an array or discrete real & imaginary parameters.
// Add other calling conventions to enable libcall optimizations.
if (NumParams == 1)
return (FTy.getParamType(0)->isArrayTy() &&
FTy.getParamType(0)->getArrayNumElements() == 2 &&
FTy.getParamType(0)->getArrayElementType() == RetTy);
else if (NumParams == 2)
return (FTy.getParamType(0) == RetTy && FTy.getParamType(1) == RetTy);
else
return false;
}
case LibFunc::NumLibFuncs:
break;
}
llvm_unreachable("Invalid libfunc");
}
bool TargetLibraryInfoImpl::getLibFunc(const Function &FDecl,
LibFunc &F) const {
const DataLayout *DL =
FDecl.getParent() ? &FDecl.getParent()->getDataLayout() : nullptr;
return getLibFunc(FDecl.getName(), F) &&
isValidProtoForLibFunc(*FDecl.getFunctionType(), F, DL);
}
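// ----------------------------------------------------------------------------
// Illustrative sketch (standalone example, not part of this file): the
// Function overload above also checks the declared prototype through
// isValidProtoForLibFunc(), so a name match alone is not enough. Assumes the
// LLVM 8.0 C++ API; module and function names are arbitrary.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module Good("good", Ctx), Bad("bad", Ctx);

  // declare i64 @strlen(i8*) -- matches the LibFunc_strlen shape checked above.
  Function *GoodF = Function::Create(
      FunctionType::get(Type::getInt64Ty(Ctx), {Type::getInt8PtrTy(Ctx)}, false),
      Function::ExternalLinkage, "strlen", &Good);

  // declare double @strlen(i8*) -- right name, wrong return type.
  Function *BadF = Function::Create(
      FunctionType::get(Type::getDoubleTy(Ctx), {Type::getInt8PtrTy(Ctx)}, false),
      Function::ExternalLinkage, "strlen", &Bad);

  TargetLibraryInfoImpl Impl;
  LibFunc LF;
  bool GoodOK = Impl.getLibFunc(*GoodF, LF); // true, LF == LibFunc_strlen
  bool BadOK = Impl.getLibFunc(*BadF, LF);   // false, prototype rejected
  return (GoodOK && !BadOK) ? 0 : 1;
}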
void TargetLibraryInfoImpl::disableAllFunctions() {
memset(AvailableArray, 0, sizeof(AvailableArray));
}
static bool compareByScalarFnName(const VecDesc &LHS, const VecDesc &RHS) {
return LHS.ScalarFnName < RHS.ScalarFnName;
}
static bool compareByVectorFnName(const VecDesc &LHS, const VecDesc &RHS) {
return LHS.VectorFnName < RHS.VectorFnName;
}
static bool compareWithScalarFnName(const VecDesc &LHS, StringRef S) {
return LHS.ScalarFnName < S;
}
static bool compareWithVectorFnName(const VecDesc &LHS, StringRef S) {
return LHS.VectorFnName < S;
}
void TargetLibraryInfoImpl::addVectorizableFunctions(ArrayRef<VecDesc> Fns) {
VectorDescs.insert(VectorDescs.end(), Fns.begin(), Fns.end());
llvm::sort(VectorDescs, compareByScalarFnName);
ScalarDescs.insert(ScalarDescs.end(), Fns.begin(), Fns.end());
llvm::sort(ScalarDescs, compareByVectorFnName);
}
void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
enum VectorLibrary VecLib) {
switch (VecLib) {
case Accelerate: {
const VecDesc VecFuncs[] = {
// Floating-Point Arithmetic and Auxiliary Functions
{"ceilf", "vceilf", 4},
{"fabsf", "vfabsf", 4},
{"llvm.fabs.f32", "vfabsf", 4},
{"floorf", "vfloorf", 4},
{"sqrtf", "vsqrtf", 4},
{"llvm.sqrt.f32", "vsqrtf", 4},
// Exponential and Logarithmic Functions
{"expf", "vexpf", 4},
{"llvm.exp.f32", "vexpf", 4},
{"expm1f", "vexpm1f", 4},
{"logf", "vlogf", 4},
{"llvm.log.f32", "vlogf", 4},
{"log1pf", "vlog1pf", 4},
{"log10f", "vlog10f", 4},
{"llvm.log10.f32", "vlog10f", 4},
{"logbf", "vlogbf", 4},
// Trigonometric Functions
{"sinf", "vsinf", 4},
{"llvm.sin.f32", "vsinf", 4},
{"cosf", "vcosf", 4},
{"llvm.cos.f32", "vcosf", 4},
{"tanf", "vtanf", 4},
{"asinf", "vasinf", 4},
{"acosf", "vacosf", 4},
{"atanf", "vatanf", 4},
// Hyperbolic Functions
{"sinhf", "vsinhf", 4},
{"coshf", "vcoshf", 4},
{"tanhf", "vtanhf", 4},
{"asinhf", "vasinhf", 4},
{"acoshf", "vacoshf", 4},
{"atanhf", "vatanhf", 4},
};
addVectorizableFunctions(VecFuncs);
break;
}
case SVML: {
const VecDesc VecFuncs[] = {
{"sin", "__svml_sin2", 2},
{"sin", "__svml_sin4", 4},
{"sin", "__svml_sin8", 8},
{"sinf", "__svml_sinf4", 4},
{"sinf", "__svml_sinf8", 8},
{"sinf", "__svml_sinf16", 16},
{"llvm.sin.f64", "__svml_sin2", 2},
{"llvm.sin.f64", "__svml_sin4", 4},
{"llvm.sin.f64", "__svml_sin8", 8},
{"llvm.sin.f32", "__svml_sinf4", 4},
{"llvm.sin.f32", "__svml_sinf8", 8},
{"llvm.sin.f32", "__svml_sinf16", 16},
{"cos", "__svml_cos2", 2},
{"cos", "__svml_cos4", 4},
{"cos", "__svml_cos8", 8},
{"cosf", "__svml_cosf4", 4},
{"cosf", "__svml_cosf8", 8},
{"cosf", "__svml_cosf16", 16},
{"llvm.cos.f64", "__svml_cos2", 2},
{"llvm.cos.f64", "__svml_cos4", 4},
{"llvm.cos.f64", "__svml_cos8", 8},
{"llvm.cos.f32", "__svml_cosf4", 4},
{"llvm.cos.f32", "__svml_cosf8", 8},
{"llvm.cos.f32", "__svml_cosf16", 16},
{"pow", "__svml_pow2", 2},
{"pow", "__svml_pow4", 4},
{"pow", "__svml_pow8", 8},
{"powf", "__svml_powf4", 4},
{"powf", "__svml_powf8", 8},
{"powf", "__svml_powf16", 16},
{ "__pow_finite", "__svml_pow2", 2 },
{ "__pow_finite", "__svml_pow4", 4 },
{ "__pow_finite", "__svml_pow8", 8 },
{ "__powf_finite", "__svml_powf4", 4 },
{ "__powf_finite", "__svml_powf8", 8 },
{ "__powf_finite", "__svml_powf16", 16 },
{"llvm.pow.f64", "__svml_pow2", 2},
{"llvm.pow.f64", "__svml_pow4", 4},
{"llvm.pow.f64", "__svml_pow8", 8},
{"llvm.pow.f32", "__svml_powf4", 4},
{"llvm.pow.f32", "__svml_powf8", 8},
{"llvm.pow.f32", "__svml_powf16", 16},
{"exp", "__svml_exp2", 2},
{"exp", "__svml_exp4", 4},
{"exp", "__svml_exp8", 8},
{"expf", "__svml_expf4", 4},
{"expf", "__svml_expf8", 8},
{"expf", "__svml_expf16", 16},
{ "__exp_finite", "__svml_exp2", 2 },
{ "__exp_finite", "__svml_exp4", 4 },
{ "__exp_finite", "__svml_exp8", 8 },
{ "__expf_finite", "__svml_expf4", 4 },
{ "__expf_finite", "__svml_expf8", 8 },
{ "__expf_finite", "__svml_expf16", 16 },
{"llvm.exp.f64", "__svml_exp2", 2},
{"llvm.exp.f64", "__svml_exp4", 4},
{"llvm.exp.f64", "__svml_exp8", 8},
{"llvm.exp.f32", "__svml_expf4", 4},
{"llvm.exp.f32", "__svml_expf8", 8},
{"llvm.exp.f32", "__svml_expf16", 16},
{"log", "__svml_log2", 2},
{"log", "__svml_log4", 4},
{"log", "__svml_log8", 8},
{"logf", "__svml_logf4", 4},
{"logf", "__svml_logf8", 8},
{"logf", "__svml_logf16", 16},
{ "__log_finite", "__svml_log2", 2 },
{ "__log_finite", "__svml_log4", 4 },
{ "__log_finite", "__svml_log8", 8 },
{ "__logf_finite", "__svml_logf4", 4 },
{ "__logf_finite", "__svml_logf8", 8 },
{ "__logf_finite", "__svml_logf16", 16 },
{"llvm.log.f64", "__svml_log2", 2},
{"llvm.log.f64", "__svml_log4", 4},
{"llvm.log.f64", "__svml_log8", 8},
{"llvm.log.f32", "__svml_logf4", 4},
{"llvm.log.f32", "__svml_logf8", 8},
{"llvm.log.f32", "__svml_logf16", 16},
};
addVectorizableFunctions(VecFuncs);
break;
}
case NoLibrary:
break;
}
}
bool TargetLibraryInfoImpl::isFunctionVectorizable(StringRef funcName) const {
funcName = sanitizeFunctionName(funcName);
if (funcName.empty())
return false;
std::vector<VecDesc>::const_iterator I = std::lower_bound(
VectorDescs.begin(), VectorDescs.end(), funcName,
compareWithScalarFnName);
return I != VectorDescs.end() && StringRef(I->ScalarFnName) == funcName;
}
StringRef TargetLibraryInfoImpl::getVectorizedFunction(StringRef F,
unsigned VF) const {
F = sanitizeFunctionName(F);
if (F.empty())
return F;
std::vector<VecDesc>::const_iterator I = std::lower_bound(
VectorDescs.begin(), VectorDescs.end(), F, compareWithScalarFnName);
while (I != VectorDescs.end() && StringRef(I->ScalarFnName) == F) {
if (I->VectorizationFactor == VF)
return I->VectorFnName;
++I;
}
return StringRef();
}
StringRef TargetLibraryInfoImpl::getScalarizedFunction(StringRef F,
unsigned &VF) const {
F = sanitizeFunctionName(F);
if (F.empty())
return F;
std::vector<VecDesc>::const_iterator I = std::lower_bound(
ScalarDescs.begin(), ScalarDescs.end(), F, compareWithVectorFnName);
if (I == ScalarDescs.end() || StringRef(I->VectorFnName) != F)
return StringRef();
VF = I->VectorizationFactor;
return I->ScalarFnName;
}
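// ----------------------------------------------------------------------------
// Illustrative sketch (standalone example, not part of this file): the
// scalar<->vector name maps populated by addVectorizableFunctionsFromVecLib()
// above, queried in both directions. Assumes the LLVM 8.0 C++ API.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  TargetLibraryInfoImpl Impl;
  Impl.addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML);

  // Scalar name -> vector name, keyed by vectorization factor.
  outs() << Impl.getVectorizedFunction("sinf", /*VF=*/8) << "\n"; // __svml_sinf8
  outs() << Impl.getVectorizedFunction("sinf", /*VF=*/3) << "\n"; // "" (no entry)
  outs() << (Impl.isFunctionVectorizable("llvm.cos.f64") ? "yes" : "no") << "\n"; // yes

  // Vector name -> scalar name, recovering the vectorization factor.
  unsigned VF = 0;
  outs() << Impl.getScalarizedFunction("__svml_cosf4", VF) << " VF=" << VF << "\n"; // cosf VF=4
  return 0;
}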
TargetLibraryInfo TargetLibraryAnalysis::run(Module &M,
ModuleAnalysisManager &) {
if (PresetInfoImpl)
return TargetLibraryInfo(*PresetInfoImpl);
return TargetLibraryInfo(lookupInfoImpl(Triple(M.getTargetTriple())));
}
TargetLibraryInfo TargetLibraryAnalysis::run(Function &F,
FunctionAnalysisManager &) {
if (PresetInfoImpl)
return TargetLibraryInfo(*PresetInfoImpl);
return TargetLibraryInfo(
lookupInfoImpl(Triple(F.getParent()->getTargetTriple())));
}
TargetLibraryInfoImpl &TargetLibraryAnalysis::lookupInfoImpl(const Triple &T) {
std::unique_ptr<TargetLibraryInfoImpl> &Impl =
Impls[T.normalize()];
if (!Impl)
Impl.reset(new TargetLibraryInfoImpl(T));
return *Impl;
}
unsigned TargetLibraryInfoImpl::getWCharSize(const Module &M) const {
if (auto *ShortWChar = cast_or_null<ConstantAsMetadata>(
M.getModuleFlag("wchar_size")))
return cast<ConstantInt>(ShortWChar->getValue())->getZExtValue();
return 0;
}
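// ----------------------------------------------------------------------------
// Illustrative sketch (standalone example, not part of this file): the
// "wchar_size" module flag (normally emitted by the front end) is what
// getWCharSize() above reads; without the flag it returns 0. Assumes the
// LLVM 8.0 C++ API.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  TargetLibraryInfoImpl Impl;

  unsigned Before = Impl.getWCharSize(M); // 0: flag not present yet
  M.addModuleFlag(Module::Error, "wchar_size", 4);
  unsigned After = Impl.getWCharSize(M);  // 4
  return (Before == 0 && After == 4) ? 0 : 1;
}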
TargetLibraryInfoWrapperPass::TargetLibraryInfoWrapperPass()
: ImmutablePass(ID), TLIImpl(), TLI(TLIImpl) {
initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}
TargetLibraryInfoWrapperPass::TargetLibraryInfoWrapperPass(const Triple &T)
: ImmutablePass(ID), TLIImpl(T), TLI(TLIImpl) {
initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}
TargetLibraryInfoWrapperPass::TargetLibraryInfoWrapperPass(
const TargetLibraryInfoImpl &TLIImpl)
: ImmutablePass(ID), TLIImpl(TLIImpl), TLI(this->TLIImpl) {
initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}
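// ----------------------------------------------------------------------------
// Illustrative sketch (standalone example, not part of this file): wiring the
// wrapper pass above into the legacy pass manager so that later passes can
// call getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(). Assumes the
// LLVM 8.0 C++ API; the triple is an arbitrary example.
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  M.setTargetTriple("x86_64-unknown-linux-gnu");

  legacy::PassManager PM;
  PM.add(new TargetLibraryInfoWrapperPass(Triple(M.getTargetTriple())));
  // ... add transformation passes that query the TLI here ...
  PM.run(M);
  return 0;
}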
AnalysisKey TargetLibraryAnalysis::Key;
// Register the basic pass.
INITIALIZE_PASS(TargetLibraryInfoWrapperPass, "targetlibinfo",
"Target Library Information", false, true)
char TargetLibraryInfoWrapperPass::ID = 0;
void TargetLibraryInfoWrapperPass::anchor() {}
Index: vendor/llvm/dist-release_80/lib/MC/MCExpr.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/MC/MCExpr.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/MC/MCExpr.cpp (revision 344166)
@@ -1,901 +1,906 @@
//===- MCExpr.cpp - Assembly Level Expression Implementation --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCExpr.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
using namespace llvm;
#define DEBUG_TYPE "mcexpr"
namespace {
namespace stats {
STATISTIC(MCExprEvaluate, "Number of MCExpr evaluations");
} // end namespace stats
} // end anonymous namespace
void MCExpr::print(raw_ostream &OS, const MCAsmInfo *MAI, bool InParens) const {
switch (getKind()) {
case MCExpr::Target:
return cast<MCTargetExpr>(this)->printImpl(OS, MAI);
case MCExpr::Constant:
OS << cast<MCConstantExpr>(*this).getValue();
return;
case MCExpr::SymbolRef: {
const MCSymbolRefExpr &SRE = cast<MCSymbolRefExpr>(*this);
const MCSymbol &Sym = SRE.getSymbol();
// Parenthesize names that start with $ so that they don't look like
// absolute names.
bool UseParens =
!InParens && !Sym.getName().empty() && Sym.getName()[0] == '$';
if (UseParens) {
OS << '(';
Sym.print(OS, MAI);
OS << ')';
} else
Sym.print(OS, MAI);
if (SRE.getKind() != MCSymbolRefExpr::VK_None)
SRE.printVariantKind(OS);
return;
}
case MCExpr::Unary: {
const MCUnaryExpr &UE = cast<MCUnaryExpr>(*this);
switch (UE.getOpcode()) {
case MCUnaryExpr::LNot: OS << '!'; break;
case MCUnaryExpr::Minus: OS << '-'; break;
case MCUnaryExpr::Not: OS << '~'; break;
case MCUnaryExpr::Plus: OS << '+'; break;
}
bool Binary = UE.getSubExpr()->getKind() == MCExpr::Binary;
if (Binary) OS << "(";
UE.getSubExpr()->print(OS, MAI);
if (Binary) OS << ")";
return;
}
case MCExpr::Binary: {
const MCBinaryExpr &BE = cast<MCBinaryExpr>(*this);
// Only print parens around the LHS if it is non-trivial.
if (isa<MCConstantExpr>(BE.getLHS()) || isa<MCSymbolRefExpr>(BE.getLHS())) {
BE.getLHS()->print(OS, MAI);
} else {
OS << '(';
BE.getLHS()->print(OS, MAI);
OS << ')';
}
switch (BE.getOpcode()) {
case MCBinaryExpr::Add:
// Print "X-42" instead of "X+-42".
if (const MCConstantExpr *RHSC = dyn_cast<MCConstantExpr>(BE.getRHS())) {
if (RHSC->getValue() < 0) {
OS << RHSC->getValue();
return;
}
}
OS << '+';
break;
case MCBinaryExpr::AShr: OS << ">>"; break;
case MCBinaryExpr::And: OS << '&'; break;
case MCBinaryExpr::Div: OS << '/'; break;
case MCBinaryExpr::EQ: OS << "=="; break;
case MCBinaryExpr::GT: OS << '>'; break;
case MCBinaryExpr::GTE: OS << ">="; break;
case MCBinaryExpr::LAnd: OS << "&&"; break;
case MCBinaryExpr::LOr: OS << "||"; break;
case MCBinaryExpr::LShr: OS << ">>"; break;
case MCBinaryExpr::LT: OS << '<'; break;
case MCBinaryExpr::LTE: OS << "<="; break;
case MCBinaryExpr::Mod: OS << '%'; break;
case MCBinaryExpr::Mul: OS << '*'; break;
case MCBinaryExpr::NE: OS << "!="; break;
case MCBinaryExpr::Or: OS << '|'; break;
case MCBinaryExpr::Shl: OS << "<<"; break;
case MCBinaryExpr::Sub: OS << '-'; break;
case MCBinaryExpr::Xor: OS << '^'; break;
}
// Only print parens around the RHS if it is non-trivial.
if (isa<MCConstantExpr>(BE.getRHS()) || isa<MCSymbolRefExpr>(BE.getRHS())) {
BE.getRHS()->print(OS, MAI);
} else {
OS << '(';
BE.getRHS()->print(OS, MAI);
OS << ')';
}
return;
}
}
llvm_unreachable("Invalid expression kind!");
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCExpr::dump() const {
dbgs() << *this;
dbgs() << '\n';
}
#endif
/* *** */
const MCBinaryExpr *MCBinaryExpr::create(Opcode Opc, const MCExpr *LHS,
const MCExpr *RHS, MCContext &Ctx,
SMLoc Loc) {
return new (Ctx) MCBinaryExpr(Opc, LHS, RHS, Loc);
}
const MCUnaryExpr *MCUnaryExpr::create(Opcode Opc, const MCExpr *Expr,
MCContext &Ctx, SMLoc Loc) {
return new (Ctx) MCUnaryExpr(Opc, Expr, Loc);
}
const MCConstantExpr *MCConstantExpr::create(int64_t Value, MCContext &Ctx) {
return new (Ctx) MCConstantExpr(Value);
}
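// ----------------------------------------------------------------------------
// Illustrative sketch (standalone example, not part of this file): building a
// small expression tree with the factory functions above, printing it, and
// folding it to a constant. Only constant nodes are used, so no MCAsmInfo or
// target hooks are needed. Assumes the LLVM 8.0 C++ API.
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  MCContext Ctx(/*MAI=*/nullptr, /*MRI=*/nullptr, /*MOFI=*/nullptr);

  const MCExpr *Sum = MCBinaryExpr::createAdd(MCConstantExpr::create(2, Ctx),
                                              MCConstantExpr::create(3, Ctx), Ctx);
  const MCExpr *Prod =
      MCBinaryExpr::createMul(Sum, MCConstantExpr::create(4, Ctx), Ctx);

  Prod->print(outs(), /*MAI=*/nullptr); // prints (2+3)*4
  outs() << "\n";

  int64_t Res;
  if (Prod->evaluateAsAbsolute(Res))
    outs() << Res << "\n"; // 20
  return 0;
}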
/* *** */
MCSymbolRefExpr::MCSymbolRefExpr(const MCSymbol *Symbol, VariantKind Kind,
const MCAsmInfo *MAI, SMLoc Loc)
: MCExpr(MCExpr::SymbolRef, Loc), Kind(Kind),
UseParensForSymbolVariant(MAI->useParensForSymbolVariant()),
HasSubsectionsViaSymbols(MAI->hasSubsectionsViaSymbols()),
Symbol(Symbol) {
assert(Symbol);
}
const MCSymbolRefExpr *MCSymbolRefExpr::create(const MCSymbol *Sym,
VariantKind Kind,
MCContext &Ctx, SMLoc Loc) {
return new (Ctx) MCSymbolRefExpr(Sym, Kind, Ctx.getAsmInfo(), Loc);
}
const MCSymbolRefExpr *MCSymbolRefExpr::create(StringRef Name, VariantKind Kind,
MCContext &Ctx) {
return create(Ctx.getOrCreateSymbol(Name), Kind, Ctx);
}
StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
switch (Kind) {
case VK_Invalid: return "<<invalid>>";
case VK_None: return "<<none>>";
case VK_DTPOFF: return "DTPOFF";
case VK_DTPREL: return "DTPREL";
case VK_GOT: return "GOT";
case VK_GOTOFF: return "GOTOFF";
case VK_GOTREL: return "GOTREL";
case VK_GOTPCREL: return "GOTPCREL";
case VK_GOTTPOFF: return "GOTTPOFF";
case VK_INDNTPOFF: return "INDNTPOFF";
case VK_NTPOFF: return "NTPOFF";
case VK_GOTNTPOFF: return "GOTNTPOFF";
case VK_PLT: return "PLT";
case VK_TLSGD: return "TLSGD";
case VK_TLSLD: return "TLSLD";
case VK_TLSLDM: return "TLSLDM";
case VK_TPOFF: return "TPOFF";
case VK_TPREL: return "TPREL";
case VK_TLSCALL: return "tlscall";
case VK_TLSDESC: return "tlsdesc";
case VK_TLVP: return "TLVP";
case VK_TLVPPAGE: return "TLVPPAGE";
case VK_TLVPPAGEOFF: return "TLVPPAGEOFF";
case VK_PAGE: return "PAGE";
case VK_PAGEOFF: return "PAGEOFF";
case VK_GOTPAGE: return "GOTPAGE";
case VK_GOTPAGEOFF: return "GOTPAGEOFF";
case VK_SECREL: return "SECREL32";
case VK_SIZE: return "SIZE";
case VK_WEAKREF: return "WEAKREF";
case VK_X86_ABS8: return "ABS8";
case VK_ARM_NONE: return "none";
case VK_ARM_GOT_PREL: return "GOT_PREL";
case VK_ARM_TARGET1: return "target1";
case VK_ARM_TARGET2: return "target2";
case VK_ARM_PREL31: return "prel31";
case VK_ARM_SBREL: return "sbrel";
case VK_ARM_TLSLDO: return "tlsldo";
case VK_ARM_TLSDESCSEQ: return "tlsdescseq";
case VK_AVR_NONE: return "none";
case VK_AVR_LO8: return "lo8";
case VK_AVR_HI8: return "hi8";
case VK_AVR_HLO8: return "hlo8";
case VK_AVR_DIFF8: return "diff8";
case VK_AVR_DIFF16: return "diff16";
case VK_AVR_DIFF32: return "diff32";
case VK_PPC_LO: return "l";
case VK_PPC_HI: return "h";
case VK_PPC_HA: return "ha";
case VK_PPC_HIGH: return "high";
case VK_PPC_HIGHA: return "higha";
case VK_PPC_HIGHER: return "higher";
case VK_PPC_HIGHERA: return "highera";
case VK_PPC_HIGHEST: return "highest";
case VK_PPC_HIGHESTA: return "highesta";
case VK_PPC_GOT_LO: return "got@l";
case VK_PPC_GOT_HI: return "got@h";
case VK_PPC_GOT_HA: return "got@ha";
case VK_PPC_TOCBASE: return "tocbase";
case VK_PPC_TOC: return "toc";
case VK_PPC_TOC_LO: return "toc@l";
case VK_PPC_TOC_HI: return "toc@h";
case VK_PPC_TOC_HA: return "toc@ha";
case VK_PPC_DTPMOD: return "dtpmod";
case VK_PPC_TPREL_LO: return "tprel@l";
case VK_PPC_TPREL_HI: return "tprel@h";
case VK_PPC_TPREL_HA: return "tprel@ha";
case VK_PPC_TPREL_HIGH: return "tprel@high";
case VK_PPC_TPREL_HIGHA: return "tprel@higha";
case VK_PPC_TPREL_HIGHER: return "tprel@higher";
case VK_PPC_TPREL_HIGHERA: return "tprel@highera";
case VK_PPC_TPREL_HIGHEST: return "tprel@highest";
case VK_PPC_TPREL_HIGHESTA: return "tprel@highesta";
case VK_PPC_DTPREL_LO: return "dtprel@l";
case VK_PPC_DTPREL_HI: return "dtprel@h";
case VK_PPC_DTPREL_HA: return "dtprel@ha";
case VK_PPC_DTPREL_HIGH: return "dtprel@high";
case VK_PPC_DTPREL_HIGHA: return "dtprel@higha";
case VK_PPC_DTPREL_HIGHER: return "dtprel@higher";
case VK_PPC_DTPREL_HIGHERA: return "dtprel@highera";
case VK_PPC_DTPREL_HIGHEST: return "dtprel@highest";
case VK_PPC_DTPREL_HIGHESTA: return "dtprel@highesta";
case VK_PPC_GOT_TPREL: return "got@tprel";
case VK_PPC_GOT_TPREL_LO: return "got@tprel@l";
case VK_PPC_GOT_TPREL_HI: return "got@tprel@h";
case VK_PPC_GOT_TPREL_HA: return "got@tprel@ha";
case VK_PPC_GOT_DTPREL: return "got@dtprel";
case VK_PPC_GOT_DTPREL_LO: return "got@dtprel@l";
case VK_PPC_GOT_DTPREL_HI: return "got@dtprel@h";
case VK_PPC_GOT_DTPREL_HA: return "got@dtprel@ha";
case VK_PPC_TLS: return "tls";
case VK_PPC_GOT_TLSGD: return "got@tlsgd";
case VK_PPC_GOT_TLSGD_LO: return "got@tlsgd@l";
case VK_PPC_GOT_TLSGD_HI: return "got@tlsgd@h";
case VK_PPC_GOT_TLSGD_HA: return "got@tlsgd@ha";
case VK_PPC_TLSGD: return "tlsgd";
case VK_PPC_GOT_TLSLD: return "got@tlsld";
case VK_PPC_GOT_TLSLD_LO: return "got@tlsld@l";
case VK_PPC_GOT_TLSLD_HI: return "got@tlsld@h";
case VK_PPC_GOT_TLSLD_HA: return "got@tlsld@ha";
case VK_PPC_TLSLD: return "tlsld";
case VK_PPC_LOCAL: return "local";
case VK_COFF_IMGREL32: return "IMGREL";
case VK_Hexagon_PCREL: return "PCREL";
case VK_Hexagon_LO16: return "LO16";
case VK_Hexagon_HI16: return "HI16";
case VK_Hexagon_GPREL: return "GPREL";
case VK_Hexagon_GD_GOT: return "GDGOT";
case VK_Hexagon_LD_GOT: return "LDGOT";
case VK_Hexagon_GD_PLT: return "GDPLT";
case VK_Hexagon_LD_PLT: return "LDPLT";
case VK_Hexagon_IE: return "IE";
case VK_Hexagon_IE_GOT: return "IEGOT";
case VK_WebAssembly_FUNCTION: return "FUNCTION";
case VK_WebAssembly_GLOBAL: return "GLOBAL";
case VK_WebAssembly_TYPEINDEX: return "TYPEINDEX";
case VK_WebAssembly_EVENT: return "EVENT";
case VK_AMDGPU_GOTPCREL32_LO: return "gotpcrel32@lo";
case VK_AMDGPU_GOTPCREL32_HI: return "gotpcrel32@hi";
case VK_AMDGPU_REL32_LO: return "rel32@lo";
case VK_AMDGPU_REL32_HI: return "rel32@hi";
case VK_AMDGPU_REL64: return "rel64";
}
llvm_unreachable("Invalid variant kind");
}
MCSymbolRefExpr::VariantKind
MCSymbolRefExpr::getVariantKindForName(StringRef Name) {
return StringSwitch<VariantKind>(Name.lower())
.Case("dtprel", VK_DTPREL)
.Case("dtpoff", VK_DTPOFF)
.Case("got", VK_GOT)
.Case("gotoff", VK_GOTOFF)
.Case("gotrel", VK_GOTREL)
.Case("gotpcrel", VK_GOTPCREL)
.Case("gottpoff", VK_GOTTPOFF)
.Case("indntpoff", VK_INDNTPOFF)
.Case("ntpoff", VK_NTPOFF)
.Case("gotntpoff", VK_GOTNTPOFF)
.Case("plt", VK_PLT)
.Case("tlscall", VK_TLSCALL)
.Case("tlsdesc", VK_TLSDESC)
.Case("tlsgd", VK_TLSGD)
.Case("tlsld", VK_TLSLD)
.Case("tlsldm", VK_TLSLDM)
.Case("tpoff", VK_TPOFF)
.Case("tprel", VK_TPREL)
.Case("tlvp", VK_TLVP)
.Case("tlvppage", VK_TLVPPAGE)
.Case("tlvppageoff", VK_TLVPPAGEOFF)
.Case("page", VK_PAGE)
.Case("pageoff", VK_PAGEOFF)
.Case("gotpage", VK_GOTPAGE)
.Case("gotpageoff", VK_GOTPAGEOFF)
.Case("imgrel", VK_COFF_IMGREL32)
.Case("secrel32", VK_SECREL)
.Case("size", VK_SIZE)
.Case("abs8", VK_X86_ABS8)
.Case("l", VK_PPC_LO)
.Case("h", VK_PPC_HI)
.Case("ha", VK_PPC_HA)
.Case("high", VK_PPC_HIGH)
.Case("higha", VK_PPC_HIGHA)
.Case("higher", VK_PPC_HIGHER)
.Case("highera", VK_PPC_HIGHERA)
.Case("highest", VK_PPC_HIGHEST)
.Case("highesta", VK_PPC_HIGHESTA)
.Case("got@l", VK_PPC_GOT_LO)
.Case("got@h", VK_PPC_GOT_HI)
.Case("got@ha", VK_PPC_GOT_HA)
.Case("local", VK_PPC_LOCAL)
.Case("tocbase", VK_PPC_TOCBASE)
.Case("toc", VK_PPC_TOC)
.Case("toc@l", VK_PPC_TOC_LO)
.Case("toc@h", VK_PPC_TOC_HI)
.Case("toc@ha", VK_PPC_TOC_HA)
.Case("tls", VK_PPC_TLS)
.Case("dtpmod", VK_PPC_DTPMOD)
.Case("tprel@l", VK_PPC_TPREL_LO)
.Case("tprel@h", VK_PPC_TPREL_HI)
.Case("tprel@ha", VK_PPC_TPREL_HA)
.Case("tprel@high", VK_PPC_TPREL_HIGH)
.Case("tprel@higha", VK_PPC_TPREL_HIGHA)
.Case("tprel@higher", VK_PPC_TPREL_HIGHER)
.Case("tprel@highera", VK_PPC_TPREL_HIGHERA)
.Case("tprel@highest", VK_PPC_TPREL_HIGHEST)
.Case("tprel@highesta", VK_PPC_TPREL_HIGHESTA)
.Case("dtprel@l", VK_PPC_DTPREL_LO)
.Case("dtprel@h", VK_PPC_DTPREL_HI)
.Case("dtprel@ha", VK_PPC_DTPREL_HA)
.Case("dtprel@high", VK_PPC_DTPREL_HIGH)
.Case("dtprel@higha", VK_PPC_DTPREL_HIGHA)
.Case("dtprel@higher", VK_PPC_DTPREL_HIGHER)
.Case("dtprel@highera", VK_PPC_DTPREL_HIGHERA)
.Case("dtprel@highest", VK_PPC_DTPREL_HIGHEST)
.Case("dtprel@highesta", VK_PPC_DTPREL_HIGHESTA)
.Case("got@tprel", VK_PPC_GOT_TPREL)
.Case("got@tprel@l", VK_PPC_GOT_TPREL_LO)
.Case("got@tprel@h", VK_PPC_GOT_TPREL_HI)
.Case("got@tprel@ha", VK_PPC_GOT_TPREL_HA)
.Case("got@dtprel", VK_PPC_GOT_DTPREL)
.Case("got@dtprel@l", VK_PPC_GOT_DTPREL_LO)
.Case("got@dtprel@h", VK_PPC_GOT_DTPREL_HI)
.Case("got@dtprel@ha", VK_PPC_GOT_DTPREL_HA)
.Case("got@tlsgd", VK_PPC_GOT_TLSGD)
.Case("got@tlsgd@l", VK_PPC_GOT_TLSGD_LO)
.Case("got@tlsgd@h", VK_PPC_GOT_TLSGD_HI)
.Case("got@tlsgd@ha", VK_PPC_GOT_TLSGD_HA)
.Case("got@tlsld", VK_PPC_GOT_TLSLD)
.Case("got@tlsld@l", VK_PPC_GOT_TLSLD_LO)
.Case("got@tlsld@h", VK_PPC_GOT_TLSLD_HI)
.Case("got@tlsld@ha", VK_PPC_GOT_TLSLD_HA)
.Case("gdgot", VK_Hexagon_GD_GOT)
.Case("gdplt", VK_Hexagon_GD_PLT)
.Case("iegot", VK_Hexagon_IE_GOT)
.Case("ie", VK_Hexagon_IE)
.Case("ldgot", VK_Hexagon_LD_GOT)
.Case("ldplt", VK_Hexagon_LD_PLT)
.Case("pcrel", VK_Hexagon_PCREL)
.Case("none", VK_ARM_NONE)
.Case("got_prel", VK_ARM_GOT_PREL)
.Case("target1", VK_ARM_TARGET1)
.Case("target2", VK_ARM_TARGET2)
.Case("prel31", VK_ARM_PREL31)
.Case("sbrel", VK_ARM_SBREL)
.Case("tlsldo", VK_ARM_TLSLDO)
.Case("lo8", VK_AVR_LO8)
.Case("hi8", VK_AVR_HI8)
.Case("hlo8", VK_AVR_HLO8)
.Case("function", VK_WebAssembly_FUNCTION)
.Case("global", VK_WebAssembly_GLOBAL)
.Case("typeindex", VK_WebAssembly_TYPEINDEX)
.Case("event", VK_WebAssembly_EVENT)
.Case("gotpcrel32@lo", VK_AMDGPU_GOTPCREL32_LO)
.Case("gotpcrel32@hi", VK_AMDGPU_GOTPCREL32_HI)
.Case("rel32@lo", VK_AMDGPU_REL32_LO)
.Case("rel32@hi", VK_AMDGPU_REL32_HI)
.Case("rel64", VK_AMDGPU_REL64)
.Default(VK_Invalid);
}
void MCSymbolRefExpr::printVariantKind(raw_ostream &OS) const {
if (UseParensForSymbolVariant)
OS << '(' << MCSymbolRefExpr::getVariantKindName(getKind()) << ')';
else
OS << '@' << MCSymbolRefExpr::getVariantKindName(getKind());
}
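// Example: getVariantKindForName and getVariantKindName are (mostly) inverses,
// and printVariantKind chooses the syntax recorded from MCAsmInfo at
// construction time. Illustrative sketch; the printed forms assume the common
// ELF-style '@' spelling rather than the parenthesized one:
//
//   MCSymbolRefExpr::getVariantKindForName("gotpcrel");   // VK_GOTPCREL
//   MCSymbolRefExpr::getVariantKindName(
//       MCSymbolRefExpr::VK_GOTPCREL);                    // "GOTPCREL"
//
// so a reference to "foo" with that kind prints as "foo@GOTPCREL", or as
// "foo(GOTPCREL)" when UseParensForSymbolVariant is set.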
/* *** */
void MCTargetExpr::anchor() {}
/* *** */
bool MCExpr::evaluateAsAbsolute(int64_t &Res) const {
return evaluateAsAbsolute(Res, nullptr, nullptr, nullptr);
}
bool MCExpr::evaluateAsAbsolute(int64_t &Res,
const MCAsmLayout &Layout) const {
return evaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, nullptr);
}
bool MCExpr::evaluateAsAbsolute(int64_t &Res,
const MCAsmLayout &Layout,
const SectionAddrMap &Addrs) const {
return evaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, &Addrs);
}
bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const {
return evaluateAsAbsolute(Res, &Asm, nullptr, nullptr);
}
bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm) const {
return evaluateAsAbsolute(Res, Asm, nullptr, nullptr);
}
bool MCExpr::evaluateKnownAbsolute(int64_t &Res,
const MCAsmLayout &Layout) const {
return evaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, nullptr,
true);
}
bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
const MCAsmLayout *Layout,
const SectionAddrMap *Addrs) const {
// FIXME: The use of InSet = Addrs is a hack. Setting InSet causes us to
// absolutize differences across sections, which is what the MachO writer
// uses Addrs for.
return evaluateAsAbsolute(Res, Asm, Layout, Addrs, Addrs);
}
bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
const MCAsmLayout *Layout,
const SectionAddrMap *Addrs, bool InSet) const {
MCValue Value;
// Fast path constants.
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(this)) {
Res = CE->getValue();
return true;
}
bool IsRelocatable =
evaluateAsRelocatableImpl(Value, Asm, Layout, nullptr, Addrs, InSet);
// Record the current value.
Res = Value.getConstant();
return IsRelocatable && Value.isAbsolute();
}
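// Example use of the overload set above: a client that only needs a plain
// integer (say, for a directive operand) can call the layout-free form and
// treat failure as "not a compile-time constant". Illustrative sketch; 'E' is
// an already-parsed expression owned by some MCContext.
//
//   int64_t V;
//   if (E->evaluateAsAbsolute(V)) {
//     // V holds the folded constant. Symbolic or relocatable expressions
//     // take the other branch instead of being approximated.
//   }
//
// Note the constant fast path above: MCConstantExpr nodes never consult the
// assembler or the layout at all.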
/// Helper method for \see EvaluateSymbolicAdd().
static void AttemptToFoldSymbolOffsetDifference(
const MCAssembler *Asm, const MCAsmLayout *Layout,
const SectionAddrMap *Addrs, bool InSet, const MCSymbolRefExpr *&A,
const MCSymbolRefExpr *&B, int64_t &Addend) {
if (!A || !B)
return;
const MCSymbol &SA = A->getSymbol();
const MCSymbol &SB = B->getSymbol();
if (SA.isUndefined() || SB.isUndefined())
return;
if (!Asm->getWriter().isSymbolRefDifferenceFullyResolved(*Asm, A, B, InSet))
return;
if (SA.getFragment() == SB.getFragment() && !SA.isVariable() &&
!SA.isUnset() && !SB.isVariable() && !SB.isUnset()) {
Addend += (SA.getOffset() - SB.getOffset());
// Pointers to Thumb symbols need to have their low-bit set to allow
// for interworking.
if (Asm->isThumbFunc(&SA))
Addend |= 1;
// If symbol is labeled as micromips, we set low-bit to ensure
// correct offset in .gcc_except_table
if (Asm->getBackend().isMicroMips(&SA))
Addend |= 1;
// Clear the symbol expr pointers to indicate we have folded these
// operands.
A = B = nullptr;
return;
}
if (!Layout)
return;
const MCSection &SecA = *SA.getFragment()->getParent();
const MCSection &SecB = *SB.getFragment()->getParent();
if ((&SecA != &SecB) && !Addrs)
return;
// Eagerly evaluate.
Addend += Layout->getSymbolOffset(A->getSymbol()) -
Layout->getSymbolOffset(B->getSymbol());
if (Addrs && (&SecA != &SecB))
Addend += (Addrs->lookup(&SecA) - Addrs->lookup(&SecB));
// Pointers to Thumb symbols need to have their low-bit set to allow
// for interworking.
if (Asm->isThumbFunc(&SA))
Addend |= 1;
+ // If symbol is labeled as micromips, we set low-bit to ensure
+ // correct offset in .gcc_except_table
+ if (Asm->getBackend().isMicroMips(&SA))
+ Addend |= 1;
+
// Clear the symbol expr pointers to indicate we have folded these
// operands.
A = B = nullptr;
}
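// Example of what the folding above achieves: with two labels in the same
// fragment, e.g.
//
//   a:  .byte 0, 0, 0, 0
//   b:
//
// the difference "b - a" folds to the constant 4 and needs no relocation,
// because both offsets are known relative to the shared fragment. The low-bit
// adjustments only apply when the symbol being referenced is a Thumb or
// microMIPS function. (Assembly snippet shown for illustration only.)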
/// Evaluate the result of an add between (conceptually) two MCValues.
///
/// This routine conceptually attempts to construct an MCValue:
/// Result = (Result_A - Result_B + Result_Cst)
/// from two MCValue's LHS and RHS where
/// Result = LHS + RHS
/// and
/// Result = (LHS_A - LHS_B + LHS_Cst) + (RHS_A - RHS_B + RHS_Cst).
///
/// This routine attempts to aggressively fold the operands such that the result
/// is representable in an MCValue, but may not always succeed.
///
/// \returns True on success, false if the result is not representable in an
/// MCValue.
/// NOTE: It is really important to have both the Asm and Layout arguments.
/// They might look redundant, but this function can be used before layout
/// is done (see the object streamer for example) and having the Asm argument
/// lets us avoid relaxations early.
static bool
EvaluateSymbolicAdd(const MCAssembler *Asm, const MCAsmLayout *Layout,
const SectionAddrMap *Addrs, bool InSet, const MCValue &LHS,
const MCSymbolRefExpr *RHS_A, const MCSymbolRefExpr *RHS_B,
int64_t RHS_Cst, MCValue &Res) {
// FIXME: This routine (and other evaluation parts) are *incredibly* sloppy
// about dealing with modifiers. This will ultimately bite us, one day.
const MCSymbolRefExpr *LHS_A = LHS.getSymA();
const MCSymbolRefExpr *LHS_B = LHS.getSymB();
int64_t LHS_Cst = LHS.getConstant();
// Fold the result constant immediately.
int64_t Result_Cst = LHS_Cst + RHS_Cst;
assert((!Layout || Asm) &&
"Must have an assembler object if layout is given!");
// If we have a layout, we can fold resolved differences. Do not do this if
// the backend requires this to be emitted as individual relocations, unless
// the InSet flag is set to get the current difference anyway (used for
// example to calculate symbol sizes).
if (Asm &&
(InSet || !Asm->getBackend().requiresDiffExpressionRelocations())) {
// First, fold out any differences which are fully resolved. By
// reassociating terms in
// Result = (LHS_A - LHS_B + LHS_Cst) + (RHS_A - RHS_B + RHS_Cst).
// we have the four possible differences:
// (LHS_A - LHS_B),
// (LHS_A - RHS_B),
// (RHS_A - LHS_B),
// (RHS_A - RHS_B).
// Since we are attempting to be as aggressive as possible about folding, we
// attempt to evaluate each possible alternative.
AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, LHS_A, LHS_B,
Result_Cst);
AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, LHS_A, RHS_B,
Result_Cst);
AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, RHS_A, LHS_B,
Result_Cst);
AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, RHS_A, RHS_B,
Result_Cst);
}
// We can't represent the addition or subtraction of two symbols.
if ((LHS_A && RHS_A) || (LHS_B && RHS_B))
return false;
// At this point, we have at most one additive symbol and one subtractive
// symbol -- find them.
const MCSymbolRefExpr *A = LHS_A ? LHS_A : RHS_A;
const MCSymbolRefExpr *B = LHS_B ? LHS_B : RHS_B;
Res = MCValue::get(A, B, Result_Cst);
return true;
}
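// Worked example for the folding above: evaluating "(foo + 4) - bar" reaches
// this function with LHS = (foo, <none>, 4) and the negated right-hand side
// passed as RHS_A = <none>, RHS_B = bar, RHS_Cst = 0. If foo and bar sit in
// the same (resolved) section, AttemptToFoldSymbolOffsetDifference folds the
// (LHS_A - RHS_B) pair into the constant, yielding an absolute
// Res = (<none>, <none>, foo-bar offset + 4); otherwise Res keeps one additive
// and one subtractive symbol, i.e. (foo, bar, 4).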
bool MCExpr::evaluateAsRelocatable(MCValue &Res,
const MCAsmLayout *Layout,
const MCFixup *Fixup) const {
MCAssembler *Assembler = Layout ? &Layout->getAssembler() : nullptr;
return evaluateAsRelocatableImpl(Res, Assembler, Layout, Fixup, nullptr,
false);
}
bool MCExpr::evaluateAsValue(MCValue &Res, const MCAsmLayout &Layout) const {
MCAssembler *Assembler = &Layout.getAssembler();
return evaluateAsRelocatableImpl(Res, Assembler, &Layout, nullptr, nullptr,
true);
}
static bool canExpand(const MCSymbol &Sym, bool InSet) {
const MCExpr *Expr = Sym.getVariableValue();
const auto *Inner = dyn_cast<MCSymbolRefExpr>(Expr);
if (Inner) {
if (Inner->getKind() == MCSymbolRefExpr::VK_WEAKREF)
return false;
}
if (InSet)
return true;
return !Sym.isInSection();
}
bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
const MCAsmLayout *Layout,
const MCFixup *Fixup,
const SectionAddrMap *Addrs,
bool InSet) const {
++stats::MCExprEvaluate;
switch (getKind()) {
case Target:
return cast<MCTargetExpr>(this)->evaluateAsRelocatableImpl(Res, Layout,
Fixup);
case Constant:
Res = MCValue::get(cast<MCConstantExpr>(this)->getValue());
return true;
case SymbolRef: {
const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(this);
const MCSymbol &Sym = SRE->getSymbol();
// Evaluate recursively if this is a variable.
if (Sym.isVariable() && SRE->getKind() == MCSymbolRefExpr::VK_None &&
canExpand(Sym, InSet)) {
bool IsMachO = SRE->hasSubsectionsViaSymbols();
if (Sym.getVariableValue()->evaluateAsRelocatableImpl(
Res, Asm, Layout, Fixup, Addrs, InSet || IsMachO)) {
if (!IsMachO)
return true;
const MCSymbolRefExpr *A = Res.getSymA();
const MCSymbolRefExpr *B = Res.getSymB();
// FIXME: This is a small hack. Given
// a = b + 4
// .long a
// the OS X assembler will completely drop the 4. We should probably
// include it in the relocation or produce an error if that is not
// possible.
// Allow constant expressions.
if (!A && !B)
return true;
// Allows aliases with zero offset.
if (Res.getConstant() == 0 && (!A || !B))
return true;
}
}
Res = MCValue::get(SRE, nullptr, 0);
return true;
}
case Unary: {
const MCUnaryExpr *AUE = cast<MCUnaryExpr>(this);
MCValue Value;
if (!AUE->getSubExpr()->evaluateAsRelocatableImpl(Value, Asm, Layout, Fixup,
Addrs, InSet))
return false;
switch (AUE->getOpcode()) {
case MCUnaryExpr::LNot:
if (!Value.isAbsolute())
return false;
Res = MCValue::get(!Value.getConstant());
break;
case MCUnaryExpr::Minus:
/// -(a - b + const) ==> (b - a - const)
if (Value.getSymA() && !Value.getSymB())
return false;
// The cast avoids undefined behavior if the constant is INT64_MIN.
Res = MCValue::get(Value.getSymB(), Value.getSymA(),
-(uint64_t)Value.getConstant());
break;
case MCUnaryExpr::Not:
if (!Value.isAbsolute())
return false;
Res = MCValue::get(~Value.getConstant());
break;
case MCUnaryExpr::Plus:
Res = Value;
break;
}
return true;
}
case Binary: {
const MCBinaryExpr *ABE = cast<MCBinaryExpr>(this);
MCValue LHSValue, RHSValue;
if (!ABE->getLHS()->evaluateAsRelocatableImpl(LHSValue, Asm, Layout, Fixup,
Addrs, InSet) ||
!ABE->getRHS()->evaluateAsRelocatableImpl(RHSValue, Asm, Layout, Fixup,
Addrs, InSet)) {
// Check if both are Target Expressions, see if we can compare them.
if (const MCTargetExpr *L = dyn_cast<MCTargetExpr>(ABE->getLHS()))
if (const MCTargetExpr *R = dyn_cast<MCTargetExpr>(ABE->getRHS())) {
switch (ABE->getOpcode()) {
case MCBinaryExpr::EQ:
Res = MCValue::get((L->isEqualTo(R)) ? -1 : 0);
return true;
case MCBinaryExpr::NE:
Res = MCValue::get((L->isEqualTo(R)) ? 0 : -1);
return true;
default: break;
}
}
return false;
}
// We only support a few operations on non-constant expressions, handle
// those first.
if (!LHSValue.isAbsolute() || !RHSValue.isAbsolute()) {
switch (ABE->getOpcode()) {
default:
return false;
case MCBinaryExpr::Sub:
// Negate RHS and add.
// The cast avoids undefined behavior if the constant is INT64_MIN.
return EvaluateSymbolicAdd(Asm, Layout, Addrs, InSet, LHSValue,
RHSValue.getSymB(), RHSValue.getSymA(),
-(uint64_t)RHSValue.getConstant(), Res);
case MCBinaryExpr::Add:
return EvaluateSymbolicAdd(Asm, Layout, Addrs, InSet, LHSValue,
RHSValue.getSymA(), RHSValue.getSymB(),
RHSValue.getConstant(), Res);
}
}
// FIXME: We need target hooks for the evaluation. It may be limited in
// width, and gas defines the result of comparisons differently from
// Apple as.
int64_t LHS = LHSValue.getConstant(), RHS = RHSValue.getConstant();
int64_t Result = 0;
auto Op = ABE->getOpcode();
switch (Op) {
case MCBinaryExpr::AShr: Result = LHS >> RHS; break;
case MCBinaryExpr::Add: Result = LHS + RHS; break;
case MCBinaryExpr::And: Result = LHS & RHS; break;
case MCBinaryExpr::Div:
case MCBinaryExpr::Mod:
// Handle division by zero. gas just emits a warning and keeps going;
// we try to be stricter.
// FIXME: Currently the caller of this function has no way to know that
// we're bailing out because of a 'division by zero'. Therefore, it will
// emit an 'expected relocatable expression' error. It would be nice to
// change this code to emit a better diagnostic.
if (RHS == 0)
return false;
if (ABE->getOpcode() == MCBinaryExpr::Div)
Result = LHS / RHS;
else
Result = LHS % RHS;
break;
case MCBinaryExpr::EQ: Result = LHS == RHS; break;
case MCBinaryExpr::GT: Result = LHS > RHS; break;
case MCBinaryExpr::GTE: Result = LHS >= RHS; break;
case MCBinaryExpr::LAnd: Result = LHS && RHS; break;
case MCBinaryExpr::LOr: Result = LHS || RHS; break;
case MCBinaryExpr::LShr: Result = uint64_t(LHS) >> uint64_t(RHS); break;
case MCBinaryExpr::LT: Result = LHS < RHS; break;
case MCBinaryExpr::LTE: Result = LHS <= RHS; break;
case MCBinaryExpr::Mul: Result = LHS * RHS; break;
case MCBinaryExpr::NE: Result = LHS != RHS; break;
case MCBinaryExpr::Or: Result = LHS | RHS; break;
case MCBinaryExpr::Shl: Result = uint64_t(LHS) << uint64_t(RHS); break;
case MCBinaryExpr::Sub: Result = LHS - RHS; break;
case MCBinaryExpr::Xor: Result = LHS ^ RHS; break;
}
switch (Op) {
default:
Res = MCValue::get(Result);
break;
case MCBinaryExpr::EQ:
case MCBinaryExpr::GT:
case MCBinaryExpr::GTE:
case MCBinaryExpr::LT:
case MCBinaryExpr::LTE:
case MCBinaryExpr::NE:
// A comparison operator returns -1 if true and 0 if false.
Res = MCValue::get(Result ? -1 : 0);
break;
}
return true;
}
}
llvm_unreachable("Invalid assembly expression kind!");
}
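// Example of the constant folding above, using the gas-style convention that
// comparisons yield -1 for true and 0 for false. Illustrative sketch; 'Ctx' is
// a caller-provided MCContext.
//
//   const MCExpr *Cmp = MCBinaryExpr::create(
//       MCBinaryExpr::GT, MCConstantExpr::create(7, Ctx),
//       MCConstantExpr::create(3, Ctx), Ctx);
//   int64_t V;
//   bool OK = Cmp->evaluateAsAbsolute(V);   // OK == true, V == -1
//
// Division or modulo by a constant zero makes the evaluation fail instead of
// folding, as the FIXME above notes.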
MCFragment *MCExpr::findAssociatedFragment() const {
switch (getKind()) {
case Target:
// We never look through target specific expressions.
return cast<MCTargetExpr>(this)->findAssociatedFragment();
case Constant:
return MCSymbol::AbsolutePseudoFragment;
case SymbolRef: {
const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(this);
const MCSymbol &Sym = SRE->getSymbol();
return Sym.getFragment();
}
case Unary:
return cast<MCUnaryExpr>(this)->getSubExpr()->findAssociatedFragment();
case Binary: {
const MCBinaryExpr *BE = cast<MCBinaryExpr>(this);
MCFragment *LHS_F = BE->getLHS()->findAssociatedFragment();
MCFragment *RHS_F = BE->getRHS()->findAssociatedFragment();
// If either is absolute, return the other.
if (LHS_F == MCSymbol::AbsolutePseudoFragment)
return RHS_F;
if (RHS_F == MCSymbol::AbsolutePseudoFragment)
return LHS_F;
// Not always correct, but probably the best we can do without more context.
if (BE->getOpcode() == MCBinaryExpr::Sub)
return MCSymbol::AbsolutePseudoFragment;
// Otherwise, return the first non-null fragment.
return LHS_F ? LHS_F : RHS_F;
}
}
llvm_unreachable("Invalid assembly expression kind!");
}
Index: vendor/llvm/dist-release_80/lib/MC/MCParser/AsmParser.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/MC/MCParser/AsmParser.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/MC/MCParser/AsmParser.cpp (revision 344166)
@@ -1,5935 +1,5936 @@
//===- AsmParser.cpp - Parser for Assembly Files --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class implements the parser for assembly files.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/AsmCond.h"
#include "llvm/MC/MCParser/AsmLexer.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCAsmParserUtils.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
MCAsmParserSemaCallback::~MCAsmParserSemaCallback() = default;
static cl::opt<unsigned> AsmMacroMaxNestingDepth(
"asm-macro-max-nesting-depth", cl::init(20), cl::Hidden,
cl::desc("The maximum nesting depth allowed for assembly macros."));
namespace {
/// Helper types for tracking macro definitions.
typedef std::vector<AsmToken> MCAsmMacroArgument;
typedef std::vector<MCAsmMacroArgument> MCAsmMacroArguments;
/// Helper class for storing information about an active macro
/// instantiation.
struct MacroInstantiation {
/// The location of the instantiation.
SMLoc InstantiationLoc;
/// The buffer where parsing should resume upon instantiation completion.
int ExitBuffer;
/// The location where parsing should resume upon instantiation completion.
SMLoc ExitLoc;
/// The depth of TheCondStack at the start of the instantiation.
size_t CondStackDepth;
public:
MacroInstantiation(SMLoc IL, int EB, SMLoc EL, size_t CondStackDepth);
};
struct ParseStatementInfo {
/// The parsed operands from the last parsed statement.
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> ParsedOperands;
/// The opcode from the last parsed instruction.
unsigned Opcode = ~0U;
/// Was there an error parsing the inline assembly?
bool ParseError = false;
SmallVectorImpl<AsmRewrite> *AsmRewrites = nullptr;
ParseStatementInfo() = delete;
ParseStatementInfo(SmallVectorImpl<AsmRewrite> *rewrites)
: AsmRewrites(rewrites) {}
};
/// The concrete assembly parser instance.
class AsmParser : public MCAsmParser {
private:
AsmLexer Lexer;
MCContext &Ctx;
MCStreamer &Out;
const MCAsmInfo &MAI;
SourceMgr &SrcMgr;
SourceMgr::DiagHandlerTy SavedDiagHandler;
void *SavedDiagContext;
std::unique_ptr<MCAsmParserExtension> PlatformParser;
/// This is the current buffer index we're lexing from as managed by the
/// SourceMgr object.
unsigned CurBuffer;
AsmCond TheCondState;
std::vector<AsmCond> TheCondStack;
/// maps directive names to handler methods in parser
/// extensions. Extensions register themselves in this map by calling
/// addDirectiveHandler.
StringMap<ExtensionDirectiveHandler> ExtensionDirectiveMap;
/// Stack of active macro instantiations.
std::vector<MacroInstantiation*> ActiveMacros;
/// List of bodies of anonymous macros.
std::deque<MCAsmMacro> MacroLikeBodies;
/// Boolean tracking whether macro substitution is enabled.
unsigned MacrosEnabledFlag : 1;
/// Keeps track of how many .macro's have been instantiated.
unsigned NumOfMacroInstantiations;
/// The values from the last parsed cpp hash file line comment if any.
struct CppHashInfoTy {
StringRef Filename;
int64_t LineNumber = 0;
SMLoc Loc;
unsigned Buf = 0;
};
CppHashInfoTy CppHashInfo;
/// List of forward directional labels for diagnosis at the end.
SmallVector<std::tuple<SMLoc, CppHashInfoTy, MCSymbol *>, 4> DirLabels;
/// AssemblerDialect. ~0U means an unset value; use the value provided by MAI.
unsigned AssemblerDialect = ~0U;
/// is Darwin compatibility enabled?
bool IsDarwin = false;
/// Are we parsing ms-style inline assembly?
bool ParsingInlineAsm = false;
/// Did we already inform the user about inconsistent MD5 usage?
bool ReportedInconsistentMD5 = false;
// Is alt macro mode enabled.
bool AltMacroMode = false;
public:
AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
const MCAsmInfo &MAI, unsigned CB);
AsmParser(const AsmParser &) = delete;
AsmParser &operator=(const AsmParser &) = delete;
~AsmParser() override;
bool Run(bool NoInitialTextSection, bool NoFinalize = false) override;
void addDirectiveHandler(StringRef Directive,
ExtensionDirectiveHandler Handler) override {
ExtensionDirectiveMap[Directive] = Handler;
}
void addAliasForDirective(StringRef Directive, StringRef Alias) override {
DirectiveKindMap[Directive] = DirectiveKindMap[Alias];
}
/// @name MCAsmParser Interface
/// {
SourceMgr &getSourceManager() override { return SrcMgr; }
MCAsmLexer &getLexer() override { return Lexer; }
MCContext &getContext() override { return Ctx; }
MCStreamer &getStreamer() override { return Out; }
CodeViewContext &getCVContext() { return Ctx.getCVContext(); }
unsigned getAssemblerDialect() override {
if (AssemblerDialect == ~0U)
return MAI.getAssemblerDialect();
else
return AssemblerDialect;
}
void setAssemblerDialect(unsigned i) override {
AssemblerDialect = i;
}
void Note(SMLoc L, const Twine &Msg, SMRange Range = None) override;
bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) override;
bool printError(SMLoc L, const Twine &Msg, SMRange Range = None) override;
const AsmToken &Lex() override;
void setParsingInlineAsm(bool V) override {
ParsingInlineAsm = V;
// When parsing MS inline asm, we must lex 0b1101 and 0ABCH as binary and
// hex integer literals.
Lexer.setLexMasmIntegers(V);
}
bool isParsingInlineAsm() override { return ParsingInlineAsm; }
bool parseMSInlineAsm(void *AsmLoc, std::string &AsmString,
unsigned &NumOutputs, unsigned &NumInputs,
SmallVectorImpl<std::pair<void *,bool>> &OpDecls,
SmallVectorImpl<std::string> &Constraints,
SmallVectorImpl<std::string> &Clobbers,
const MCInstrInfo *MII, const MCInstPrinter *IP,
MCAsmParserSemaCallback &SI) override;
bool parseExpression(const MCExpr *&Res);
bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc) override;
bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
bool parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc) override;
bool parseParenExprOfDepth(unsigned ParenDepth, const MCExpr *&Res,
SMLoc &EndLoc) override;
bool parseAbsoluteExpression(int64_t &Res) override;
/// Parse a floating point expression using the float \p Semantics
/// and set \p Res to the value.
bool parseRealValue(const fltSemantics &Semantics, APInt &Res);
/// Parse an identifier or string (as a quoted identifier)
/// and set \p Res to the identifier contents.
bool parseIdentifier(StringRef &Res) override;
void eatToEndOfStatement() override;
bool checkForValidSection() override;
/// }
private:
bool parseStatement(ParseStatementInfo &Info,
MCAsmParserSemaCallback *SI);
bool parseCurlyBlockScope(SmallVectorImpl<AsmRewrite>& AsmStrRewrites);
bool parseCppHashLineFilenameComment(SMLoc L);
void checkForBadMacro(SMLoc DirectiveLoc, StringRef Name, StringRef Body,
ArrayRef<MCAsmMacroParameter> Parameters);
bool expandMacro(raw_svector_ostream &OS, StringRef Body,
ArrayRef<MCAsmMacroParameter> Parameters,
ArrayRef<MCAsmMacroArgument> A, bool EnableAtPseudoVariable,
SMLoc L);
/// Are macros enabled in the parser?
bool areMacrosEnabled() {return MacrosEnabledFlag;}
/// Control a flag in the parser that enables or disables macros.
void setMacrosEnabled(bool Flag) {MacrosEnabledFlag = Flag;}
/// Are we inside a macro instantiation?
bool isInsideMacroInstantiation() {return !ActiveMacros.empty();}
/// Handle entry to macro instantiation.
///
/// \param M The macro.
/// \param NameLoc Instantiation location.
bool handleMacroEntry(const MCAsmMacro *M, SMLoc NameLoc);
/// Handle exit from macro instantiation.
void handleMacroExit();
/// Extract AsmTokens for a macro argument.
bool parseMacroArgument(MCAsmMacroArgument &MA, bool Vararg);
/// Parse all macro arguments for a given macro.
bool parseMacroArguments(const MCAsmMacro *M, MCAsmMacroArguments &A);
void printMacroInstantiations();
void printMessage(SMLoc Loc, SourceMgr::DiagKind Kind, const Twine &Msg,
SMRange Range = None) const {
ArrayRef<SMRange> Ranges(Range);
SrcMgr.PrintMessage(Loc, Kind, Msg, Ranges);
}
static void DiagHandler(const SMDiagnostic &Diag, void *Context);
/// Should we emit DWARF describing this assembler source? (Returns false if
/// the source has .file directives, which means we don't want to generate
/// info describing the assembler source itself.)
bool enabledGenDwarfForAssembly();
/// Enter the specified file. This returns true on failure.
bool enterIncludeFile(const std::string &Filename);
/// Process the specified file for the .incbin directive.
/// This returns true on failure.
bool processIncbinFile(const std::string &Filename, int64_t Skip = 0,
const MCExpr *Count = nullptr, SMLoc Loc = SMLoc());
/// Reset the current lexer position to that given by \p Loc. The
/// current token is not set; clients should ensure Lex() is called
/// subsequently.
///
/// \param InBuffer If not 0, should be the known buffer id that contains the
/// location.
void jumpToLoc(SMLoc Loc, unsigned InBuffer = 0);
/// Parse up to the end of statement and return the contents from the
/// current token until the end of the statement; the current token on exit
/// will be either the EndOfStatement or EOF.
StringRef parseStringToEndOfStatement() override;
/// Parse until the end of a statement or a comma is encountered,
/// return the contents from the current token up to the end or comma.
StringRef parseStringToComma();
bool parseAssignment(StringRef Name, bool allow_redef,
bool NoDeadStrip = false);
unsigned getBinOpPrecedence(AsmToken::TokenKind K,
MCBinaryExpr::Opcode &Kind);
bool parseBinOpRHS(unsigned Precedence, const MCExpr *&Res, SMLoc &EndLoc);
bool parseParenExpr(const MCExpr *&Res, SMLoc &EndLoc);
bool parseBracketExpr(const MCExpr *&Res, SMLoc &EndLoc);
bool parseRegisterOrRegisterNumber(int64_t &Register, SMLoc DirectiveLoc);
bool parseCVFunctionId(int64_t &FunctionId, StringRef DirectiveName);
bool parseCVFileId(int64_t &FileId, StringRef DirectiveName);
// Generic (target and platform independent) directive parsing.
enum DirectiveKind {
DK_NO_DIRECTIVE, // Placeholder
DK_SET,
DK_EQU,
DK_EQUIV,
DK_ASCII,
DK_ASCIZ,
DK_STRING,
DK_BYTE,
DK_SHORT,
DK_RELOC,
DK_VALUE,
DK_2BYTE,
DK_LONG,
DK_INT,
DK_4BYTE,
DK_QUAD,
DK_8BYTE,
DK_OCTA,
DK_DC,
DK_DC_A,
DK_DC_B,
DK_DC_D,
DK_DC_L,
DK_DC_S,
DK_DC_W,
DK_DC_X,
DK_DCB,
DK_DCB_B,
DK_DCB_D,
DK_DCB_L,
DK_DCB_S,
DK_DCB_W,
DK_DCB_X,
DK_DS,
DK_DS_B,
DK_DS_D,
DK_DS_L,
DK_DS_P,
DK_DS_S,
DK_DS_W,
DK_DS_X,
DK_SINGLE,
DK_FLOAT,
DK_DOUBLE,
DK_ALIGN,
DK_ALIGN32,
DK_BALIGN,
DK_BALIGNW,
DK_BALIGNL,
DK_P2ALIGN,
DK_P2ALIGNW,
DK_P2ALIGNL,
DK_ORG,
DK_FILL,
DK_ENDR,
DK_BUNDLE_ALIGN_MODE,
DK_BUNDLE_LOCK,
DK_BUNDLE_UNLOCK,
DK_ZERO,
DK_EXTERN,
DK_GLOBL,
DK_GLOBAL,
DK_LAZY_REFERENCE,
DK_NO_DEAD_STRIP,
DK_SYMBOL_RESOLVER,
DK_PRIVATE_EXTERN,
DK_REFERENCE,
DK_WEAK_DEFINITION,
DK_WEAK_REFERENCE,
DK_WEAK_DEF_CAN_BE_HIDDEN,
DK_COMM,
DK_COMMON,
DK_LCOMM,
DK_ABORT,
DK_INCLUDE,
DK_INCBIN,
DK_CODE16,
DK_CODE16GCC,
DK_REPT,
DK_IRP,
DK_IRPC,
DK_IF,
DK_IFEQ,
DK_IFGE,
DK_IFGT,
DK_IFLE,
DK_IFLT,
DK_IFNE,
DK_IFB,
DK_IFNB,
DK_IFC,
DK_IFEQS,
DK_IFNC,
DK_IFNES,
DK_IFDEF,
DK_IFNDEF,
DK_IFNOTDEF,
DK_ELSEIF,
DK_ELSE,
DK_ENDIF,
DK_SPACE,
DK_SKIP,
DK_FILE,
DK_LINE,
DK_LOC,
DK_STABS,
DK_CV_FILE,
DK_CV_FUNC_ID,
DK_CV_INLINE_SITE_ID,
DK_CV_LOC,
DK_CV_LINETABLE,
DK_CV_INLINE_LINETABLE,
DK_CV_DEF_RANGE,
DK_CV_STRINGTABLE,
DK_CV_STRING,
DK_CV_FILECHECKSUMS,
DK_CV_FILECHECKSUM_OFFSET,
DK_CV_FPO_DATA,
DK_CFI_SECTIONS,
DK_CFI_STARTPROC,
DK_CFI_ENDPROC,
DK_CFI_DEF_CFA,
DK_CFI_DEF_CFA_OFFSET,
DK_CFI_ADJUST_CFA_OFFSET,
DK_CFI_DEF_CFA_REGISTER,
DK_CFI_OFFSET,
DK_CFI_REL_OFFSET,
DK_CFI_PERSONALITY,
DK_CFI_LSDA,
DK_CFI_REMEMBER_STATE,
DK_CFI_RESTORE_STATE,
DK_CFI_SAME_VALUE,
DK_CFI_RESTORE,
DK_CFI_ESCAPE,
DK_CFI_RETURN_COLUMN,
DK_CFI_SIGNAL_FRAME,
DK_CFI_UNDEFINED,
DK_CFI_REGISTER,
DK_CFI_WINDOW_SAVE,
DK_CFI_B_KEY_FRAME,
DK_MACROS_ON,
DK_MACROS_OFF,
DK_ALTMACRO,
DK_NOALTMACRO,
DK_MACRO,
DK_EXITM,
DK_ENDM,
DK_ENDMACRO,
DK_PURGEM,
DK_SLEB128,
DK_ULEB128,
DK_ERR,
DK_ERROR,
DK_WARNING,
DK_PRINT,
DK_ADDRSIG,
DK_ADDRSIG_SYM,
DK_END
};
/// Maps directive name --> DirectiveKind enum, for
/// directives parsed by this class.
StringMap<DirectiveKind> DirectiveKindMap;
// ".ascii", ".asciz", ".string"
bool parseDirectiveAscii(StringRef IDVal, bool ZeroTerminated);
bool parseDirectiveReloc(SMLoc DirectiveLoc); // ".reloc"
bool parseDirectiveValue(StringRef IDVal,
unsigned Size); // ".byte", ".long", ...
bool parseDirectiveOctaValue(StringRef IDVal); // ".octa", ...
bool parseDirectiveRealValue(StringRef IDVal,
const fltSemantics &); // ".single", ...
bool parseDirectiveFill(); // ".fill"
bool parseDirectiveZero(); // ".zero"
// ".set", ".equ", ".equiv"
bool parseDirectiveSet(StringRef IDVal, bool allow_redef);
bool parseDirectiveOrg(); // ".org"
// ".align{,32}", ".p2align{,w,l}"
bool parseDirectiveAlign(bool IsPow2, unsigned ValueSize);
// ".file", ".line", ".loc", ".stabs"
bool parseDirectiveFile(SMLoc DirectiveLoc);
bool parseDirectiveLine();
bool parseDirectiveLoc();
bool parseDirectiveStabs();
// ".cv_file", ".cv_func_id", ".cv_inline_site_id", ".cv_loc", ".cv_linetable",
// ".cv_inline_linetable", ".cv_def_range", ".cv_string"
bool parseDirectiveCVFile();
bool parseDirectiveCVFuncId();
bool parseDirectiveCVInlineSiteId();
bool parseDirectiveCVLoc();
bool parseDirectiveCVLinetable();
bool parseDirectiveCVInlineLinetable();
bool parseDirectiveCVDefRange();
bool parseDirectiveCVString();
bool parseDirectiveCVStringTable();
bool parseDirectiveCVFileChecksums();
bool parseDirectiveCVFileChecksumOffset();
bool parseDirectiveCVFPOData();
// .cfi directives
bool parseDirectiveCFIRegister(SMLoc DirectiveLoc);
bool parseDirectiveCFIWindowSave();
bool parseDirectiveCFISections();
bool parseDirectiveCFIStartProc();
bool parseDirectiveCFIEndProc();
bool parseDirectiveCFIDefCfaOffset();
bool parseDirectiveCFIDefCfa(SMLoc DirectiveLoc);
bool parseDirectiveCFIAdjustCfaOffset();
bool parseDirectiveCFIDefCfaRegister(SMLoc DirectiveLoc);
bool parseDirectiveCFIOffset(SMLoc DirectiveLoc);
bool parseDirectiveCFIRelOffset(SMLoc DirectiveLoc);
bool parseDirectiveCFIPersonalityOrLsda(bool IsPersonality);
bool parseDirectiveCFIRememberState();
bool parseDirectiveCFIRestoreState();
bool parseDirectiveCFISameValue(SMLoc DirectiveLoc);
bool parseDirectiveCFIRestore(SMLoc DirectiveLoc);
bool parseDirectiveCFIEscape();
bool parseDirectiveCFIReturnColumn(SMLoc DirectiveLoc);
bool parseDirectiveCFISignalFrame();
bool parseDirectiveCFIUndefined(SMLoc DirectiveLoc);
// macro directives
bool parseDirectivePurgeMacro(SMLoc DirectiveLoc);
bool parseDirectiveExitMacro(StringRef Directive);
bool parseDirectiveEndMacro(StringRef Directive);
bool parseDirectiveMacro(SMLoc DirectiveLoc);
bool parseDirectiveMacrosOnOff(StringRef Directive);
// alternate macro mode directives
bool parseDirectiveAltmacro(StringRef Directive);
// ".bundle_align_mode"
bool parseDirectiveBundleAlignMode();
// ".bundle_lock"
bool parseDirectiveBundleLock();
// ".bundle_unlock"
bool parseDirectiveBundleUnlock();
// ".space", ".skip"
bool parseDirectiveSpace(StringRef IDVal);
// ".dcb"
bool parseDirectiveDCB(StringRef IDVal, unsigned Size);
bool parseDirectiveRealDCB(StringRef IDVal, const fltSemantics &);
// ".ds"
bool parseDirectiveDS(StringRef IDVal, unsigned Size);
// .sleb128 (Signed=true) and .uleb128 (Signed=false)
bool parseDirectiveLEB128(bool Signed);
/// Parse a directive like ".globl" which
/// accepts a single symbol (which should be a label or an external).
bool parseDirectiveSymbolAttribute(MCSymbolAttr Attr);
bool parseDirectiveComm(bool IsLocal); // ".comm" and ".lcomm"
bool parseDirectiveAbort(); // ".abort"
bool parseDirectiveInclude(); // ".include"
bool parseDirectiveIncbin(); // ".incbin"
// ".if", ".ifeq", ".ifge", ".ifgt" , ".ifle", ".iflt" or ".ifne"
bool parseDirectiveIf(SMLoc DirectiveLoc, DirectiveKind DirKind);
// ".ifb" or ".ifnb", depending on ExpectBlank.
bool parseDirectiveIfb(SMLoc DirectiveLoc, bool ExpectBlank);
// ".ifc" or ".ifnc", depending on ExpectEqual.
bool parseDirectiveIfc(SMLoc DirectiveLoc, bool ExpectEqual);
// ".ifeqs" or ".ifnes", depending on ExpectEqual.
bool parseDirectiveIfeqs(SMLoc DirectiveLoc, bool ExpectEqual);
// ".ifdef" or ".ifndef", depending on expect_defined
bool parseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined);
bool parseDirectiveElseIf(SMLoc DirectiveLoc); // ".elseif"
bool parseDirectiveElse(SMLoc DirectiveLoc); // ".else"
bool parseDirectiveEndIf(SMLoc DirectiveLoc); // .endif
bool parseEscapedString(std::string &Data) override;
const MCExpr *applyModifierToExpr(const MCExpr *E,
MCSymbolRefExpr::VariantKind Variant);
// Macro-like directives
MCAsmMacro *parseMacroLikeBody(SMLoc DirectiveLoc);
void instantiateMacroLikeBody(MCAsmMacro *M, SMLoc DirectiveLoc,
raw_svector_ostream &OS);
bool parseDirectiveRept(SMLoc DirectiveLoc, StringRef Directive);
bool parseDirectiveIrp(SMLoc DirectiveLoc); // ".irp"
bool parseDirectiveIrpc(SMLoc DirectiveLoc); // ".irpc"
bool parseDirectiveEndr(SMLoc DirectiveLoc); // ".endr"
// "_emit" or "__emit"
bool parseDirectiveMSEmit(SMLoc DirectiveLoc, ParseStatementInfo &Info,
size_t Len);
// "align"
bool parseDirectiveMSAlign(SMLoc DirectiveLoc, ParseStatementInfo &Info);
// "end"
bool parseDirectiveEnd(SMLoc DirectiveLoc);
// ".err" or ".error"
bool parseDirectiveError(SMLoc DirectiveLoc, bool WithMessage);
// ".warning"
bool parseDirectiveWarning(SMLoc DirectiveLoc);
// .print <double-quotes-string>
bool parseDirectivePrint(SMLoc DirectiveLoc);
// Directives to support address-significance tables.
bool parseDirectiveAddrsig();
bool parseDirectiveAddrsigSym();
void initializeDirectiveKindMap();
};
} // end anonymous namespace
namespace llvm {
extern MCAsmParserExtension *createDarwinAsmParser();
extern MCAsmParserExtension *createELFAsmParser();
extern MCAsmParserExtension *createCOFFAsmParser();
extern MCAsmParserExtension *createWasmAsmParser();
} // end namespace llvm
enum { DEFAULT_ADDRSPACE = 0 };
AsmParser::AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
const MCAsmInfo &MAI, unsigned CB = 0)
: Lexer(MAI), Ctx(Ctx), Out(Out), MAI(MAI), SrcMgr(SM),
CurBuffer(CB ? CB : SM.getMainFileID()), MacrosEnabledFlag(true) {
HadError = false;
// Save the old handler.
SavedDiagHandler = SrcMgr.getDiagHandler();
SavedDiagContext = SrcMgr.getDiagContext();
// Set our own handler which calls the saved handler.
SrcMgr.setDiagHandler(DiagHandler, this);
Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer());
// Initialize the platform / file format parser.
switch (Ctx.getObjectFileInfo()->getObjectFileType()) {
case MCObjectFileInfo::IsCOFF:
PlatformParser.reset(createCOFFAsmParser());
break;
case MCObjectFileInfo::IsMachO:
PlatformParser.reset(createDarwinAsmParser());
IsDarwin = true;
break;
case MCObjectFileInfo::IsELF:
PlatformParser.reset(createELFAsmParser());
break;
case MCObjectFileInfo::IsWasm:
PlatformParser.reset(createWasmAsmParser());
break;
}
PlatformParser->Initialize(*this);
initializeDirectiveKindMap();
NumOfMacroInstantiations = 0;
}
AsmParser::~AsmParser() {
assert((HadError || ActiveMacros.empty()) &&
"Unexpected active macro instantiation!");
// Restore the saved diagnostics handler and context for use during
// finalization.
SrcMgr.setDiagHandler(SavedDiagHandler, SavedDiagContext);
}
void AsmParser::printMacroInstantiations() {
// Print the active macro instantiation stack.
for (std::vector<MacroInstantiation *>::const_reverse_iterator
it = ActiveMacros.rbegin(),
ie = ActiveMacros.rend();
it != ie; ++it)
printMessage((*it)->InstantiationLoc, SourceMgr::DK_Note,
"while in macro instantiation");
}
void AsmParser::Note(SMLoc L, const Twine &Msg, SMRange Range) {
printPendingErrors();
printMessage(L, SourceMgr::DK_Note, Msg, Range);
printMacroInstantiations();
}
bool AsmParser::Warning(SMLoc L, const Twine &Msg, SMRange Range) {
if(getTargetParser().getTargetOptions().MCNoWarn)
return false;
if (getTargetParser().getTargetOptions().MCFatalWarnings)
return Error(L, Msg, Range);
printMessage(L, SourceMgr::DK_Warning, Msg, Range);
printMacroInstantiations();
return false;
}
bool AsmParser::printError(SMLoc L, const Twine &Msg, SMRange Range) {
HadError = true;
printMessage(L, SourceMgr::DK_Error, Msg, Range);
printMacroInstantiations();
return true;
}
bool AsmParser::enterIncludeFile(const std::string &Filename) {
std::string IncludedFile;
unsigned NewBuf =
SrcMgr.AddIncludeFile(Filename, Lexer.getLoc(), IncludedFile);
if (!NewBuf)
return true;
CurBuffer = NewBuf;
Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer());
return false;
}
/// Process the specified .incbin file by searching for it in the include paths
/// then just emitting the byte contents of the file to the streamer. This
/// returns true on failure.
bool AsmParser::processIncbinFile(const std::string &Filename, int64_t Skip,
const MCExpr *Count, SMLoc Loc) {
std::string IncludedFile;
unsigned NewBuf =
SrcMgr.AddIncludeFile(Filename, Lexer.getLoc(), IncludedFile);
if (!NewBuf)
return true;
// Pick up the bytes from the file and emit them.
StringRef Bytes = SrcMgr.getMemoryBuffer(NewBuf)->getBuffer();
Bytes = Bytes.drop_front(Skip);
if (Count) {
int64_t Res;
if (!Count->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
return Error(Loc, "expected absolute expression");
if (Res < 0)
return Warning(Loc, "negative count has no effect");
Bytes = Bytes.take_front(Res);
}
getStreamer().EmitBytes(Bytes);
return false;
}
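// Example input handled by the routine above (illustrative):
//
//   .incbin "blob.bin", 16, 32
//
// skips the first 16 bytes of blob.bin and emits the following 32 bytes
// verbatim; a negative count only triggers the warning above and skips the
// emission entirely.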
void AsmParser::jumpToLoc(SMLoc Loc, unsigned InBuffer) {
CurBuffer = InBuffer ? InBuffer : SrcMgr.FindBufferContainingLoc(Loc);
Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer(),
Loc.getPointer());
}
const AsmToken &AsmParser::Lex() {
if (Lexer.getTok().is(AsmToken::Error))
Error(Lexer.getErrLoc(), Lexer.getErr());
// if it's an end of statement with a comment in it
if (getTok().is(AsmToken::EndOfStatement)) {
// if this is a line comment, output it.
if (!getTok().getString().empty() && getTok().getString().front() != '\n' &&
getTok().getString().front() != '\r' && MAI.preserveAsmComments())
Out.addExplicitComment(Twine(getTok().getString()));
}
const AsmToken *tok = &Lexer.Lex();
// Parse comments here to be deferred until end of next statement.
while (tok->is(AsmToken::Comment)) {
if (MAI.preserveAsmComments())
Out.addExplicitComment(Twine(tok->getString()));
tok = &Lexer.Lex();
}
if (tok->is(AsmToken::Eof)) {
// If this is the end of an included file, pop the parent file off the
// include stack.
SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
if (ParentIncludeLoc != SMLoc()) {
jumpToLoc(ParentIncludeLoc);
return Lex();
}
}
return *tok;
}
bool AsmParser::enabledGenDwarfForAssembly() {
// Check whether the user specified -g.
if (!getContext().getGenDwarfForAssembly())
return false;
// If we haven't encountered any .file directives (which would imply that
// the assembler source was produced with debug info already) then emit one
// describing the assembler source file itself.
if (getContext().getGenDwarfFileNumber() == 0)
getContext().setGenDwarfFileNumber(getStreamer().EmitDwarfFileDirective(
0, StringRef(), getContext().getMainFileName()));
return true;
}
bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
// Create the initial section, if requested.
if (!NoInitialTextSection)
Out.InitSections(false);
// Prime the lexer.
Lex();
HadError = false;
AsmCond StartingCondState = TheCondState;
SmallVector<AsmRewrite, 4> AsmStrRewrites;
// If we are generating dwarf for assembly source files save the initial text
// section. (Don't use enabledGenDwarfForAssembly() here, as we aren't
// emitting any actual debug info yet and haven't had a chance to parse any
// embedded .file directives.)
if (getContext().getGenDwarfForAssembly()) {
MCSection *Sec = getStreamer().getCurrentSectionOnly();
if (!Sec->getBeginSymbol()) {
MCSymbol *SectionStartSym = getContext().createTempSymbol();
getStreamer().EmitLabel(SectionStartSym);
Sec->setBeginSymbol(SectionStartSym);
}
bool InsertResult = getContext().addGenDwarfSection(Sec);
assert(InsertResult && ".text section should not have debug info yet");
(void)InsertResult;
}
// While we have input, parse each statement.
while (Lexer.isNot(AsmToken::Eof)) {
ParseStatementInfo Info(&AsmStrRewrites);
if (!parseStatement(Info, nullptr))
continue;
// If we have a Lexer Error we are on an Error Token. Load in Lexer Error
// for printing ErrMsg via Lex() only if no (presumably better) parser error
// exists.
if (!hasPendingError() && Lexer.getTok().is(AsmToken::Error)) {
Lex();
}
// parseStatement returned true so may need to emit an error.
printPendingErrors();
// Skipping to the next line if needed.
if (!getLexer().isAtStartOfStatement())
eatToEndOfStatement();
}
getTargetParser().onEndOfFile();
printPendingErrors();
// All errors should have been emitted.
assert(!hasPendingError() && "unexpected error from parseStatement");
getTargetParser().flushPendingInstructions(getStreamer());
if (TheCondState.TheCond != StartingCondState.TheCond ||
TheCondState.Ignore != StartingCondState.Ignore)
printError(getTok().getLoc(), "unmatched .ifs or .elses");
// Check to see there are no empty DwarfFile slots.
const auto &LineTables = getContext().getMCDwarfLineTables();
if (!LineTables.empty()) {
unsigned Index = 0;
for (const auto &File : LineTables.begin()->second.getMCDwarfFiles()) {
if (File.Name.empty() && Index != 0)
printError(getTok().getLoc(), "unassigned file number: " +
Twine(Index) +
" for .file directives");
++Index;
}
}
// Check to see that all assembler local symbols were actually defined.
// Targets that don't do subsections via symbols may not want this, though,
// so conservatively exclude them. Only do this if we're finalizing, though,
// as otherwise we won't necessarily have seen everything yet.
if (!NoFinalize) {
if (MAI.hasSubsectionsViaSymbols()) {
for (const auto &TableEntry : getContext().getSymbols()) {
MCSymbol *Sym = TableEntry.getValue();
// Variable symbols may not be marked as defined, so check those
// explicitly. If we know it's a variable, we have a definition for
// the purposes of this check.
if (Sym->isTemporary() && !Sym->isVariable() && !Sym->isDefined())
// FIXME: We would really like to refer back to where the symbol was
// first referenced for a source location. We need to add something
// to track that. Currently, we just point to the end of the file.
printError(getTok().getLoc(), "assembler local symbol '" +
Sym->getName() + "' not defined");
}
}
// Temporary symbols like the ones for directional jumps don't go in the
// symbol table. They also need to be diagnosed in all (final) cases.
for (std::tuple<SMLoc, CppHashInfoTy, MCSymbol *> &LocSym : DirLabels) {
if (std::get<2>(LocSym)->isUndefined()) {
// Reset the state of any "# line file" directives we've seen to the
// context as it was at the diagnostic site.
CppHashInfo = std::get<1>(LocSym);
printError(std::get<0>(LocSym), "directional label undefined");
}
}
}
// Finalize the output stream if there are no errors and if the client wants
// us to.
if (!HadError && !NoFinalize)
Out.Finish();
return HadError || getContext().hadError();
}
bool AsmParser::checkForValidSection() {
if (!ParsingInlineAsm && !getStreamer().getCurrentSectionOnly()) {
Out.InitSections(false);
return Error(getTok().getLoc(),
"expected section directive before assembly directive");
}
return false;
}
/// Throw away the rest of the line for testing purposes.
void AsmParser::eatToEndOfStatement() {
while (Lexer.isNot(AsmToken::EndOfStatement) && Lexer.isNot(AsmToken::Eof))
Lexer.Lex();
// Eat EOL.
if (Lexer.is(AsmToken::EndOfStatement))
Lexer.Lex();
}
StringRef AsmParser::parseStringToEndOfStatement() {
const char *Start = getTok().getLoc().getPointer();
while (Lexer.isNot(AsmToken::EndOfStatement) && Lexer.isNot(AsmToken::Eof))
Lexer.Lex();
const char *End = getTok().getLoc().getPointer();
return StringRef(Start, End - Start);
}
StringRef AsmParser::parseStringToComma() {
const char *Start = getTok().getLoc().getPointer();
while (Lexer.isNot(AsmToken::EndOfStatement) &&
Lexer.isNot(AsmToken::Comma) && Lexer.isNot(AsmToken::Eof))
Lexer.Lex();
const char *End = getTok().getLoc().getPointer();
return StringRef(Start, End - Start);
}
/// Parse a paren expression and return it.
/// NOTE: This assumes the leading '(' has already been consumed.
///
/// parenexpr ::= expr)
///
bool AsmParser::parseParenExpr(const MCExpr *&Res, SMLoc &EndLoc) {
if (parseExpression(Res))
return true;
if (Lexer.isNot(AsmToken::RParen))
return TokError("expected ')' in parentheses expression");
EndLoc = Lexer.getTok().getEndLoc();
Lex();
return false;
}
/// Parse a bracket expression and return it.
/// NOTE: This assumes the leading '[' has already been consumed.
///
/// bracketexpr ::= expr]
///
bool AsmParser::parseBracketExpr(const MCExpr *&Res, SMLoc &EndLoc) {
if (parseExpression(Res))
return true;
EndLoc = getTok().getEndLoc();
if (parseToken(AsmToken::RBrac, "expected ']' in brackets expression"))
return true;
return false;
}
/// Parse a primary expression and return it.
/// primaryexpr ::= (parenexpr
/// primaryexpr ::= symbol
/// primaryexpr ::= number
/// primaryexpr ::= '.'
/// primaryexpr ::= ~,+,- primaryexpr
bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
SMLoc FirstTokenLoc = getLexer().getLoc();
AsmToken::TokenKind FirstTokenKind = Lexer.getKind();
switch (FirstTokenKind) {
default:
return TokError("unknown token in expression");
// If we have an error assume that we've already handled it.
case AsmToken::Error:
return true;
case AsmToken::Exclaim:
Lex(); // Eat the operator.
if (parsePrimaryExpr(Res, EndLoc))
return true;
Res = MCUnaryExpr::createLNot(Res, getContext(), FirstTokenLoc);
return false;
case AsmToken::Dollar:
case AsmToken::At:
case AsmToken::String:
case AsmToken::Identifier: {
StringRef Identifier;
if (parseIdentifier(Identifier)) {
// We may have failed but $ may be a valid token.
if (getTok().is(AsmToken::Dollar)) {
if (Lexer.getMAI().getDollarIsPC()) {
Lex();
// This is a '$' reference, which references the current PC. Emit a
// temporary label to the streamer and refer to it.
MCSymbol *Sym = Ctx.createTempSymbol();
Out.EmitLabel(Sym);
Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None,
getContext());
EndLoc = FirstTokenLoc;
return false;
}
return Error(FirstTokenLoc, "invalid token in expression");
}
}
// Parse symbol variant
std::pair<StringRef, StringRef> Split;
if (!MAI.useParensForSymbolVariant()) {
if (FirstTokenKind == AsmToken::String) {
if (Lexer.is(AsmToken::At)) {
Lex(); // eat @
SMLoc AtLoc = getLexer().getLoc();
StringRef VName;
if (parseIdentifier(VName))
return Error(AtLoc, "expected symbol variant after '@'");
Split = std::make_pair(Identifier, VName);
}
} else {
Split = Identifier.split('@');
}
} else if (Lexer.is(AsmToken::LParen)) {
Lex(); // eat '('.
StringRef VName;
parseIdentifier(VName);
// eat ')'.
if (parseToken(AsmToken::RParen,
"unexpected token in variant, expected ')'"))
return true;
Split = std::make_pair(Identifier, VName);
}
EndLoc = SMLoc::getFromPointer(Identifier.end());
// This is a symbol reference.
StringRef SymbolName = Identifier;
if (SymbolName.empty())
return Error(getLexer().getLoc(), "expected a symbol reference");
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
// Lookup the symbol variant if used.
if (!Split.second.empty()) {
Variant = MCSymbolRefExpr::getVariantKindForName(Split.second);
if (Variant != MCSymbolRefExpr::VK_Invalid) {
SymbolName = Split.first;
} else if (MAI.doesAllowAtInName() && !MAI.useParensForSymbolVariant()) {
Variant = MCSymbolRefExpr::VK_None;
} else {
return Error(SMLoc::getFromPointer(Split.second.begin()),
"invalid variant '" + Split.second + "'");
}
}
MCSymbol *Sym = getContext().getOrCreateSymbol(SymbolName);
// If this is an absolute variable reference, substitute it now to preserve
// semantics in the face of reassignment.
if (Sym->isVariable()) {
auto V = Sym->getVariableValue(/*SetUsed*/ false);
bool DoInline = isa<MCConstantExpr>(V) && !Variant;
if (auto TV = dyn_cast<MCTargetExpr>(V))
DoInline = TV->inlineAssignedExpr();
if (DoInline) {
if (Variant)
return Error(EndLoc, "unexpected modifier on variable reference");
Res = Sym->getVariableValue(/*SetUsed*/ false);
return false;
}
}
// Otherwise create a symbol ref.
Res = MCSymbolRefExpr::create(Sym, Variant, getContext(), FirstTokenLoc);
return false;
}
case AsmToken::BigNum:
return TokError("literal value out of range for directive");
case AsmToken::Integer: {
SMLoc Loc = getTok().getLoc();
int64_t IntVal = getTok().getIntVal();
Res = MCConstantExpr::create(IntVal, getContext());
EndLoc = Lexer.getTok().getEndLoc();
Lex(); // Eat token.
// Look for 'b' or 'f' following an Integer as a directional label
if (Lexer.getKind() == AsmToken::Identifier) {
StringRef IDVal = getTok().getString();
// Lookup the symbol variant if used.
std::pair<StringRef, StringRef> Split = IDVal.split('@');
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
if (Split.first.size() != IDVal.size()) {
Variant = MCSymbolRefExpr::getVariantKindForName(Split.second);
if (Variant == MCSymbolRefExpr::VK_Invalid)
return TokError("invalid variant '" + Split.second + "'");
IDVal = Split.first;
}
if (IDVal == "f" || IDVal == "b") {
MCSymbol *Sym =
Ctx.getDirectionalLocalSymbol(IntVal, IDVal == "b");
Res = MCSymbolRefExpr::create(Sym, Variant, getContext());
if (IDVal == "b" && Sym->isUndefined())
return Error(Loc, "directional label undefined");
DirLabels.push_back(std::make_tuple(Loc, CppHashInfo, Sym));
EndLoc = Lexer.getTok().getEndLoc();
Lex(); // Eat identifier.
}
}
return false;
}
case AsmToken::Real: {
APFloat RealVal(APFloat::IEEEdouble(), getTok().getString());
uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
Res = MCConstantExpr::create(IntVal, getContext());
EndLoc = Lexer.getTok().getEndLoc();
Lex(); // Eat token.
return false;
}
case AsmToken::Dot: {
// This is a '.' reference, which references the current PC. Emit a
// temporary label to the streamer and refer to it.
MCSymbol *Sym = Ctx.createTempSymbol();
Out.EmitLabel(Sym);
Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
EndLoc = Lexer.getTok().getEndLoc();
Lex(); // Eat identifier.
return false;
}
case AsmToken::LParen:
Lex(); // Eat the '('.
return parseParenExpr(Res, EndLoc);
case AsmToken::LBrac:
if (!PlatformParser->HasBracketExpressions())
return TokError("brackets expression not supported on this target");
Lex(); // Eat the '['.
return parseBracketExpr(Res, EndLoc);
case AsmToken::Minus:
Lex(); // Eat the operator.
if (parsePrimaryExpr(Res, EndLoc))
return true;
Res = MCUnaryExpr::createMinus(Res, getContext(), FirstTokenLoc);
return false;
case AsmToken::Plus:
Lex(); // Eat the operator.
if (parsePrimaryExpr(Res, EndLoc))
return true;
Res = MCUnaryExpr::createPlus(Res, getContext(), FirstTokenLoc);
return false;
case AsmToken::Tilde:
Lex(); // Eat the operator.
if (parsePrimaryExpr(Res, EndLoc))
return true;
Res = MCUnaryExpr::createNot(Res, getContext(), FirstTokenLoc);
return false;
// MIPS unary expression operators. The lexer won't generate these tokens if
// MCAsmInfo::HasMipsExpressions is false for the target.
case AsmToken::PercentCall16:
case AsmToken::PercentCall_Hi:
case AsmToken::PercentCall_Lo:
case AsmToken::PercentDtprel_Hi:
case AsmToken::PercentDtprel_Lo:
case AsmToken::PercentGot:
case AsmToken::PercentGot_Disp:
case AsmToken::PercentGot_Hi:
case AsmToken::PercentGot_Lo:
case AsmToken::PercentGot_Ofst:
case AsmToken::PercentGot_Page:
case AsmToken::PercentGottprel:
case AsmToken::PercentGp_Rel:
case AsmToken::PercentHi:
case AsmToken::PercentHigher:
case AsmToken::PercentHighest:
case AsmToken::PercentLo:
case AsmToken::PercentNeg:
case AsmToken::PercentPcrel_Hi:
case AsmToken::PercentPcrel_Lo:
case AsmToken::PercentTlsgd:
case AsmToken::PercentTlsldm:
case AsmToken::PercentTprel_Hi:
case AsmToken::PercentTprel_Lo:
Lex(); // Eat the operator.
if (Lexer.isNot(AsmToken::LParen))
return TokError("expected '(' after operator");
Lex(); // Eat the operator.
if (parseExpression(Res, EndLoc))
return true;
if (Lexer.isNot(AsmToken::RParen))
return TokError("expected ')'");
Lex(); // Eat the operator.
Res = getTargetParser().createTargetUnaryExpr(Res, FirstTokenKind, Ctx);
return !Res;
}
}
bool AsmParser::parseExpression(const MCExpr *&Res) {
SMLoc EndLoc;
return parseExpression(Res, EndLoc);
}
const MCExpr *
AsmParser::applyModifierToExpr(const MCExpr *E,
MCSymbolRefExpr::VariantKind Variant) {
// Ask the target implementation about this expression first.
const MCExpr *NewE = getTargetParser().applyModifierToExpr(E, Variant, Ctx);
if (NewE)
return NewE;
// Recurse over the given expression, rebuilding it to apply the given variant
// if there is exactly one symbol.
switch (E->getKind()) {
case MCExpr::Target:
case MCExpr::Constant:
return nullptr;
case MCExpr::SymbolRef: {
const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E);
if (SRE->getKind() != MCSymbolRefExpr::VK_None) {
TokError("invalid variant on expression '" + getTok().getIdentifier() +
"' (already modified)");
return E;
}
return MCSymbolRefExpr::create(&SRE->getSymbol(), Variant, getContext());
}
case MCExpr::Unary: {
const MCUnaryExpr *UE = cast<MCUnaryExpr>(E);
const MCExpr *Sub = applyModifierToExpr(UE->getSubExpr(), Variant);
if (!Sub)
return nullptr;
return MCUnaryExpr::create(UE->getOpcode(), Sub, getContext());
}
case MCExpr::Binary: {
const MCBinaryExpr *BE = cast<MCBinaryExpr>(E);
const MCExpr *LHS = applyModifierToExpr(BE->getLHS(), Variant);
const MCExpr *RHS = applyModifierToExpr(BE->getRHS(), Variant);
if (!LHS && !RHS)
return nullptr;
if (!LHS)
LHS = BE->getLHS();
if (!RHS)
RHS = BE->getRHS();
return MCBinaryExpr::create(BE->getOpcode(), LHS, RHS, getContext());
}
}
llvm_unreachable("Invalid expression kind!");
}
/// This function checks whether the next token is of <string> type or is
/// arithmetic. A string that begins with the character '<' must end with the
/// character '>'; otherwise it is treated as arithmetic.
/// If the function returns true, the EndLoc argument is set to the location
/// just past the '>' character.
/// There is a gap between the AltMacro's documentation and the single quote
/// implementation. GCC does not fully support this feature and so we will not
/// support it.
/// TODO: Add support for single quotes as string delimiters.
static bool isAltmacroString(SMLoc &StrLoc, SMLoc &EndLoc) {
assert((StrLoc.getPointer() != nullptr) &&
"Argument to the function cannot be a NULL value");
const char *CharPtr = StrLoc.getPointer();
while ((*CharPtr != '>') && (*CharPtr != '\n') && (*CharPtr != '\r') &&
(*CharPtr != '\0')) {
if (*CharPtr == '!')
CharPtr++;
CharPtr++;
}
if (*CharPtr == '>') {
EndLoc = StrLoc.getFromPointer(CharPtr + 1);
return true;
}
return false;
}
/// Creates a copy of the given string with the escape character '!' removed.
static std::string altMacroString(StringRef AltMacroStr) {
std::string Res;
for (size_t Pos = 0; Pos < AltMacroStr.size(); Pos++) {
if (AltMacroStr[Pos] == '!')
Pos++;
Res += AltMacroStr[Pos];
}
return Res;
}
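// Illustrative example of the two helpers above (gas-style .altmacro
// syntax assumed): isAltmacroString() accepts an argument such as
// <foo!>bar> because '!' escapes the following '>', and altMacroString()
// then strips the escapes from the string contents:
//   raw argument:    <foo!>bar>
//   string contents: foo!>bar
//   after stripping: foo>bar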
/// Parse an expression and return it.
///
/// expr ::= expr &&,|| expr -> lowest.
/// expr ::= expr |,^,&,! expr
/// expr ::= expr ==,!=,<>,<,<=,>,>= expr
/// expr ::= expr <<,>> expr
/// expr ::= expr +,- expr
/// expr ::= expr *,/,% expr -> highest.
/// expr ::= primaryexpr
///
bool AsmParser::parseExpression(const MCExpr *&Res, SMLoc &EndLoc) {
// Parse the expression.
Res = nullptr;
if (getTargetParser().parsePrimaryExpr(Res, EndLoc) ||
parseBinOpRHS(1, Res, EndLoc))
return true;
// As a special case, we support 'a op b @ modifier' by rewriting the
// expression to include the modifier. This is inefficient, but in general we
// expect users to use 'a@modifier op b'.
if (Lexer.getKind() == AsmToken::At) {
Lex();
if (Lexer.isNot(AsmToken::Identifier))
return TokError("unexpected symbol modifier following '@'");
MCSymbolRefExpr::VariantKind Variant =
MCSymbolRefExpr::getVariantKindForName(getTok().getIdentifier());
if (Variant == MCSymbolRefExpr::VK_Invalid)
return TokError("invalid variant '" + getTok().getIdentifier() + "'");
const MCExpr *ModifiedRes = applyModifierToExpr(Res, Variant);
if (!ModifiedRes) {
return TokError("invalid modifier '" + getTok().getIdentifier() +
"' (no symbols present)");
}
Res = ModifiedRes;
Lex();
}
// Try to constant fold it up front, if possible. Do not consult the
// assembler here.
int64_t Value;
if (Res->evaluateAsAbsolute(Value))
Res = MCConstantExpr::create(Value, getContext());
return false;
}
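// For example, with the special case above an expression written as
//   foo + 4@GOTOFF
// is rewritten so that the modifier is applied to the single symbol, i.e.
// it is parsed as foo@GOTOFF + 4. The set of accepted variant names (such
// as GOTOFF here) is target dependent; this is only an illustration.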
bool AsmParser::parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc) {
Res = nullptr;
return parseParenExpr(Res, EndLoc) || parseBinOpRHS(1, Res, EndLoc);
}
bool AsmParser::parseParenExprOfDepth(unsigned ParenDepth, const MCExpr *&Res,
SMLoc &EndLoc) {
if (parseParenExpr(Res, EndLoc))
return true;
for (; ParenDepth > 0; --ParenDepth) {
if (parseBinOpRHS(1, Res, EndLoc))
return true;
// We don't Lex() the last RParen.
// This is the same behavior as parseParenExpression().
if (ParenDepth - 1 > 0) {
EndLoc = getTok().getEndLoc();
if (parseToken(AsmToken::RParen,
"expected ')' in parentheses expression"))
return true;
}
}
return false;
}
bool AsmParser::parseAbsoluteExpression(int64_t &Res) {
const MCExpr *Expr;
SMLoc StartLoc = Lexer.getLoc();
if (parseExpression(Expr))
return true;
if (!Expr->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
return Error(StartLoc, "expected absolute expression");
return false;
}
static unsigned getDarwinBinOpPrecedence(AsmToken::TokenKind K,
MCBinaryExpr::Opcode &Kind,
bool ShouldUseLogicalShr) {
switch (K) {
default:
return 0; // not a binop.
// Lowest Precedence: &&, ||
case AsmToken::AmpAmp:
Kind = MCBinaryExpr::LAnd;
return 1;
case AsmToken::PipePipe:
Kind = MCBinaryExpr::LOr;
return 1;
// Low Precedence: |, &, ^
//
// FIXME: gas seems to support '!' as an infix operator?
case AsmToken::Pipe:
Kind = MCBinaryExpr::Or;
return 2;
case AsmToken::Caret:
Kind = MCBinaryExpr::Xor;
return 2;
case AsmToken::Amp:
Kind = MCBinaryExpr::And;
return 2;
// Low Intermediate Precedence: ==, !=, <>, <, <=, >, >=
case AsmToken::EqualEqual:
Kind = MCBinaryExpr::EQ;
return 3;
case AsmToken::ExclaimEqual:
case AsmToken::LessGreater:
Kind = MCBinaryExpr::NE;
return 3;
case AsmToken::Less:
Kind = MCBinaryExpr::LT;
return 3;
case AsmToken::LessEqual:
Kind = MCBinaryExpr::LTE;
return 3;
case AsmToken::Greater:
Kind = MCBinaryExpr::GT;
return 3;
case AsmToken::GreaterEqual:
Kind = MCBinaryExpr::GTE;
return 3;
// Intermediate Precedence: <<, >>
case AsmToken::LessLess:
Kind = MCBinaryExpr::Shl;
return 4;
case AsmToken::GreaterGreater:
Kind = ShouldUseLogicalShr ? MCBinaryExpr::LShr : MCBinaryExpr::AShr;
return 4;
// High Intermediate Precedence: +, -
case AsmToken::Plus:
Kind = MCBinaryExpr::Add;
return 5;
case AsmToken::Minus:
Kind = MCBinaryExpr::Sub;
return 5;
// Highest Precedence: *, /, %
case AsmToken::Star:
Kind = MCBinaryExpr::Mul;
return 6;
case AsmToken::Slash:
Kind = MCBinaryExpr::Div;
return 6;
case AsmToken::Percent:
Kind = MCBinaryExpr::Mod;
return 6;
}
}
static unsigned getGNUBinOpPrecedence(AsmToken::TokenKind K,
MCBinaryExpr::Opcode &Kind,
bool ShouldUseLogicalShr) {
switch (K) {
default:
return 0; // not a binop.
// Lowest Precedence: &&, ||
case AsmToken::AmpAmp:
Kind = MCBinaryExpr::LAnd;
return 2;
case AsmToken::PipePipe:
Kind = MCBinaryExpr::LOr;
return 1;
// Low Precedence: ==, !=, <>, <, <=, >, >=
case AsmToken::EqualEqual:
Kind = MCBinaryExpr::EQ;
return 3;
case AsmToken::ExclaimEqual:
case AsmToken::LessGreater:
Kind = MCBinaryExpr::NE;
return 3;
case AsmToken::Less:
Kind = MCBinaryExpr::LT;
return 3;
case AsmToken::LessEqual:
Kind = MCBinaryExpr::LTE;
return 3;
case AsmToken::Greater:
Kind = MCBinaryExpr::GT;
return 3;
case AsmToken::GreaterEqual:
Kind = MCBinaryExpr::GTE;
return 3;
// Low Intermediate Precedence: +, -
case AsmToken::Plus:
Kind = MCBinaryExpr::Add;
return 4;
case AsmToken::Minus:
Kind = MCBinaryExpr::Sub;
return 4;
// High Intermediate Precedence: |, &, ^
//
// FIXME: gas seems to support '!' as an infix operator?
case AsmToken::Pipe:
Kind = MCBinaryExpr::Or;
return 5;
case AsmToken::Caret:
Kind = MCBinaryExpr::Xor;
return 5;
case AsmToken::Amp:
Kind = MCBinaryExpr::And;
return 5;
// Highest Precedence: *, /, %, <<, >>
case AsmToken::Star:
Kind = MCBinaryExpr::Mul;
return 6;
case AsmToken::Slash:
Kind = MCBinaryExpr::Div;
return 6;
case AsmToken::Percent:
Kind = MCBinaryExpr::Mod;
return 6;
case AsmToken::LessLess:
Kind = MCBinaryExpr::Shl;
return 6;
case AsmToken::GreaterGreater:
Kind = ShouldUseLogicalShr ? MCBinaryExpr::LShr : MCBinaryExpr::AShr;
return 6;
}
}
unsigned AsmParser::getBinOpPrecedence(AsmToken::TokenKind K,
MCBinaryExpr::Opcode &Kind) {
bool ShouldUseLogicalShr = MAI.shouldUseLogicalShr();
return IsDarwin ? getDarwinBinOpPrecedence(K, Kind, ShouldUseLogicalShr)
: getGNUBinOpPrecedence(K, Kind, ShouldUseLogicalShr);
}
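// Illustrative difference between the two tables above: shifts bind looser
// than '+' under the Darwin precedence but tighter under the GNU (gas)
// precedence, so the same source folds differently:
//   .set x, 2 + 3 << 1    # GNU precedence:    2 + (3 << 1) == 8
//   .set x, 2 + 3 << 1    # Darwin precedence: (2 + 3) << 1 == 10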
/// Parse all binary operators with precedence >= 'Precedence'.
/// Res contains the LHS of the expression on input.
bool AsmParser::parseBinOpRHS(unsigned Precedence, const MCExpr *&Res,
SMLoc &EndLoc) {
SMLoc StartLoc = Lexer.getLoc();
while (true) {
MCBinaryExpr::Opcode Kind = MCBinaryExpr::Add;
unsigned TokPrec = getBinOpPrecedence(Lexer.getKind(), Kind);
// If the next token is lower precedence than we are allowed to eat, return
// successfully with what we ate already.
if (TokPrec < Precedence)
return false;
Lex();
// Eat the next primary expression.
const MCExpr *RHS;
if (getTargetParser().parsePrimaryExpr(RHS, EndLoc))
return true;
// If BinOp binds less tightly with RHS than the operator after RHS, let
// the pending operator take RHS as its LHS.
MCBinaryExpr::Opcode Dummy;
unsigned NextTokPrec = getBinOpPrecedence(Lexer.getKind(), Dummy);
if (TokPrec < NextTokPrec && parseBinOpRHS(TokPrec + 1, RHS, EndLoc))
return true;
// Merge LHS and RHS according to operator.
Res = MCBinaryExpr::create(Kind, Res, RHS, getContext(), StartLoc);
}
}
/// ParseStatement:
/// ::= EndOfStatement
/// ::= Label* Directive ...Operands... EndOfStatement
/// ::= Label* Identifier OperandList* EndOfStatement
bool AsmParser::parseStatement(ParseStatementInfo &Info,
MCAsmParserSemaCallback *SI) {
assert(!hasPendingError() && "parseStatement started with pending error");
// Eat initial spaces and comments
while (Lexer.is(AsmToken::Space))
Lex();
if (Lexer.is(AsmToken::EndOfStatement)) {
// if this is a line comment we can drop it safely
if (getTok().getString().empty() || getTok().getString().front() == '\r' ||
getTok().getString().front() == '\n')
Out.AddBlankLine();
Lex();
return false;
}
// Statements always start with an identifier.
AsmToken ID = getTok();
SMLoc IDLoc = ID.getLoc();
StringRef IDVal;
int64_t LocalLabelVal = -1;
if (Lexer.is(AsmToken::HashDirective))
return parseCppHashLineFilenameComment(IDLoc);
// Allow an integer followed by a ':' as a directional local label.
if (Lexer.is(AsmToken::Integer)) {
LocalLabelVal = getTok().getIntVal();
if (LocalLabelVal < 0) {
if (!TheCondState.Ignore) {
Lex(); // always eat a token
return Error(IDLoc, "unexpected token at start of statement");
}
IDVal = "";
} else {
IDVal = getTok().getString();
Lex(); // Consume the integer token to be used as an identifier token.
if (Lexer.getKind() != AsmToken::Colon) {
if (!TheCondState.Ignore) {
Lex(); // always eat a token
return Error(IDLoc, "unexpected token at start of statement");
}
}
}
} else if (Lexer.is(AsmToken::Dot)) {
// Treat '.' as a valid identifier in this context.
Lex();
IDVal = ".";
} else if (Lexer.is(AsmToken::LCurly)) {
// Treat '{' as a valid identifier in this context.
Lex();
IDVal = "{";
} else if (Lexer.is(AsmToken::RCurly)) {
// Treat '}' as a valid identifier in this context.
Lex();
IDVal = "}";
} else if (Lexer.is(AsmToken::Star) &&
getTargetParser().starIsStartOfStatement()) {
// Accept '*' as a valid start of statement.
Lex();
IDVal = "*";
} else if (parseIdentifier(IDVal)) {
if (!TheCondState.Ignore) {
Lex(); // always eat a token
return Error(IDLoc, "unexpected token at start of statement");
}
IDVal = "";
}
// Handle conditional assembly here before checking for skipping. We
// have to do this so that .endif isn't skipped in a ".if 0" block for
// example.
StringMap<DirectiveKind>::const_iterator DirKindIt =
DirectiveKindMap.find(IDVal);
DirectiveKind DirKind = (DirKindIt == DirectiveKindMap.end())
? DK_NO_DIRECTIVE
: DirKindIt->getValue();
switch (DirKind) {
default:
break;
case DK_IF:
case DK_IFEQ:
case DK_IFGE:
case DK_IFGT:
case DK_IFLE:
case DK_IFLT:
case DK_IFNE:
return parseDirectiveIf(IDLoc, DirKind);
case DK_IFB:
return parseDirectiveIfb(IDLoc, true);
case DK_IFNB:
return parseDirectiveIfb(IDLoc, false);
case DK_IFC:
return parseDirectiveIfc(IDLoc, true);
case DK_IFEQS:
return parseDirectiveIfeqs(IDLoc, true);
case DK_IFNC:
return parseDirectiveIfc(IDLoc, false);
case DK_IFNES:
return parseDirectiveIfeqs(IDLoc, false);
case DK_IFDEF:
return parseDirectiveIfdef(IDLoc, true);
case DK_IFNDEF:
case DK_IFNOTDEF:
return parseDirectiveIfdef(IDLoc, false);
case DK_ELSEIF:
return parseDirectiveElseIf(IDLoc);
case DK_ELSE:
return parseDirectiveElse(IDLoc);
case DK_ENDIF:
return parseDirectiveEndIf(IDLoc);
}
// Ignore the statement if in the middle of inactive conditional
// (e.g. ".if 0").
if (TheCondState.Ignore) {
eatToEndOfStatement();
return false;
}
// FIXME: Recurse on local labels?
// See what kind of statement we have.
switch (Lexer.getKind()) {
case AsmToken::Colon: {
if (!getTargetParser().isLabel(ID))
break;
if (checkForValidSection())
return true;
// identifier ':' -> Label.
Lex();
// Diagnose attempt to use '.' as a label.
if (IDVal == ".")
return Error(IDLoc, "invalid use of pseudo-symbol '.' as a label");
// Diagnose attempt to use a variable as a label.
//
// FIXME: Diagnostics. Note the location of the definition as a label.
// FIXME: This doesn't diagnose assignment to a symbol which has been
// implicitly marked as external.
MCSymbol *Sym;
if (LocalLabelVal == -1) {
if (ParsingInlineAsm && SI) {
StringRef RewrittenLabel =
SI->LookupInlineAsmLabel(IDVal, getSourceManager(), IDLoc, true);
assert(!RewrittenLabel.empty() &&
"We should have an internal name here.");
Info.AsmRewrites->emplace_back(AOK_Label, IDLoc, IDVal.size(),
RewrittenLabel);
IDVal = RewrittenLabel;
}
Sym = getContext().getOrCreateSymbol(IDVal);
} else
Sym = Ctx.createDirectionalLocalSymbol(LocalLabelVal);
// The end of a label should be treated as an end of line for lexing
// purposes, but that information is not available to the lexer, which
// does not understand labels. This may cause us to see a Hash token
// here instead of a preprocessor line comment.
if (getTok().is(AsmToken::Hash)) {
StringRef CommentStr = parseStringToEndOfStatement();
Lexer.Lex();
Lexer.UnLex(AsmToken(AsmToken::EndOfStatement, CommentStr));
}
// Consume any end of statement token, if present, to avoid spurious
// AddBlankLine() calls.
if (getTok().is(AsmToken::EndOfStatement)) {
Lex();
}
getTargetParser().doBeforeLabelEmit(Sym);
// Emit the label.
if (!getTargetParser().isParsingInlineAsm())
Out.EmitLabel(Sym, IDLoc);
// If we are generating dwarf for assembly source files then gather the
// info to make a dwarf label entry for this label if needed.
if (enabledGenDwarfForAssembly())
MCGenDwarfLabelEntry::Make(Sym, &getStreamer(), getSourceManager(),
IDLoc);
getTargetParser().onLabelParsed(Sym);
return false;
}
case AsmToken::Equal:
if (!getTargetParser().equalIsAsmAssignment())
break;
// identifier '=' ... -> assignment statement
Lex();
return parseAssignment(IDVal, true);
default: // Normal instruction or directive.
break;
}
// If macros are enabled, check to see if this is a macro instantiation.
if (areMacrosEnabled())
if (const MCAsmMacro *M = getContext().lookupMacro(IDVal)) {
return handleMacroEntry(M, IDLoc);
}
// Otherwise, we have a normal instruction or directive.
// Directives start with "."
if (IDVal.startswith(".") && IDVal != ".") {
// There are several entities interested in parsing directives:
//
// 1. The target-specific assembly parser. Some directives are target
// specific or may potentially behave differently on certain targets.
// 2. Asm parser extensions. For example, platform-specific parsers
// (like the ELF parser) register themselves as extensions.
// 3. The generic directive parser implemented by this class. These are
// all the directives that behave in a target and platform independent
// manner, or at least have a default behavior that's shared between
// all targets and platforms.
getTargetParser().flushPendingInstructions(getStreamer());
SMLoc StartTokLoc = getTok().getLoc();
bool TPDirectiveReturn = getTargetParser().ParseDirective(ID);
if (hasPendingError())
return true;
// Currently the return value should be true if we are
// uninterested, but as this is at odds with the standard parsing
// convention (return true == error), there are parsed directives
// that fail and return true as an error. Catch these cases as
// well as possible and report them as errors here.
if (TPDirectiveReturn && StartTokLoc != getTok().getLoc())
return true;
// Return if we did some parsing or believe we succeeded.
if (!TPDirectiveReturn || StartTokLoc != getTok().getLoc())
return false;
// Next, check the extension directive map to see if any extension has
// registered itself to parse this directive.
std::pair<MCAsmParserExtension *, DirectiveHandler> Handler =
ExtensionDirectiveMap.lookup(IDVal);
if (Handler.first)
return (*Handler.second)(Handler.first, IDVal, IDLoc);
// Finally, if no one else is interested in this directive, it must be
// generic and familiar to this class.
switch (DirKind) {
default:
break;
case DK_SET:
case DK_EQU:
return parseDirectiveSet(IDVal, true);
case DK_EQUIV:
return parseDirectiveSet(IDVal, false);
case DK_ASCII:
return parseDirectiveAscii(IDVal, false);
case DK_ASCIZ:
case DK_STRING:
return parseDirectiveAscii(IDVal, true);
case DK_BYTE:
case DK_DC_B:
return parseDirectiveValue(IDVal, 1);
case DK_DC:
case DK_DC_W:
case DK_SHORT:
case DK_VALUE:
case DK_2BYTE:
return parseDirectiveValue(IDVal, 2);
case DK_LONG:
case DK_INT:
case DK_4BYTE:
case DK_DC_L:
return parseDirectiveValue(IDVal, 4);
case DK_QUAD:
case DK_8BYTE:
return parseDirectiveValue(IDVal, 8);
case DK_DC_A:
return parseDirectiveValue(
IDVal, getContext().getAsmInfo()->getCodePointerSize());
case DK_OCTA:
return parseDirectiveOctaValue(IDVal);
case DK_SINGLE:
case DK_FLOAT:
case DK_DC_S:
return parseDirectiveRealValue(IDVal, APFloat::IEEEsingle());
case DK_DOUBLE:
case DK_DC_D:
return parseDirectiveRealValue(IDVal, APFloat::IEEEdouble());
case DK_ALIGN: {
bool IsPow2 = !getContext().getAsmInfo()->getAlignmentIsInBytes();
return parseDirectiveAlign(IsPow2, /*ExprSize=*/1);
}
case DK_ALIGN32: {
bool IsPow2 = !getContext().getAsmInfo()->getAlignmentIsInBytes();
return parseDirectiveAlign(IsPow2, /*ExprSize=*/4);
}
case DK_BALIGN:
return parseDirectiveAlign(/*IsPow2=*/false, /*ExprSize=*/1);
case DK_BALIGNW:
return parseDirectiveAlign(/*IsPow2=*/false, /*ExprSize=*/2);
case DK_BALIGNL:
return parseDirectiveAlign(/*IsPow2=*/false, /*ExprSize=*/4);
case DK_P2ALIGN:
return parseDirectiveAlign(/*IsPow2=*/true, /*ExprSize=*/1);
case DK_P2ALIGNW:
return parseDirectiveAlign(/*IsPow2=*/true, /*ExprSize=*/2);
case DK_P2ALIGNL:
return parseDirectiveAlign(/*IsPow2=*/true, /*ExprSize=*/4);
case DK_ORG:
return parseDirectiveOrg();
case DK_FILL:
return parseDirectiveFill();
case DK_ZERO:
return parseDirectiveZero();
case DK_EXTERN:
eatToEndOfStatement(); // .extern is the default, ignore it.
return false;
case DK_GLOBL:
case DK_GLOBAL:
return parseDirectiveSymbolAttribute(MCSA_Global);
case DK_LAZY_REFERENCE:
return parseDirectiveSymbolAttribute(MCSA_LazyReference);
case DK_NO_DEAD_STRIP:
return parseDirectiveSymbolAttribute(MCSA_NoDeadStrip);
case DK_SYMBOL_RESOLVER:
return parseDirectiveSymbolAttribute(MCSA_SymbolResolver);
case DK_PRIVATE_EXTERN:
return parseDirectiveSymbolAttribute(MCSA_PrivateExtern);
case DK_REFERENCE:
return parseDirectiveSymbolAttribute(MCSA_Reference);
case DK_WEAK_DEFINITION:
return parseDirectiveSymbolAttribute(MCSA_WeakDefinition);
case DK_WEAK_REFERENCE:
return parseDirectiveSymbolAttribute(MCSA_WeakReference);
case DK_WEAK_DEF_CAN_BE_HIDDEN:
return parseDirectiveSymbolAttribute(MCSA_WeakDefAutoPrivate);
case DK_COMM:
case DK_COMMON:
return parseDirectiveComm(/*IsLocal=*/false);
case DK_LCOMM:
return parseDirectiveComm(/*IsLocal=*/true);
case DK_ABORT:
return parseDirectiveAbort();
case DK_INCLUDE:
return parseDirectiveInclude();
case DK_INCBIN:
return parseDirectiveIncbin();
case DK_CODE16:
case DK_CODE16GCC:
return TokError(Twine(IDVal) +
" not currently supported for this target");
case DK_REPT:
return parseDirectiveRept(IDLoc, IDVal);
case DK_IRP:
return parseDirectiveIrp(IDLoc);
case DK_IRPC:
return parseDirectiveIrpc(IDLoc);
case DK_ENDR:
return parseDirectiveEndr(IDLoc);
case DK_BUNDLE_ALIGN_MODE:
return parseDirectiveBundleAlignMode();
case DK_BUNDLE_LOCK:
return parseDirectiveBundleLock();
case DK_BUNDLE_UNLOCK:
return parseDirectiveBundleUnlock();
case DK_SLEB128:
return parseDirectiveLEB128(true);
case DK_ULEB128:
return parseDirectiveLEB128(false);
case DK_SPACE:
case DK_SKIP:
return parseDirectiveSpace(IDVal);
case DK_FILE:
return parseDirectiveFile(IDLoc);
case DK_LINE:
return parseDirectiveLine();
case DK_LOC:
return parseDirectiveLoc();
case DK_STABS:
return parseDirectiveStabs();
case DK_CV_FILE:
return parseDirectiveCVFile();
case DK_CV_FUNC_ID:
return parseDirectiveCVFuncId();
case DK_CV_INLINE_SITE_ID:
return parseDirectiveCVInlineSiteId();
case DK_CV_LOC:
return parseDirectiveCVLoc();
case DK_CV_LINETABLE:
return parseDirectiveCVLinetable();
case DK_CV_INLINE_LINETABLE:
return parseDirectiveCVInlineLinetable();
case DK_CV_DEF_RANGE:
return parseDirectiveCVDefRange();
case DK_CV_STRING:
return parseDirectiveCVString();
case DK_CV_STRINGTABLE:
return parseDirectiveCVStringTable();
case DK_CV_FILECHECKSUMS:
return parseDirectiveCVFileChecksums();
case DK_CV_FILECHECKSUM_OFFSET:
return parseDirectiveCVFileChecksumOffset();
case DK_CV_FPO_DATA:
return parseDirectiveCVFPOData();
case DK_CFI_SECTIONS:
return parseDirectiveCFISections();
case DK_CFI_STARTPROC:
return parseDirectiveCFIStartProc();
case DK_CFI_ENDPROC:
return parseDirectiveCFIEndProc();
case DK_CFI_DEF_CFA:
return parseDirectiveCFIDefCfa(IDLoc);
case DK_CFI_DEF_CFA_OFFSET:
return parseDirectiveCFIDefCfaOffset();
case DK_CFI_ADJUST_CFA_OFFSET:
return parseDirectiveCFIAdjustCfaOffset();
case DK_CFI_DEF_CFA_REGISTER:
return parseDirectiveCFIDefCfaRegister(IDLoc);
case DK_CFI_OFFSET:
return parseDirectiveCFIOffset(IDLoc);
case DK_CFI_REL_OFFSET:
return parseDirectiveCFIRelOffset(IDLoc);
case DK_CFI_PERSONALITY:
return parseDirectiveCFIPersonalityOrLsda(true);
case DK_CFI_LSDA:
return parseDirectiveCFIPersonalityOrLsda(false);
case DK_CFI_REMEMBER_STATE:
return parseDirectiveCFIRememberState();
case DK_CFI_RESTORE_STATE:
return parseDirectiveCFIRestoreState();
case DK_CFI_SAME_VALUE:
return parseDirectiveCFISameValue(IDLoc);
case DK_CFI_RESTORE:
return parseDirectiveCFIRestore(IDLoc);
case DK_CFI_ESCAPE:
return parseDirectiveCFIEscape();
case DK_CFI_RETURN_COLUMN:
return parseDirectiveCFIReturnColumn(IDLoc);
case DK_CFI_SIGNAL_FRAME:
return parseDirectiveCFISignalFrame();
case DK_CFI_UNDEFINED:
return parseDirectiveCFIUndefined(IDLoc);
case DK_CFI_REGISTER:
return parseDirectiveCFIRegister(IDLoc);
case DK_CFI_WINDOW_SAVE:
return parseDirectiveCFIWindowSave();
case DK_MACROS_ON:
case DK_MACROS_OFF:
return parseDirectiveMacrosOnOff(IDVal);
case DK_MACRO:
return parseDirectiveMacro(IDLoc);
case DK_ALTMACRO:
case DK_NOALTMACRO:
return parseDirectiveAltmacro(IDVal);
case DK_EXITM:
return parseDirectiveExitMacro(IDVal);
case DK_ENDM:
case DK_ENDMACRO:
return parseDirectiveEndMacro(IDVal);
case DK_PURGEM:
return parseDirectivePurgeMacro(IDLoc);
case DK_END:
return parseDirectiveEnd(IDLoc);
case DK_ERR:
return parseDirectiveError(IDLoc, false);
case DK_ERROR:
return parseDirectiveError(IDLoc, true);
case DK_WARNING:
return parseDirectiveWarning(IDLoc);
case DK_RELOC:
return parseDirectiveReloc(IDLoc);
case DK_DCB:
case DK_DCB_W:
return parseDirectiveDCB(IDVal, 2);
case DK_DCB_B:
return parseDirectiveDCB(IDVal, 1);
case DK_DCB_D:
return parseDirectiveRealDCB(IDVal, APFloat::IEEEdouble());
case DK_DCB_L:
return parseDirectiveDCB(IDVal, 4);
case DK_DCB_S:
return parseDirectiveRealDCB(IDVal, APFloat::IEEEsingle());
case DK_DC_X:
case DK_DCB_X:
return TokError(Twine(IDVal) +
" not currently supported for this target");
case DK_DS:
case DK_DS_W:
return parseDirectiveDS(IDVal, 2);
case DK_DS_B:
return parseDirectiveDS(IDVal, 1);
case DK_DS_D:
return parseDirectiveDS(IDVal, 8);
case DK_DS_L:
case DK_DS_S:
return parseDirectiveDS(IDVal, 4);
case DK_DS_P:
case DK_DS_X:
return parseDirectiveDS(IDVal, 12);
case DK_PRINT:
return parseDirectivePrint(IDLoc);
case DK_ADDRSIG:
return parseDirectiveAddrsig();
case DK_ADDRSIG_SYM:
return parseDirectiveAddrsigSym();
}
return Error(IDLoc, "unknown directive");
}
// __asm _emit or __asm __emit
if (ParsingInlineAsm && (IDVal == "_emit" || IDVal == "__emit" ||
IDVal == "_EMIT" || IDVal == "__EMIT"))
return parseDirectiveMSEmit(IDLoc, Info, IDVal.size());
// __asm align
if (ParsingInlineAsm && (IDVal == "align" || IDVal == "ALIGN"))
return parseDirectiveMSAlign(IDLoc, Info);
if (ParsingInlineAsm && (IDVal == "even" || IDVal == "EVEN"))
Info.AsmRewrites->emplace_back(AOK_EVEN, IDLoc, 4);
if (checkForValidSection())
return true;
// Canonicalize the opcode to lower case.
std::string OpcodeStr = IDVal.lower();
ParseInstructionInfo IInfo(Info.AsmRewrites);
bool ParseHadError = getTargetParser().ParseInstruction(IInfo, OpcodeStr, ID,
Info.ParsedOperands);
Info.ParseError = ParseHadError;
// Dump the parsed representation, if requested.
if (getShowParsedOperands()) {
SmallString<256> Str;
raw_svector_ostream OS(Str);
OS << "parsed instruction: [";
for (unsigned i = 0; i != Info.ParsedOperands.size(); ++i) {
if (i != 0)
OS << ", ";
Info.ParsedOperands[i]->print(OS);
}
OS << "]";
printMessage(IDLoc, SourceMgr::DK_Note, OS.str());
}
// Fail even if ParseInstruction erroneously returns false.
if (hasPendingError() || ParseHadError)
return true;
// If we are generating dwarf for the current section then generate a .loc
// directive for the instruction.
if (!ParseHadError && enabledGenDwarfForAssembly() &&
getContext().getGenDwarfSectionSyms().count(
getStreamer().getCurrentSectionOnly())) {
unsigned Line;
if (ActiveMacros.empty())
Line = SrcMgr.FindLineNumber(IDLoc, CurBuffer);
else
Line = SrcMgr.FindLineNumber(ActiveMacros.front()->InstantiationLoc,
ActiveMacros.front()->ExitBuffer);
// If we previously parsed a cpp hash file line comment, then make sure the
// current Dwarf File is for the CppHashFilename; if not, emit the
// Dwarf File table entry for it and adjust the line number for the .loc.
if (!CppHashInfo.Filename.empty()) {
unsigned FileNumber = getStreamer().EmitDwarfFileDirective(
0, StringRef(), CppHashInfo.Filename);
getContext().setGenDwarfFileNumber(FileNumber);
unsigned CppHashLocLineNo =
SrcMgr.FindLineNumber(CppHashInfo.Loc, CppHashInfo.Buf);
Line = CppHashInfo.LineNumber - 1 + (Line - CppHashLocLineNo);
}
getStreamer().EmitDwarfLocDirective(
getContext().getGenDwarfFileNumber(), Line, 0,
DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0, 0, 0,
StringRef());
}
// If parsing succeeded, match the instruction.
if (!ParseHadError) {
uint64_t ErrorInfo;
if (getTargetParser().MatchAndEmitInstruction(
IDLoc, Info.Opcode, Info.ParsedOperands, Out, ErrorInfo,
getTargetParser().isParsingInlineAsm()))
return true;
}
return false;
}
// Parse and erase curly braces marking block start/end
bool
AsmParser::parseCurlyBlockScope(SmallVectorImpl<AsmRewrite> &AsmStrRewrites) {
// Identify curly brace marking block start/end
if (Lexer.isNot(AsmToken::LCurly) && Lexer.isNot(AsmToken::RCurly))
return false;
SMLoc StartLoc = Lexer.getLoc();
Lex(); // Eat the brace
if (Lexer.is(AsmToken::EndOfStatement))
Lex(); // Eat EndOfStatement following the brace
// Erase the block start/end brace from the output asm string
AsmStrRewrites.emplace_back(AOK_Skip, StartLoc, Lexer.getLoc().getPointer() -
StartLoc.getPointer());
return true;
}
/// parseCppHashLineFilenameComment as this:
/// ::= # number "filename"
bool AsmParser::parseCppHashLineFilenameComment(SMLoc L) {
Lex(); // Eat the hash token.
// The lexer only ever emits a HashDirective token if it is fully formed and
// it has already done the checking, so a failure here is an internal error.
assert(getTok().is(AsmToken::Integer) &&
"Lexing Cpp line comment: Expected Integer");
int64_t LineNumber = getTok().getIntVal();
Lex();
assert(getTok().is(AsmToken::String) &&
"Lexing Cpp line comment: Expected String");
StringRef Filename = getTok().getString();
Lex();
// Get rid of the enclosing quotes.
Filename = Filename.substr(1, Filename.size() - 2);
// Save the SMLoc, Filename and LineNumber for later use by diagnostics.
CppHashInfo.Loc = L;
CppHashInfo.Filename = Filename;
CppHashInfo.LineNumber = LineNumber;
CppHashInfo.Buf = CurBuffer;
return false;
}
/// This diagnostic handler will use the last parsed cpp hash line filename
/// comment for the Filename and LineNo, if any, in the diagnostic.
void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
const AsmParser *Parser = static_cast<const AsmParser *>(Context);
raw_ostream &OS = errs();
const SourceMgr &DiagSrcMgr = *Diag.getSourceMgr();
SMLoc DiagLoc = Diag.getLoc();
unsigned DiagBuf = DiagSrcMgr.FindBufferContainingLoc(DiagLoc);
unsigned CppHashBuf =
Parser->SrcMgr.FindBufferContainingLoc(Parser->CppHashInfo.Loc);
// Like SourceMgr::printMessage() we need to print the include stack if any
// before printing the message.
unsigned DiagCurBuffer = DiagSrcMgr.FindBufferContainingLoc(DiagLoc);
if (!Parser->SavedDiagHandler && DiagCurBuffer &&
DiagCurBuffer != DiagSrcMgr.getMainFileID()) {
SMLoc ParentIncludeLoc = DiagSrcMgr.getParentIncludeLoc(DiagCurBuffer);
DiagSrcMgr.PrintIncludeStack(ParentIncludeLoc, OS);
}
// If we have not parsed a cpp hash line filename comment or the source
// manager changed or buffer changed (like in a nested include) then just
// print the normal diagnostic using its Filename and LineNo.
if (!Parser->CppHashInfo.LineNumber || &DiagSrcMgr != &Parser->SrcMgr ||
DiagBuf != CppHashBuf) {
if (Parser->SavedDiagHandler)
Parser->SavedDiagHandler(Diag, Parser->SavedDiagContext);
else
Diag.print(nullptr, OS);
return;
}
// Use the CppHashFilename and calculate a line number based on the
// CppHashInfo.Loc and CppHashInfo.LineNumber relative to this Diag's SMLoc
// for the diagnostic.
const std::string &Filename = Parser->CppHashInfo.Filename;
int DiagLocLineNo = DiagSrcMgr.FindLineNumber(DiagLoc, DiagBuf);
int CppHashLocLineNo =
Parser->SrcMgr.FindLineNumber(Parser->CppHashInfo.Loc, CppHashBuf);
int LineNo =
Parser->CppHashInfo.LineNumber - 1 + (DiagLocLineNo - CppHashLocLineNo);
SMDiagnostic NewDiag(*Diag.getSourceMgr(), Diag.getLoc(), Filename, LineNo,
Diag.getColumnNo(), Diag.getKind(), Diag.getMessage(),
Diag.getLineContents(), Diag.getRanges());
if (Parser->SavedDiagHandler)
Parser->SavedDiagHandler(NewDiag, Parser->SavedDiagContext);
else
NewDiag.print(nullptr, OS);
}
// FIXME: This is mostly duplicated from the function in AsmLexer.cpp. The
// difference being that that function accepts '@' as part of identifiers and
// we can't do that. AsmLexer.cpp should probably be changed to handle
// '@' as a special case when needed.
static bool isIdentifierChar(char c) {
return isalnum(static_cast<unsigned char>(c)) || c == '_' || c == '$' ||
c == '.';
}
bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
ArrayRef<MCAsmMacroParameter> Parameters,
ArrayRef<MCAsmMacroArgument> A,
bool EnableAtPseudoVariable, SMLoc L) {
unsigned NParameters = Parameters.size();
bool HasVararg = NParameters ? Parameters.back().Vararg : false;
if ((!IsDarwin || NParameters != 0) && NParameters != A.size())
return Error(L, "Wrong number of arguments");
// A macro without parameters is handled differently on Darwin:
// gas accepts no arguments and does no substitutions
while (!Body.empty()) {
// Scan for the next substitution.
std::size_t End = Body.size(), Pos = 0;
for (; Pos != End; ++Pos) {
// Check for a substitution or escape.
if (IsDarwin && !NParameters) {
// This macro has no parameters, look for $0, $1, etc.
if (Body[Pos] != '$' || Pos + 1 == End)
continue;
char Next = Body[Pos + 1];
if (Next == '$' || Next == 'n' ||
isdigit(static_cast<unsigned char>(Next)))
break;
} else {
// This macro has parameters, look for \foo, \bar, etc.
if (Body[Pos] == '\\' && Pos + 1 != End)
break;
}
}
// Add the prefix.
OS << Body.slice(0, Pos);
// Check if we reached the end.
if (Pos == End)
break;
if (IsDarwin && !NParameters) {
switch (Body[Pos + 1]) {
// $$ => $
case '$':
OS << '$';
break;
// $n => number of arguments
case 'n':
OS << A.size();
break;
// $[0-9] => argument
default: {
// Missing arguments are ignored.
unsigned Index = Body[Pos + 1] - '0';
if (Index >= A.size())
break;
// Otherwise substitute with the token values, with spaces eliminated.
for (const AsmToken &Token : A[Index])
OS << Token.getString();
break;
}
}
Pos += 2;
} else {
unsigned I = Pos + 1;
// Check for the \@ pseudo-variable.
if (EnableAtPseudoVariable && Body[I] == '@' && I + 1 != End)
++I;
else
while (isIdentifierChar(Body[I]) && I + 1 != End)
++I;
const char *Begin = Body.data() + Pos + 1;
StringRef Argument(Begin, I - (Pos + 1));
unsigned Index = 0;
if (Argument == "@") {
OS << NumOfMacroInstantiations;
Pos += 2;
} else {
for (; Index < NParameters; ++Index)
if (Parameters[Index].Name == Argument)
break;
if (Index == NParameters) {
if (Body[Pos + 1] == '(' && Body[Pos + 2] == ')')
Pos += 3;
else {
OS << '\\' << Argument;
Pos = I;
}
} else {
bool VarargParameter = HasVararg && Index == (NParameters - 1);
for (const AsmToken &Token : A[Index])
// For altmacro mode, you can write '%expr'.
// The prefix '%' evaluates the expression 'expr'
// and uses the result as a string (e.g. replace %(1+2) with the
// string "3").
// Here, we identify the integer token which is the result of the
// absolute expression evaluation and replace it with its string
// representation.
if (AltMacroMode && Token.getString().front() == '%' &&
Token.is(AsmToken::Integer))
// Emit an integer value to the buffer.
OS << Token.getIntVal();
// Only a token that was validated as a string and begins with '<'
// is treated as an altmacro string.
else if (AltMacroMode && Token.getString().front() == '<' &&
Token.is(AsmToken::String)) {
OS << altMacroString(Token.getStringContents());
}
// We expect no quotes around the string's contents when
// parsing for varargs.
else if (Token.isNot(AsmToken::String) || VarargParameter)
OS << Token.getString();
else
OS << Token.getStringContents();
Pos += 1 + Argument.size();
}
}
}
// Update the scan point.
Body = Body.substr(Pos);
}
return false;
}
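// A small worked example of the substitution above (gas-style macro with
// named parameters, non-Darwin path):
//   .macro store reg, off
//   movl \reg, \off(%esp)     # \reg and \off are replaced by argument tokens
//   .endm
//   store %eax, 8             # expands to: movl %eax, 8(%esp)
// On Darwin a macro defined without parameters instead substitutes $0, $1,
// ..., $n (the number of arguments), and $$ for a literal '$'.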
MacroInstantiation::MacroInstantiation(SMLoc IL, int EB, SMLoc EL,
size_t CondStackDepth)
: InstantiationLoc(IL), ExitBuffer(EB), ExitLoc(EL),
CondStackDepth(CondStackDepth) {}
static bool isOperator(AsmToken::TokenKind kind) {
switch (kind) {
default:
return false;
case AsmToken::Plus:
case AsmToken::Minus:
case AsmToken::Tilde:
case AsmToken::Slash:
case AsmToken::Star:
case AsmToken::Dot:
case AsmToken::Equal:
case AsmToken::EqualEqual:
case AsmToken::Pipe:
case AsmToken::PipePipe:
case AsmToken::Caret:
case AsmToken::Amp:
case AsmToken::AmpAmp:
case AsmToken::Exclaim:
case AsmToken::ExclaimEqual:
case AsmToken::Less:
case AsmToken::LessEqual:
case AsmToken::LessLess:
case AsmToken::LessGreater:
case AsmToken::Greater:
case AsmToken::GreaterEqual:
case AsmToken::GreaterGreater:
return true;
}
}
namespace {
class AsmLexerSkipSpaceRAII {
public:
AsmLexerSkipSpaceRAII(AsmLexer &Lexer, bool SkipSpace) : Lexer(Lexer) {
Lexer.setSkipSpace(SkipSpace);
}
~AsmLexerSkipSpaceRAII() {
Lexer.setSkipSpace(true);
}
private:
AsmLexer &Lexer;
};
} // end anonymous namespace
bool AsmParser::parseMacroArgument(MCAsmMacroArgument &MA, bool Vararg) {
if (Vararg) {
if (Lexer.isNot(AsmToken::EndOfStatement)) {
StringRef Str = parseStringToEndOfStatement();
MA.emplace_back(AsmToken::String, Str);
}
return false;
}
unsigned ParenLevel = 0;
// Darwin doesn't use spaces to delimit arguments.
AsmLexerSkipSpaceRAII ScopedSkipSpace(Lexer, IsDarwin);
bool SpaceEaten;
while (true) {
SpaceEaten = false;
if (Lexer.is(AsmToken::Eof) || Lexer.is(AsmToken::Equal))
return TokError("unexpected token in macro instantiation");
if (ParenLevel == 0) {
if (Lexer.is(AsmToken::Comma))
break;
if (Lexer.is(AsmToken::Space)) {
SpaceEaten = true;
Lexer.Lex(); // Eat spaces
}
// Spaces can delimit parameters, but could also be part of an expression.
// If the token after a space is an operator, add that token and the next
// one to this argument.
if (!IsDarwin) {
if (isOperator(Lexer.getKind())) {
MA.push_back(getTok());
Lexer.Lex();
// Whitespace after an operator can be ignored.
if (Lexer.is(AsmToken::Space))
Lexer.Lex();
continue;
}
}
if (SpaceEaten)
break;
}
// handleMacroEntry relies on not advancing the lexer here
// to be able to fill in the remaining default parameter values
if (Lexer.is(AsmToken::EndOfStatement))
break;
// Adjust the current parentheses level.
if (Lexer.is(AsmToken::LParen))
++ParenLevel;
else if (Lexer.is(AsmToken::RParen) && ParenLevel)
--ParenLevel;
// Append the token to the current argument list.
MA.push_back(getTok());
Lexer.Lex();
}
if (ParenLevel != 0)
return TokError("unbalanced parentheses in macro argument");
return false;
}
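// In the non-Darwin path above a space normally terminates an argument
// unless the token after the space is an operator, in which case the
// expression continues. So, for a macro taking two parameters, 'm 1 + 2, 3'
// passes '1 + 2' and '3', while 'm 1 2' passes '1' and '2' as two separate
// arguments.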
// Parse the macro instantiation arguments.
bool AsmParser::parseMacroArguments(const MCAsmMacro *M,
MCAsmMacroArguments &A) {
const unsigned NParameters = M ? M->Parameters.size() : 0;
bool NamedParametersFound = false;
SmallVector<SMLoc, 4> FALocs;
A.resize(NParameters);
FALocs.resize(NParameters);
// Parse two kinds of macro invocations:
// - macros defined without any parameters accept an arbitrary number of them
// - macros defined with parameters accept at most that many of them
bool HasVararg = NParameters ? M->Parameters.back().Vararg : false;
for (unsigned Parameter = 0; !NParameters || Parameter < NParameters;
++Parameter) {
SMLoc IDLoc = Lexer.getLoc();
MCAsmMacroParameter FA;
if (Lexer.is(AsmToken::Identifier) && Lexer.peekTok().is(AsmToken::Equal)) {
if (parseIdentifier(FA.Name))
return Error(IDLoc, "invalid argument identifier for formal argument");
if (Lexer.isNot(AsmToken::Equal))
return TokError("expected '=' after formal parameter identifier");
Lex();
NamedParametersFound = true;
}
bool Vararg = HasVararg && Parameter == (NParameters - 1);
if (NamedParametersFound && FA.Name.empty())
return Error(IDLoc, "cannot mix positional and keyword arguments");
SMLoc StrLoc = Lexer.getLoc();
SMLoc EndLoc;
if (AltMacroMode && Lexer.is(AsmToken::Percent)) {
const MCExpr *AbsoluteExp;
int64_t Value;
/// Eat '%'
Lex();
if (parseExpression(AbsoluteExp, EndLoc))
return false;
if (!AbsoluteExp->evaluateAsAbsolute(Value,
getStreamer().getAssemblerPtr()))
return Error(StrLoc, "expected absolute expression");
const char *StrChar = StrLoc.getPointer();
const char *EndChar = EndLoc.getPointer();
AsmToken newToken(AsmToken::Integer,
StringRef(StrChar, EndChar - StrChar), Value);
FA.Value.push_back(newToken);
} else if (AltMacroMode && Lexer.is(AsmToken::Less) &&
isAltmacroString(StrLoc, EndLoc)) {
const char *StrChar = StrLoc.getPointer();
const char *EndChar = EndLoc.getPointer();
jumpToLoc(EndLoc, CurBuffer);
/// Eat from '<' to '>'
Lex();
AsmToken newToken(AsmToken::String,
StringRef(StrChar, EndChar - StrChar));
FA.Value.push_back(newToken);
} else if(parseMacroArgument(FA.Value, Vararg))
return true;
unsigned PI = Parameter;
if (!FA.Name.empty()) {
unsigned FAI = 0;
for (FAI = 0; FAI < NParameters; ++FAI)
if (M->Parameters[FAI].Name == FA.Name)
break;
if (FAI >= NParameters) {
assert(M && "expected macro to be defined");
return Error(IDLoc, "parameter named '" + FA.Name +
"' does not exist for macro '" + M->Name + "'");
}
PI = FAI;
}
if (!FA.Value.empty()) {
if (A.size() <= PI)
A.resize(PI + 1);
A[PI] = FA.Value;
if (FALocs.size() <= PI)
FALocs.resize(PI + 1);
FALocs[PI] = Lexer.getLoc();
}
// At the end of the statement, fill in remaining arguments that have
// default values. If there aren't any, then the next argument is
// required but missing
if (Lexer.is(AsmToken::EndOfStatement)) {
bool Failure = false;
for (unsigned FAI = 0; FAI < NParameters; ++FAI) {
if (A[FAI].empty()) {
if (M->Parameters[FAI].Required) {
Error(FALocs[FAI].isValid() ? FALocs[FAI] : Lexer.getLoc(),
"missing value for required parameter "
"'" + M->Parameters[FAI].Name + "' in macro '" + M->Name + "'");
Failure = true;
}
if (!M->Parameters[FAI].Value.empty())
A[FAI] = M->Parameters[FAI].Value;
}
}
return Failure;
}
if (Lexer.is(AsmToken::Comma))
Lex();
}
return TokError("too many positional arguments");
}
bool AsmParser::handleMacroEntry(const MCAsmMacro *M, SMLoc NameLoc) {
// Arbitrarily limit macro nesting depth (default matches 'as'). We can
// eliminate this, although we should protect against infinite loops.
unsigned MaxNestingDepth = AsmMacroMaxNestingDepth;
if (ActiveMacros.size() == MaxNestingDepth) {
std::ostringstream MaxNestingDepthError;
MaxNestingDepthError << "macros cannot be nested more than "
<< MaxNestingDepth << " levels deep."
<< " Use -asm-macro-max-nesting-depth to increase "
"this limit.";
return TokError(MaxNestingDepthError.str());
}
MCAsmMacroArguments A;
if (parseMacroArguments(M, A))
return true;
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.
SmallString<256> Buf;
StringRef Body = M->Body;
raw_svector_ostream OS(Buf);
if (expandMacro(OS, Body, M->Parameters, A, true, getTok().getLoc()))
return true;
// We include the .endmacro in the buffer as our cue to exit the macro
// instantiation.
OS << ".endmacro\n";
std::unique_ptr<MemoryBuffer> Instantiation =
MemoryBuffer::getMemBufferCopy(OS.str(), "<instantiation>");
// Create the macro instantiation object and add to the current macro
// instantiation stack.
MacroInstantiation *MI = new MacroInstantiation(
NameLoc, CurBuffer, getTok().getLoc(), TheCondStack.size());
ActiveMacros.push_back(MI);
++NumOfMacroInstantiations;
// Jump to the macro instantiation and prime the lexer.
CurBuffer = SrcMgr.AddNewSourceBuffer(std::move(Instantiation), SMLoc());
Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer());
Lex();
return false;
}
void AsmParser::handleMacroExit() {
// Jump to the EndOfStatement we should return to, and consume it.
jumpToLoc(ActiveMacros.back()->ExitLoc, ActiveMacros.back()->ExitBuffer);
Lex();
// Pop the instantiation entry.
delete ActiveMacros.back();
ActiveMacros.pop_back();
}
bool AsmParser::parseAssignment(StringRef Name, bool allow_redef,
bool NoDeadStrip) {
MCSymbol *Sym;
const MCExpr *Value;
if (MCParserUtils::parseAssignmentExpression(Name, allow_redef, *this, Sym,
Value))
return true;
if (!Sym) {
// In the case where we parse an expression starting with a '.', we will
// not generate an error, nor will we create a symbol. In this case we
// should just return out.
return false;
}
// Do the assignment.
Out.EmitAssignment(Sym, Value);
if (NoDeadStrip)
Out.EmitSymbolAttribute(Sym, MCSA_NoDeadStrip);
return false;
}
/// parseIdentifier:
/// ::= identifier
/// ::= string
bool AsmParser::parseIdentifier(StringRef &Res) {
// The assembler has relaxed rules for accepting identifiers; in particular,
// we allow things like '.globl $foo' and '.def @feat.00', which would
// normally be separate tokens. At this level we have already lexed, so we
// cannot (currently) handle this as a context-dependent token; instead we
// detect adjacent tokens and return the combined identifier.
if (Lexer.is(AsmToken::Dollar) || Lexer.is(AsmToken::At)) {
SMLoc PrefixLoc = getLexer().getLoc();
// Consume the prefix character, and check for a following identifier.
AsmToken Buf[1];
Lexer.peekTokens(Buf, false);
if (Buf[0].isNot(AsmToken::Identifier))
return true;
// We have a '$' or '@' followed by an identifier, make sure they are adjacent.
if (PrefixLoc.getPointer() + 1 != Buf[0].getLoc().getPointer())
return true;
// eat $ or @
Lexer.Lex(); // Lexer's Lex guarantees consecutive token.
// Construct the joined identifier and consume the token.
Res =
StringRef(PrefixLoc.getPointer(), getTok().getIdentifier().size() + 1);
Lex(); // Parser Lex to maintain invariants.
return false;
}
if (Lexer.isNot(AsmToken::Identifier) && Lexer.isNot(AsmToken::String))
return true;
Res = getTok().getIdentifier();
Lex(); // Consume the identifier token.
return false;
}
/// parseDirectiveSet:
/// ::= .equ identifier ',' expression
/// ::= .equiv identifier ',' expression
/// ::= .set identifier ',' expression
bool AsmParser::parseDirectiveSet(StringRef IDVal, bool allow_redef) {
StringRef Name;
if (check(parseIdentifier(Name), "expected identifier") ||
parseToken(AsmToken::Comma) || parseAssignment(Name, allow_redef, true))
return addErrorSuffix(" in '" + Twine(IDVal) + "' directive");
return false;
}
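// For example, any of the three spellings in the grammar above:
//   .set   sym, other + 8     # sym may later be redefined
//   .equ   sym, other + 8     # same as .set
//   .equiv sym2, other + 8    # like .set, but redefinition is rejected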
bool AsmParser::parseEscapedString(std::string &Data) {
if (check(getTok().isNot(AsmToken::String), "expected string"))
return true;
Data = "";
StringRef Str = getTok().getStringContents();
for (unsigned i = 0, e = Str.size(); i != e; ++i) {
if (Str[i] != '\\') {
Data += Str[i];
continue;
}
// Recognize escaped characters. Note that this escape semantics currently
// loosely follows Darwin 'as'. Notably, it doesn't support hex escapes.
++i;
if (i == e)
return TokError("unexpected backslash at end of string");
// Recognize octal sequences.
if ((unsigned)(Str[i] - '0') <= 7) {
// Consume up to three octal characters.
unsigned Value = Str[i] - '0';
if (i + 1 != e && ((unsigned)(Str[i + 1] - '0')) <= 7) {
++i;
Value = Value * 8 + (Str[i] - '0');
if (i + 1 != e && ((unsigned)(Str[i + 1] - '0')) <= 7) {
++i;
Value = Value * 8 + (Str[i] - '0');
}
}
if (Value > 255)
return TokError("invalid octal escape sequence (out of range)");
Data += (unsigned char)Value;
continue;
}
// Otherwise recognize individual escapes.
switch (Str[i]) {
default:
// Just reject invalid escape sequences for now.
return TokError("invalid escape sequence (unrecognized character)");
case 'b': Data += '\b'; break;
case 'f': Data += '\f'; break;
case 'n': Data += '\n'; break;
case 'r': Data += '\r'; break;
case 't': Data += '\t'; break;
case '"': Data += '"'; break;
case '\\': Data += '\\'; break;
}
}
Lex();
return false;
}
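// Escape handling example for the loop above: the string literal
//   "col\tA\102\n"
// produces the bytes 'c' 'o' 'l' TAB 'A' 'B' LF, since \t and \n are the
// usual single-character escapes and \102 is an octal sequence (0102 == 66
// == 'B'). Hex escapes such as \x41 are rejected, as noted above.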
/// parseDirectiveAscii:
/// ::= ( .ascii | .asciz | .string ) [ "string" ( , "string" )* ]
bool AsmParser::parseDirectiveAscii(StringRef IDVal, bool ZeroTerminated) {
auto parseOp = [&]() -> bool {
std::string Data;
if (checkForValidSection() || parseEscapedString(Data))
return true;
getStreamer().EmitBytes(Data);
if (ZeroTerminated)
getStreamer().EmitBytes(StringRef("\0", 1));
return false;
};
if (parseMany(parseOp))
return addErrorSuffix(" in '" + Twine(IDVal) + "' directive");
return false;
}
/// parseDirectiveReloc
/// ::= .reloc expression , identifier [ , expression ]
bool AsmParser::parseDirectiveReloc(SMLoc DirectiveLoc) {
const MCExpr *Offset;
const MCExpr *Expr = nullptr;
int64_t OffsetValue;
SMLoc OffsetLoc = Lexer.getTok().getLoc();
if (parseExpression(Offset))
return true;
if ((Offset->evaluateAsAbsolute(OffsetValue,
getStreamer().getAssemblerPtr()) &&
check(OffsetValue < 0, OffsetLoc, "expression is negative")) ||
(check(Offset->getKind() != llvm::MCExpr::Constant &&
Offset->getKind() != llvm::MCExpr::SymbolRef,
OffsetLoc, "expected non-negative number or a label")) ||
(parseToken(AsmToken::Comma, "expected comma") ||
check(getTok().isNot(AsmToken::Identifier), "expected relocation name")))
return true;
SMLoc NameLoc = Lexer.getTok().getLoc();
StringRef Name = Lexer.getTok().getIdentifier();
Lex();
if (Lexer.is(AsmToken::Comma)) {
Lex();
SMLoc ExprLoc = Lexer.getLoc();
if (parseExpression(Expr))
return true;
MCValue Value;
if (!Expr->evaluateAsRelocatable(Value, nullptr, nullptr))
return Error(ExprLoc, "expression must be relocatable");
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in .reloc directive"))
return true;
const MCTargetAsmParser &MCT = getTargetParser();
const MCSubtargetInfo &STI = MCT.getSTI();
if (getStreamer().EmitRelocDirective(*Offset, Name, Expr, DirectiveLoc, STI))
return Error(NameLoc, "unknown relocation name");
return false;
}
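// For example (the relocation name itself is target specific and is only
// validated by the streamer/backend above; BFD_RELOC_NONE is illustrative):
//   .reloc 8, BFD_RELOC_NONE            # constant offset, no addend
//   .reloc ., BFD_RELOC_NONE, foo + 4   # symbolic offset with an addend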
/// parseDirectiveValue
/// ::= (.byte | .short | ... ) [ expression (, expression)* ]
bool AsmParser::parseDirectiveValue(StringRef IDVal, unsigned Size) {
auto parseOp = [&]() -> bool {
const MCExpr *Value;
SMLoc ExprLoc = getLexer().getLoc();
if (checkForValidSection() || parseExpression(Value))
return true;
// Special case constant expressions to match code generator.
if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
assert(Size <= 8 && "Invalid size");
uint64_t IntValue = MCE->getValue();
if (!isUIntN(8 * Size, IntValue) && !isIntN(8 * Size, IntValue))
return Error(ExprLoc, "out of range literal value");
getStreamer().EmitIntValue(IntValue, Size);
} else
getStreamer().EmitValue(Value, Size, ExprLoc);
return false;
};
if (parseMany(parseOp))
return addErrorSuffix(" in '" + Twine(IDVal) + "' directive");
return false;
}
static bool parseHexOcta(AsmParser &Asm, uint64_t &hi, uint64_t &lo) {
if (Asm.getTok().isNot(AsmToken::Integer) &&
Asm.getTok().isNot(AsmToken::BigNum))
return Asm.TokError("unknown token in expression");
SMLoc ExprLoc = Asm.getTok().getLoc();
APInt IntValue = Asm.getTok().getAPIntVal();
Asm.Lex();
if (!IntValue.isIntN(128))
return Asm.Error(ExprLoc, "out of range literal value");
if (!IntValue.isIntN(64)) {
hi = IntValue.getHiBits(IntValue.getBitWidth() - 64).getZExtValue();
lo = IntValue.getLoBits(64).getZExtValue();
} else {
hi = 0;
lo = IntValue.getZExtValue();
}
return false;
}
/// ParseDirectiveOctaValue
/// ::= .octa [ hexconstant (, hexconstant)* ]
bool AsmParser::parseDirectiveOctaValue(StringRef IDVal) {
auto parseOp = [&]() -> bool {
if (checkForValidSection())
return true;
uint64_t hi, lo;
if (parseHexOcta(*this, hi, lo))
return true;
if (MAI.isLittleEndian()) {
getStreamer().EmitIntValue(lo, 8);
getStreamer().EmitIntValue(hi, 8);
} else {
getStreamer().EmitIntValue(hi, 8);
getStreamer().EmitIntValue(lo, 8);
}
return false;
};
if (parseMany(parseOp))
return addErrorSuffix(" in '" + Twine(IDVal) + "' directive");
return false;
}
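// For example, on a little-endian target
//   .octa 0x000102030405060708090a0b0c0d0e0f
// is split by parseHexOcta() into lo = 0x08090a0b0c0d0e0f and
// hi = 0x0001020304050607, and the low 8 bytes are emitted first; a
// big-endian target emits the high half first.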
bool AsmParser::parseRealValue(const fltSemantics &Semantics, APInt &Res) {
// We don't truly support arithmetic on floating point expressions, so we
// have to manually parse unary prefixes.
bool IsNeg = false;
if (getLexer().is(AsmToken::Minus)) {
Lexer.Lex();
IsNeg = true;
} else if (getLexer().is(AsmToken::Plus))
Lexer.Lex();
if (Lexer.is(AsmToken::Error))
return TokError(Lexer.getErr());
if (Lexer.isNot(AsmToken::Integer) && Lexer.isNot(AsmToken::Real) &&
Lexer.isNot(AsmToken::Identifier))
return TokError("unexpected token in directive");
// Convert to an APFloat.
APFloat Value(Semantics);
StringRef IDVal = getTok().getString();
if (getLexer().is(AsmToken::Identifier)) {
if (!IDVal.compare_lower("infinity") || !IDVal.compare_lower("inf"))
Value = APFloat::getInf(Semantics);
else if (!IDVal.compare_lower("nan"))
Value = APFloat::getNaN(Semantics, false, ~0);
else
return TokError("invalid floating point literal");
} else if (Value.convertFromString(IDVal, APFloat::rmNearestTiesToEven) ==
APFloat::opInvalidOp)
return TokError("invalid floating point literal");
if (IsNeg)
Value.changeSign();
// Consume the numeric token.
Lex();
Res = Value.bitcastToAPInt();
return false;
}
/// parseDirectiveRealValue
/// ::= (.single | .double) [ expression (, expression)* ]
bool AsmParser::parseDirectiveRealValue(StringRef IDVal,
const fltSemantics &Semantics) {
auto parseOp = [&]() -> bool {
APInt AsInt;
if (checkForValidSection() || parseRealValue(Semantics, AsInt))
return true;
getStreamer().EmitIntValue(AsInt.getLimitedValue(),
AsInt.getBitWidth() / 8);
return false;
};
if (parseMany(parseOp))
return addErrorSuffix(" in '" + Twine(IDVal) + "' directive");
return false;
}
/// parseDirectiveZero
/// ::= .zero expression
bool AsmParser::parseDirectiveZero() {
SMLoc NumBytesLoc = Lexer.getLoc();
const MCExpr *NumBytes;
if (checkForValidSection() || parseExpression(NumBytes))
return true;
int64_t Val = 0;
if (getLexer().is(AsmToken::Comma)) {
Lex();
if (parseAbsoluteExpression(Val))
return true;
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.zero' directive"))
return true;
getStreamer().emitFill(*NumBytes, Val, NumBytesLoc);
return false;
}
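// For example, '.zero 16' emits 16 zero bytes, and '.zero 8, 0xff' uses the
// optional second operand parsed above as the fill value, emitting 8 bytes
// of 0xff.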
/// parseDirectiveFill
/// ::= .fill expression [ , expression [ , expression ] ]
bool AsmParser::parseDirectiveFill() {
SMLoc NumValuesLoc = Lexer.getLoc();
const MCExpr *NumValues;
if (checkForValidSection() || parseExpression(NumValues))
return true;
int64_t FillSize = 1;
int64_t FillExpr = 0;
SMLoc SizeLoc, ExprLoc;
if (parseOptionalToken(AsmToken::Comma)) {
SizeLoc = getTok().getLoc();
if (parseAbsoluteExpression(FillSize))
return true;
if (parseOptionalToken(AsmToken::Comma)) {
ExprLoc = getTok().getLoc();
if (parseAbsoluteExpression(FillExpr))
return true;
}
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.fill' directive"))
return true;
if (FillSize < 0) {
Warning(SizeLoc, "'.fill' directive with negative size has no effect");
return false;
}
if (FillSize > 8) {
Warning(SizeLoc, "'.fill' directive with size greater than 8 has been truncated to 8");
FillSize = 8;
}
if (!isUInt<32>(FillExpr) && FillSize > 4)
Warning(ExprLoc, "'.fill' directive pattern has been truncated to 32-bits");
getStreamer().emitFill(*NumValues, FillSize, FillExpr, NumValuesLoc);
return false;
}
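// For example
//   .fill 4, 2, 0x1234
// emits four 2-byte units containing 0x1234 (8 bytes in total); sizes
// larger than 8 are truncated to 8 with a warning, as handled above.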
/// parseDirectiveOrg
/// ::= .org expression [ , expression ]
bool AsmParser::parseDirectiveOrg() {
const MCExpr *Offset;
SMLoc OffsetLoc = Lexer.getLoc();
if (checkForValidSection() || parseExpression(Offset))
return true;
// Parse optional fill expression.
int64_t FillExpr = 0;
if (parseOptionalToken(AsmToken::Comma))
if (parseAbsoluteExpression(FillExpr))
return addErrorSuffix(" in '.org' directive");
if (parseToken(AsmToken::EndOfStatement))
return addErrorSuffix(" in '.org' directive");
getStreamer().emitValueToOffset(Offset, FillExpr, OffsetLoc);
return false;
}
/// parseDirectiveAlign
/// ::= {.align, ...} expression [ , expression [ , expression ]]
bool AsmParser::parseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
SMLoc AlignmentLoc = getLexer().getLoc();
int64_t Alignment;
SMLoc MaxBytesLoc;
bool HasFillExpr = false;
int64_t FillExpr = 0;
int64_t MaxBytesToFill = 0;
auto parseAlign = [&]() -> bool {
if (parseAbsoluteExpression(Alignment))
return true;
if (parseOptionalToken(AsmToken::Comma)) {
// The fill expression can be omitted while specifying a maximum number of
// alignment bytes, e.g:
// .align 3,,4
if (getTok().isNot(AsmToken::Comma)) {
HasFillExpr = true;
if (parseAbsoluteExpression(FillExpr))
return true;
}
if (parseOptionalToken(AsmToken::Comma))
if (parseTokenLoc(MaxBytesLoc) ||
parseAbsoluteExpression(MaxBytesToFill))
return true;
}
return parseToken(AsmToken::EndOfStatement);
};
if (checkForValidSection())
return addErrorSuffix(" in directive");
// Ignore empty '.p2align' directives for GNU-as compatibility
if (IsPow2 && (ValueSize == 1) && getTok().is(AsmToken::EndOfStatement)) {
Warning(AlignmentLoc, "p2align directive with no operand(s) is ignored");
return parseToken(AsmToken::EndOfStatement);
}
if (parseAlign())
return addErrorSuffix(" in directive");
// Always emit an alignment here even if we throw an error.
bool ReturnVal = false;
// Compute alignment in bytes.
if (IsPow2) {
// FIXME: Diagnose overflow.
if (Alignment >= 32) {
ReturnVal |= Error(AlignmentLoc, "invalid alignment value");
Alignment = 31;
}
Alignment = 1ULL << Alignment;
} else {
// Reject alignments that aren't either a power of two or zero,
// for gas compatibility. Alignment of zero is silently rounded
// up to one.
if (Alignment == 0)
Alignment = 1;
if (!isPowerOf2_64(Alignment))
ReturnVal |= Error(AlignmentLoc, "alignment must be a power of 2");
}
// Diagnose non-sensical max bytes to align.
if (MaxBytesLoc.isValid()) {
if (MaxBytesToFill < 1) {
ReturnVal |= Error(MaxBytesLoc,
"alignment directive can never be satisfied in this "
"many bytes, ignoring maximum bytes expression");
MaxBytesToFill = 0;
}
if (MaxBytesToFill >= Alignment) {
Warning(MaxBytesLoc, "maximum bytes expression exceeds alignment and "
"has no effect");
MaxBytesToFill = 0;
}
}
// Check whether we should use optimal code alignment for this .align
// directive.
const MCSection *Section = getStreamer().getCurrentSectionOnly();
assert(Section && "must have section to emit alignment");
bool UseCodeAlign = Section->UseCodeAlign();
if ((!HasFillExpr || Lexer.getMAI().getTextAlignFillValue() == FillExpr) &&
ValueSize == 1 && UseCodeAlign) {
getStreamer().EmitCodeAlignment(Alignment, MaxBytesToFill);
} else {
// FIXME: Target specific behavior about how the "extra" bytes are filled.
getStreamer().EmitValueToAlignment(Alignment, FillExpr, ValueSize,
MaxBytesToFill);
}
return ReturnVal;
}
/// parseDirectiveFile
/// ::= .file filename
/// ::= .file number [directory] filename [md5 checksum] [source source-text]
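/// For example (an illustrative sketch with hypothetical names),
///   .file 1 "include" "foo.c"
/// assigns file number 1 with directory "include" and filename "foo.c" for use
/// by later .loc directives.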
bool AsmParser::parseDirectiveFile(SMLoc DirectiveLoc) {
// FIXME: I'm not sure what this is.
int64_t FileNumber = -1;
if (getLexer().is(AsmToken::Integer)) {
FileNumber = getTok().getIntVal();
Lex();
if (FileNumber < 0)
return TokError("negative file number");
}
std::string Path;
// Usually the directory and filename together, otherwise just the directory.
// Allow the strings to have escaped octal character sequence.
if (check(getTok().isNot(AsmToken::String),
"unexpected token in '.file' directive") ||
parseEscapedString(Path))
return true;
StringRef Directory;
StringRef Filename;
std::string FilenameData;
if (getLexer().is(AsmToken::String)) {
if (check(FileNumber == -1,
"explicit path specified, but no file number") ||
parseEscapedString(FilenameData))
return true;
Filename = FilenameData;
Directory = Path;
} else {
Filename = Path;
}
uint64_t MD5Hi, MD5Lo;
bool HasMD5 = false;
Optional<StringRef> Source;
bool HasSource = false;
std::string SourceString;
while (!parseOptionalToken(AsmToken::EndOfStatement)) {
StringRef Keyword;
if (check(getTok().isNot(AsmToken::Identifier),
"unexpected token in '.file' directive") ||
parseIdentifier(Keyword))
return true;
if (Keyword == "md5") {
HasMD5 = true;
if (check(FileNumber == -1,
"MD5 checksum specified, but no file number") ||
parseHexOcta(*this, MD5Hi, MD5Lo))
return true;
} else if (Keyword == "source") {
HasSource = true;
if (check(FileNumber == -1,
"source specified, but no file number") ||
check(getTok().isNot(AsmToken::String),
"unexpected token in '.file' directive") ||
parseEscapedString(SourceString))
return true;
} else {
return TokError("unexpected token in '.file' directive");
}
}
if (FileNumber == -1) {
- if (!getContext().getAsmInfo()->hasSingleParameterDotFile())
- return Error(DirectiveLoc,
- "target does not support '.file' without a number");
- getStreamer().EmitFileDirective(Filename);
+ // Ignore the directive if there is no number and the target doesn't support
+ // numberless .file directives. This allows some portability of assembler
+ // between different object file formats.
+ if (getContext().getAsmInfo()->hasSingleParameterDotFile())
+ getStreamer().EmitFileDirective(Filename);
} else {
// In case there is a -g option as well as debug info from the .file directive,
// we turn off the -g option and directly use the existing debug info instead.
// Also reset any implicit ".file 0" for the assembler source.
if (Ctx.getGenDwarfForAssembly()) {
Ctx.getMCDwarfLineTable(0).resetRootFile();
Ctx.setGenDwarfForAssembly(false);
}
MD5::MD5Result *CKMem = nullptr;
if (HasMD5) {
CKMem = (MD5::MD5Result *)Ctx.allocate(sizeof(MD5::MD5Result), 1);
for (unsigned i = 0; i != 8; ++i) {
CKMem->Bytes[i] = uint8_t(MD5Hi >> ((7 - i) * 8));
CKMem->Bytes[i + 8] = uint8_t(MD5Lo >> ((7 - i) * 8));
}
}
if (HasSource) {
char *SourceBuf = static_cast<char *>(Ctx.allocate(SourceString.size()));
memcpy(SourceBuf, SourceString.data(), SourceString.size());
Source = StringRef(SourceBuf, SourceString.size());
}
if (FileNumber == 0) {
if (Ctx.getDwarfVersion() < 5)
return Warning(DirectiveLoc, "file 0 not supported prior to DWARF-5");
getStreamer().emitDwarfFile0Directive(Directory, Filename, CKMem, Source);
} else {
Expected<unsigned> FileNumOrErr = getStreamer().tryEmitDwarfFileDirective(
FileNumber, Directory, Filename, CKMem, Source);
if (!FileNumOrErr)
return Error(DirectiveLoc, toString(FileNumOrErr.takeError()));
FileNumber = FileNumOrErr.get();
}
// Alert the user if there are some .file directives with MD5 and some not.
// But only do that once.
if (!ReportedInconsistentMD5 && !Ctx.isDwarfMD5UsageConsistent(0)) {
ReportedInconsistentMD5 = true;
return Warning(DirectiveLoc, "inconsistent use of MD5 checksums");
}
}
return false;
}
/// parseDirectiveLine
/// ::= .line [number]
bool AsmParser::parseDirectiveLine() {
int64_t LineNumber;
if (getLexer().is(AsmToken::Integer)) {
if (parseIntToken(LineNumber, "unexpected token in '.line' directive"))
return true;
(void)LineNumber;
// FIXME: Do something with the .line.
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.line' directive"))
return true;
return false;
}
/// parseDirectiveLoc
/// ::= .loc FileNumber [LineNumber] [ColumnPos] [basic_block] [prologue_end]
/// [epilogue_begin] [is_stmt VALUE] [isa VALUE]
/// The first number is a file number, which must have been previously assigned
/// with a .file directive; the second number is the line number; and the
/// optional third number is a column position (zero if not specified). The
/// remaining optional items are .loc sub-directives.
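/// For example (an illustrative sketch, not taken from the original source),
///   .loc 1 42 7 prologue_end is_stmt 1
/// records file 1, line 42, column 7, marks the end of the function prologue,
/// and flags the location as a statement boundary.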
bool AsmParser::parseDirectiveLoc() {
int64_t FileNumber = 0, LineNumber = 0;
SMLoc Loc = getTok().getLoc();
if (parseIntToken(FileNumber, "unexpected token in '.loc' directive") ||
check(FileNumber < 1 && Ctx.getDwarfVersion() < 5, Loc,
"file number less than one in '.loc' directive") ||
check(!getContext().isValidDwarfFileNumber(FileNumber), Loc,
"unassigned file number in '.loc' directive"))
return true;
// optional
if (getLexer().is(AsmToken::Integer)) {
LineNumber = getTok().getIntVal();
if (LineNumber < 0)
return TokError("line number less than zero in '.loc' directive");
Lex();
}
int64_t ColumnPos = 0;
if (getLexer().is(AsmToken::Integer)) {
ColumnPos = getTok().getIntVal();
if (ColumnPos < 0)
return TokError("column position less than zero in '.loc' directive");
Lex();
}
unsigned Flags = DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0;
unsigned Isa = 0;
int64_t Discriminator = 0;
auto parseLocOp = [&]() -> bool {
StringRef Name;
SMLoc Loc = getTok().getLoc();
if (parseIdentifier(Name))
return TokError("unexpected token in '.loc' directive");
if (Name == "basic_block")
Flags |= DWARF2_FLAG_BASIC_BLOCK;
else if (Name == "prologue_end")
Flags |= DWARF2_FLAG_PROLOGUE_END;
else if (Name == "epilogue_begin")
Flags |= DWARF2_FLAG_EPILOGUE_BEGIN;
else if (Name == "is_stmt") {
Loc = getTok().getLoc();
const MCExpr *Value;
if (parseExpression(Value))
return true;
// The expression must be the constant 0 or 1.
if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
int Value = MCE->getValue();
if (Value == 0)
Flags &= ~DWARF2_FLAG_IS_STMT;
else if (Value == 1)
Flags |= DWARF2_FLAG_IS_STMT;
else
return Error(Loc, "is_stmt value not 0 or 1");
} else {
return Error(Loc, "is_stmt value not the constant value of 0 or 1");
}
} else if (Name == "isa") {
Loc = getTok().getLoc();
const MCExpr *Value;
if (parseExpression(Value))
return true;
// The expression must be a constant greater or equal to 0.
if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
int Value = MCE->getValue();
if (Value < 0)
return Error(Loc, "isa number less than zero");
Isa = Value;
} else {
return Error(Loc, "isa number not a constant value");
}
} else if (Name == "discriminator") {
if (parseAbsoluteExpression(Discriminator))
return true;
} else {
return Error(Loc, "unknown sub-directive in '.loc' directive");
}
return false;
};
if (parseMany(parseLocOp, false /*hasComma*/))
return true;
getStreamer().EmitDwarfLocDirective(FileNumber, LineNumber, ColumnPos, Flags,
Isa, Discriminator, StringRef());
return false;
}
/// parseDirectiveStabs
/// ::= .stabs string, number, number, number
bool AsmParser::parseDirectiveStabs() {
return TokError("unsupported directive '.stabs'");
}
/// parseDirectiveCVFile
/// ::= .cv_file number filename [checksum] [checksumkind]
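/// For example (an illustrative sketch with hypothetical names),
///   .cv_file 1 "foo.cpp"
/// registers "foo.cpp" as CodeView file number 1; a hex checksum string and a
/// checksum kind may optionally follow the filename.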
bool AsmParser::parseDirectiveCVFile() {
SMLoc FileNumberLoc = getTok().getLoc();
int64_t FileNumber;
std::string Filename;
std::string Checksum;
int64_t ChecksumKind = 0;
if (parseIntToken(FileNumber,
"expected file number in '.cv_file' directive") ||
check(FileNumber < 1, FileNumberLoc, "file number less than one") ||
check(getTok().isNot(AsmToken::String),
"unexpected token in '.cv_file' directive") ||
parseEscapedString(Filename))
return true;
if (!parseOptionalToken(AsmToken::EndOfStatement)) {
if (check(getTok().isNot(AsmToken::String),
"unexpected token in '.cv_file' directive") ||
parseEscapedString(Checksum) ||
parseIntToken(ChecksumKind,
"expected checksum kind in '.cv_file' directive") ||
parseToken(AsmToken::EndOfStatement,
"unexpected token in '.cv_file' directive"))
return true;
}
Checksum = fromHex(Checksum);
void *CKMem = Ctx.allocate(Checksum.size(), 1);
memcpy(CKMem, Checksum.data(), Checksum.size());
ArrayRef<uint8_t> ChecksumAsBytes(reinterpret_cast<const uint8_t *>(CKMem),
Checksum.size());
if (!getStreamer().EmitCVFileDirective(FileNumber, Filename, ChecksumAsBytes,
static_cast<uint8_t>(ChecksumKind)))
return Error(FileNumberLoc, "file number already allocated");
return false;
}
bool AsmParser::parseCVFunctionId(int64_t &FunctionId,
StringRef DirectiveName) {
SMLoc Loc;
return parseTokenLoc(Loc) ||
parseIntToken(FunctionId, "expected function id in '" + DirectiveName +
"' directive") ||
check(FunctionId < 0 || FunctionId >= UINT_MAX, Loc,
"expected function id within range [0, UINT_MAX)");
}
bool AsmParser::parseCVFileId(int64_t &FileNumber, StringRef DirectiveName) {
SMLoc Loc;
return parseTokenLoc(Loc) ||
parseIntToken(FileNumber, "expected integer in '" + DirectiveName +
"' directive") ||
check(FileNumber < 1, Loc, "file number less than one in '" +
DirectiveName + "' directive") ||
check(!getCVContext().isValidFileNumber(FileNumber), Loc,
"unassigned file number in '" + DirectiveName + "' directive");
}
/// parseDirectiveCVFuncId
/// ::= .cv_func_id FunctionId
///
/// Introduces a function ID that can be used with .cv_loc.
bool AsmParser::parseDirectiveCVFuncId() {
SMLoc FunctionIdLoc = getTok().getLoc();
int64_t FunctionId;
if (parseCVFunctionId(FunctionId, ".cv_func_id") ||
parseToken(AsmToken::EndOfStatement,
"unexpected token in '.cv_func_id' directive"))
return true;
if (!getStreamer().EmitCVFuncIdDirective(FunctionId))
return Error(FunctionIdLoc, "function id already allocated");
return false;
}
/// parseDirectiveCVInlineSiteId
/// ::= .cv_inline_site_id FunctionId
/// "within" IAFunc
/// "inlined_at" IAFile IALine [IACol]
///
/// Introduces a function ID that can be used with .cv_loc. Includes "inlined
/// at" source location information for use in the line table of the caller,
/// whether the caller is a real function or another inlined call site.
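/// For example (an illustrative sketch, not taken from the original source),
///   .cv_inline_site_id 3 within 2 inlined_at 1 14 5
/// defines function id 3 as a call site inlined into function id 2 at file 1,
/// line 14, column 5.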
bool AsmParser::parseDirectiveCVInlineSiteId() {
SMLoc FunctionIdLoc = getTok().getLoc();
int64_t FunctionId;
int64_t IAFunc;
int64_t IAFile;
int64_t IALine;
int64_t IACol = 0;
// FunctionId
if (parseCVFunctionId(FunctionId, ".cv_inline_site_id"))
return true;
// "within"
if (check((getLexer().isNot(AsmToken::Identifier) ||
getTok().getIdentifier() != "within"),
"expected 'within' identifier in '.cv_inline_site_id' directive"))
return true;
Lex();
// IAFunc
if (parseCVFunctionId(IAFunc, ".cv_inline_site_id"))
return true;
// "inlined_at"
if (check((getLexer().isNot(AsmToken::Identifier) ||
getTok().getIdentifier() != "inlined_at"),
"expected 'inlined_at' identifier in '.cv_inline_site_id' "
"directive") )
return true;
Lex();
// IAFile IALine
if (parseCVFileId(IAFile, ".cv_inline_site_id") ||
parseIntToken(IALine, "expected line number after 'inlined_at'"))
return true;
// [IACol]
if (getLexer().is(AsmToken::Integer)) {
IACol = getTok().getIntVal();
Lex();
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.cv_inline_site_id' directive"))
return true;
if (!getStreamer().EmitCVInlineSiteIdDirective(FunctionId, IAFunc, IAFile,
IALine, IACol, FunctionIdLoc))
return Error(FunctionIdLoc, "function id already allocated");
return false;
}
/// parseDirectiveCVLoc
/// ::= .cv_loc FunctionId FileNumber [LineNumber] [ColumnPos] [prologue_end]
/// [is_stmt VALUE]
/// The first number is a function id, which must have been previously defined
/// with a .cv_func_id or .cv_inline_site_id directive; the second number is a
/// file number previously assigned with a .cv_file directive; the optional
/// third number is the line number; and the optional fourth number is a column
/// position (zero if not specified). The remaining optional items are .cv_loc
/// sub-directives.
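/// For example (an illustrative sketch, not taken from the original source),
///   .cv_loc 2 1 42 7 prologue_end
/// records a location in function id 2 at file 1, line 42, column 7 and marks
/// the end of the prologue.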
bool AsmParser::parseDirectiveCVLoc() {
SMLoc DirectiveLoc = getTok().getLoc();
int64_t FunctionId, FileNumber;
if (parseCVFunctionId(FunctionId, ".cv_loc") ||
parseCVFileId(FileNumber, ".cv_loc"))
return true;
int64_t LineNumber = 0;
if (getLexer().is(AsmToken::Integer)) {
LineNumber = getTok().getIntVal();
if (LineNumber < 0)
return TokError("line number less than zero in '.cv_loc' directive");
Lex();
}
int64_t ColumnPos = 0;
if (getLexer().is(AsmToken::Integer)) {
ColumnPos = getTok().getIntVal();
if (ColumnPos < 0)
return TokError("column position less than zero in '.cv_loc' directive");
Lex();
}
bool PrologueEnd = false;
uint64_t IsStmt = 0;
auto parseOp = [&]() -> bool {
StringRef Name;
SMLoc Loc = getTok().getLoc();
if (parseIdentifier(Name))
return TokError("unexpected token in '.cv_loc' directive");
if (Name == "prologue_end")
PrologueEnd = true;
else if (Name == "is_stmt") {
Loc = getTok().getLoc();
const MCExpr *Value;
if (parseExpression(Value))
return true;
// The expression must be the constant 0 or 1.
IsStmt = ~0ULL;
if (const auto *MCE = dyn_cast<MCConstantExpr>(Value))
IsStmt = MCE->getValue();
if (IsStmt > 1)
return Error(Loc, "is_stmt value not 0 or 1");
} else {
return Error(Loc, "unknown sub-directive in '.cv_loc' directive");
}
return false;
};
if (parseMany(parseOp, false /*hasComma*/))
return true;
getStreamer().EmitCVLocDirective(FunctionId, FileNumber, LineNumber,
ColumnPos, PrologueEnd, IsStmt, StringRef(),
DirectiveLoc);
return false;
}
/// parseDirectiveCVLinetable
/// ::= .cv_linetable FunctionId, FnStart, FnEnd
bool AsmParser::parseDirectiveCVLinetable() {
int64_t FunctionId;
StringRef FnStartName, FnEndName;
SMLoc Loc = getTok().getLoc();
if (parseCVFunctionId(FunctionId, ".cv_linetable") ||
parseToken(AsmToken::Comma,
"unexpected token in '.cv_linetable' directive") ||
parseTokenLoc(Loc) || check(parseIdentifier(FnStartName), Loc,
"expected identifier in directive") ||
parseToken(AsmToken::Comma,
"unexpected token in '.cv_linetable' directive") ||
parseTokenLoc(Loc) || check(parseIdentifier(FnEndName), Loc,
"expected identifier in directive"))
return true;
MCSymbol *FnStartSym = getContext().getOrCreateSymbol(FnStartName);
MCSymbol *FnEndSym = getContext().getOrCreateSymbol(FnEndName);
getStreamer().EmitCVLinetableDirective(FunctionId, FnStartSym, FnEndSym);
return false;
}
/// parseDirectiveCVInlineLinetable
/// ::= .cv_inline_linetable PrimaryFunctionId FileId LineNum FnStart FnEnd
bool AsmParser::parseDirectiveCVInlineLinetable() {
int64_t PrimaryFunctionId, SourceFileId, SourceLineNum;
StringRef FnStartName, FnEndName;
SMLoc Loc = getTok().getLoc();
if (parseCVFunctionId(PrimaryFunctionId, ".cv_inline_linetable") ||
parseTokenLoc(Loc) ||
parseIntToken(
SourceFileId,
"expected SourceField in '.cv_inline_linetable' directive") ||
check(SourceFileId <= 0, Loc,
"File id less than zero in '.cv_inline_linetable' directive") ||
parseTokenLoc(Loc) ||
parseIntToken(
SourceLineNum,
"expected SourceLineNum in '.cv_inline_linetable' directive") ||
check(SourceLineNum < 0, Loc,
"Line number less than zero in '.cv_inline_linetable' directive") ||
parseTokenLoc(Loc) || check(parseIdentifier(FnStartName), Loc,
"expected identifier in directive") ||
parseTokenLoc(Loc) || check(parseIdentifier(FnEndName), Loc,
"expected identifier in directive"))
return true;
if (parseToken(AsmToken::EndOfStatement, "Expected End of Statement"))
return true;
MCSymbol *FnStartSym = getContext().getOrCreateSymbol(FnStartName);
MCSymbol *FnEndSym = getContext().getOrCreateSymbol(FnEndName);
getStreamer().EmitCVInlineLinetableDirective(PrimaryFunctionId, SourceFileId,
SourceLineNum, FnStartSym,
FnEndSym);
return false;
}
/// parseDirectiveCVDefRange
/// ::= .cv_def_range RangeStart RangeEnd (GapStart GapEnd)*, bytes*
bool AsmParser::parseDirectiveCVDefRange() {
SMLoc Loc;
std::vector<std::pair<const MCSymbol *, const MCSymbol *>> Ranges;
while (getLexer().is(AsmToken::Identifier)) {
Loc = getLexer().getLoc();
StringRef GapStartName;
if (parseIdentifier(GapStartName))
return Error(Loc, "expected identifier in directive");
MCSymbol *GapStartSym = getContext().getOrCreateSymbol(GapStartName);
Loc = getLexer().getLoc();
StringRef GapEndName;
if (parseIdentifier(GapEndName))
return Error(Loc, "expected identifier in directive");
MCSymbol *GapEndSym = getContext().getOrCreateSymbol(GapEndName);
Ranges.push_back({GapStartSym, GapEndSym});
}
std::string FixedSizePortion;
if (parseToken(AsmToken::Comma, "unexpected token in directive") ||
parseEscapedString(FixedSizePortion))
return true;
getStreamer().EmitCVDefRangeDirective(Ranges, FixedSizePortion);
return false;
}
/// parseDirectiveCVString
/// ::= .cv_stringtable "string"
bool AsmParser::parseDirectiveCVString() {
std::string Data;
if (checkForValidSection() || parseEscapedString(Data))
return addErrorSuffix(" in '.cv_string' directive");
// Put the string in the table and emit the offset.
std::pair<StringRef, unsigned> Insertion =
getCVContext().addToStringTable(Data);
getStreamer().EmitIntValue(Insertion.second, 4);
return false;
}
/// parseDirectiveCVStringTable
/// ::= .cv_stringtable
bool AsmParser::parseDirectiveCVStringTable() {
getStreamer().EmitCVStringTableDirective();
return false;
}
/// parseDirectiveCVFileChecksums
/// ::= .cv_filechecksums
bool AsmParser::parseDirectiveCVFileChecksums() {
getStreamer().EmitCVFileChecksumsDirective();
return false;
}
/// parseDirectiveCVFileChecksumOffset
/// ::= .cv_filechecksumoffset fileno
bool AsmParser::parseDirectiveCVFileChecksumOffset() {
int64_t FileNo;
if (parseIntToken(FileNo, "expected identifier in directive"))
return true;
if (parseToken(AsmToken::EndOfStatement, "Expected End of Statement"))
return true;
getStreamer().EmitCVFileChecksumOffsetDirective(FileNo);
return false;
}
/// parseDirectiveCVFPOData
/// ::= .cv_fpo_data procsym
bool AsmParser::parseDirectiveCVFPOData() {
SMLoc DirLoc = getLexer().getLoc();
StringRef ProcName;
if (parseIdentifier(ProcName))
return TokError("expected symbol name");
if (parseEOL("unexpected tokens"))
return addErrorSuffix(" in '.cv_fpo_data' directive");
MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
getStreamer().EmitCVFPOData(ProcSym, DirLoc);
return false;
}
/// parseDirectiveCFISections
/// ::= .cfi_sections section [, section]
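/// For example (an illustrative sketch, not taken from the original source),
///   .cfi_sections .eh_frame, .debug_frame
/// requests that frame information be emitted into both sections.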
bool AsmParser::parseDirectiveCFISections() {
StringRef Name;
bool EH = false;
bool Debug = false;
if (parseIdentifier(Name))
return TokError("Expected an identifier");
if (Name == ".eh_frame")
EH = true;
else if (Name == ".debug_frame")
Debug = true;
if (getLexer().is(AsmToken::Comma)) {
Lex();
if (parseIdentifier(Name))
return TokError("Expected an identifier");
if (Name == ".eh_frame")
EH = true;
else if (Name == ".debug_frame")
Debug = true;
}
getStreamer().EmitCFISections(EH, Debug);
return false;
}
/// parseDirectiveCFIStartProc
/// ::= .cfi_startproc [simple]
bool AsmParser::parseDirectiveCFIStartProc() {
StringRef Simple;
if (!parseOptionalToken(AsmToken::EndOfStatement)) {
if (check(parseIdentifier(Simple) || Simple != "simple",
"unexpected token") ||
parseToken(AsmToken::EndOfStatement))
return addErrorSuffix(" in '.cfi_startproc' directive");
}
// TODO(kristina): Deal with a corner case of incorrect diagnostic context
// being produced if this directive is emitted as part of preprocessor macro
// expansion which can *ONLY* happen if Clang's cc1as is the API consumer.
// Tools like llvm-mc on the other hand are not affected by it, and report
// correct context information.
getStreamer().EmitCFIStartProc(!Simple.empty(), Lexer.getLoc());
return false;
}
/// parseDirectiveCFIEndProc
/// ::= .cfi_endproc
bool AsmParser::parseDirectiveCFIEndProc() {
getStreamer().EmitCFIEndProc();
return false;
}
/// parse register name or number.
bool AsmParser::parseRegisterOrRegisterNumber(int64_t &Register,
SMLoc DirectiveLoc) {
unsigned RegNo;
if (getLexer().isNot(AsmToken::Integer)) {
if (getTargetParser().ParseRegister(RegNo, DirectiveLoc, DirectiveLoc))
return true;
Register = getContext().getRegisterInfo()->getDwarfRegNum(RegNo, true);
} else
return parseAbsoluteExpression(Register);
return false;
}
/// parseDirectiveCFIDefCfa
/// ::= .cfi_def_cfa register, offset
bool AsmParser::parseDirectiveCFIDefCfa(SMLoc DirectiveLoc) {
int64_t Register = 0, Offset = 0;
if (parseRegisterOrRegisterNumber(Register, DirectiveLoc) ||
parseToken(AsmToken::Comma, "unexpected token in directive") ||
parseAbsoluteExpression(Offset))
return true;
getStreamer().EmitCFIDefCfa(Register, Offset);
return false;
}
/// parseDirectiveCFIDefCfaOffset
/// ::= .cfi_def_cfa_offset offset
bool AsmParser::parseDirectiveCFIDefCfaOffset() {
int64_t Offset = 0;
if (parseAbsoluteExpression(Offset))
return true;
getStreamer().EmitCFIDefCfaOffset(Offset);
return false;
}
/// parseDirectiveCFIRegister
/// ::= .cfi_register register, register
bool AsmParser::parseDirectiveCFIRegister(SMLoc DirectiveLoc) {
int64_t Register1 = 0, Register2 = 0;
if (parseRegisterOrRegisterNumber(Register1, DirectiveLoc) ||
parseToken(AsmToken::Comma, "unexpected token in directive") ||
parseRegisterOrRegisterNumber(Register2, DirectiveLoc))
return true;
getStreamer().EmitCFIRegister(Register1, Register2);
return false;
}
/// parseDirectiveCFIWindowSave
/// ::= .cfi_window_save
bool AsmParser::parseDirectiveCFIWindowSave() {
getStreamer().EmitCFIWindowSave();
return false;
}
/// parseDirectiveCFIAdjustCfaOffset
/// ::= .cfi_adjust_cfa_offset adjustment
bool AsmParser::parseDirectiveCFIAdjustCfaOffset() {
int64_t Adjustment = 0;
if (parseAbsoluteExpression(Adjustment))
return true;
getStreamer().EmitCFIAdjustCfaOffset(Adjustment);
return false;
}
/// parseDirectiveCFIDefCfaRegister
/// ::= .cfi_def_cfa_register register
bool AsmParser::parseDirectiveCFIDefCfaRegister(SMLoc DirectiveLoc) {
int64_t Register = 0;
if (parseRegisterOrRegisterNumber(Register, DirectiveLoc))
return true;
getStreamer().EmitCFIDefCfaRegister(Register);
return false;
}
/// parseDirectiveCFIOffset
/// ::= .cfi_offset register, offset
bool AsmParser::parseDirectiveCFIOffset(SMLoc DirectiveLoc) {
int64_t Register = 0;
int64_t Offset = 0;
if (parseRegisterOrRegisterNumber(Register, DirectiveLoc) ||
parseToken(AsmToken::Comma, "unexpected token in directive") ||
parseAbsoluteExpression(Offset))
return true;
getStreamer().EmitCFIOffset(Register, Offset);
return false;
}
/// parseDirectiveCFIRelOffset
/// ::= .cfi_rel_offset register, offset
bool AsmParser::parseDirectiveCFIRelOffset(SMLoc DirectiveLoc) {
int64_t Register = 0, Offset = 0;
if (parseRegisterOrRegisterNumber(Register, DirectiveLoc) ||
parseToken(AsmToken::Comma, "unexpected token in directive") ||
parseAbsoluteExpression(Offset))
return true;
getStreamer().EmitCFIRelOffset(Register, Offset);
return false;
}
static bool isValidEncoding(int64_t Encoding) {
if (Encoding & ~0xff)
return false;
if (Encoding == dwarf::DW_EH_PE_omit)
return true;
const unsigned Format = Encoding & 0xf;
if (Format != dwarf::DW_EH_PE_absptr && Format != dwarf::DW_EH_PE_udata2 &&
Format != dwarf::DW_EH_PE_udata4 && Format != dwarf::DW_EH_PE_udata8 &&
Format != dwarf::DW_EH_PE_sdata2 && Format != dwarf::DW_EH_PE_sdata4 &&
Format != dwarf::DW_EH_PE_sdata8 && Format != dwarf::DW_EH_PE_signed)
return false;
const unsigned Application = Encoding & 0x70;
if (Application != dwarf::DW_EH_PE_absptr &&
Application != dwarf::DW_EH_PE_pcrel)
return false;
return true;
}
/// parseDirectiveCFIPersonalityOrLsda
/// IsPersonality true for cfi_personality, false for cfi_lsda
/// ::= .cfi_personality encoding, [symbol_name]
/// ::= .cfi_lsda encoding, [symbol_name]
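/// For example (an illustrative sketch with a hypothetical symbol name),
///   .cfi_personality 0x9b, __gxx_personality_v0
/// records the personality routine using an indirect, pc-relative, sdata4
/// encoding; an encoding of DW_EH_PE_omit (0xff) takes no symbol.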
bool AsmParser::parseDirectiveCFIPersonalityOrLsda(bool IsPersonality) {
int64_t Encoding = 0;
if (parseAbsoluteExpression(Encoding))
return true;
if (Encoding == dwarf::DW_EH_PE_omit)
return false;
StringRef Name;
if (check(!isValidEncoding(Encoding), "unsupported encoding.") ||
parseToken(AsmToken::Comma, "unexpected token in directive") ||
check(parseIdentifier(Name), "expected identifier in directive"))
return true;
MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
if (IsPersonality)
getStreamer().EmitCFIPersonality(Sym, Encoding);
else
getStreamer().EmitCFILsda(Sym, Encoding);
return false;
}
/// parseDirectiveCFIRememberState
/// ::= .cfi_remember_state
bool AsmParser::parseDirectiveCFIRememberState() {
getStreamer().EmitCFIRememberState();
return false;
}
/// parseDirectiveCFIRestoreState
/// ::= .cfi_restore_state
bool AsmParser::parseDirectiveCFIRestoreState() {
getStreamer().EmitCFIRestoreState();
return false;
}
/// parseDirectiveCFISameValue
/// ::= .cfi_same_value register
bool AsmParser::parseDirectiveCFISameValue(SMLoc DirectiveLoc) {
int64_t Register = 0;
if (parseRegisterOrRegisterNumber(Register, DirectiveLoc))
return true;
getStreamer().EmitCFISameValue(Register);
return false;
}
/// parseDirectiveCFIRestore
/// ::= .cfi_restore register
bool AsmParser::parseDirectiveCFIRestore(SMLoc DirectiveLoc) {
int64_t Register = 0;
if (parseRegisterOrRegisterNumber(Register, DirectiveLoc))
return true;
getStreamer().EmitCFIRestore(Register);
return false;
}
/// parseDirectiveCFIEscape
/// ::= .cfi_escape expression[,...]
bool AsmParser::parseDirectiveCFIEscape() {
std::string Values;
int64_t CurrValue;
if (parseAbsoluteExpression(CurrValue))
return true;
Values.push_back((uint8_t)CurrValue);
while (getLexer().is(AsmToken::Comma)) {
Lex();
if (parseAbsoluteExpression(CurrValue))
return true;
Values.push_back((uint8_t)CurrValue);
}
getStreamer().EmitCFIEscape(Values);
return false;
}
/// parseDirectiveCFIReturnColumn
/// ::= .cfi_return_column register
bool AsmParser::parseDirectiveCFIReturnColumn(SMLoc DirectiveLoc) {
int64_t Register = 0;
if (parseRegisterOrRegisterNumber(Register, DirectiveLoc))
return true;
getStreamer().EmitCFIReturnColumn(Register);
return false;
}
/// parseDirectiveCFISignalFrame
/// ::= .cfi_signal_frame
bool AsmParser::parseDirectiveCFISignalFrame() {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.cfi_signal_frame'"))
return true;
getStreamer().EmitCFISignalFrame();
return false;
}
/// parseDirectiveCFIUndefined
/// ::= .cfi_undefined register
bool AsmParser::parseDirectiveCFIUndefined(SMLoc DirectiveLoc) {
int64_t Register = 0;
if (parseRegisterOrRegisterNumber(Register, DirectiveLoc))
return true;
getStreamer().EmitCFIUndefined(Register);
return false;
}
/// parseDirectiveAltmacro
/// ::= .altmacro
/// ::= .noaltmacro
bool AsmParser::parseDirectiveAltmacro(StringRef Directive) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '" + Directive + "' directive");
AltMacroMode = (Directive == ".altmacro");
return false;
}
/// parseDirectiveMacrosOnOff
/// ::= .macros_on
/// ::= .macros_off
bool AsmParser::parseDirectiveMacrosOnOff(StringRef Directive) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '" + Directive + "' directive"))
return true;
setMacrosEnabled(Directive == ".macros_on");
return false;
}
/// parseDirectiveMacro
/// ::= .macro name[,] [parameters]
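/// For example (an illustrative sketch; the instruction shown is schematic and
/// target-dependent),
///   .macro inc reg, amount=1
///   add \reg, \amount
///   .endm
/// defines a macro whose second parameter defaults to 1; qualifiers such as
/// "reg:req" or "args:vararg" may follow a parameter name.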
bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
StringRef Name;
if (parseIdentifier(Name))
return TokError("expected identifier in '.macro' directive");
if (getLexer().is(AsmToken::Comma))
Lex();
MCAsmMacroParameters Parameters;
while (getLexer().isNot(AsmToken::EndOfStatement)) {
if (!Parameters.empty() && Parameters.back().Vararg)
return Error(Lexer.getLoc(),
"Vararg parameter '" + Parameters.back().Name +
"' should be last one in the list of parameters.");
MCAsmMacroParameter Parameter;
if (parseIdentifier(Parameter.Name))
return TokError("expected identifier in '.macro' directive");
// Emit an error if two (or more) named parameters share the same name
for (const MCAsmMacroParameter& CurrParam : Parameters)
if (CurrParam.Name.equals(Parameter.Name))
return TokError("macro '" + Name + "' has multiple parameters"
" named '" + Parameter.Name + "'");
if (Lexer.is(AsmToken::Colon)) {
Lex(); // consume ':'
SMLoc QualLoc;
StringRef Qualifier;
QualLoc = Lexer.getLoc();
if (parseIdentifier(Qualifier))
return Error(QualLoc, "missing parameter qualifier for "
"'" + Parameter.Name + "' in macro '" + Name + "'");
if (Qualifier == "req")
Parameter.Required = true;
else if (Qualifier == "vararg")
Parameter.Vararg = true;
else
return Error(QualLoc, Qualifier + " is not a valid parameter qualifier "
"for '" + Parameter.Name + "' in macro '" + Name + "'");
}
if (getLexer().is(AsmToken::Equal)) {
Lex();
SMLoc ParamLoc;
ParamLoc = Lexer.getLoc();
if (parseMacroArgument(Parameter.Value, /*Vararg=*/false ))
return true;
if (Parameter.Required)
Warning(ParamLoc, "pointless default value for required parameter "
"'" + Parameter.Name + "' in macro '" + Name + "'");
}
Parameters.push_back(std::move(Parameter));
if (getLexer().is(AsmToken::Comma))
Lex();
}
// Eat just the end of statement.
Lexer.Lex();
// Consuming deferred text, so use Lexer.Lex to ignore Lexing Errors
AsmToken EndToken, StartToken = getTok();
unsigned MacroDepth = 0;
// Lex the macro definition.
while (true) {
// Ignore Lexing errors in macros.
while (Lexer.is(AsmToken::Error)) {
Lexer.Lex();
}
// Check whether we have reached the end of the file.
if (getLexer().is(AsmToken::Eof))
return Error(DirectiveLoc, "no matching '.endmacro' in definition");
// Otherwise, check whether we have reached the .endmacro.
if (getLexer().is(AsmToken::Identifier)) {
if (getTok().getIdentifier() == ".endm" ||
getTok().getIdentifier() == ".endmacro") {
if (MacroDepth == 0) { // Outermost macro.
EndToken = getTok();
Lexer.Lex();
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '" + EndToken.getIdentifier() +
"' directive");
break;
} else {
// Otherwise we just found the end of an inner macro.
--MacroDepth;
}
} else if (getTok().getIdentifier() == ".macro") {
// We allow nested macros. Those aren't instantiated until the outermost
// macro is expanded so just ignore them for now.
++MacroDepth;
}
}
// Otherwise, scan until the end of the statement.
eatToEndOfStatement();
}
if (getContext().lookupMacro(Name)) {
return Error(DirectiveLoc, "macro '" + Name + "' is already defined");
}
const char *BodyStart = StartToken.getLoc().getPointer();
const char *BodyEnd = EndToken.getLoc().getPointer();
StringRef Body = StringRef(BodyStart, BodyEnd - BodyStart);
checkForBadMacro(DirectiveLoc, Name, Body, Parameters);
MCAsmMacro Macro(Name, Body, std::move(Parameters));
DEBUG_WITH_TYPE("asm-macros", dbgs() << "Defining new macro:\n";
Macro.dump());
getContext().defineMacro(Name, std::move(Macro));
return false;
}
/// checkForBadMacro
///
/// With the support added for named parameters there may be code out there that
/// is transitioning from positional parameters. In versions of gas that did not
/// support named parameters they would simply be ignored on the macro
/// definition. Since this parser supports both styles, that is not possible, so
/// if a macro definition has named parameters but never uses them and its body
/// contains what appear to be positional parameters (strings like $1, $2, ...
/// and $n), issue a warning that the positional parameters found in the body
/// will have no effect. The hope is that the developer will either remove the
/// named parameters from the macro definition, so the positional parameters get
/// used if that was what was intended, or change the macro body to use the
/// named parameters. It is possible this warning will trigger when none of the
/// named parameters are used and strings like $1 are in fact simply meant to be
/// passed through unchanged.
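/// For example (an illustrative sketch, not taken from the original source), a
/// macro declared as ".macro m a, b" whose body refers to "$1" and "$2" rather
/// than "\a" and "\b" triggers this warning, because the positional references
/// are passed through unchanged.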
void AsmParser::checkForBadMacro(SMLoc DirectiveLoc, StringRef Name,
StringRef Body,
ArrayRef<MCAsmMacroParameter> Parameters) {
// If this macro is not defined with named parameters the warning we are
// checking for here doesn't apply.
unsigned NParameters = Parameters.size();
if (NParameters == 0)
return;
bool NamedParametersFound = false;
bool PositionalParametersFound = false;
// Look at the body of the macro for use of both the named parameters and what
// are likely to be positional parameters. This is what expandMacro() is
// doing when it finds the parameters in the body.
while (!Body.empty()) {
// Scan for the next possible parameter.
std::size_t End = Body.size(), Pos = 0;
for (; Pos != End; ++Pos) {
// Check for a substitution or escape.
// This macro is defined with parameters, look for \foo, \bar, etc.
if (Body[Pos] == '\\' && Pos + 1 != End)
break;
// This macro should have parameters, but look for $0, $1, ..., $n too.
if (Body[Pos] != '$' || Pos + 1 == End)
continue;
char Next = Body[Pos + 1];
if (Next == '$' || Next == 'n' ||
isdigit(static_cast<unsigned char>(Next)))
break;
}
// Check if we reached the end.
if (Pos == End)
break;
if (Body[Pos] == '$') {
switch (Body[Pos + 1]) {
// $$ => $
case '$':
break;
// $n => number of arguments
case 'n':
PositionalParametersFound = true;
break;
// $[0-9] => argument
default: {
PositionalParametersFound = true;
break;
}
}
Pos += 2;
} else {
unsigned I = Pos + 1;
while (isIdentifierChar(Body[I]) && I + 1 != End)
++I;
const char *Begin = Body.data() + Pos + 1;
StringRef Argument(Begin, I - (Pos + 1));
unsigned Index = 0;
for (; Index < NParameters; ++Index)
if (Parameters[Index].Name == Argument)
break;
if (Index == NParameters) {
if (Body[Pos + 1] == '(' && Body[Pos + 2] == ')')
Pos += 3;
else {
Pos = I;
}
} else {
NamedParametersFound = true;
Pos += 1 + Argument.size();
}
}
// Update the scan point.
Body = Body.substr(Pos);
}
if (!NamedParametersFound && PositionalParametersFound)
Warning(DirectiveLoc, "macro defined with named parameters which are not "
"used in macro body, possible positional parameter "
"found in body which will have no effect");
}
/// parseDirectiveExitMacro
/// ::= .exitm
bool AsmParser::parseDirectiveExitMacro(StringRef Directive) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '" + Directive + "' directive"))
return true;
if (!isInsideMacroInstantiation())
return TokError("unexpected '" + Directive + "' in file, "
"no current macro definition");
// Exit all conditionals that are active in the current macro.
while (TheCondStack.size() != ActiveMacros.back()->CondStackDepth) {
TheCondState = TheCondStack.back();
TheCondStack.pop_back();
}
handleMacroExit();
return false;
}
/// parseDirectiveEndMacro
/// ::= .endm
/// ::= .endmacro
bool AsmParser::parseDirectiveEndMacro(StringRef Directive) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '" + Directive + "' directive");
// If we are inside a macro instantiation, terminate the current
// instantiation.
if (isInsideMacroInstantiation()) {
handleMacroExit();
return false;
}
// Otherwise, this .endmacro is a stray entry in the file; well formed
// .endmacro directives are handled during the macro definition parsing.
return TokError("unexpected '" + Directive + "' in file, "
"no current macro definition");
}
/// parseDirectivePurgeMacro
/// ::= .purgem identifier
bool AsmParser::parseDirectivePurgeMacro(SMLoc DirectiveLoc) {
StringRef Name;
SMLoc Loc;
if (parseTokenLoc(Loc) ||
check(parseIdentifier(Name), Loc,
"expected identifier in '.purgem' directive") ||
parseToken(AsmToken::EndOfStatement,
"unexpected token in '.purgem' directive"))
return true;
if (!getContext().lookupMacro(Name))
return Error(DirectiveLoc, "macro '" + Name + "' is not defined");
getContext().undefineMacro(Name);
DEBUG_WITH_TYPE("asm-macros", dbgs()
<< "Un-defining macro: " << Name << "\n");
return false;
}
/// parseDirectiveBundleAlignMode
/// ::= {.bundle_align_mode} expression
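/// For example (an illustrative sketch, not taken from the original source),
///   .bundle_align_mode 5
/// enables bundling with 2^5 = 32-byte bundles.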
bool AsmParser::parseDirectiveBundleAlignMode() {
// Expect a single argument: an expression that evaluates to a constant
// in the inclusive range 0-30.
SMLoc ExprLoc = getLexer().getLoc();
int64_t AlignSizePow2;
if (checkForValidSection() || parseAbsoluteExpression(AlignSizePow2) ||
parseToken(AsmToken::EndOfStatement, "unexpected token after expression "
"in '.bundle_align_mode' "
"directive") ||
check(AlignSizePow2 < 0 || AlignSizePow2 > 30, ExprLoc,
"invalid bundle alignment size (expected between 0 and 30)"))
return true;
// Because of AlignSizePow2's verified range we can safely truncate it to
// unsigned.
getStreamer().EmitBundleAlignMode(static_cast<unsigned>(AlignSizePow2));
return false;
}
/// parseDirectiveBundleLock
/// ::= {.bundle_lock} [align_to_end]
bool AsmParser::parseDirectiveBundleLock() {
if (checkForValidSection())
return true;
bool AlignToEnd = false;
StringRef Option;
SMLoc Loc = getTok().getLoc();
const char *kInvalidOptionError =
"invalid option for '.bundle_lock' directive";
if (!parseOptionalToken(AsmToken::EndOfStatement)) {
if (check(parseIdentifier(Option), Loc, kInvalidOptionError) ||
check(Option != "align_to_end", Loc, kInvalidOptionError) ||
parseToken(AsmToken::EndOfStatement,
"unexpected token after '.bundle_lock' directive option"))
return true;
AlignToEnd = true;
}
getStreamer().EmitBundleLock(AlignToEnd);
return false;
}
/// parseDirectiveBundleUnlock
/// ::= {.bundle_unlock}
bool AsmParser::parseDirectiveBundleUnlock() {
if (checkForValidSection() ||
parseToken(AsmToken::EndOfStatement,
"unexpected token in '.bundle_unlock' directive"))
return true;
getStreamer().EmitBundleUnlock();
return false;
}
/// parseDirectiveSpace
/// ::= (.skip | .space) expression [ , expression ]
bool AsmParser::parseDirectiveSpace(StringRef IDVal) {
SMLoc NumBytesLoc = Lexer.getLoc();
const MCExpr *NumBytes;
if (checkForValidSection() || parseExpression(NumBytes))
return true;
int64_t FillExpr = 0;
if (parseOptionalToken(AsmToken::Comma))
if (parseAbsoluteExpression(FillExpr))
return addErrorSuffix("in '" + Twine(IDVal) + "' directive");
if (parseToken(AsmToken::EndOfStatement))
return addErrorSuffix("in '" + Twine(IDVal) + "' directive");
// FIXME: Sometimes the fill expr is 'nop' if it isn't supplied, instead of 0.
getStreamer().emitFill(*NumBytes, FillExpr, NumBytesLoc);
return false;
}
/// parseDirectiveDCB
/// ::= .dcb.{b, l, w} expression, expression
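/// For example (an illustrative sketch, not taken from the original source),
///   .dcb.w 3, 0x1234
/// emits three 2-byte values of 0x1234.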
bool AsmParser::parseDirectiveDCB(StringRef IDVal, unsigned Size) {
SMLoc NumValuesLoc = Lexer.getLoc();
int64_t NumValues;
if (checkForValidSection() || parseAbsoluteExpression(NumValues))
return true;
if (NumValues < 0) {
Warning(NumValuesLoc, "'" + Twine(IDVal) + "' directive with negative repeat count has no effect");
return false;
}
if (parseToken(AsmToken::Comma,
"unexpected token in '" + Twine(IDVal) + "' directive"))
return true;
const MCExpr *Value;
SMLoc ExprLoc = getLexer().getLoc();
if (parseExpression(Value))
return true;
// Special case constant expressions to match code generator.
if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
assert(Size <= 8 && "Invalid size");
uint64_t IntValue = MCE->getValue();
if (!isUIntN(8 * Size, IntValue) && !isIntN(8 * Size, IntValue))
return Error(ExprLoc, "literal value out of range for directive");
for (uint64_t i = 0, e = NumValues; i != e; ++i)
getStreamer().EmitIntValue(IntValue, Size);
} else {
for (uint64_t i = 0, e = NumValues; i != e; ++i)
getStreamer().EmitValue(Value, Size, ExprLoc);
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '" + Twine(IDVal) + "' directive"))
return true;
return false;
}
/// parseDirectiveRealDCB
/// ::= .dcb.{d, s} expression, expression
bool AsmParser::parseDirectiveRealDCB(StringRef IDVal, const fltSemantics &Semantics) {
SMLoc NumValuesLoc = Lexer.getLoc();
int64_t NumValues;
if (checkForValidSection() || parseAbsoluteExpression(NumValues))
return true;
if (NumValues < 0) {
Warning(NumValuesLoc, "'" + Twine(IDVal) + "' directive with negative repeat count has no effect");
return false;
}
if (parseToken(AsmToken::Comma,
"unexpected token in '" + Twine(IDVal) + "' directive"))
return true;
APInt AsInt;
if (parseRealValue(Semantics, AsInt))
return true;
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '" + Twine(IDVal) + "' directive"))
return true;
for (uint64_t i = 0, e = NumValues; i != e; ++i)
getStreamer().EmitIntValue(AsInt.getLimitedValue(),
AsInt.getBitWidth() / 8);
return false;
}
/// parseDirectiveDS
/// ::= .ds.{b, d, l, p, s, w, x} expression
bool AsmParser::parseDirectiveDS(StringRef IDVal, unsigned Size) {
SMLoc NumValuesLoc = Lexer.getLoc();
int64_t NumValues;
if (checkForValidSection() || parseAbsoluteExpression(NumValues))
return true;
if (NumValues < 0) {
Warning(NumValuesLoc, "'" + Twine(IDVal) + "' directive with negative repeat count has no effect");
return false;
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '" + Twine(IDVal) + "' directive"))
return true;
for (uint64_t i = 0, e = NumValues; i != e; ++i)
getStreamer().emitFill(Size, 0);
return false;
}
/// parseDirectiveLEB128
/// ::= (.sleb128 | .uleb128) [ expression (, expression)* ]
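/// For example (an illustrative sketch, not taken from the original source),
///   .uleb128 130
/// emits the bytes 0x82 0x01, while
///   .sleb128 -2
/// emits the single byte 0x7e.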
bool AsmParser::parseDirectiveLEB128(bool Signed) {
if (checkForValidSection())
return true;
auto parseOp = [&]() -> bool {
const MCExpr *Value;
if (parseExpression(Value))
return true;
if (Signed)
getStreamer().EmitSLEB128Value(Value);
else
getStreamer().EmitULEB128Value(Value);
return false;
};
if (parseMany(parseOp))
return addErrorSuffix(" in directive");
return false;
}
/// parseDirectiveSymbolAttribute
/// ::= { ".globl", ".weak", ... } [ identifier ( , identifier )* ]
bool AsmParser::parseDirectiveSymbolAttribute(MCSymbolAttr Attr) {
auto parseOp = [&]() -> bool {
StringRef Name;
SMLoc Loc = getTok().getLoc();
if (parseIdentifier(Name))
return Error(Loc, "expected identifier");
MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
// Assembler local symbols don't make any sense here. Complain loudly.
if (Sym->isTemporary())
return Error(Loc, "non-local symbol required");
if (!getStreamer().EmitSymbolAttribute(Sym, Attr))
return Error(Loc, "unable to emit symbol attribute");
return false;
};
if (parseMany(parseOp))
return addErrorSuffix(" in directive");
return false;
}
/// parseDirectiveComm
/// ::= ( .comm | .lcomm ) identifier , size_expression [ , align_expression ]
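/// For example (an illustrative sketch with a hypothetical symbol name),
///   .comm buffer, 1024, 4
/// requests a common symbol of 1024 bytes; whether the third operand is a byte
/// alignment or a power-of-two exponent depends on the target's MCAsmInfo
/// settings, as handled below.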
bool AsmParser::parseDirectiveComm(bool IsLocal) {
if (checkForValidSection())
return true;
SMLoc IDLoc = getLexer().getLoc();
StringRef Name;
if (parseIdentifier(Name))
return TokError("expected identifier in directive");
// Handle the identifier as the key symbol.
MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in directive");
Lex();
int64_t Size;
SMLoc SizeLoc = getLexer().getLoc();
if (parseAbsoluteExpression(Size))
return true;
int64_t Pow2Alignment = 0;
SMLoc Pow2AlignmentLoc;
if (getLexer().is(AsmToken::Comma)) {
Lex();
Pow2AlignmentLoc = getLexer().getLoc();
if (parseAbsoluteExpression(Pow2Alignment))
return true;
LCOMM::LCOMMType LCOMM = Lexer.getMAI().getLCOMMDirectiveAlignmentType();
if (IsLocal && LCOMM == LCOMM::NoAlignment)
return Error(Pow2AlignmentLoc, "alignment not supported on this target");
// If this target takes alignments in bytes (not log) validate and convert.
if ((!IsLocal && Lexer.getMAI().getCOMMDirectiveAlignmentIsInBytes()) ||
(IsLocal && LCOMM == LCOMM::ByteAlignment)) {
if (!isPowerOf2_64(Pow2Alignment))
return Error(Pow2AlignmentLoc, "alignment must be a power of 2");
Pow2Alignment = Log2_64(Pow2Alignment);
}
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.comm' or '.lcomm' directive"))
return true;
// NOTE: a size of zero for a .comm should create an undefined symbol
// but a size of zero for a .lcomm creates a .bss symbol of size zero.
if (Size < 0)
return Error(SizeLoc, "invalid '.comm' or '.lcomm' directive size, can't "
"be less than zero");
// NOTE: The alignment in the directive is a power of 2 value, the assembler
// may internally end up wanting an alignment in bytes.
// FIXME: Diagnose overflow.
if (Pow2Alignment < 0)
return Error(Pow2AlignmentLoc, "invalid '.comm' or '.lcomm' directive "
"alignment, can't be less than zero");
Sym->redefineIfPossible();
if (!Sym->isUndefined())
return Error(IDLoc, "invalid symbol redefinition");
// Create the Symbol as a common or local common with Size and Pow2Alignment
if (IsLocal) {
getStreamer().EmitLocalCommonSymbol(Sym, Size, 1 << Pow2Alignment);
return false;
}
getStreamer().EmitCommonSymbol(Sym, Size, 1 << Pow2Alignment);
return false;
}
/// parseDirectiveAbort
/// ::= .abort [... message ...]
bool AsmParser::parseDirectiveAbort() {
// FIXME: Use loc from directive.
SMLoc Loc = getLexer().getLoc();
StringRef Str = parseStringToEndOfStatement();
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.abort' directive"))
return true;
if (Str.empty())
return Error(Loc, ".abort detected. Assembly stopping.");
else
return Error(Loc, ".abort '" + Str + "' detected. Assembly stopping.");
// FIXME: Actually abort assembly here.
return false;
}
/// parseDirectiveInclude
/// ::= .include "filename"
bool AsmParser::parseDirectiveInclude() {
// Allow the strings to have escaped octal character sequence.
std::string Filename;
SMLoc IncludeLoc = getTok().getLoc();
if (check(getTok().isNot(AsmToken::String),
"expected string in '.include' directive") ||
parseEscapedString(Filename) ||
check(getTok().isNot(AsmToken::EndOfStatement),
"unexpected token in '.include' directive") ||
// Attempt to switch the lexer to the included file before consuming the
// end of statement to avoid losing it when we switch.
check(enterIncludeFile(Filename), IncludeLoc,
"Could not find include file '" + Filename + "'"))
return true;
return false;
}
/// parseDirectiveIncbin
/// ::= .incbin "filename" [ , skip [ , count ] ]
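/// For example (an illustrative sketch with a hypothetical filename),
///   .incbin "blob.bin", 16, 256
/// skips the first 16 bytes of blob.bin and emits the next 256 bytes verbatim.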
bool AsmParser::parseDirectiveIncbin() {
// Allow the strings to have escaped octal character sequence.
std::string Filename;
SMLoc IncbinLoc = getTok().getLoc();
if (check(getTok().isNot(AsmToken::String),
"expected string in '.incbin' directive") ||
parseEscapedString(Filename))
return true;
int64_t Skip = 0;
const MCExpr *Count = nullptr;
SMLoc SkipLoc, CountLoc;
if (parseOptionalToken(AsmToken::Comma)) {
// The skip expression can be omitted while specifying the count, e.g:
// .incbin "filename",,4
if (getTok().isNot(AsmToken::Comma)) {
if (parseTokenLoc(SkipLoc) || parseAbsoluteExpression(Skip))
return true;
}
if (parseOptionalToken(AsmToken::Comma)) {
CountLoc = getTok().getLoc();
if (parseExpression(Count))
return true;
}
}
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.incbin' directive"))
return true;
if (check(Skip < 0, SkipLoc, "skip is negative"))
return true;
// Attempt to process the included file.
if (processIncbinFile(Filename, Skip, Count, CountLoc))
return Error(IncbinLoc, "Could not find incbin file '" + Filename + "'");
return false;
}
/// parseDirectiveIf
/// ::= .if{,eq,ge,gt,le,lt,ne} expression
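/// For example (an illustrative sketch with hypothetical symbols),
///   .set counter, 20
///   .ifge counter-16
///   .error "counter must be below 16"
///   .endif
/// evaluates the expression at assembly time and raises the error because
/// counter-16 is non-negative.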
bool AsmParser::parseDirectiveIf(SMLoc DirectiveLoc, DirectiveKind DirKind) {
TheCondStack.push_back(TheCondState);
TheCondState.TheCond = AsmCond::IfCond;
if (TheCondState.Ignore) {
eatToEndOfStatement();
} else {
int64_t ExprValue;
if (parseAbsoluteExpression(ExprValue) ||
parseToken(AsmToken::EndOfStatement,
"unexpected token in '.if' directive"))
return true;
switch (DirKind) {
default:
llvm_unreachable("unsupported directive");
case DK_IF:
case DK_IFNE:
break;
case DK_IFEQ:
ExprValue = ExprValue == 0;
break;
case DK_IFGE:
ExprValue = ExprValue >= 0;
break;
case DK_IFGT:
ExprValue = ExprValue > 0;
break;
case DK_IFLE:
ExprValue = ExprValue <= 0;
break;
case DK_IFLT:
ExprValue = ExprValue < 0;
break;
}
TheCondState.CondMet = ExprValue;
TheCondState.Ignore = !TheCondState.CondMet;
}
return false;
}
/// parseDirectiveIfb
/// ::= .ifb string
bool AsmParser::parseDirectiveIfb(SMLoc DirectiveLoc, bool ExpectBlank) {
TheCondStack.push_back(TheCondState);
TheCondState.TheCond = AsmCond::IfCond;
if (TheCondState.Ignore) {
eatToEndOfStatement();
} else {
StringRef Str = parseStringToEndOfStatement();
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.ifb' directive"))
return true;
TheCondState.CondMet = ExpectBlank == Str.empty();
TheCondState.Ignore = !TheCondState.CondMet;
}
return false;
}
/// parseDirectiveIfc
/// ::= .ifc string1, string2
/// ::= .ifnc string1, string2
bool AsmParser::parseDirectiveIfc(SMLoc DirectiveLoc, bool ExpectEqual) {
TheCondStack.push_back(TheCondState);
TheCondState.TheCond = AsmCond::IfCond;
if (TheCondState.Ignore) {
eatToEndOfStatement();
} else {
StringRef Str1 = parseStringToComma();
if (parseToken(AsmToken::Comma, "unexpected token in '.ifc' directive"))
return true;
StringRef Str2 = parseStringToEndOfStatement();
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.ifc' directive"))
return true;
TheCondState.CondMet = ExpectEqual == (Str1.trim() == Str2.trim());
TheCondState.Ignore = !TheCondState.CondMet;
}
return false;
}
/// parseDirectiveIfeqs
/// ::= .ifeqs string1, string2
bool AsmParser::parseDirectiveIfeqs(SMLoc DirectiveLoc, bool ExpectEqual) {
if (Lexer.isNot(AsmToken::String)) {
if (ExpectEqual)
return TokError("expected string parameter for '.ifeqs' directive");
return TokError("expected string parameter for '.ifnes' directive");
}
StringRef String1 = getTok().getStringContents();
Lex();
if (Lexer.isNot(AsmToken::Comma)) {
if (ExpectEqual)
return TokError(
"expected comma after first string for '.ifeqs' directive");
return TokError("expected comma after first string for '.ifnes' directive");
}
Lex();
if (Lexer.isNot(AsmToken::String)) {
if (ExpectEqual)
return TokError("expected string parameter for '.ifeqs' directive");
return TokError("expected string parameter for '.ifnes' directive");
}
StringRef String2 = getTok().getStringContents();
Lex();
TheCondStack.push_back(TheCondState);
TheCondState.TheCond = AsmCond::IfCond;
TheCondState.CondMet = ExpectEqual == (String1 == String2);
TheCondState.Ignore = !TheCondState.CondMet;
return false;
}
/// parseDirectiveIfdef
/// ::= .ifdef symbol
bool AsmParser::parseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined) {
StringRef Name;
TheCondStack.push_back(TheCondState);
TheCondState.TheCond = AsmCond::IfCond;
if (TheCondState.Ignore) {
eatToEndOfStatement();
} else {
if (check(parseIdentifier(Name), "expected identifier after '.ifdef'") ||
parseToken(AsmToken::EndOfStatement, "unexpected token in '.ifdef'"))
return true;
MCSymbol *Sym = getContext().lookupSymbol(Name);
if (expect_defined)
TheCondState.CondMet = (Sym && !Sym->isUndefined());
else
TheCondState.CondMet = (!Sym || Sym->isUndefined());
TheCondState.Ignore = !TheCondState.CondMet;
}
return false;
}
/// parseDirectiveElseIf
/// ::= .elseif expression
bool AsmParser::parseDirectiveElseIf(SMLoc DirectiveLoc) {
if (TheCondState.TheCond != AsmCond::IfCond &&
TheCondState.TheCond != AsmCond::ElseIfCond)
return Error(DirectiveLoc, "Encountered a .elseif that doesn't follow an"
" .if or an .elseif");
TheCondState.TheCond = AsmCond::ElseIfCond;
bool LastIgnoreState = false;
if (!TheCondStack.empty())
LastIgnoreState = TheCondStack.back().Ignore;
if (LastIgnoreState || TheCondState.CondMet) {
TheCondState.Ignore = true;
eatToEndOfStatement();
} else {
int64_t ExprValue;
if (parseAbsoluteExpression(ExprValue))
return true;
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.elseif' directive"))
return true;
TheCondState.CondMet = ExprValue;
TheCondState.Ignore = !TheCondState.CondMet;
}
return false;
}
/// parseDirectiveElse
/// ::= .else
bool AsmParser::parseDirectiveElse(SMLoc DirectiveLoc) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.else' directive"))
return true;
if (TheCondState.TheCond != AsmCond::IfCond &&
TheCondState.TheCond != AsmCond::ElseIfCond)
return Error(DirectiveLoc, "Encountered a .else that doesn't follow "
" an .if or an .elseif");
TheCondState.TheCond = AsmCond::ElseCond;
bool LastIgnoreState = false;
if (!TheCondStack.empty())
LastIgnoreState = TheCondStack.back().Ignore;
if (LastIgnoreState || TheCondState.CondMet)
TheCondState.Ignore = true;
else
TheCondState.Ignore = false;
return false;
}
/// parseDirectiveEnd
/// ::= .end
bool AsmParser::parseDirectiveEnd(SMLoc DirectiveLoc) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.end' directive"))
return true;
while (Lexer.isNot(AsmToken::Eof))
Lexer.Lex();
return false;
}
/// parseDirectiveError
/// ::= .err
/// ::= .error [string]
bool AsmParser::parseDirectiveError(SMLoc L, bool WithMessage) {
if (!TheCondStack.empty()) {
if (TheCondStack.back().Ignore) {
eatToEndOfStatement();
return false;
}
}
if (!WithMessage)
return Error(L, ".err encountered");
StringRef Message = ".error directive invoked in source file";
if (Lexer.isNot(AsmToken::EndOfStatement)) {
if (Lexer.isNot(AsmToken::String))
return TokError(".error argument must be a string");
Message = getTok().getStringContents();
Lex();
}
return Error(L, Message);
}
/// parseDirectiveWarning
/// ::= .warning [string]
bool AsmParser::parseDirectiveWarning(SMLoc L) {
if (!TheCondStack.empty()) {
if (TheCondStack.back().Ignore) {
eatToEndOfStatement();
return false;
}
}
StringRef Message = ".warning directive invoked in source file";
if (!parseOptionalToken(AsmToken::EndOfStatement)) {
if (Lexer.isNot(AsmToken::String))
return TokError(".warning argument must be a string");
Message = getTok().getStringContents();
Lex();
if (parseToken(AsmToken::EndOfStatement,
"expected end of statement in '.warning' directive"))
return true;
}
return Warning(L, Message);
}
/// parseDirectiveEndIf
/// ::= .endif
bool AsmParser::parseDirectiveEndIf(SMLoc DirectiveLoc) {
if (parseToken(AsmToken::EndOfStatement,
"unexpected token in '.endif' directive"))
return true;
if ((TheCondState.TheCond == AsmCond::NoCond) || TheCondStack.empty())
return Error(DirectiveLoc, "Encountered a .endif that doesn't follow "
"an .if or .else");
if (!TheCondStack.empty()) {
TheCondState = TheCondStack.back();
TheCondStack.pop_back();
}
return false;
}
void AsmParser::initializeDirectiveKindMap() {
DirectiveKindMap[".set"] = DK_SET;
DirectiveKindMap[".equ"] = DK_EQU;
DirectiveKindMap[".equiv"] = DK_EQUIV;
DirectiveKindMap[".ascii"] = DK_ASCII;
DirectiveKindMap[".asciz"] = DK_ASCIZ;
DirectiveKindMap[".string"] = DK_STRING;
DirectiveKindMap[".byte"] = DK_BYTE;
DirectiveKindMap[".short"] = DK_SHORT;
DirectiveKindMap[".value"] = DK_VALUE;
DirectiveKindMap[".2byte"] = DK_2BYTE;
DirectiveKindMap[".long"] = DK_LONG;
DirectiveKindMap[".int"] = DK_INT;
DirectiveKindMap[".4byte"] = DK_4BYTE;
DirectiveKindMap[".quad"] = DK_QUAD;
DirectiveKindMap[".8byte"] = DK_8BYTE;
DirectiveKindMap[".octa"] = DK_OCTA;
DirectiveKindMap[".single"] = DK_SINGLE;
DirectiveKindMap[".float"] = DK_FLOAT;
DirectiveKindMap[".double"] = DK_DOUBLE;
DirectiveKindMap[".align"] = DK_ALIGN;
DirectiveKindMap[".align32"] = DK_ALIGN32;
DirectiveKindMap[".balign"] = DK_BALIGN;
DirectiveKindMap[".balignw"] = DK_BALIGNW;
DirectiveKindMap[".balignl"] = DK_BALIGNL;
DirectiveKindMap[".p2align"] = DK_P2ALIGN;
DirectiveKindMap[".p2alignw"] = DK_P2ALIGNW;
DirectiveKindMap[".p2alignl"] = DK_P2ALIGNL;
DirectiveKindMap[".org"] = DK_ORG;
DirectiveKindMap[".fill"] = DK_FILL;
DirectiveKindMap[".zero"] = DK_ZERO;
DirectiveKindMap[".extern"] = DK_EXTERN;
DirectiveKindMap[".globl"] = DK_GLOBL;
DirectiveKindMap[".global"] = DK_GLOBAL;
DirectiveKindMap[".lazy_reference"] = DK_LAZY_REFERENCE;
DirectiveKindMap[".no_dead_strip"] = DK_NO_DEAD_STRIP;
DirectiveKindMap[".symbol_resolver"] = DK_SYMBOL_RESOLVER;
DirectiveKindMap[".private_extern"] = DK_PRIVATE_EXTERN;
DirectiveKindMap[".reference"] = DK_REFERENCE;
DirectiveKindMap[".weak_definition"] = DK_WEAK_DEFINITION;
DirectiveKindMap[".weak_reference"] = DK_WEAK_REFERENCE;
DirectiveKindMap[".weak_def_can_be_hidden"] = DK_WEAK_DEF_CAN_BE_HIDDEN;
DirectiveKindMap[".comm"] = DK_COMM;
DirectiveKindMap[".common"] = DK_COMMON;
DirectiveKindMap[".lcomm"] = DK_LCOMM;
DirectiveKindMap[".abort"] = DK_ABORT;
DirectiveKindMap[".include"] = DK_INCLUDE;
DirectiveKindMap[".incbin"] = DK_INCBIN;
DirectiveKindMap[".code16"] = DK_CODE16;
DirectiveKindMap[".code16gcc"] = DK_CODE16GCC;
DirectiveKindMap[".rept"] = DK_REPT;
DirectiveKindMap[".rep"] = DK_REPT;
DirectiveKindMap[".irp"] = DK_IRP;
DirectiveKindMap[".irpc"] = DK_IRPC;
DirectiveKindMap[".endr"] = DK_ENDR;
DirectiveKindMap[".bundle_align_mode"] = DK_BUNDLE_ALIGN_MODE;
DirectiveKindMap[".bundle_lock"] = DK_BUNDLE_LOCK;
DirectiveKindMap[".bundle_unlock"] = DK_BUNDLE_UNLOCK;
DirectiveKindMap[".if"] = DK_IF;
DirectiveKindMap[".ifeq"] = DK_IFEQ;
DirectiveKindMap[".ifge"] = DK_IFGE;
DirectiveKindMap[".ifgt"] = DK_IFGT;
DirectiveKindMap[".ifle"] = DK_IFLE;
DirectiveKindMap[".iflt"] = DK_IFLT;
DirectiveKindMap[".ifne"] = DK_IFNE;
DirectiveKindMap[".ifb"] = DK_IFB;
DirectiveKindMap[".ifnb"] = DK_IFNB;
DirectiveKindMap[".ifc"] = DK_IFC;
DirectiveKindMap[".ifeqs"] = DK_IFEQS;
DirectiveKindMap[".ifnc"] = DK_IFNC;
DirectiveKindMap[".ifnes"] = DK_IFNES;
DirectiveKindMap[".ifdef"] = DK_IFDEF;
DirectiveKindMap[".ifndef"] = DK_IFNDEF;
DirectiveKindMap[".ifnotdef"] = DK_IFNOTDEF;
DirectiveKindMap[".elseif"] = DK_ELSEIF;
DirectiveKindMap[".else"] = DK_ELSE;
DirectiveKindMap[".end"] = DK_END;
DirectiveKindMap[".endif"] = DK_ENDIF;
DirectiveKindMap[".skip"] = DK_SKIP;
DirectiveKindMap[".space"] = DK_SPACE;
DirectiveKindMap[".file"] = DK_FILE;
DirectiveKindMap[".line"] = DK_LINE;
DirectiveKindMap[".loc"] = DK_LOC;
DirectiveKindMap[".stabs"] = DK_STABS;
DirectiveKindMap[".cv_file"] = DK_CV_FILE;
DirectiveKindMap[".cv_func_id"] = DK_CV_FUNC_ID;
DirectiveKindMap[".cv_loc"] = DK_CV_LOC;
DirectiveKindMap[".cv_linetable"] = DK_CV_LINETABLE;
DirectiveKindMap[".cv_inline_linetable"] = DK_CV_INLINE_LINETABLE;
DirectiveKindMap[".cv_inline_site_id"] = DK_CV_INLINE_SITE_ID;
DirectiveKindMap[".cv_def_range"] = DK_CV_DEF_RANGE;
DirectiveKindMap[".cv_string"] = DK_CV_STRING;
DirectiveKindMap[".cv_stringtable"] = DK_CV_STRINGTABLE;
DirectiveKindMap[".cv_filechecksums"] = DK_CV_FILECHECKSUMS;
DirectiveKindMap[".cv_filechecksumoffset"] = DK_CV_FILECHECKSUM_OFFSET;
DirectiveKindMap[".cv_fpo_data"] = DK_CV_FPO_DATA;
DirectiveKindMap[".sleb128"] = DK_SLEB128;
DirectiveKindMap[".uleb128"] = DK_ULEB128;
DirectiveKindMap[".cfi_sections"] = DK_CFI_SECTIONS;
DirectiveKindMap[".cfi_startproc"] = DK_CFI_STARTPROC;
DirectiveKindMap[".cfi_endproc"] = DK_CFI_ENDPROC;
DirectiveKindMap[".cfi_def_cfa"] = DK_CFI_DEF_CFA;
DirectiveKindMap[".cfi_def_cfa_offset"] = DK_CFI_DEF_CFA_OFFSET;
DirectiveKindMap[".cfi_adjust_cfa_offset"] = DK_CFI_ADJUST_CFA_OFFSET;
DirectiveKindMap[".cfi_def_cfa_register"] = DK_CFI_DEF_CFA_REGISTER;
DirectiveKindMap[".cfi_offset"] = DK_CFI_OFFSET;
DirectiveKindMap[".cfi_rel_offset"] = DK_CFI_REL_OFFSET;
DirectiveKindMap[".cfi_personality"] = DK_CFI_PERSONALITY;
DirectiveKindMap[".cfi_lsda"] = DK_CFI_LSDA;
DirectiveKindMap[".cfi_remember_state"] = DK_CFI_REMEMBER_STATE;
DirectiveKindMap[".cfi_restore_state"] = DK_CFI_RESTORE_STATE;
DirectiveKindMap[".cfi_same_value"] = DK_CFI_SAME_VALUE;
DirectiveKindMap[".cfi_restore"] = DK_CFI_RESTORE;
DirectiveKindMap[".cfi_escape"] = DK_CFI_ESCAPE;
DirectiveKindMap[".cfi_return_column"] = DK_CFI_RETURN_COLUMN;
DirectiveKindMap[".cfi_signal_frame"] = DK_CFI_SIGNAL_FRAME;
DirectiveKindMap[".cfi_undefined"] = DK_CFI_UNDEFINED;
DirectiveKindMap[".cfi_register"] = DK_CFI_REGISTER;
DirectiveKindMap[".cfi_window_save"] = DK_CFI_WINDOW_SAVE;
DirectiveKindMap[".cfi_b_key_frame"] = DK_CFI_B_KEY_FRAME;
DirectiveKindMap[".macros_on"] = DK_MACROS_ON;
DirectiveKindMap[".macros_off"] = DK_MACROS_OFF;
DirectiveKindMap[".macro"] = DK_MACRO;
DirectiveKindMap[".exitm"] = DK_EXITM;
DirectiveKindMap[".endm"] = DK_ENDM;
DirectiveKindMap[".endmacro"] = DK_ENDMACRO;
DirectiveKindMap[".purgem"] = DK_PURGEM;
DirectiveKindMap[".err"] = DK_ERR;
DirectiveKindMap[".error"] = DK_ERROR;
DirectiveKindMap[".warning"] = DK_WARNING;
DirectiveKindMap[".altmacro"] = DK_ALTMACRO;
DirectiveKindMap[".noaltmacro"] = DK_NOALTMACRO;
DirectiveKindMap[".reloc"] = DK_RELOC;
DirectiveKindMap[".dc"] = DK_DC;
DirectiveKindMap[".dc.a"] = DK_DC_A;
DirectiveKindMap[".dc.b"] = DK_DC_B;
DirectiveKindMap[".dc.d"] = DK_DC_D;
DirectiveKindMap[".dc.l"] = DK_DC_L;
DirectiveKindMap[".dc.s"] = DK_DC_S;
DirectiveKindMap[".dc.w"] = DK_DC_W;
DirectiveKindMap[".dc.x"] = DK_DC_X;
DirectiveKindMap[".dcb"] = DK_DCB;
DirectiveKindMap[".dcb.b"] = DK_DCB_B;
DirectiveKindMap[".dcb.d"] = DK_DCB_D;
DirectiveKindMap[".dcb.l"] = DK_DCB_L;
DirectiveKindMap[".dcb.s"] = DK_DCB_S;
DirectiveKindMap[".dcb.w"] = DK_DCB_W;
DirectiveKindMap[".dcb.x"] = DK_DCB_X;
DirectiveKindMap[".ds"] = DK_DS;
DirectiveKindMap[".ds.b"] = DK_DS_B;
DirectiveKindMap[".ds.d"] = DK_DS_D;
DirectiveKindMap[".ds.l"] = DK_DS_L;
DirectiveKindMap[".ds.p"] = DK_DS_P;
DirectiveKindMap[".ds.s"] = DK_DS_S;
DirectiveKindMap[".ds.w"] = DK_DS_W;
DirectiveKindMap[".ds.x"] = DK_DS_X;
DirectiveKindMap[".print"] = DK_PRINT;
DirectiveKindMap[".addrsig"] = DK_ADDRSIG;
DirectiveKindMap[".addrsig_sym"] = DK_ADDRSIG_SYM;
}
MCAsmMacro *AsmParser::parseMacroLikeBody(SMLoc DirectiveLoc) {
AsmToken EndToken, StartToken = getTok();
unsigned NestLevel = 0;
while (true) {
// Check whether we have reached the end of the file.
if (getLexer().is(AsmToken::Eof)) {
printError(DirectiveLoc, "no matching '.endr' in definition");
return nullptr;
}
if (Lexer.is(AsmToken::Identifier) &&
(getTok().getIdentifier() == ".rep" ||
getTok().getIdentifier() == ".rept" ||
getTok().getIdentifier() == ".irp" ||
getTok().getIdentifier() == ".irpc")) {
++NestLevel;
}
// Otherwise, check whether we have reached the .endr.
if (Lexer.is(AsmToken::Identifier) && getTok().getIdentifier() == ".endr") {
if (NestLevel == 0) {
EndToken = getTok();
Lex();
if (Lexer.isNot(AsmToken::EndOfStatement)) {
printError(getTok().getLoc(),
"unexpected token in '.endr' directive");
return nullptr;
}
break;
}
--NestLevel;
}
// Otherwise, scan till the end of the statement.
eatToEndOfStatement();
}
const char *BodyStart = StartToken.getLoc().getPointer();
const char *BodyEnd = EndToken.getLoc().getPointer();
StringRef Body = StringRef(BodyStart, BodyEnd - BodyStart);
// We Are Anonymous.
MacroLikeBodies.emplace_back(StringRef(), Body, MCAsmMacroParameters());
return &MacroLikeBodies.back();
}
void AsmParser::instantiateMacroLikeBody(MCAsmMacro *M, SMLoc DirectiveLoc,
raw_svector_ostream &OS) {
OS << ".endr\n";
std::unique_ptr<MemoryBuffer> Instantiation =
MemoryBuffer::getMemBufferCopy(OS.str(), "<instantiation>");
// Create the macro instantiation object and add to the current macro
// instantiation stack.
MacroInstantiation *MI = new MacroInstantiation(
DirectiveLoc, CurBuffer, getTok().getLoc(), TheCondStack.size());
ActiveMacros.push_back(MI);
// Jump to the macro instantiation and prime the lexer.
CurBuffer = SrcMgr.AddNewSourceBuffer(std::move(Instantiation), SMLoc());
Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer());
Lex();
}
/// parseDirectiveRept
/// ::= .rep | .rept count
bool AsmParser::parseDirectiveRept(SMLoc DirectiveLoc, StringRef Dir) {
const MCExpr *CountExpr;
SMLoc CountLoc = getTok().getLoc();
if (parseExpression(CountExpr))
return true;
int64_t Count;
if (!CountExpr->evaluateAsAbsolute(Count, getStreamer().getAssemblerPtr())) {
return Error(CountLoc, "unexpected token in '" + Dir + "' directive");
}
if (check(Count < 0, CountLoc, "Count is negative") ||
parseToken(AsmToken::EndOfStatement,
"unexpected token in '" + Dir + "' directive"))
return true;
// Lex the rept definition.
MCAsmMacro *M = parseMacroLikeBody(DirectiveLoc);
if (!M)
return true;
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.
SmallString<256> Buf;
raw_svector_ostream OS(Buf);
while (Count--) {
// Note that the AtPseudoVariable is disabled for instantiations of .rep(t).
if (expandMacro(OS, M->Body, None, None, false, getTok().getLoc()))
return true;
}
instantiateMacroLikeBody(M, DirectiveLoc, OS);
return false;
}
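// Illustrative expansion (editor's sketch, assuming GAS-compatible input):
//   .rept 3
//   .byte 0
//   .endr
// behaves like three consecutive ".byte 0" statements, since the lexed body
// is appended Count times before being re-parsed as a new buffer.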
/// parseDirectiveIrp
/// ::= .irp symbol,values
bool AsmParser::parseDirectiveIrp(SMLoc DirectiveLoc) {
MCAsmMacroParameter Parameter;
MCAsmMacroArguments A;
if (check(parseIdentifier(Parameter.Name),
"expected identifier in '.irp' directive") ||
parseToken(AsmToken::Comma, "expected comma in '.irp' directive") ||
parseMacroArguments(nullptr, A) ||
parseToken(AsmToken::EndOfStatement, "expected End of Statement"))
return true;
// Lex the irp definition.
MCAsmMacro *M = parseMacroLikeBody(DirectiveLoc);
if (!M)
return true;
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.
SmallString<256> Buf;
raw_svector_ostream OS(Buf);
for (const MCAsmMacroArgument &Arg : A) {
// Note that the AtPseudoVariable is enabled for instantiations of .irp.
// This is undocumented, but GAS seems to support it.
if (expandMacro(OS, M->Body, Parameter, Arg, true, getTok().getLoc()))
return true;
}
instantiateMacroLikeBody(M, DirectiveLoc, OS);
return false;
}
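// Illustrative expansion (editor's sketch, assuming GAS-compatible input):
//   .irp reg, 1, 2, 3
//   .long \reg
//   .endr
// runs the body once per argument, substituting \reg, i.e. ".long 1",
// ".long 2", ".long 3".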
/// parseDirectiveIrpc
/// ::= .irpc symbol,values
bool AsmParser::parseDirectiveIrpc(SMLoc DirectiveLoc) {
MCAsmMacroParameter Parameter;
MCAsmMacroArguments A;
if (check(parseIdentifier(Parameter.Name),
"expected identifier in '.irpc' directive") ||
parseToken(AsmToken::Comma, "expected comma in '.irpc' directive") ||
parseMacroArguments(nullptr, A))
return true;
if (A.size() != 1 || A.front().size() != 1)
return TokError("unexpected token in '.irpc' directive");
// Eat the end of statement.
if (parseToken(AsmToken::EndOfStatement, "expected end of statement"))
return true;
// Lex the irpc definition.
MCAsmMacro *M = parseMacroLikeBody(DirectiveLoc);
if (!M)
return true;
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.
SmallString<256> Buf;
raw_svector_ostream OS(Buf);
StringRef Values = A.front().front().getString();
for (std::size_t I = 0, End = Values.size(); I != End; ++I) {
MCAsmMacroArgument Arg;
Arg.emplace_back(AsmToken::Identifier, Values.slice(I, I + 1));
// Note that the AtPseudoVariable is enabled for instantiations of .irpc.
// This is undocumented, but GAS seems to support it.
if (expandMacro(OS, M->Body, Parameter, Arg, true, getTok().getLoc()))
return true;
}
instantiateMacroLikeBody(M, DirectiveLoc, OS);
return false;
}
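// Illustrative expansion (editor's sketch, assuming GAS-compatible input):
//   .irpc d, 123
//   .long \d
//   .endr
// runs the body once per character of "123", yielding ".long 1", ".long 2",
// ".long 3".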
bool AsmParser::parseDirectiveEndr(SMLoc DirectiveLoc) {
if (ActiveMacros.empty())
return TokError("unmatched '.endr' directive");
// The only .endr directives that should get here are the ones created by
// instantiateMacroLikeBody.
assert(getLexer().is(AsmToken::EndOfStatement));
handleMacroExit();
return false;
}
bool AsmParser::parseDirectiveMSEmit(SMLoc IDLoc, ParseStatementInfo &Info,
size_t Len) {
const MCExpr *Value;
SMLoc ExprLoc = getLexer().getLoc();
if (parseExpression(Value))
return true;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value);
if (!MCE)
return Error(ExprLoc, "unexpected expression in _emit");
uint64_t IntValue = MCE->getValue();
if (!isUInt<8>(IntValue) && !isInt<8>(IntValue))
return Error(ExprLoc, "literal value out of range for directive");
Info.AsmRewrites->emplace_back(AOK_Emit, IDLoc, Len);
return false;
}
bool AsmParser::parseDirectiveMSAlign(SMLoc IDLoc, ParseStatementInfo &Info) {
const MCExpr *Value;
SMLoc ExprLoc = getLexer().getLoc();
if (parseExpression(Value))
return true;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value);
if (!MCE)
return Error(ExprLoc, "unexpected expression in align");
uint64_t IntValue = MCE->getValue();
if (!isPowerOf2_64(IntValue))
return Error(ExprLoc, "literal value not a power of two greater then zero");
Info.AsmRewrites->emplace_back(AOK_Align, IDLoc, 5, Log2_64(IntValue));
return false;
}
bool AsmParser::parseDirectivePrint(SMLoc DirectiveLoc) {
const AsmToken StrTok = getTok();
Lex();
if (StrTok.isNot(AsmToken::String) || StrTok.getString().front() != '"')
return Error(DirectiveLoc, "expected double quoted string after .print");
if (parseToken(AsmToken::EndOfStatement, "expected end of statement"))
return true;
llvm::outs() << StrTok.getStringContents() << '\n';
return false;
}
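// Illustrative usage (editor's sketch): '.print "building stage 2"' writes
// the string contents plus a newline to stdout; a non-double-quoted operand
// is rejected with "expected double quoted string after .print".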
bool AsmParser::parseDirectiveAddrsig() {
getStreamer().EmitAddrsig();
return false;
}
bool AsmParser::parseDirectiveAddrsigSym() {
StringRef Name;
if (check(parseIdentifier(Name),
"expected identifier in '.addrsig_sym' directive"))
return true;
MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
getStreamer().EmitAddrsigSym(Sym);
return false;
}
// We are comparing pointers, but the pointers are relative to a single string.
// Thus, this should always be deterministic.
static int rewritesSort(const AsmRewrite *AsmRewriteA,
const AsmRewrite *AsmRewriteB) {
if (AsmRewriteA->Loc.getPointer() < AsmRewriteB->Loc.getPointer())
return -1;
if (AsmRewriteB->Loc.getPointer() < AsmRewriteA->Loc.getPointer())
return 1;
// It's possible to have a SizeDirective, Imm/ImmPrefix and an Input/Output
// rewrite to the same location. Make sure the SizeDirective rewrite is
// performed first, then the Imm/ImmPrefix and finally the Input/Output. This
// ensures the sort algorithm is stable.
if (AsmRewritePrecedence[AsmRewriteA->Kind] >
AsmRewritePrecedence[AsmRewriteB->Kind])
return -1;
if (AsmRewritePrecedence[AsmRewriteA->Kind] <
AsmRewritePrecedence[AsmRewriteB->Kind])
return 1;
llvm_unreachable("Unstable rewrite sort.");
}
bool AsmParser::parseMSInlineAsm(
void *AsmLoc, std::string &AsmString, unsigned &NumOutputs,
unsigned &NumInputs, SmallVectorImpl<std::pair<void *, bool>> &OpDecls,
SmallVectorImpl<std::string> &Constraints,
SmallVectorImpl<std::string> &Clobbers, const MCInstrInfo *MII,
const MCInstPrinter *IP, MCAsmParserSemaCallback &SI) {
SmallVector<void *, 4> InputDecls;
SmallVector<void *, 4> OutputDecls;
SmallVector<bool, 4> InputDeclsAddressOf;
SmallVector<bool, 4> OutputDeclsAddressOf;
SmallVector<std::string, 4> InputConstraints;
SmallVector<std::string, 4> OutputConstraints;
SmallVector<unsigned, 4> ClobberRegs;
SmallVector<AsmRewrite, 4> AsmStrRewrites;
// Prime the lexer.
Lex();
// While we have input, parse each statement.
unsigned InputIdx = 0;
unsigned OutputIdx = 0;
while (getLexer().isNot(AsmToken::Eof)) {
// Parse curly braces marking block start/end
if (parseCurlyBlockScope(AsmStrRewrites))
continue;
ParseStatementInfo Info(&AsmStrRewrites);
bool StatementErr = parseStatement(Info, &SI);
if (StatementErr || Info.ParseError) {
// Emit pending errors if any exist.
printPendingErrors();
return true;
}
// No pending error should exist here.
assert(!hasPendingError() && "unexpected error from parseStatement");
if (Info.Opcode == ~0U)
continue;
const MCInstrDesc &Desc = MII->get(Info.Opcode);
// Build the list of clobbers, outputs and inputs.
for (unsigned i = 1, e = Info.ParsedOperands.size(); i != e; ++i) {
MCParsedAsmOperand &Operand = *Info.ParsedOperands[i];
// Immediate.
if (Operand.isImm())
continue;
// Register operand.
if (Operand.isReg() && !Operand.needAddressOf() &&
!getTargetParser().OmitRegisterFromClobberLists(Operand.getReg())) {
unsigned NumDefs = Desc.getNumDefs();
// Clobber.
if (NumDefs && Operand.getMCOperandNum() < NumDefs)
ClobberRegs.push_back(Operand.getReg());
continue;
}
// Expr/Input or Output.
StringRef SymName = Operand.getSymName();
if (SymName.empty())
continue;
void *OpDecl = Operand.getOpDecl();
if (!OpDecl)
continue;
bool isOutput = (i == 1) && Desc.mayStore();
SMLoc Start = SMLoc::getFromPointer(SymName.data());
if (isOutput) {
++InputIdx;
OutputDecls.push_back(OpDecl);
OutputDeclsAddressOf.push_back(Operand.needAddressOf());
OutputConstraints.push_back(("=" + Operand.getConstraint()).str());
AsmStrRewrites.emplace_back(AOK_Output, Start, SymName.size());
} else {
InputDecls.push_back(OpDecl);
InputDeclsAddressOf.push_back(Operand.needAddressOf());
InputConstraints.push_back(Operand.getConstraint().str());
AsmStrRewrites.emplace_back(AOK_Input, Start, SymName.size());
}
}
// Consider implicit defs to be clobbers. Think of cpuid and push.
ArrayRef<MCPhysReg> ImpDefs(Desc.getImplicitDefs(),
Desc.getNumImplicitDefs());
ClobberRegs.insert(ClobberRegs.end(), ImpDefs.begin(), ImpDefs.end());
}
// Set the number of Outputs and Inputs.
NumOutputs = OutputDecls.size();
NumInputs = InputDecls.size();
// Set the unique clobbers.
array_pod_sort(ClobberRegs.begin(), ClobberRegs.end());
ClobberRegs.erase(std::unique(ClobberRegs.begin(), ClobberRegs.end()),
ClobberRegs.end());
Clobbers.assign(ClobberRegs.size(), std::string());
for (unsigned I = 0, E = ClobberRegs.size(); I != E; ++I) {
raw_string_ostream OS(Clobbers[I]);
IP->printRegName(OS, ClobberRegs[I]);
}
// Merge the various outputs and inputs. Outputs are expected first.
if (NumOutputs || NumInputs) {
unsigned NumExprs = NumOutputs + NumInputs;
OpDecls.resize(NumExprs);
Constraints.resize(NumExprs);
for (unsigned i = 0; i < NumOutputs; ++i) {
OpDecls[i] = std::make_pair(OutputDecls[i], OutputDeclsAddressOf[i]);
Constraints[i] = OutputConstraints[i];
}
for (unsigned i = 0, j = NumOutputs; i < NumInputs; ++i, ++j) {
OpDecls[j] = std::make_pair(InputDecls[i], InputDeclsAddressOf[i]);
Constraints[j] = InputConstraints[i];
}
}
// Build the IR assembly string.
std::string AsmStringIR;
raw_string_ostream OS(AsmStringIR);
StringRef ASMString =
SrcMgr.getMemoryBuffer(SrcMgr.getMainFileID())->getBuffer();
const char *AsmStart = ASMString.begin();
const char *AsmEnd = ASMString.end();
array_pod_sort(AsmStrRewrites.begin(), AsmStrRewrites.end(), rewritesSort);
for (const AsmRewrite &AR : AsmStrRewrites) {
AsmRewriteKind Kind = AR.Kind;
const char *Loc = AR.Loc.getPointer();
assert(Loc >= AsmStart && "Expected Loc to be at or after Start!");
// Emit everything up to the immediate/expression.
if (unsigned Len = Loc - AsmStart)
OS << StringRef(AsmStart, Len);
// Skip the original expression.
if (Kind == AOK_Skip) {
AsmStart = Loc + AR.Len;
continue;
}
unsigned AdditionalSkip = 0;
// Rewrite expressions in $N notation.
switch (Kind) {
default:
break;
case AOK_IntelExpr:
assert(AR.IntelExp.isValid() && "cannot write invalid intel expression");
if (AR.IntelExp.NeedBracs)
OS << "[";
if (AR.IntelExp.hasBaseReg())
OS << AR.IntelExp.BaseReg;
if (AR.IntelExp.hasIndexReg())
OS << (AR.IntelExp.hasBaseReg() ? " + " : "")
<< AR.IntelExp.IndexReg;
if (AR.IntelExp.Scale > 1)
OS << " * $$" << AR.IntelExp.Scale;
if (AR.IntelExp.Imm || !AR.IntelExp.hasRegs())
OS << (AR.IntelExp.hasRegs() ? " + $$" : "$$") << AR.IntelExp.Imm;
if (AR.IntelExp.NeedBracs)
OS << "]";
break;
case AOK_Label:
OS << Ctx.getAsmInfo()->getPrivateLabelPrefix() << AR.Label;
break;
case AOK_Input:
OS << '$' << InputIdx++;
break;
case AOK_Output:
OS << '$' << OutputIdx++;
break;
case AOK_SizeDirective:
switch (AR.Val) {
default: break;
case 8: OS << "byte ptr "; break;
case 16: OS << "word ptr "; break;
case 32: OS << "dword ptr "; break;
case 64: OS << "qword ptr "; break;
case 80: OS << "xword ptr "; break;
case 128: OS << "xmmword ptr "; break;
case 256: OS << "ymmword ptr "; break;
}
break;
case AOK_Emit:
OS << ".byte";
break;
case AOK_Align: {
// MS alignment directives are measured in bytes. If the native assembler
// measures alignment in bytes, we can pass it straight through.
OS << ".align";
if (getContext().getAsmInfo()->getAlignmentIsInBytes())
break;
// Alignment is in log2 form, so print that instead and skip the original
// immediate.
unsigned Val = AR.Val;
OS << ' ' << Val;
assert(Val < 10 && "Expected alignment less than 2^10.");
AdditionalSkip = (Val < 4) ? 2 : Val < 7 ? 3 : 4;
break;
}
case AOK_EVEN:
OS << ".even";
break;
case AOK_EndOfStatement:
OS << "\n\t";
break;
}
// Skip the original expression.
AsmStart = Loc + AR.Len + AdditionalSkip;
}
// Emit the remainder of the asm string.
if (AsmStart != AsmEnd)
OS << StringRef(AsmStart, AsmEnd - AsmStart);
AsmString = OS.str();
return false;
}
namespace llvm {
namespace MCParserUtils {
/// Returns whether the given symbol is used anywhere in the given expression,
/// or subexpressions.
static bool isSymbolUsedInExpression(const MCSymbol *Sym, const MCExpr *Value) {
switch (Value->getKind()) {
case MCExpr::Binary: {
const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Value);
return isSymbolUsedInExpression(Sym, BE->getLHS()) ||
isSymbolUsedInExpression(Sym, BE->getRHS());
}
case MCExpr::Target:
case MCExpr::Constant:
return false;
case MCExpr::SymbolRef: {
const MCSymbol &S =
static_cast<const MCSymbolRefExpr *>(Value)->getSymbol();
if (S.isVariable())
return isSymbolUsedInExpression(Sym, S.getVariableValue());
return &S == Sym;
}
case MCExpr::Unary:
return isSymbolUsedInExpression(
Sym, static_cast<const MCUnaryExpr *>(Value)->getSubExpr());
}
llvm_unreachable("Unknown expr kind!");
}
bool parseAssignmentExpression(StringRef Name, bool allow_redef,
MCAsmParser &Parser, MCSymbol *&Sym,
const MCExpr *&Value) {
// FIXME: Use better location, we should use proper tokens.
SMLoc EqualLoc = Parser.getTok().getLoc();
if (Parser.parseExpression(Value))
return Parser.TokError("missing expression");
// Note: we don't count b as used in "a = b". This is to allow
// a = b
// b = c
if (Parser.parseToken(AsmToken::EndOfStatement))
return true;
// Validate that the LHS is allowed to be a variable (either it has not been
// used as a symbol, or it is an absolute symbol).
Sym = Parser.getContext().lookupSymbol(Name);
if (Sym) {
// Diagnose assignment to a label.
//
// FIXME: Diagnostics. Note the location of the definition as a label.
// FIXME: Diagnose assignment to protected identifier (e.g., register name).
if (isSymbolUsedInExpression(Sym, Value))
return Parser.Error(EqualLoc, "Recursive use of '" + Name + "'");
else if (Sym->isUndefined(/*SetUsed*/ false) && !Sym->isUsed() &&
!Sym->isVariable())
; // Allow redefinitions of undefined symbols only used in directives.
else if (Sym->isVariable() && !Sym->isUsed() && allow_redef)
; // Allow redefinitions of variables that haven't yet been used.
else if (!Sym->isUndefined() && (!Sym->isVariable() || !allow_redef))
return Parser.Error(EqualLoc, "redefinition of '" + Name + "'");
else if (!Sym->isVariable())
return Parser.Error(EqualLoc, "invalid assignment to '" + Name + "'");
else if (!isa<MCConstantExpr>(Sym->getVariableValue()))
return Parser.Error(EqualLoc,
"invalid reassignment of non-absolute variable '" +
Name + "'");
} else if (Name == ".") {
Parser.getStreamer().emitValueToOffset(Value, 0, EqualLoc);
return false;
} else
Sym = Parser.getContext().getOrCreateSymbol(Name);
Sym->setRedefinable(allow_redef);
return false;
}
} // end namespace MCParserUtils
} // end namespace llvm
/// Create an MCAsmParser instance.
MCAsmParser *llvm::createMCAsmParser(SourceMgr &SM, MCContext &C,
MCStreamer &Out, const MCAsmInfo &MAI,
unsigned CB) {
return new AsmParser(SM, C, Out, MAI, CB);
}
Index: vendor/llvm/dist-release_80/lib/MC/WasmObjectWriter.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/MC/WasmObjectWriter.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/MC/WasmObjectWriter.cpp (revision 344166)
@@ -1,1587 +1,1590 @@
//===- lib/MC/WasmObjectWriter.cpp - Wasm File Writer ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements Wasm object file writer information.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionWasm.h"
#include "llvm/MC/MCSymbolWasm.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCWasmObjectWriter.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/StringSaver.h"
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "mc"
namespace {
// When we create the indirect function table, we start at 1, so that there is
// an empty slot at 0 and therefore calling a null function pointer will trap.
static const uint32_t kInitialTableOffset = 1;
// For patching purposes, we need to remember where each section starts, both
// for patching up the section size field, and for patching up references to
// locations within the section.
struct SectionBookkeeping {
// Where the size of the section is written.
uint64_t SizeOffset;
// Where the section header ends (without custom section name).
uint64_t PayloadOffset;
// Where the contents of the section starts.
uint64_t ContentsOffset;
uint32_t Index;
};
// The signature of a wasm function or event, in a struct capable of being used
// as a DenseMap key.
// TODO: Consider using wasm::WasmSignature directly instead.
struct WasmSignature {
// Support empty and tombstone instances, needed by DenseMap.
enum { Plain, Empty, Tombstone } State;
// The return types of the function.
SmallVector<wasm::ValType, 1> Returns;
// The parameter types of the function.
SmallVector<wasm::ValType, 4> Params;
WasmSignature() : State(Plain) {}
bool operator==(const WasmSignature &Other) const {
return State == Other.State && Returns == Other.Returns &&
Params == Other.Params;
}
};
// Traits for using WasmSignature in a DenseMap.
struct WasmSignatureDenseMapInfo {
static WasmSignature getEmptyKey() {
WasmSignature Sig;
Sig.State = WasmSignature::Empty;
return Sig;
}
static WasmSignature getTombstoneKey() {
WasmSignature Sig;
Sig.State = WasmSignature::Tombstone;
return Sig;
}
static unsigned getHashValue(const WasmSignature &Sig) {
uintptr_t Value = Sig.State;
for (wasm::ValType Ret : Sig.Returns)
Value += DenseMapInfo<uint32_t>::getHashValue(uint32_t(Ret));
for (wasm::ValType Param : Sig.Params)
Value += DenseMapInfo<uint32_t>::getHashValue(uint32_t(Param));
return Value;
}
static bool isEqual(const WasmSignature &LHS, const WasmSignature &RHS) {
return LHS == RHS;
}
};
// A wasm data segment. A wasm binary contains only a single data section
// but that can contain many segments, each with their own virtual location
// in memory. Each data MCSection created by llvm is modeled as its own
// wasm data segment.
struct WasmDataSegment {
MCSectionWasm *Section;
StringRef Name;
uint32_t Offset;
uint32_t Alignment;
uint32_t Flags;
SmallVector<char, 4> Data;
};
// A wasm function to be written into the function section.
struct WasmFunction {
uint32_t SigIndex;
const MCSymbolWasm *Sym;
};
// A wasm global to be written into the global section.
struct WasmGlobal {
wasm::WasmGlobalType Type;
uint64_t InitialValue;
};
// Information about a single item which is part of a COMDAT. For each data
// segment or function which is in the COMDAT, there is a corresponding
// WasmComdatEntry.
struct WasmComdatEntry {
unsigned Kind;
uint32_t Index;
};
// Information about a single relocation.
struct WasmRelocationEntry {
uint64_t Offset; // Where is the relocation.
const MCSymbolWasm *Symbol; // The symbol to relocate with.
int64_t Addend; // A value to add to the symbol.
unsigned Type; // The type of the relocation.
const MCSectionWasm *FixupSection; // The section the relocation is targeting.
WasmRelocationEntry(uint64_t Offset, const MCSymbolWasm *Symbol,
int64_t Addend, unsigned Type,
const MCSectionWasm *FixupSection)
: Offset(Offset), Symbol(Symbol), Addend(Addend), Type(Type),
FixupSection(FixupSection) {}
bool hasAddend() const {
switch (Type) {
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_LEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_SLEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32:
case wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32:
case wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32:
return true;
default:
return false;
}
}
void print(raw_ostream &Out) const {
Out << wasm::relocTypetoString(Type) << " Off=" << Offset
<< ", Sym=" << *Symbol << ", Addend=" << Addend
<< ", FixupSection=" << FixupSection->getSectionName();
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
};
static const uint32_t INVALID_INDEX = -1;
struct WasmCustomSection {
StringRef Name;
MCSectionWasm *Section;
uint32_t OutputContentsOffset;
uint32_t OutputIndex;
WasmCustomSection(StringRef Name, MCSectionWasm *Section)
: Name(Name), Section(Section), OutputContentsOffset(0),
OutputIndex(INVALID_INDEX) {}
};
#if !defined(NDEBUG)
raw_ostream &operator<<(raw_ostream &OS, const WasmRelocationEntry &Rel) {
Rel.print(OS);
return OS;
}
#endif
class WasmObjectWriter : public MCObjectWriter {
support::endian::Writer W;
/// The target specific Wasm writer instance.
std::unique_ptr<MCWasmObjectTargetWriter> TargetObjectWriter;
// Relocations for fixing up references in the code section.
std::vector<WasmRelocationEntry> CodeRelocations;
uint32_t CodeSectionIndex;
// Relocations for fixing up references in the data section.
std::vector<WasmRelocationEntry> DataRelocations;
uint32_t DataSectionIndex;
// Index values to use for fixing up call_indirect type indices.
// Maps function symbols to the index of the type of the function
DenseMap<const MCSymbolWasm *, uint32_t> TypeIndices;
// Maps function symbols to the table element index space. Used
// for TABLE_INDEX relocation types (i.e. address taken functions).
DenseMap<const MCSymbolWasm *, uint32_t> TableIndices;
// Maps function/global symbols to the function/global/event/section index
// space.
DenseMap<const MCSymbolWasm *, uint32_t> WasmIndices;
// Maps data symbols to the Wasm segment and offset/size within the segment.
DenseMap<const MCSymbolWasm *, wasm::WasmDataReference> DataLocations;
// Stores output data (index, relocations, content offset) for custom
// sections.
std::vector<WasmCustomSection> CustomSections;
// Relocations for fixing up references in the custom sections.
DenseMap<const MCSectionWasm *, std::vector<WasmRelocationEntry>>
CustomSectionsRelocations;
// Map from section to defining function symbol.
DenseMap<const MCSection *, const MCSymbol *> SectionFunctions;
DenseMap<WasmSignature, uint32_t, WasmSignatureDenseMapInfo> SignatureIndices;
SmallVector<WasmSignature, 4> Signatures;
SmallVector<WasmGlobal, 4> Globals;
SmallVector<WasmDataSegment, 4> DataSegments;
unsigned NumFunctionImports = 0;
unsigned NumGlobalImports = 0;
unsigned NumEventImports = 0;
uint32_t SectionCount = 0;
// TargetObjectWriter wrappers.
bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup) const {
return TargetObjectWriter->getRelocType(Target, Fixup);
}
void startSection(SectionBookkeeping &Section, unsigned SectionId);
void startCustomSection(SectionBookkeeping &Section, StringRef Name);
void endSection(SectionBookkeeping &Section);
public:
WasmObjectWriter(std::unique_ptr<MCWasmObjectTargetWriter> MOTW,
raw_pwrite_stream &OS)
: W(OS, support::little), TargetObjectWriter(std::move(MOTW)) {}
~WasmObjectWriter() override;
private:
void reset() override {
CodeRelocations.clear();
DataRelocations.clear();
TypeIndices.clear();
WasmIndices.clear();
TableIndices.clear();
DataLocations.clear();
CustomSectionsRelocations.clear();
SignatureIndices.clear();
Signatures.clear();
Globals.clear();
DataSegments.clear();
SectionFunctions.clear();
NumFunctionImports = 0;
NumGlobalImports = 0;
MCObjectWriter::reset();
}
void writeHeader(const MCAssembler &Asm);
void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
MCValue Target, uint64_t &FixedValue) override;
void executePostLayoutBinding(MCAssembler &Asm,
const MCAsmLayout &Layout) override;
uint64_t writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
void writeString(const StringRef Str) {
encodeULEB128(Str.size(), W.OS);
W.OS << Str;
}
void writeValueType(wasm::ValType Ty) { W.OS << static_cast<char>(Ty); }
void writeTypeSection(ArrayRef<WasmSignature> Signatures);
void writeImportSection(ArrayRef<wasm::WasmImport> Imports, uint32_t DataSize,
uint32_t NumElements);
void writeFunctionSection(ArrayRef<WasmFunction> Functions);
void writeGlobalSection();
void writeExportSection(ArrayRef<wasm::WasmExport> Exports);
void writeElemSection(ArrayRef<uint32_t> TableElems);
void writeCodeSection(const MCAssembler &Asm, const MCAsmLayout &Layout,
ArrayRef<WasmFunction> Functions);
void writeDataSection();
void writeEventSection(ArrayRef<wasm::WasmEventType> Events);
void writeRelocSection(uint32_t SectionIndex, StringRef Name,
std::vector<WasmRelocationEntry> &Relocations);
void writeLinkingMetaDataSection(
ArrayRef<wasm::WasmSymbolInfo> SymbolInfos,
ArrayRef<std::pair<uint16_t, uint32_t>> InitFuncs,
const std::map<StringRef, std::vector<WasmComdatEntry>> &Comdats);
void writeCustomSections(const MCAssembler &Asm, const MCAsmLayout &Layout);
void writeCustomRelocSections();
void
updateCustomSectionRelocations(const SmallVector<WasmFunction, 4> &Functions,
const MCAsmLayout &Layout);
uint32_t getProvisionalValue(const WasmRelocationEntry &RelEntry);
void applyRelocations(ArrayRef<WasmRelocationEntry> Relocations,
uint64_t ContentsOffset);
uint32_t getRelocationIndexValue(const WasmRelocationEntry &RelEntry);
uint32_t getFunctionType(const MCSymbolWasm &Symbol);
uint32_t getEventType(const MCSymbolWasm &Symbol);
void registerFunctionType(const MCSymbolWasm &Symbol);
void registerEventType(const MCSymbolWasm &Symbol);
};
} // end anonymous namespace
WasmObjectWriter::~WasmObjectWriter() {}
// Write out a section header and a patchable section size field.
void WasmObjectWriter::startSection(SectionBookkeeping &Section,
unsigned SectionId) {
LLVM_DEBUG(dbgs() << "startSection " << SectionId << "\n");
W.OS << char(SectionId);
Section.SizeOffset = W.OS.tell();
// The section size. We don't know the size yet, so reserve enough space
// for any 32-bit value; we'll patch it later.
encodeULEB128(UINT32_MAX, W.OS);
// The position where the section starts, for measuring its size.
Section.ContentsOffset = W.OS.tell();
Section.PayloadOffset = W.OS.tell();
Section.Index = SectionCount++;
}
void WasmObjectWriter::startCustomSection(SectionBookkeeping &Section,
StringRef Name) {
LLVM_DEBUG(dbgs() << "startCustomSection " << Name << "\n");
startSection(Section, wasm::WASM_SEC_CUSTOM);
// The position where the section header ends, for measuring its size.
Section.PayloadOffset = W.OS.tell();
// Custom sections in wasm also have a string identifier.
writeString(Name);
// The position where the custom section starts.
Section.ContentsOffset = W.OS.tell();
}
// Now that the section is complete and we know how big it is, patch up the
// section size field at the start of the section.
void WasmObjectWriter::endSection(SectionBookkeeping &Section) {
uint64_t Size = W.OS.tell() - Section.PayloadOffset;
if (uint32_t(Size) != Size)
report_fatal_error("section size does not fit in a uint32_t");
LLVM_DEBUG(dbgs() << "endSection size=" << Size << "\n");
// Write the final section size to the payload_len field, which follows
// the section id byte.
uint8_t Buffer[16];
unsigned SizeLen = encodeULEB128(Size, Buffer, 5);
assert(SizeLen == 5);
static_cast<raw_pwrite_stream &>(W.OS).pwrite((char *)Buffer, SizeLen,
Section.SizeOffset);
}
// Emit the Wasm header.
void WasmObjectWriter::writeHeader(const MCAssembler &Asm) {
W.OS.write(wasm::WasmMagic, sizeof(wasm::WasmMagic));
W.write<uint32_t>(wasm::WasmVersion);
}
void WasmObjectWriter::executePostLayoutBinding(MCAssembler &Asm,
const MCAsmLayout &Layout) {
// Build a map of sections to the function that defines them, for use
// in recordRelocation.
for (const MCSymbol &S : Asm.symbols()) {
const auto &WS = static_cast<const MCSymbolWasm &>(S);
if (WS.isDefined() && WS.isFunction() && !WS.isVariable()) {
const auto &Sec = static_cast<const MCSectionWasm &>(S.getSection());
auto Pair = SectionFunctions.insert(std::make_pair(&Sec, &S));
if (!Pair.second)
report_fatal_error("section already has a defining function: " +
Sec.getSectionName());
}
}
}
void WasmObjectWriter::recordRelocation(MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) {
MCAsmBackend &Backend = Asm.getBackend();
bool IsPCRel = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
MCFixupKindInfo::FKF_IsPCRel;
const auto &FixupSection = cast<MCSectionWasm>(*Fragment->getParent());
uint64_t C = Target.getConstant();
uint64_t FixupOffset = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
MCContext &Ctx = Asm.getContext();
// The .init_array isn't translated as data, so don't do relocations in it.
if (FixupSection.getSectionName().startswith(".init_array"))
return;
if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
assert(RefB->getKind() == MCSymbolRefExpr::VK_None &&
"Should not have constructed this");
// Let A, B and C be the components of Target and R be the location of
// the fixup. If the fixup is not pcrel, we want to compute (A - B + C).
// If it is pcrel, we want to compute (A - B + C - R).
// In general, Wasm has no relocations for -B. It can only represent (A + C)
// or (A + C - R). If B = R + K and the relocation is not pcrel, we can
// replace B to implement it: (A - R - K + C)
if (IsPCRel) {
Ctx.reportError(
Fixup.getLoc(),
"No relocation available to represent this relative expression");
return;
}
const auto &SymB = cast<MCSymbolWasm>(RefB->getSymbol());
if (SymB.isUndefined()) {
Ctx.reportError(Fixup.getLoc(),
Twine("symbol '") + SymB.getName() +
"' can not be undefined in a subtraction expression");
return;
}
assert(!SymB.isAbsolute() && "Should have been folded");
const MCSection &SecB = SymB.getSection();
if (&SecB != &FixupSection) {
Ctx.reportError(Fixup.getLoc(),
"Cannot represent a difference across sections");
return;
}
uint64_t SymBOffset = Layout.getSymbolOffset(SymB);
uint64_t K = SymBOffset - FixupOffset;
IsPCRel = true;
C -= K;
}
// We either rejected the fixup or folded B into C at this point.
const MCSymbolRefExpr *RefA = Target.getSymA();
const auto *SymA = RefA ? cast<MCSymbolWasm>(&RefA->getSymbol()) : nullptr;
if (SymA && SymA->isVariable()) {
const MCExpr *Expr = SymA->getVariableValue();
const auto *Inner = cast<MCSymbolRefExpr>(Expr);
if (Inner->getKind() == MCSymbolRefExpr::VK_WEAKREF)
llvm_unreachable("weakref used in reloc not yet implemented");
}
// Put any constant offset in an addend. Offsets can be negative, and
// LLVM expects wrapping, in contrast to wasm's immediates which can't
// be negative and don't wrap.
FixedValue = 0;
unsigned Type = getRelocType(Target, Fixup);
assert(!IsPCRel);
assert(SymA);
// Absolute offset within a section or a function.
// Currently only supported for metadata sections.
// See: test/MC/WebAssembly/blockaddress.ll
if (Type == wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32 ||
Type == wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32) {
if (!FixupSection.getKind().isMetadata())
report_fatal_error("relocations for function or section offsets are "
"only supported in metadata sections");
const MCSymbol *SectionSymbol = nullptr;
const MCSection &SecA = SymA->getSection();
if (SecA.getKind().isText())
SectionSymbol = SectionFunctions.find(&SecA)->second;
else
SectionSymbol = SecA.getBeginSymbol();
if (!SectionSymbol)
report_fatal_error("section symbol is required for relocation");
C += Layout.getSymbolOffset(*SymA);
SymA = cast<MCSymbolWasm>(SectionSymbol);
}
// Relocations other than R_WEBASSEMBLY_TYPE_INDEX_LEB are required to be
// against a named symbol.
if (Type != wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB) {
if (SymA->getName().empty())
report_fatal_error("relocations against un-named temporaries are not yet "
"supported by wasm");
SymA->setUsedInReloc();
}
WasmRelocationEntry Rec(FixupOffset, SymA, C, Type, &FixupSection);
LLVM_DEBUG(dbgs() << "WasmReloc: " << Rec << "\n");
if (FixupSection.isWasmData()) {
DataRelocations.push_back(Rec);
} else if (FixupSection.getKind().isText()) {
CodeRelocations.push_back(Rec);
} else if (FixupSection.getKind().isMetadata()) {
CustomSectionsRelocations[&FixupSection].push_back(Rec);
} else {
llvm_unreachable("unexpected section type");
}
}
// Write X as an (unsigned) LEB value at offset Offset in Stream, padded
// to allow patching.
static void WritePatchableLEB(raw_pwrite_stream &Stream, uint32_t X,
uint64_t Offset) {
uint8_t Buffer[5];
unsigned SizeLen = encodeULEB128(X, Buffer, 5);
assert(SizeLen == 5);
Stream.pwrite((char *)Buffer, SizeLen, Offset);
}
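// Illustrative encoding (editor's sketch): X = 1 padded to five bytes is
//   0x81 0x80 0x80 0x80 0x00
// which still decodes as 1 but reserves room so any 32-bit value can be
// patched in later without shifting subsequent bytes.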
// Write X as a signed LEB value at offset Offset in Stream, padded
// to allow patching.
static void WritePatchableSLEB(raw_pwrite_stream &Stream, int32_t X,
uint64_t Offset) {
uint8_t Buffer[5];
unsigned SizeLen = encodeSLEB128(X, Buffer, 5);
assert(SizeLen == 5);
Stream.pwrite((char *)Buffer, SizeLen, Offset);
}
// Write X as a plain integer value at offset Offset in Stream.
static void WriteI32(raw_pwrite_stream &Stream, uint32_t X, uint64_t Offset) {
uint8_t Buffer[4];
support::endian::write32le(Buffer, X);
Stream.pwrite((char *)Buffer, sizeof(Buffer), Offset);
}
static const MCSymbolWasm *ResolveSymbol(const MCSymbolWasm &Symbol) {
if (Symbol.isVariable()) {
const MCExpr *Expr = Symbol.getVariableValue();
auto *Inner = cast<MCSymbolRefExpr>(Expr);
return cast<MCSymbolWasm>(&Inner->getSymbol());
}
return &Symbol;
}
// Compute a value to write into the code at the location covered
// by RelEntry. This value isn't used by the static linker; it just serves
// to make the object format more readable and more likely to be directly
// usable.
uint32_t
WasmObjectWriter::getProvisionalValue(const WasmRelocationEntry &RelEntry) {
switch (RelEntry.Type) {
case wasm::R_WEBASSEMBLY_TABLE_INDEX_SLEB:
case wasm::R_WEBASSEMBLY_TABLE_INDEX_I32: {
// Provisional value is table address of the resolved symbol itself
const MCSymbolWasm *Sym = ResolveSymbol(*RelEntry.Symbol);
assert(Sym->isFunction());
return TableIndices[Sym];
}
case wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB:
// Provisional value is same as the index
return getRelocationIndexValue(RelEntry);
case wasm::R_WEBASSEMBLY_FUNCTION_INDEX_LEB:
case wasm::R_WEBASSEMBLY_GLOBAL_INDEX_LEB:
case wasm::R_WEBASSEMBLY_EVENT_INDEX_LEB:
// Provisional value is function/global/event Wasm index
if (!WasmIndices.count(RelEntry.Symbol))
report_fatal_error("symbol not found in wasm index space: " +
RelEntry.Symbol->getName());
return WasmIndices[RelEntry.Symbol];
case wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32:
case wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32: {
const auto &Section =
static_cast<const MCSectionWasm &>(RelEntry.Symbol->getSection());
return Section.getSectionOffset() + RelEntry.Addend;
}
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_LEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_SLEB: {
// Provisional value is address of the global
const MCSymbolWasm *Sym = ResolveSymbol(*RelEntry.Symbol);
// For undefined symbols, use zero
if (!Sym->isDefined())
return 0;
const wasm::WasmDataReference &Ref = DataLocations[Sym];
const WasmDataSegment &Segment = DataSegments[Ref.Segment];
// Ignore overflow. LLVM allows address arithmetic to silently wrap.
return Segment.Offset + Ref.Offset + RelEntry.Addend;
}
default:
llvm_unreachable("invalid relocation type");
}
}
static void addData(SmallVectorImpl<char> &DataBytes,
MCSectionWasm &DataSection) {
LLVM_DEBUG(errs() << "addData: " << DataSection.getSectionName() << "\n");
DataBytes.resize(alignTo(DataBytes.size(), DataSection.getAlignment()));
for (const MCFragment &Frag : DataSection) {
if (Frag.hasInstructions())
report_fatal_error("only data supported in data sections");
if (auto *Align = dyn_cast<MCAlignFragment>(&Frag)) {
if (Align->getValueSize() != 1)
report_fatal_error("only byte values supported for alignment");
// If nops are requested, use zeros, as this is the data section.
uint8_t Value = Align->hasEmitNops() ? 0 : Align->getValue();
uint64_t Size =
std::min<uint64_t>(alignTo(DataBytes.size(), Align->getAlignment()),
DataBytes.size() + Align->getMaxBytesToEmit());
DataBytes.resize(Size, Value);
} else if (auto *Fill = dyn_cast<MCFillFragment>(&Frag)) {
int64_t NumValues;
if (!Fill->getNumValues().evaluateAsAbsolute(NumValues))
llvm_unreachable("The fill should be an assembler constant");
DataBytes.insert(DataBytes.end(), Fill->getValueSize() * NumValues,
Fill->getValue());
} else if (auto *LEB = dyn_cast<MCLEBFragment>(&Frag)) {
const SmallVectorImpl<char> &Contents = LEB->getContents();
DataBytes.insert(DataBytes.end(), Contents.begin(), Contents.end());
} else {
const auto &DataFrag = cast<MCDataFragment>(Frag);
const SmallVectorImpl<char> &Contents = DataFrag.getContents();
DataBytes.insert(DataBytes.end(), Contents.begin(), Contents.end());
}
}
LLVM_DEBUG(dbgs() << "addData -> " << DataBytes.size() << "\n");
}
uint32_t
WasmObjectWriter::getRelocationIndexValue(const WasmRelocationEntry &RelEntry) {
if (RelEntry.Type == wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB) {
if (!TypeIndices.count(RelEntry.Symbol))
report_fatal_error("symbol not found in type index space: " +
RelEntry.Symbol->getName());
return TypeIndices[RelEntry.Symbol];
}
return RelEntry.Symbol->getIndex();
}
// Apply the portions of the relocation records that we can handle ourselves
// directly.
void WasmObjectWriter::applyRelocations(
ArrayRef<WasmRelocationEntry> Relocations, uint64_t ContentsOffset) {
auto &Stream = static_cast<raw_pwrite_stream &>(W.OS);
for (const WasmRelocationEntry &RelEntry : Relocations) {
uint64_t Offset = ContentsOffset +
RelEntry.FixupSection->getSectionOffset() +
RelEntry.Offset;
LLVM_DEBUG(dbgs() << "applyRelocation: " << RelEntry << "\n");
uint32_t Value = getProvisionalValue(RelEntry);
switch (RelEntry.Type) {
case wasm::R_WEBASSEMBLY_FUNCTION_INDEX_LEB:
case wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB:
case wasm::R_WEBASSEMBLY_GLOBAL_INDEX_LEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_LEB:
case wasm::R_WEBASSEMBLY_EVENT_INDEX_LEB:
WritePatchableLEB(Stream, Value, Offset);
break;
case wasm::R_WEBASSEMBLY_TABLE_INDEX_I32:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32:
case wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32:
case wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32:
WriteI32(Stream, Value, Offset);
break;
case wasm::R_WEBASSEMBLY_TABLE_INDEX_SLEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_SLEB:
WritePatchableSLEB(Stream, Value, Offset);
break;
default:
llvm_unreachable("invalid relocation type");
}
}
}
void WasmObjectWriter::writeTypeSection(ArrayRef<WasmSignature> Signatures) {
if (Signatures.empty())
return;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_TYPE);
encodeULEB128(Signatures.size(), W.OS);
for (const WasmSignature &Sig : Signatures) {
W.OS << char(wasm::WASM_TYPE_FUNC);
encodeULEB128(Sig.Params.size(), W.OS);
for (wasm::ValType Ty : Sig.Params)
writeValueType(Ty);
encodeULEB128(Sig.Returns.size(), W.OS);
for (wasm::ValType Ty : Sig.Returns)
writeValueType(Ty);
}
endSection(Section);
}
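// Illustrative layout (editor's sketch): a single (i32) -> () signature is
// emitted as
//   count=0x01, 0x60 (WASM_TYPE_FUNC), 0x01, 0x7F (i32), 0x00
// i.e. the function-type tag, the parameter count and types, then the
// return count and types.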
void WasmObjectWriter::writeImportSection(ArrayRef<wasm::WasmImport> Imports,
uint32_t DataSize,
uint32_t NumElements) {
if (Imports.empty())
return;
uint32_t NumPages = (DataSize + wasm::WasmPageSize - 1) / wasm::WasmPageSize;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_IMPORT);
encodeULEB128(Imports.size(), W.OS);
for (const wasm::WasmImport &Import : Imports) {
writeString(Import.Module);
writeString(Import.Field);
W.OS << char(Import.Kind);
switch (Import.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
encodeULEB128(Import.SigIndex, W.OS);
break;
case wasm::WASM_EXTERNAL_GLOBAL:
W.OS << char(Import.Global.Type);
W.OS << char(Import.Global.Mutable ? 1 : 0);
break;
case wasm::WASM_EXTERNAL_MEMORY:
encodeULEB128(0, W.OS); // flags
encodeULEB128(NumPages, W.OS); // initial
break;
case wasm::WASM_EXTERNAL_TABLE:
W.OS << char(Import.Table.ElemType);
encodeULEB128(0, W.OS); // flags
encodeULEB128(NumElements, W.OS); // initial
break;
case wasm::WASM_EXTERNAL_EVENT:
encodeULEB128(Import.Event.Attribute, W.OS);
encodeULEB128(Import.Event.SigIndex, W.OS);
break;
default:
llvm_unreachable("unsupported import kind");
}
}
endSection(Section);
}
void WasmObjectWriter::writeFunctionSection(ArrayRef<WasmFunction> Functions) {
if (Functions.empty())
return;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_FUNCTION);
encodeULEB128(Functions.size(), W.OS);
for (const WasmFunction &Func : Functions)
encodeULEB128(Func.SigIndex, W.OS);
endSection(Section);
}
void WasmObjectWriter::writeGlobalSection() {
if (Globals.empty())
return;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_GLOBAL);
encodeULEB128(Globals.size(), W.OS);
for (const WasmGlobal &Global : Globals) {
writeValueType(static_cast<wasm::ValType>(Global.Type.Type));
W.OS << char(Global.Type.Mutable);
W.OS << char(wasm::WASM_OPCODE_I32_CONST);
encodeSLEB128(Global.InitialValue, W.OS);
W.OS << char(wasm::WASM_OPCODE_END);
}
endSection(Section);
}
void WasmObjectWriter::writeEventSection(ArrayRef<wasm::WasmEventType> Events) {
if (Events.empty())
return;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_EVENT);
encodeULEB128(Events.size(), W.OS);
for (const wasm::WasmEventType &Event : Events) {
encodeULEB128(Event.Attribute, W.OS);
encodeULEB128(Event.SigIndex, W.OS);
}
endSection(Section);
}
void WasmObjectWriter::writeExportSection(ArrayRef<wasm::WasmExport> Exports) {
if (Exports.empty())
return;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_EXPORT);
encodeULEB128(Exports.size(), W.OS);
for (const wasm::WasmExport &Export : Exports) {
writeString(Export.Name);
W.OS << char(Export.Kind);
encodeULEB128(Export.Index, W.OS);
}
endSection(Section);
}
void WasmObjectWriter::writeElemSection(ArrayRef<uint32_t> TableElems) {
if (TableElems.empty())
return;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_ELEM);
encodeULEB128(1, W.OS); // number of "segments"
encodeULEB128(0, W.OS); // the table index
// init expr for starting offset
W.OS << char(wasm::WASM_OPCODE_I32_CONST);
encodeSLEB128(kInitialTableOffset, W.OS);
W.OS << char(wasm::WASM_OPCODE_END);
encodeULEB128(TableElems.size(), W.OS);
for (uint32_t Elem : TableElems)
encodeULEB128(Elem, W.OS);
endSection(Section);
}
void WasmObjectWriter::writeCodeSection(const MCAssembler &Asm,
const MCAsmLayout &Layout,
ArrayRef<WasmFunction> Functions) {
if (Functions.empty())
return;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_CODE);
CodeSectionIndex = Section.Index;
encodeULEB128(Functions.size(), W.OS);
for (const WasmFunction &Func : Functions) {
auto &FuncSection = static_cast<MCSectionWasm &>(Func.Sym->getSection());
int64_t Size = 0;
if (!Func.Sym->getSize()->evaluateAsAbsolute(Size, Layout))
report_fatal_error(".size expression must be evaluatable");
encodeULEB128(Size, W.OS);
FuncSection.setSectionOffset(W.OS.tell() - Section.ContentsOffset);
Asm.writeSectionData(W.OS, &FuncSection, Layout);
}
// Apply fixups.
applyRelocations(CodeRelocations, Section.ContentsOffset);
endSection(Section);
}
void WasmObjectWriter::writeDataSection() {
if (DataSegments.empty())
return;
SectionBookkeeping Section;
startSection(Section, wasm::WASM_SEC_DATA);
DataSectionIndex = Section.Index;
encodeULEB128(DataSegments.size(), W.OS); // count
for (const WasmDataSegment &Segment : DataSegments) {
encodeULEB128(0, W.OS); // memory index
W.OS << char(wasm::WASM_OPCODE_I32_CONST);
encodeSLEB128(Segment.Offset, W.OS); // offset
W.OS << char(wasm::WASM_OPCODE_END);
encodeULEB128(Segment.Data.size(), W.OS); // size
Segment.Section->setSectionOffset(W.OS.tell() - Section.ContentsOffset);
W.OS << Segment.Data; // data
}
// Apply fixups.
applyRelocations(DataRelocations, Section.ContentsOffset);
endSection(Section);
}
void WasmObjectWriter::writeRelocSection(
uint32_t SectionIndex, StringRef Name,
std::vector<WasmRelocationEntry> &Relocs) {
// See: https://github.com/WebAssembly/tool-conventions/blob/master/Linking.md
// for descriptions of the reloc sections.
if (Relocs.empty())
return;
// First, ensure the relocations are sorted in offset order. In general they
// should already be sorted since `recordRelocation` is called in offset
// order, but for the code section we combine many MC sections into a single
// wasm section, and this order is determined by the order of Asm.Symbols(),
// not the order of the sections.
std::stable_sort(
Relocs.begin(), Relocs.end(),
[](const WasmRelocationEntry &A, const WasmRelocationEntry &B) {
return (A.Offset + A.FixupSection->getSectionOffset()) <
(B.Offset + B.FixupSection->getSectionOffset());
});
SectionBookkeeping Section;
startCustomSection(Section, std::string("reloc.") + Name.str());
encodeULEB128(SectionIndex, W.OS);
encodeULEB128(Relocs.size(), W.OS);
for (const WasmRelocationEntry &RelEntry : Relocs) {
uint64_t Offset =
RelEntry.Offset + RelEntry.FixupSection->getSectionOffset();
uint32_t Index = getRelocationIndexValue(RelEntry);
W.OS << char(RelEntry.Type);
encodeULEB128(Offset, W.OS);
encodeULEB128(Index, W.OS);
if (RelEntry.hasAddend())
encodeSLEB128(RelEntry.Addend, W.OS);
}
endSection(Section);
}
void WasmObjectWriter::writeCustomRelocSections() {
for (const auto &Sec : CustomSections) {
auto &Relocations = CustomSectionsRelocations[Sec.Section];
writeRelocSection(Sec.OutputIndex, Sec.Name, Relocations);
}
}
void WasmObjectWriter::writeLinkingMetaDataSection(
ArrayRef<wasm::WasmSymbolInfo> SymbolInfos,
ArrayRef<std::pair<uint16_t, uint32_t>> InitFuncs,
const std::map<StringRef, std::vector<WasmComdatEntry>> &Comdats) {
SectionBookkeeping Section;
startCustomSection(Section, "linking");
encodeULEB128(wasm::WasmMetadataVersion, W.OS);
SectionBookkeeping SubSection;
if (SymbolInfos.size() != 0) {
startSection(SubSection, wasm::WASM_SYMBOL_TABLE);
encodeULEB128(SymbolInfos.size(), W.OS);
for (const wasm::WasmSymbolInfo &Sym : SymbolInfos) {
encodeULEB128(Sym.Kind, W.OS);
encodeULEB128(Sym.Flags, W.OS);
switch (Sym.Kind) {
case wasm::WASM_SYMBOL_TYPE_FUNCTION:
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
case wasm::WASM_SYMBOL_TYPE_EVENT:
encodeULEB128(Sym.ElementIndex, W.OS);
- if ((Sym.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0)
+ if ((Sym.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0 ||
+ (Sym.Flags & wasm::WASM_SYMBOL_EXPLICIT_NAME) != 0)
writeString(Sym.Name);
break;
case wasm::WASM_SYMBOL_TYPE_DATA:
writeString(Sym.Name);
if ((Sym.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0) {
encodeULEB128(Sym.DataRef.Segment, W.OS);
encodeULEB128(Sym.DataRef.Offset, W.OS);
encodeULEB128(Sym.DataRef.Size, W.OS);
}
break;
case wasm::WASM_SYMBOL_TYPE_SECTION: {
const uint32_t SectionIndex =
CustomSections[Sym.ElementIndex].OutputIndex;
encodeULEB128(SectionIndex, W.OS);
break;
}
default:
llvm_unreachable("unexpected kind");
}
}
endSection(SubSection);
}
if (DataSegments.size()) {
startSection(SubSection, wasm::WASM_SEGMENT_INFO);
encodeULEB128(DataSegments.size(), W.OS);
for (const WasmDataSegment &Segment : DataSegments) {
writeString(Segment.Name);
encodeULEB128(Segment.Alignment, W.OS);
encodeULEB128(Segment.Flags, W.OS);
}
endSection(SubSection);
}
if (!InitFuncs.empty()) {
startSection(SubSection, wasm::WASM_INIT_FUNCS);
encodeULEB128(InitFuncs.size(), W.OS);
for (auto &StartFunc : InitFuncs) {
encodeULEB128(StartFunc.first, W.OS); // priority
encodeULEB128(StartFunc.second, W.OS); // function index
}
endSection(SubSection);
}
if (Comdats.size()) {
startSection(SubSection, wasm::WASM_COMDAT_INFO);
encodeULEB128(Comdats.size(), W.OS);
for (const auto &C : Comdats) {
writeString(C.first);
encodeULEB128(0, W.OS); // flags for future use
encodeULEB128(C.second.size(), W.OS);
for (const WasmComdatEntry &Entry : C.second) {
encodeULEB128(Entry.Kind, W.OS);
encodeULEB128(Entry.Index, W.OS);
}
}
endSection(SubSection);
}
endSection(Section);
}
void WasmObjectWriter::writeCustomSections(const MCAssembler &Asm,
const MCAsmLayout &Layout) {
for (auto &CustomSection : CustomSections) {
SectionBookkeeping Section;
auto *Sec = CustomSection.Section;
startCustomSection(Section, CustomSection.Name);
Sec->setSectionOffset(W.OS.tell() - Section.ContentsOffset);
Asm.writeSectionData(W.OS, Sec, Layout);
CustomSection.OutputContentsOffset = Section.ContentsOffset;
CustomSection.OutputIndex = Section.Index;
endSection(Section);
// Apply fixups.
auto &Relocations = CustomSectionsRelocations[CustomSection.Section];
applyRelocations(Relocations, CustomSection.OutputContentsOffset);
}
}
uint32_t WasmObjectWriter::getFunctionType(const MCSymbolWasm &Symbol) {
assert(Symbol.isFunction());
assert(TypeIndices.count(&Symbol));
return TypeIndices[&Symbol];
}
uint32_t WasmObjectWriter::getEventType(const MCSymbolWasm &Symbol) {
assert(Symbol.isEvent());
assert(TypeIndices.count(&Symbol));
return TypeIndices[&Symbol];
}
void WasmObjectWriter::registerFunctionType(const MCSymbolWasm &Symbol) {
assert(Symbol.isFunction());
WasmSignature S;
const MCSymbolWasm *ResolvedSym = ResolveSymbol(Symbol);
if (auto *Sig = ResolvedSym->getSignature()) {
S.Returns = Sig->Returns;
S.Params = Sig->Params;
}
auto Pair = SignatureIndices.insert(std::make_pair(S, Signatures.size()));
if (Pair.second)
Signatures.push_back(S);
TypeIndices[&Symbol] = Pair.first->second;
LLVM_DEBUG(dbgs() << "registerFunctionType: " << Symbol
<< " new:" << Pair.second << "\n");
LLVM_DEBUG(dbgs() << " -> type index: " << Pair.first->second << "\n");
}
void WasmObjectWriter::registerEventType(const MCSymbolWasm &Symbol) {
assert(Symbol.isEvent());
// TODO: Currently we don't generate imported exceptions, but if we do, we
// should have a way of inferring types of imported exceptions.
WasmSignature S;
if (auto *Sig = Symbol.getSignature()) {
S.Returns = Sig->Returns;
S.Params = Sig->Params;
}
auto Pair = SignatureIndices.insert(std::make_pair(S, Signatures.size()));
if (Pair.second)
Signatures.push_back(S);
TypeIndices[&Symbol] = Pair.first->second;
LLVM_DEBUG(dbgs() << "registerEventType: " << Symbol << " new:" << Pair.second
<< "\n");
LLVM_DEBUG(dbgs() << " -> type index: " << Pair.first->second << "\n");
}
static bool isInSymtab(const MCSymbolWasm &Sym) {
if (Sym.isUsedInReloc())
return true;
if (Sym.isComdat() && !Sym.isDefined())
return false;
if (Sym.isTemporary() && Sym.getName().empty())
return false;
if (Sym.isTemporary() && Sym.isData() && !Sym.getSize())
return false;
if (Sym.isSection())
return false;
return true;
}
uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
const MCAsmLayout &Layout) {
uint64_t StartOffset = W.OS.tell();
LLVM_DEBUG(dbgs() << "WasmObjectWriter::writeObject\n");
MCContext &Ctx = Asm.getContext();
// Collect information from the available symbols.
SmallVector<WasmFunction, 4> Functions;
SmallVector<uint32_t, 4> TableElems;
SmallVector<wasm::WasmImport, 4> Imports;
SmallVector<wasm::WasmExport, 4> Exports;
SmallVector<wasm::WasmEventType, 1> Events;
SmallVector<wasm::WasmSymbolInfo, 4> SymbolInfos;
SmallVector<std::pair<uint16_t, uint32_t>, 2> InitFuncs;
std::map<StringRef, std::vector<WasmComdatEntry>> Comdats;
uint32_t DataSize = 0;
// For now, always emit the memory import, since loads and stores are not
// valid without it. In the future, we could perhaps be more clever and omit
// it if there are no loads or stores.
MCSymbolWasm *MemorySym =
cast<MCSymbolWasm>(Ctx.getOrCreateSymbol("__linear_memory"));
wasm::WasmImport MemImport;
- MemImport.Module = MemorySym->getModuleName();
- MemImport.Field = MemorySym->getName();
+ MemImport.Module = MemorySym->getImportModule();
+ MemImport.Field = MemorySym->getImportName();
MemImport.Kind = wasm::WASM_EXTERNAL_MEMORY;
Imports.push_back(MemImport);
// For now, always emit the table section, since indirect calls are not
// valid without it. In the future, we could perhaps be more clever and omit
// it if there are no indirect calls.
MCSymbolWasm *TableSym =
cast<MCSymbolWasm>(Ctx.getOrCreateSymbol("__indirect_function_table"));
wasm::WasmImport TableImport;
- TableImport.Module = TableSym->getModuleName();
- TableImport.Field = TableSym->getName();
+ TableImport.Module = TableSym->getImportModule();
+ TableImport.Field = TableSym->getImportName();
TableImport.Kind = wasm::WASM_EXTERNAL_TABLE;
TableImport.Table.ElemType = wasm::WASM_TYPE_FUNCREF;
Imports.push_back(TableImport);
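// Note that both synthetic imports above take their module/field names from
// the symbol's import module/name, which may differ from the symbol name
// when an explicit import name has been set.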
// Populate SignatureIndices, and Imports and WasmIndices for undefined
// symbols. This must be done before populating WasmIndices for defined
// symbols.
for (const MCSymbol &S : Asm.symbols()) {
const auto &WS = static_cast<const MCSymbolWasm &>(S);
// Register types for all functions, including those with private linkage
// (because wasm always needs a type signature).
if (WS.isFunction())
registerFunctionType(WS);
if (WS.isEvent())
registerEventType(WS);
if (WS.isTemporary())
continue;
// If the symbol is not defined in this translation unit, import it.
if (!WS.isDefined() && !WS.isComdat()) {
if (WS.isFunction()) {
wasm::WasmImport Import;
- Import.Module = WS.getModuleName();
- Import.Field = WS.getName();
+ Import.Module = WS.getImportModule();
+ Import.Field = WS.getImportName();
Import.Kind = wasm::WASM_EXTERNAL_FUNCTION;
Import.SigIndex = getFunctionType(WS);
Imports.push_back(Import);
WasmIndices[&WS] = NumFunctionImports++;
} else if (WS.isGlobal()) {
if (WS.isWeak())
report_fatal_error("undefined global symbol cannot be weak");
wasm::WasmImport Import;
- Import.Module = WS.getModuleName();
- Import.Field = WS.getName();
+ Import.Module = WS.getImportModule();
+ Import.Field = WS.getImportName();
Import.Kind = wasm::WASM_EXTERNAL_GLOBAL;
Import.Global = WS.getGlobalType();
Imports.push_back(Import);
WasmIndices[&WS] = NumGlobalImports++;
} else if (WS.isEvent()) {
if (WS.isWeak())
report_fatal_error("undefined event symbol cannot be weak");
wasm::WasmImport Import;
- Import.Module = WS.getModuleName();
- Import.Field = WS.getName();
+ Import.Module = WS.getImportModule();
+ Import.Field = WS.getImportName();
Import.Kind = wasm::WASM_EXTERNAL_EVENT;
Import.Event.Attribute = wasm::WASM_EVENT_ATTRIBUTE_EXCEPTION;
Import.Event.SigIndex = getEventType(WS);
Imports.push_back(Import);
WasmIndices[&WS] = NumEventImports++;
}
}
}
// Populate DataSegments and CustomSections, which must be done before
// populating DataLocations.
for (MCSection &Sec : Asm) {
auto &Section = static_cast<MCSectionWasm &>(Sec);
StringRef SectionName = Section.getSectionName();
// .init_array sections are handled specially elsewhere.
if (SectionName.startswith(".init_array"))
continue;
// Code is handled separately
if (Section.getKind().isText())
continue;
if (Section.isWasmData()) {
uint32_t SegmentIndex = DataSegments.size();
DataSize = alignTo(DataSize, Section.getAlignment());
DataSegments.emplace_back();
WasmDataSegment &Segment = DataSegments.back();
Segment.Name = SectionName;
Segment.Offset = DataSize;
Segment.Section = &Section;
addData(Segment.Data, Section);
Segment.Alignment = Log2_32(Section.getAlignment());
Segment.Flags = 0;
DataSize += Segment.Data.size();
Section.setSegmentIndex(SegmentIndex);
if (const MCSymbolWasm *C = Section.getGroup()) {
Comdats[C->getName()].emplace_back(
WasmComdatEntry{wasm::WASM_COMDAT_DATA, SegmentIndex});
}
} else {
// Create custom sections
assert(Sec.getKind().isMetadata());
StringRef Name = SectionName;
// For user-defined custom sections, strip the prefix
if (Name.startswith(".custom_section."))
Name = Name.substr(strlen(".custom_section."));
MCSymbol *Begin = Sec.getBeginSymbol();
if (Begin) {
WasmIndices[cast<MCSymbolWasm>(Begin)] = CustomSections.size();
if (SectionName != Begin->getName())
report_fatal_error("section name and begin symbol should match: " +
Twine(SectionName));
}
CustomSections.emplace_back(Name, &Section);
}
}
// Populate WasmIndices and DataLocations for defined symbols.
for (const MCSymbol &S : Asm.symbols()) {
// Ignore unnamed temporary symbols, which aren't ever exported, imported,
// or used in relocations.
if (S.isTemporary() && S.getName().empty())
continue;
const auto &WS = static_cast<const MCSymbolWasm &>(S);
LLVM_DEBUG(
dbgs() << "MCSymbol: " << toString(WS.getType()) << " '" << S << "'"
<< " isDefined=" << S.isDefined() << " isExternal="
<< S.isExternal() << " isTemporary=" << S.isTemporary()
<< " isWeak=" << WS.isWeak() << " isHidden=" << WS.isHidden()
<< " isVariable=" << WS.isVariable() << "\n");
if (WS.isVariable())
continue;
if (WS.isComdat() && !WS.isDefined())
continue;
if (WS.isFunction()) {
unsigned Index;
if (WS.isDefined()) {
if (WS.getOffset() != 0)
report_fatal_error(
"function sections must contain one function each");
if (WS.getSize() == 0)
report_fatal_error(
"function symbols must have a size set with .size");
// A definition. Write out the function body.
Index = NumFunctionImports + Functions.size();
WasmFunction Func;
Func.SigIndex = getFunctionType(WS);
Func.Sym = &WS;
WasmIndices[&WS] = Index;
Functions.push_back(Func);
auto &Section = static_cast<MCSectionWasm &>(WS.getSection());
if (const MCSymbolWasm *C = Section.getGroup()) {
Comdats[C->getName()].emplace_back(
WasmComdatEntry{wasm::WASM_COMDAT_FUNCTION, Index});
}
} else {
// An import; the index was assigned above.
Index = WasmIndices.find(&WS)->second;
}
LLVM_DEBUG(dbgs() << " -> function index: " << Index << "\n");
} else if (WS.isData()) {
if (WS.isTemporary() && !WS.getSize())
continue;
if (!WS.isDefined()) {
LLVM_DEBUG(dbgs() << " -> segment index: -1"
<< "\n");
continue;
}
if (!WS.getSize())
report_fatal_error("data symbols must have a size set with .size: " +
WS.getName());
int64_t Size = 0;
if (!WS.getSize()->evaluateAsAbsolute(Size, Layout))
report_fatal_error(".size expression must be evaluatable");
auto &DataSection = static_cast<MCSectionWasm &>(WS.getSection());
assert(DataSection.isWasmData());
// For each data symbol, export it in the symtab as a reference to the
// corresponding Wasm data segment.
wasm::WasmDataReference Ref = wasm::WasmDataReference{
DataSection.getSegmentIndex(),
static_cast<uint32_t>(Layout.getSymbolOffset(WS)),
static_cast<uint32_t>(Size)};
DataLocations[&WS] = Ref;
LLVM_DEBUG(dbgs() << " -> segment index: " << Ref.Segment << "\n");
} else if (WS.isGlobal()) {
// A "true" Wasm global (currently just __stack_pointer)
if (WS.isDefined())
report_fatal_error("don't yet support defined globals");
// An import; the index was assigned above
LLVM_DEBUG(dbgs() << " -> global index: "
<< WasmIndices.find(&WS)->second << "\n");
} else if (WS.isEvent()) {
// C++ exception symbol (__cpp_exception)
unsigned Index;
if (WS.isDefined()) {
Index = NumEventImports + Events.size();
wasm::WasmEventType Event;
Event.SigIndex = getEventType(WS);
Event.Attribute = wasm::WASM_EVENT_ATTRIBUTE_EXCEPTION;
WasmIndices[&WS] = Index;
Events.push_back(Event);
} else {
// An import; the index was assigned above.
Index = WasmIndices.find(&WS)->second;
}
LLVM_DEBUG(dbgs() << " -> event index: " << WasmIndices.find(&WS)->second
<< "\n");
} else {
assert(WS.isSection());
}
}
// Populate WasmIndices and DataLocations for aliased symbols. We need to
// process these in a separate pass because we need to have processed the
// target of the alias before the alias itself and the symbols are not
// necessarily ordered in this way.
for (const MCSymbol &S : Asm.symbols()) {
if (!S.isVariable())
continue;
assert(S.isDefined());
// Find the target symbol of this weak alias and export that index
const auto &WS = static_cast<const MCSymbolWasm &>(S);
const MCSymbolWasm *ResolvedSym = ResolveSymbol(WS);
LLVM_DEBUG(dbgs() << WS.getName() << ": weak alias of '" << *ResolvedSym
<< "'\n");
if (WS.isFunction()) {
assert(WasmIndices.count(ResolvedSym) > 0);
uint32_t WasmIndex = WasmIndices.find(ResolvedSym)->second;
WasmIndices[&WS] = WasmIndex;
LLVM_DEBUG(dbgs() << " -> index:" << WasmIndex << "\n");
} else if (WS.isData()) {
assert(DataLocations.count(ResolvedSym) > 0);
const wasm::WasmDataReference &Ref =
DataLocations.find(ResolvedSym)->second;
DataLocations[&WS] = Ref;
LLVM_DEBUG(dbgs() << " -> index:" << Ref.Segment << "\n");
} else {
report_fatal_error("don't yet support global/event aliases");
}
}
// Finally, populate the symbol table itself, in its "natural" order.
for (const MCSymbol &S : Asm.symbols()) {
const auto &WS = static_cast<const MCSymbolWasm &>(S);
if (!isInSymtab(WS)) {
WS.setIndex(INVALID_INDEX);
continue;
}
LLVM_DEBUG(dbgs() << "adding to symtab: " << WS << "\n");
uint32_t Flags = 0;
if (WS.isWeak())
Flags |= wasm::WASM_SYMBOL_BINDING_WEAK;
if (WS.isHidden())
Flags |= wasm::WASM_SYMBOL_VISIBILITY_HIDDEN;
if (!WS.isExternal() && WS.isDefined())
Flags |= wasm::WASM_SYMBOL_BINDING_LOCAL;
if (WS.isUndefined())
Flags |= wasm::WASM_SYMBOL_UNDEFINED;
+ if (WS.getName() != WS.getImportName())
+ Flags |= wasm::WASM_SYMBOL_EXPLICIT_NAME;
wasm::WasmSymbolInfo Info;
Info.Name = WS.getName();
Info.Kind = WS.getType();
Info.Flags = Flags;
if (!WS.isData()) {
assert(WasmIndices.count(&WS) > 0);
Info.ElementIndex = WasmIndices.find(&WS)->second;
} else if (WS.isDefined()) {
assert(DataLocations.count(&WS) > 0);
Info.DataRef = DataLocations.find(&WS)->second;
}
WS.setIndex(SymbolInfos.size());
SymbolInfos.emplace_back(Info);
}
{
auto HandleReloc = [&](const WasmRelocationEntry &Rel) {
// Functions referenced by a relocation need to be put in the table. This is
// purely to make the object file's provisional values readable, and is
// ignored by the linker, which re-calculates the relocations itself.
if (Rel.Type != wasm::R_WEBASSEMBLY_TABLE_INDEX_I32 &&
Rel.Type != wasm::R_WEBASSEMBLY_TABLE_INDEX_SLEB)
return;
assert(Rel.Symbol->isFunction());
const MCSymbolWasm &WS = *ResolveSymbol(*Rel.Symbol);
uint32_t FunctionIndex = WasmIndices.find(&WS)->second;
uint32_t TableIndex = TableElems.size() + kInitialTableOffset;
if (TableIndices.try_emplace(&WS, TableIndex).second) {
LLVM_DEBUG(dbgs() << " -> adding " << WS.getName()
<< " to table: " << TableIndex << "\n");
TableElems.push_back(FunctionIndex);
registerFunctionType(WS);
}
};
for (const WasmRelocationEntry &RelEntry : CodeRelocations)
HandleReloc(RelEntry);
for (const WasmRelocationEntry &RelEntry : DataRelocations)
HandleReloc(RelEntry);
}
// Translate .init_array section contents into start functions.
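// Each pointer-sized fixup found below becomes a (priority, symbol index)
// pair in InitFuncs, later emitted in the linking section's WASM_INIT_FUNCS
// subsection; the priority defaults to UINT16_MAX unless the section name
// carries a numeric ".init_array.<N>" suffix.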
for (const MCSection &S : Asm) {
const auto &WS = static_cast<const MCSectionWasm &>(S);
if (WS.getSectionName().startswith(".fini_array"))
report_fatal_error(".fini_array sections are unsupported");
if (!WS.getSectionName().startswith(".init_array"))
continue;
if (WS.getFragmentList().empty())
continue;
// .init_array is expected to contain a single non-empty data fragment,
// preceded by an (empty) data fragment and an alignment fragment, giving
// three fragments in total.
if (WS.getFragmentList().size() != 3)
report_fatal_error("only one .init_array section fragment supported");
auto IT = WS.begin();
const MCFragment &EmptyFrag = *IT;
if (EmptyFrag.getKind() != MCFragment::FT_Data)
report_fatal_error(".init_array section should be aligned");
IT = std::next(IT);
const MCFragment &AlignFrag = *IT;
if (AlignFrag.getKind() != MCFragment::FT_Align)
report_fatal_error(".init_array section should be aligned");
if (cast<MCAlignFragment>(AlignFrag).getAlignment() != (is64Bit() ? 8 : 4))
report_fatal_error(".init_array section should be aligned for pointers");
const MCFragment &Frag = *std::next(IT);
if (Frag.hasInstructions() || Frag.getKind() != MCFragment::FT_Data)
report_fatal_error("only data supported in .init_array section");
uint16_t Priority = UINT16_MAX;
unsigned PrefixLength = strlen(".init_array");
if (WS.getSectionName().size() > PrefixLength) {
if (WS.getSectionName()[PrefixLength] != '.')
report_fatal_error(
".init_array section priority should start with '.'");
if (WS.getSectionName()
.substr(PrefixLength + 1)
.getAsInteger(10, Priority))
report_fatal_error("invalid .init_array section priority");
}
const auto &DataFrag = cast<MCDataFragment>(Frag);
const SmallVectorImpl<char> &Contents = DataFrag.getContents();
for (const uint8_t *
p = (const uint8_t *)Contents.data(),
*end = (const uint8_t *)Contents.data() + Contents.size();
p != end; ++p) {
if (*p != 0)
report_fatal_error("non-symbolic data in .init_array section");
}
for (const MCFixup &Fixup : DataFrag.getFixups()) {
assert(Fixup.getKind() ==
MCFixup::getKindForSize(is64Bit() ? 8 : 4, false));
const MCExpr *Expr = Fixup.getValue();
auto *Sym = dyn_cast<MCSymbolRefExpr>(Expr);
if (!Sym)
report_fatal_error("fixups in .init_array should be symbol references");
if (Sym->getKind() != MCSymbolRefExpr::VK_WebAssembly_FUNCTION)
report_fatal_error("symbols in .init_array should be for functions");
if (Sym->getSymbol().getIndex() == INVALID_INDEX)
report_fatal_error("symbols in .init_array should exist in symbtab");
InitFuncs.push_back(
std::make_pair(Priority, Sym->getSymbol().getIndex()));
}
}
// Write out the Wasm header.
writeHeader(Asm);
writeTypeSection(Signatures);
writeImportSection(Imports, DataSize, TableElems.size());
writeFunctionSection(Functions);
// Skip the "table" section; we import the table instead.
// Skip the "memory" section; we import the memory instead.
writeGlobalSection();
writeEventSection(Events);
writeExportSection(Exports);
writeElemSection(TableElems);
writeCodeSection(Asm, Layout, Functions);
writeDataSection();
writeCustomSections(Asm, Layout);
writeLinkingMetaDataSection(SymbolInfos, InitFuncs, Comdats);
writeRelocSection(CodeSectionIndex, "CODE", CodeRelocations);
writeRelocSection(DataSectionIndex, "DATA", DataRelocations);
writeCustomRelocSections();
// TODO: Translate the .comment section to the output.
return W.OS.tell() - StartOffset;
}
std::unique_ptr<MCObjectWriter>
llvm::createWasmObjectWriter(std::unique_ptr<MCWasmObjectTargetWriter> MOTW,
raw_pwrite_stream &OS) {
return llvm::make_unique<WasmObjectWriter>(std::move(MOTW), OS);
}
Index: vendor/llvm/dist-release_80/lib/Object/WasmObjectFile.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Object/WasmObjectFile.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Object/WasmObjectFile.cpp (revision 344166)
@@ -1,1499 +1,1513 @@
//===- WasmObjectFile.cpp - Wasm object file implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Object/Wasm.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/ScopedPrinter.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <system_error>
#define DEBUG_TYPE "wasm-object"
using namespace llvm;
using namespace object;
void WasmSymbol::print(raw_ostream &Out) const {
Out << "Name=" << Info.Name
<< ", Kind=" << toString(wasm::WasmSymbolType(Info.Kind))
<< ", Flags=" << Info.Flags;
if (!isTypeData()) {
Out << ", ElemIndex=" << Info.ElementIndex;
} else if (isDefined()) {
Out << ", Segment=" << Info.DataRef.Segment;
Out << ", Offset=" << Info.DataRef.Offset;
Out << ", Size=" << Info.DataRef.Size;
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void WasmSymbol::dump() const { print(dbgs()); }
#endif
Expected<std::unique_ptr<WasmObjectFile>>
ObjectFile::createWasmObjectFile(MemoryBufferRef Buffer) {
Error Err = Error::success();
auto ObjectFile = llvm::make_unique<WasmObjectFile>(Buffer, Err);
if (Err)
return std::move(Err);
return std::move(ObjectFile);
}
#define VARINT7_MAX ((1 << 7) - 1)
#define VARINT7_MIN (-(1 << 7))
#define VARUINT7_MAX (1 << 7)
#define VARUINT1_MAX (1)
static uint8_t readUint8(WasmObjectFile::ReadContext &Ctx) {
if (Ctx.Ptr == Ctx.End)
report_fatal_error("EOF while reading uint8");
return *Ctx.Ptr++;
}
static uint32_t readUint32(WasmObjectFile::ReadContext &Ctx) {
if (Ctx.Ptr + 4 > Ctx.End)
report_fatal_error("EOF while reading uint32");
uint32_t Result = support::endian::read32le(Ctx.Ptr);
Ctx.Ptr += 4;
return Result;
}
static int32_t readFloat32(WasmObjectFile::ReadContext &Ctx) {
if (Ctx.Ptr + 4 > Ctx.End)
report_fatal_error("EOF while reading float64");
int32_t Result = 0;
memcpy(&Result, Ctx.Ptr, sizeof(Result));
Ctx.Ptr += sizeof(Result);
return Result;
}
static int64_t readFloat64(WasmObjectFile::ReadContext &Ctx) {
if (Ctx.Ptr + 8 > Ctx.End)
report_fatal_error("EOF while reading float64");
int64_t Result = 0;
memcpy(&Result, Ctx.Ptr, sizeof(Result));
Ctx.Ptr += sizeof(Result);
return Result;
}
static uint64_t readULEB128(WasmObjectFile::ReadContext &Ctx) {
unsigned Count;
const char *Error = nullptr;
uint64_t Result = decodeULEB128(Ctx.Ptr, &Count, Ctx.End, &Error);
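// Example: the ULEB128 byte sequence 0xE5 0x8E 0x26 decodes to 624485
// (7-bit groups, least-significant first, with the high bit of each byte
// used as a continuation flag).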
if (Error)
report_fatal_error(Error);
Ctx.Ptr += Count;
return Result;
}
static StringRef readString(WasmObjectFile::ReadContext &Ctx) {
uint32_t StringLen = readULEB128(Ctx);
if (Ctx.Ptr + StringLen > Ctx.End)
report_fatal_error("EOF while reading string");
StringRef Return =
StringRef(reinterpret_cast<const char *>(Ctx.Ptr), StringLen);
Ctx.Ptr += StringLen;
return Return;
}
static int64_t readLEB128(WasmObjectFile::ReadContext &Ctx) {
unsigned Count;
const char *Error = nullptr;
uint64_t Result = decodeSLEB128(Ctx.Ptr, &Count, Ctx.End, &Error);
if (Error)
report_fatal_error(Error);
Ctx.Ptr += Count;
return Result;
}
static uint8_t readVaruint1(WasmObjectFile::ReadContext &Ctx) {
int64_t result = readLEB128(Ctx);
if (result > VARUINT1_MAX || result < 0)
report_fatal_error("LEB is outside Varuint1 range");
return result;
}
static int32_t readVarint32(WasmObjectFile::ReadContext &Ctx) {
int64_t result = readLEB128(Ctx);
if (result > INT32_MAX || result < INT32_MIN)
report_fatal_error("LEB is outside Varint32 range");
return result;
}
static uint32_t readVaruint32(WasmObjectFile::ReadContext &Ctx) {
uint64_t result = readULEB128(Ctx);
if (result > UINT32_MAX)
report_fatal_error("LEB is outside Varuint32 range");
return result;
}
static int64_t readVarint64(WasmObjectFile::ReadContext &Ctx) {
return readLEB128(Ctx);
}
static uint8_t readOpcode(WasmObjectFile::ReadContext &Ctx) {
return readUint8(Ctx);
}
static Error readInitExpr(wasm::WasmInitExpr &Expr,
WasmObjectFile::ReadContext &Ctx) {
Expr.Opcode = readOpcode(Ctx);
switch (Expr.Opcode) {
case wasm::WASM_OPCODE_I32_CONST:
Expr.Value.Int32 = readVarint32(Ctx);
break;
case wasm::WASM_OPCODE_I64_CONST:
Expr.Value.Int64 = readVarint64(Ctx);
break;
case wasm::WASM_OPCODE_F32_CONST:
Expr.Value.Float32 = readFloat32(Ctx);
break;
case wasm::WASM_OPCODE_F64_CONST:
Expr.Value.Float64 = readFloat64(Ctx);
break;
case wasm::WASM_OPCODE_GLOBAL_GET:
Expr.Value.Global = readULEB128(Ctx);
break;
default:
return make_error<GenericBinaryError>("Invalid opcode in init_expr",
object_error::parse_failed);
}
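// Every init_expr consists of a single constant (or global.get) instruction
// followed by an explicit end opcode, which is checked for below.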
uint8_t EndOpcode = readOpcode(Ctx);
if (EndOpcode != wasm::WASM_OPCODE_END) {
return make_error<GenericBinaryError>("Invalid init_expr",
object_error::parse_failed);
}
return Error::success();
}
static wasm::WasmLimits readLimits(WasmObjectFile::ReadContext &Ctx) {
wasm::WasmLimits Result;
Result.Flags = readVaruint32(Ctx);
Result.Initial = readVaruint32(Ctx);
if (Result.Flags & wasm::WASM_LIMITS_FLAG_HAS_MAX)
Result.Maximum = readVaruint32(Ctx);
return Result;
}
static wasm::WasmTable readTable(WasmObjectFile::ReadContext &Ctx) {
wasm::WasmTable Table;
Table.ElemType = readUint8(Ctx);
Table.Limits = readLimits(Ctx);
return Table;
}
static Error readSection(WasmSection &Section, WasmObjectFile::ReadContext &Ctx,
WasmSectionOrderChecker &Checker) {
Section.Offset = Ctx.Ptr - Ctx.Start;
Section.Type = readUint8(Ctx);
LLVM_DEBUG(dbgs() << "readSection type=" << Section.Type << "\n");
uint32_t Size = readVaruint32(Ctx);
if (Size == 0)
return make_error<StringError>("Zero length section",
object_error::parse_failed);
if (Ctx.Ptr + Size > Ctx.End)
return make_error<StringError>("Section too large",
object_error::parse_failed);
if (Section.Type == wasm::WASM_SEC_CUSTOM) {
WasmObjectFile::ReadContext SectionCtx;
SectionCtx.Start = Ctx.Ptr;
SectionCtx.Ptr = Ctx.Ptr;
SectionCtx.End = Ctx.Ptr + Size;
Section.Name = readString(SectionCtx);
uint32_t SectionNameSize = SectionCtx.Ptr - SectionCtx.Start;
Ctx.Ptr += SectionNameSize;
Size -= SectionNameSize;
}
if (!Checker.isValidSectionOrder(Section.Type, Section.Name)) {
return make_error<StringError>("Out of order section type: " +
llvm::to_string(Section.Type),
object_error::parse_failed);
}
Section.Content = ArrayRef<uint8_t>(Ctx.Ptr, Size);
Ctx.Ptr += Size;
return Error::success();
}
WasmObjectFile::WasmObjectFile(MemoryBufferRef Buffer, Error &Err)
: ObjectFile(Binary::ID_Wasm, Buffer) {
ErrorAsOutParameter ErrAsOutParam(&Err);
Header.Magic = getData().substr(0, 4);
if (Header.Magic != StringRef("\0asm", 4)) {
Err =
make_error<StringError>("Bad magic number", object_error::parse_failed);
return;
}
ReadContext Ctx;
Ctx.Start = getPtr(0);
Ctx.Ptr = Ctx.Start + 4;
Ctx.End = Ctx.Start + getData().size();
if (Ctx.Ptr + 4 > Ctx.End) {
Err = make_error<StringError>("Missing version number",
object_error::parse_failed);
return;
}
Header.Version = readUint32(Ctx);
if (Header.Version != wasm::WasmVersion) {
Err = make_error<StringError>("Bad version number",
object_error::parse_failed);
return;
}
WasmSection Sec;
WasmSectionOrderChecker Checker;
while (Ctx.Ptr < Ctx.End) {
if ((Err = readSection(Sec, Ctx, Checker)))
return;
if ((Err = parseSection(Sec)))
return;
Sections.push_back(Sec);
}
}
Error WasmObjectFile::parseSection(WasmSection &Sec) {
ReadContext Ctx;
Ctx.Start = Sec.Content.data();
Ctx.End = Ctx.Start + Sec.Content.size();
Ctx.Ptr = Ctx.Start;
switch (Sec.Type) {
case wasm::WASM_SEC_CUSTOM:
return parseCustomSection(Sec, Ctx);
case wasm::WASM_SEC_TYPE:
return parseTypeSection(Ctx);
case wasm::WASM_SEC_IMPORT:
return parseImportSection(Ctx);
case wasm::WASM_SEC_FUNCTION:
return parseFunctionSection(Ctx);
case wasm::WASM_SEC_TABLE:
return parseTableSection(Ctx);
case wasm::WASM_SEC_MEMORY:
return parseMemorySection(Ctx);
case wasm::WASM_SEC_GLOBAL:
return parseGlobalSection(Ctx);
case wasm::WASM_SEC_EVENT:
return parseEventSection(Ctx);
case wasm::WASM_SEC_EXPORT:
return parseExportSection(Ctx);
case wasm::WASM_SEC_START:
return parseStartSection(Ctx);
case wasm::WASM_SEC_ELEM:
return parseElemSection(Ctx);
case wasm::WASM_SEC_CODE:
return parseCodeSection(Ctx);
case wasm::WASM_SEC_DATA:
return parseDataSection(Ctx);
default:
return make_error<GenericBinaryError>("Bad section type",
object_error::parse_failed);
}
}
Error WasmObjectFile::parseDylinkSection(ReadContext &Ctx) {
// See https://github.com/WebAssembly/tool-conventions/blob/master/DynamicLinking.md
DylinkInfo.MemorySize = readVaruint32(Ctx);
DylinkInfo.MemoryAlignment = readVaruint32(Ctx);
DylinkInfo.TableSize = readVaruint32(Ctx);
DylinkInfo.TableAlignment = readVaruint32(Ctx);
uint32_t Count = readVaruint32(Ctx);
while (Count--) {
DylinkInfo.Needed.push_back(readString(Ctx));
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("dylink section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseNameSection(ReadContext &Ctx) {
llvm::DenseSet<uint64_t> Seen;
if (Functions.size() != FunctionTypes.size()) {
return make_error<GenericBinaryError>("Names must come after code section",
object_error::parse_failed);
}
while (Ctx.Ptr < Ctx.End) {
uint8_t Type = readUint8(Ctx);
uint32_t Size = readVaruint32(Ctx);
const uint8_t *SubSectionEnd = Ctx.Ptr + Size;
switch (Type) {
case wasm::WASM_NAMES_FUNCTION: {
uint32_t Count = readVaruint32(Ctx);
while (Count--) {
uint32_t Index = readVaruint32(Ctx);
if (!Seen.insert(Index).second)
return make_error<GenericBinaryError>("Function named more than once",
object_error::parse_failed);
StringRef Name = readString(Ctx);
if (!isValidFunctionIndex(Index) || Name.empty())
return make_error<GenericBinaryError>("Invalid name entry",
object_error::parse_failed);
DebugNames.push_back(wasm::WasmFunctionName{Index, Name});
if (isDefinedFunctionIndex(Index))
getDefinedFunction(Index).DebugName = Name;
}
break;
}
// Ignore local names for now
case wasm::WASM_NAMES_LOCAL:
default:
Ctx.Ptr += Size;
break;
}
if (Ctx.Ptr != SubSectionEnd)
return make_error<GenericBinaryError>(
"Name sub-section ended prematurely", object_error::parse_failed);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Name section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseLinkingSection(ReadContext &Ctx) {
HasLinkingSection = true;
if (Functions.size() != FunctionTypes.size()) {
return make_error<GenericBinaryError>(
"Linking data must come after code section",
object_error::parse_failed);
}
LinkingData.Version = readVaruint32(Ctx);
if (LinkingData.Version != wasm::WasmMetadataVersion) {
return make_error<GenericBinaryError>(
"Unexpected metadata version: " + Twine(LinkingData.Version) +
" (Expected: " + Twine(wasm::WasmMetadataVersion) + ")",
object_error::parse_failed);
}
const uint8_t *OrigEnd = Ctx.End;
while (Ctx.Ptr < OrigEnd) {
Ctx.End = OrigEnd;
uint8_t Type = readUint8(Ctx);
uint32_t Size = readVaruint32(Ctx);
LLVM_DEBUG(dbgs() << "readSubsection type=" << int(Type) << " size=" << Size
<< "\n");
Ctx.End = Ctx.Ptr + Size;
switch (Type) {
case wasm::WASM_SYMBOL_TABLE:
if (Error Err = parseLinkingSectionSymtab(Ctx))
return Err;
break;
case wasm::WASM_SEGMENT_INFO: {
uint32_t Count = readVaruint32(Ctx);
if (Count > DataSegments.size())
return make_error<GenericBinaryError>("Too many segment names",
object_error::parse_failed);
for (uint32_t i = 0; i < Count; i++) {
DataSegments[i].Data.Name = readString(Ctx);
DataSegments[i].Data.Alignment = readVaruint32(Ctx);
DataSegments[i].Data.Flags = readVaruint32(Ctx);
}
break;
}
case wasm::WASM_INIT_FUNCS: {
uint32_t Count = readVaruint32(Ctx);
LinkingData.InitFunctions.reserve(Count);
for (uint32_t i = 0; i < Count; i++) {
wasm::WasmInitFunc Init;
Init.Priority = readVaruint32(Ctx);
Init.Symbol = readVaruint32(Ctx);
if (!isValidFunctionSymbol(Init.Symbol))
return make_error<GenericBinaryError>("Invalid function symbol: " +
Twine(Init.Symbol),
object_error::parse_failed);
LinkingData.InitFunctions.emplace_back(Init);
}
break;
}
case wasm::WASM_COMDAT_INFO:
if (Error Err = parseLinkingSectionComdat(Ctx))
return Err;
break;
default:
Ctx.Ptr += Size;
break;
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>(
"Linking sub-section ended prematurely", object_error::parse_failed);
}
if (Ctx.Ptr != OrigEnd)
return make_error<GenericBinaryError>("Linking section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseLinkingSectionSymtab(ReadContext &Ctx) {
uint32_t Count = readVaruint32(Ctx);
LinkingData.SymbolTable.reserve(Count);
Symbols.reserve(Count);
StringSet<> SymbolNames;
std::vector<wasm::WasmImport *> ImportedGlobals;
std::vector<wasm::WasmImport *> ImportedFunctions;
std::vector<wasm::WasmImport *> ImportedEvents;
ImportedGlobals.reserve(Imports.size());
ImportedFunctions.reserve(Imports.size());
ImportedEvents.reserve(Imports.size());
for (auto &I : Imports) {
if (I.Kind == wasm::WASM_EXTERNAL_FUNCTION)
ImportedFunctions.emplace_back(&I);
else if (I.Kind == wasm::WASM_EXTERNAL_GLOBAL)
ImportedGlobals.emplace_back(&I);
else if (I.Kind == wasm::WASM_EXTERNAL_EVENT)
ImportedEvents.emplace_back(&I);
}
while (Count--) {
wasm::WasmSymbolInfo Info;
const wasm::WasmSignature *Signature = nullptr;
const wasm::WasmGlobalType *GlobalType = nullptr;
const wasm::WasmEventType *EventType = nullptr;
Info.Kind = readUint8(Ctx);
Info.Flags = readVaruint32(Ctx);
bool IsDefined = (Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0;
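// Defined symbols carry their name inline in the symbol table; undefined
// symbols normally reuse the corresponding import's field name, unless
// WASM_SYMBOL_EXPLICIT_NAME indicates an explicitly encoded name.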
switch (Info.Kind) {
case wasm::WASM_SYMBOL_TYPE_FUNCTION:
Info.ElementIndex = readVaruint32(Ctx);
if (!isValidFunctionIndex(Info.ElementIndex) ||
IsDefined != isDefinedFunctionIndex(Info.ElementIndex))
return make_error<GenericBinaryError>("invalid function symbol index",
object_error::parse_failed);
if (IsDefined) {
Info.Name = readString(Ctx);
unsigned FuncIndex = Info.ElementIndex - NumImportedFunctions;
Signature = &Signatures[FunctionTypes[FuncIndex]];
wasm::WasmFunction &Function = Functions[FuncIndex];
if (Function.SymbolName.empty())
Function.SymbolName = Info.Name;
} else {
wasm::WasmImport &Import = *ImportedFunctions[Info.ElementIndex];
+ if ((Info.Flags & wasm::WASM_SYMBOL_EXPLICIT_NAME) != 0)
+ Info.Name = readString(Ctx);
+ else
+ Info.Name = Import.Field;
Signature = &Signatures[Import.SigIndex];
- Info.Name = Import.Field;
- Info.Module = Import.Module;
+ Info.ImportName = Import.Field;
+ Info.ImportModule = Import.Module;
}
break;
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
Info.ElementIndex = readVaruint32(Ctx);
if (!isValidGlobalIndex(Info.ElementIndex) ||
IsDefined != isDefinedGlobalIndex(Info.ElementIndex))
return make_error<GenericBinaryError>("invalid global symbol index",
object_error::parse_failed);
if (!IsDefined && (Info.Flags & wasm::WASM_SYMBOL_BINDING_MASK) ==
wasm::WASM_SYMBOL_BINDING_WEAK)
return make_error<GenericBinaryError>("undefined weak global symbol",
object_error::parse_failed);
if (IsDefined) {
Info.Name = readString(Ctx);
unsigned GlobalIndex = Info.ElementIndex - NumImportedGlobals;
wasm::WasmGlobal &Global = Globals[GlobalIndex];
GlobalType = &Global.Type;
if (Global.SymbolName.empty())
Global.SymbolName = Info.Name;
} else {
wasm::WasmImport &Import = *ImportedGlobals[Info.ElementIndex];
- Info.Name = Import.Field;
+ if ((Info.Flags & wasm::WASM_SYMBOL_EXPLICIT_NAME) != 0)
+ Info.Name = readString(Ctx);
+ else
+ Info.Name = Import.Field;
GlobalType = &Import.Global;
+ Info.ImportName = Import.Field;
+ Info.ImportModule = Import.Module;
}
break;
case wasm::WASM_SYMBOL_TYPE_DATA:
Info.Name = readString(Ctx);
if (IsDefined) {
uint32_t Index = readVaruint32(Ctx);
if (Index >= DataSegments.size())
return make_error<GenericBinaryError>("invalid data symbol index",
object_error::parse_failed);
uint32_t Offset = readVaruint32(Ctx);
uint32_t Size = readVaruint32(Ctx);
if (Offset + Size > DataSegments[Index].Data.Content.size())
return make_error<GenericBinaryError>("invalid data symbol offset",
object_error::parse_failed);
Info.DataRef = wasm::WasmDataReference{Index, Offset, Size};
}
break;
case wasm::WASM_SYMBOL_TYPE_SECTION: {
if ((Info.Flags & wasm::WASM_SYMBOL_BINDING_MASK) !=
wasm::WASM_SYMBOL_BINDING_LOCAL)
return make_error<GenericBinaryError>(
"Section symbols must have local binding",
object_error::parse_failed);
Info.ElementIndex = readVaruint32(Ctx);
// Use somewhat unique section name as symbol name.
StringRef SectionName = Sections[Info.ElementIndex].Name;
Info.Name = SectionName;
break;
}
case wasm::WASM_SYMBOL_TYPE_EVENT: {
Info.ElementIndex = readVaruint32(Ctx);
if (!isValidEventIndex(Info.ElementIndex) ||
IsDefined != isDefinedEventIndex(Info.ElementIndex))
return make_error<GenericBinaryError>("invalid event symbol index",
object_error::parse_failed);
if (!IsDefined && (Info.Flags & wasm::WASM_SYMBOL_BINDING_MASK) ==
wasm::WASM_SYMBOL_BINDING_WEAK)
return make_error<GenericBinaryError>("undefined weak global symbol",
object_error::parse_failed);
if (IsDefined) {
Info.Name = readString(Ctx);
unsigned EventIndex = Info.ElementIndex - NumImportedEvents;
wasm::WasmEvent &Event = Events[EventIndex];
Signature = &Signatures[Event.Type.SigIndex];
EventType = &Event.Type;
if (Event.SymbolName.empty())
Event.SymbolName = Info.Name;
} else {
wasm::WasmImport &Import = *ImportedEvents[Info.ElementIndex];
+ if ((Info.Flags & wasm::WASM_SYMBOL_EXPLICIT_NAME) != 0)
+ Info.Name = readString(Ctx);
+ else
+ Info.Name = Import.Field;
EventType = &Import.Event;
Signature = &Signatures[EventType->SigIndex];
- Info.Name = Import.Field;
+ Info.ImportName = Import.Field;
+ Info.ImportModule = Import.Module;
}
break;
}
default:
return make_error<GenericBinaryError>("Invalid symbol type",
object_error::parse_failed);
}
if ((Info.Flags & wasm::WASM_SYMBOL_BINDING_MASK) !=
wasm::WASM_SYMBOL_BINDING_LOCAL &&
!SymbolNames.insert(Info.Name).second)
return make_error<GenericBinaryError>("Duplicate symbol name " +
Twine(Info.Name),
object_error::parse_failed);
LinkingData.SymbolTable.emplace_back(Info);
Symbols.emplace_back(LinkingData.SymbolTable.back(), GlobalType, EventType,
Signature);
LLVM_DEBUG(dbgs() << "Adding symbol: " << Symbols.back() << "\n");
}
return Error::success();
}
Error WasmObjectFile::parseLinkingSectionComdat(ReadContext &Ctx) {
uint32_t ComdatCount = readVaruint32(Ctx);
StringSet<> ComdatSet;
for (unsigned ComdatIndex = 0; ComdatIndex < ComdatCount; ++ComdatIndex) {
StringRef Name = readString(Ctx);
if (Name.empty() || !ComdatSet.insert(Name).second)
return make_error<GenericBinaryError>("Bad/duplicate COMDAT name " +
Twine(Name),
object_error::parse_failed);
LinkingData.Comdats.emplace_back(Name);
uint32_t Flags = readVaruint32(Ctx);
if (Flags != 0)
return make_error<GenericBinaryError>("Unsupported COMDAT flags",
object_error::parse_failed);
uint32_t EntryCount = readVaruint32(Ctx);
while (EntryCount--) {
unsigned Kind = readVaruint32(Ctx);
unsigned Index = readVaruint32(Ctx);
switch (Kind) {
default:
return make_error<GenericBinaryError>("Invalid COMDAT entry type",
object_error::parse_failed);
case wasm::WASM_COMDAT_DATA:
if (Index >= DataSegments.size())
return make_error<GenericBinaryError>(
"COMDAT data index out of range", object_error::parse_failed);
if (DataSegments[Index].Data.Comdat != UINT32_MAX)
return make_error<GenericBinaryError>("Data segment in two COMDATs",
object_error::parse_failed);
DataSegments[Index].Data.Comdat = ComdatIndex;
break;
case wasm::WASM_COMDAT_FUNCTION:
if (!isDefinedFunctionIndex(Index))
return make_error<GenericBinaryError>(
"COMDAT function index out of range", object_error::parse_failed);
if (getDefinedFunction(Index).Comdat != UINT32_MAX)
return make_error<GenericBinaryError>("Function in two COMDATs",
object_error::parse_failed);
getDefinedFunction(Index).Comdat = ComdatIndex;
break;
}
}
}
return Error::success();
}
Error WasmObjectFile::parseRelocSection(StringRef Name, ReadContext &Ctx) {
uint32_t SectionIndex = readVaruint32(Ctx);
if (SectionIndex >= Sections.size())
return make_error<GenericBinaryError>("Invalid section index",
object_error::parse_failed);
WasmSection &Section = Sections[SectionIndex];
uint32_t RelocCount = readVaruint32(Ctx);
uint32_t EndOffset = Section.Content.size();
uint32_t PreviousOffset = 0;
while (RelocCount--) {
wasm::WasmRelocation Reloc = {};
Reloc.Type = readVaruint32(Ctx);
Reloc.Offset = readVaruint32(Ctx);
if (Reloc.Offset < PreviousOffset)
return make_error<GenericBinaryError>("Relocations not in offset order",
object_error::parse_failed);
PreviousOffset = Reloc.Offset;
Reloc.Index = readVaruint32(Ctx);
switch (Reloc.Type) {
case wasm::R_WEBASSEMBLY_FUNCTION_INDEX_LEB:
case wasm::R_WEBASSEMBLY_TABLE_INDEX_SLEB:
case wasm::R_WEBASSEMBLY_TABLE_INDEX_I32:
if (!isValidFunctionSymbol(Reloc.Index))
return make_error<GenericBinaryError>("Bad relocation function index",
object_error::parse_failed);
break;
case wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB:
if (Reloc.Index >= Signatures.size())
return make_error<GenericBinaryError>("Bad relocation type index",
object_error::parse_failed);
break;
case wasm::R_WEBASSEMBLY_GLOBAL_INDEX_LEB:
if (!isValidGlobalSymbol(Reloc.Index))
return make_error<GenericBinaryError>("Bad relocation global index",
object_error::parse_failed);
break;
case wasm::R_WEBASSEMBLY_EVENT_INDEX_LEB:
if (!isValidEventSymbol(Reloc.Index))
return make_error<GenericBinaryError>("Bad relocation event index",
object_error::parse_failed);
break;
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_LEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_SLEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32:
if (!isValidDataSymbol(Reloc.Index))
return make_error<GenericBinaryError>("Bad relocation data index",
object_error::parse_failed);
Reloc.Addend = readVarint32(Ctx);
break;
case wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32:
if (!isValidFunctionSymbol(Reloc.Index))
return make_error<GenericBinaryError>("Bad relocation function index",
object_error::parse_failed);
Reloc.Addend = readVarint32(Ctx);
break;
case wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32:
if (!isValidSectionSymbol(Reloc.Index))
return make_error<GenericBinaryError>("Bad relocation section index",
object_error::parse_failed);
Reloc.Addend = readVarint32(Ctx);
break;
default:
return make_error<GenericBinaryError>("Bad relocation type: " +
Twine(Reloc.Type),
object_error::parse_failed);
}
// Relocations must fit inside the section, and must appear in order. They
// also shouldn't overlap a function/element boundary, but we don't bother
// to check that.
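// Relocations against LEB128-encoded fields are assumed to occupy a 5-byte
// (padded) field in the section contents, while the *_I32 relocation types
// patch a plain 4-byte field, hence the size computation below.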
uint64_t Size = 5;
if (Reloc.Type == wasm::R_WEBASSEMBLY_TABLE_INDEX_I32 ||
Reloc.Type == wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32 ||
Reloc.Type == wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32 ||
Reloc.Type == wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32)
Size = 4;
if (Reloc.Offset + Size > EndOffset)
return make_error<GenericBinaryError>("Bad relocation offset",
object_error::parse_failed);
Section.Relocations.push_back(Reloc);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Reloc section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseCustomSection(WasmSection &Sec, ReadContext &Ctx) {
if (Sec.Name == "dylink") {
if (Error Err = parseDylinkSection(Ctx))
return Err;
} else if (Sec.Name == "name") {
if (Error Err = parseNameSection(Ctx))
return Err;
} else if (Sec.Name == "linking") {
if (Error Err = parseLinkingSection(Ctx))
return Err;
} else if (Sec.Name.startswith("reloc.")) {
if (Error Err = parseRelocSection(Sec.Name, Ctx))
return Err;
}
return Error::success();
}
Error WasmObjectFile::parseTypeSection(ReadContext &Ctx) {
uint32_t Count = readVaruint32(Ctx);
Signatures.reserve(Count);
while (Count--) {
wasm::WasmSignature Sig;
uint8_t Form = readUint8(Ctx);
if (Form != wasm::WASM_TYPE_FUNC) {
return make_error<GenericBinaryError>("Invalid signature type",
object_error::parse_failed);
}
uint32_t ParamCount = readVaruint32(Ctx);
Sig.Params.reserve(ParamCount);
while (ParamCount--) {
uint32_t ParamType = readUint8(Ctx);
Sig.Params.push_back(wasm::ValType(ParamType));
}
uint32_t ReturnCount = readVaruint32(Ctx);
if (ReturnCount) {
if (ReturnCount != 1) {
return make_error<GenericBinaryError>(
"Multiple return types not supported", object_error::parse_failed);
}
Sig.Returns.push_back(wasm::ValType(readUint8(Ctx)));
}
Signatures.push_back(std::move(Sig));
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Type section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseImportSection(ReadContext &Ctx) {
uint32_t Count = readVaruint32(Ctx);
Imports.reserve(Count);
for (uint32_t i = 0; i < Count; i++) {
wasm::WasmImport Im;
Im.Module = readString(Ctx);
Im.Field = readString(Ctx);
Im.Kind = readUint8(Ctx);
switch (Im.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
NumImportedFunctions++;
Im.SigIndex = readVaruint32(Ctx);
break;
case wasm::WASM_EXTERNAL_GLOBAL:
NumImportedGlobals++;
Im.Global.Type = readUint8(Ctx);
Im.Global.Mutable = readVaruint1(Ctx);
break;
case wasm::WASM_EXTERNAL_MEMORY:
Im.Memory = readLimits(Ctx);
break;
case wasm::WASM_EXTERNAL_TABLE:
Im.Table = readTable(Ctx);
if (Im.Table.ElemType != wasm::WASM_TYPE_FUNCREF)
return make_error<GenericBinaryError>("Invalid table element type",
object_error::parse_failed);
break;
case wasm::WASM_EXTERNAL_EVENT:
NumImportedEvents++;
Im.Event.Attribute = readVarint32(Ctx);
Im.Event.SigIndex = readVarint32(Ctx);
break;
default:
return make_error<GenericBinaryError>("Unexpected import kind",
object_error::parse_failed);
}
Imports.push_back(Im);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Import section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseFunctionSection(ReadContext &Ctx) {
uint32_t Count = readVaruint32(Ctx);
FunctionTypes.reserve(Count);
uint32_t NumTypes = Signatures.size();
while (Count--) {
uint32_t Type = readVaruint32(Ctx);
if (Type >= NumTypes)
return make_error<GenericBinaryError>("Invalid function type",
object_error::parse_failed);
FunctionTypes.push_back(Type);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Function section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseTableSection(ReadContext &Ctx) {
uint32_t Count = readVaruint32(Ctx);
Tables.reserve(Count);
while (Count--) {
Tables.push_back(readTable(Ctx));
if (Tables.back().ElemType != wasm::WASM_TYPE_FUNCREF) {
return make_error<GenericBinaryError>("Invalid table element type",
object_error::parse_failed);
}
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Table section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseMemorySection(ReadContext &Ctx) {
uint32_t Count = readVaruint32(Ctx);
Memories.reserve(Count);
while (Count--) {
Memories.push_back(readLimits(Ctx));
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Memory section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseGlobalSection(ReadContext &Ctx) {
GlobalSection = Sections.size();
uint32_t Count = readVaruint32(Ctx);
Globals.reserve(Count);
while (Count--) {
wasm::WasmGlobal Global;
Global.Index = NumImportedGlobals + Globals.size();
Global.Type.Type = readUint8(Ctx);
Global.Type.Mutable = readVaruint1(Ctx);
if (Error Err = readInitExpr(Global.InitExpr, Ctx))
return Err;
Globals.push_back(Global);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Global section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseEventSection(ReadContext &Ctx) {
EventSection = Sections.size();
uint32_t Count = readVarint32(Ctx);
Events.reserve(Count);
while (Count--) {
wasm::WasmEvent Event;
Event.Index = NumImportedEvents + Events.size();
Event.Type.Attribute = readVaruint32(Ctx);
Event.Type.SigIndex = readVarint32(Ctx);
Events.push_back(Event);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Event section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseExportSection(ReadContext &Ctx) {
uint32_t Count = readVaruint32(Ctx);
Exports.reserve(Count);
for (uint32_t i = 0; i < Count; i++) {
wasm::WasmExport Ex;
Ex.Name = readString(Ctx);
Ex.Kind = readUint8(Ctx);
Ex.Index = readVaruint32(Ctx);
switch (Ex.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
if (!isValidFunctionIndex(Ex.Index))
return make_error<GenericBinaryError>("Invalid function export",
object_error::parse_failed);
break;
case wasm::WASM_EXTERNAL_GLOBAL:
if (!isValidGlobalIndex(Ex.Index))
return make_error<GenericBinaryError>("Invalid global export",
object_error::parse_failed);
break;
case wasm::WASM_EXTERNAL_EVENT:
if (!isValidEventIndex(Ex.Index))
return make_error<GenericBinaryError>("Invalid event export",
object_error::parse_failed);
break;
case wasm::WASM_EXTERNAL_MEMORY:
case wasm::WASM_EXTERNAL_TABLE:
break;
default:
return make_error<GenericBinaryError>("Unexpected export kind",
object_error::parse_failed);
}
Exports.push_back(Ex);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Export section ended prematurely",
object_error::parse_failed);
return Error::success();
}
bool WasmObjectFile::isValidFunctionIndex(uint32_t Index) const {
return Index < NumImportedFunctions + FunctionTypes.size();
}
bool WasmObjectFile::isDefinedFunctionIndex(uint32_t Index) const {
return Index >= NumImportedFunctions && isValidFunctionIndex(Index);
}
bool WasmObjectFile::isValidGlobalIndex(uint32_t Index) const {
return Index < NumImportedGlobals + Globals.size();
}
bool WasmObjectFile::isDefinedGlobalIndex(uint32_t Index) const {
return Index >= NumImportedGlobals && isValidGlobalIndex(Index);
}
bool WasmObjectFile::isValidEventIndex(uint32_t Index) const {
return Index < NumImportedEvents + Events.size();
}
bool WasmObjectFile::isDefinedEventIndex(uint32_t Index) const {
return Index >= NumImportedEvents && isValidEventIndex(Index);
}
bool WasmObjectFile::isValidFunctionSymbol(uint32_t Index) const {
return Index < Symbols.size() && Symbols[Index].isTypeFunction();
}
bool WasmObjectFile::isValidGlobalSymbol(uint32_t Index) const {
return Index < Symbols.size() && Symbols[Index].isTypeGlobal();
}
bool WasmObjectFile::isValidEventSymbol(uint32_t Index) const {
return Index < Symbols.size() && Symbols[Index].isTypeEvent();
}
bool WasmObjectFile::isValidDataSymbol(uint32_t Index) const {
return Index < Symbols.size() && Symbols[Index].isTypeData();
}
bool WasmObjectFile::isValidSectionSymbol(uint32_t Index) const {
return Index < Symbols.size() && Symbols[Index].isTypeSection();
}
wasm::WasmFunction &WasmObjectFile::getDefinedFunction(uint32_t Index) {
assert(isDefinedFunctionIndex(Index));
return Functions[Index - NumImportedFunctions];
}
wasm::WasmGlobal &WasmObjectFile::getDefinedGlobal(uint32_t Index) {
assert(isDefinedGlobalIndex(Index));
return Globals[Index - NumImportedGlobals];
}
wasm::WasmEvent &WasmObjectFile::getDefinedEvent(uint32_t Index) {
assert(isDefinedEventIndex(Index));
return Events[Index - NumImportedEvents];
}
Error WasmObjectFile::parseStartSection(ReadContext &Ctx) {
StartFunction = readVaruint32(Ctx);
if (!isValidFunctionIndex(StartFunction))
return make_error<GenericBinaryError>("Invalid start function",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseCodeSection(ReadContext &Ctx) {
CodeSection = Sections.size();
uint32_t FunctionCount = readVaruint32(Ctx);
if (FunctionCount != FunctionTypes.size()) {
return make_error<GenericBinaryError>("Invalid function count",
object_error::parse_failed);
}
while (FunctionCount--) {
wasm::WasmFunction Function;
const uint8_t *FunctionStart = Ctx.Ptr;
uint32_t Size = readVaruint32(Ctx);
const uint8_t *FunctionEnd = Ctx.Ptr + Size;
Function.CodeOffset = Ctx.Ptr - FunctionStart;
Function.Index = NumImportedFunctions + Functions.size();
Function.CodeSectionOffset = FunctionStart - Ctx.Start;
Function.Size = FunctionEnd - FunctionStart;
uint32_t NumLocalDecls = readVaruint32(Ctx);
Function.Locals.reserve(NumLocalDecls);
while (NumLocalDecls--) {
wasm::WasmLocalDecl Decl;
Decl.Count = readVaruint32(Ctx);
Decl.Type = readUint8(Ctx);
Function.Locals.push_back(Decl);
}
uint32_t BodySize = FunctionEnd - Ctx.Ptr;
Function.Body = ArrayRef<uint8_t>(Ctx.Ptr, BodySize);
// This will be set later when reading in the linking metadata section.
Function.Comdat = UINT32_MAX;
Ctx.Ptr += BodySize;
assert(Ctx.Ptr == FunctionEnd);
Functions.push_back(Function);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Code section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseElemSection(ReadContext &Ctx) {
uint32_t Count = readVaruint32(Ctx);
ElemSegments.reserve(Count);
while (Count--) {
wasm::WasmElemSegment Segment;
Segment.TableIndex = readVaruint32(Ctx);
if (Segment.TableIndex != 0) {
return make_error<GenericBinaryError>("Invalid TableIndex",
object_error::parse_failed);
}
if (Error Err = readInitExpr(Segment.Offset, Ctx))
return Err;
uint32_t NumElems = readVaruint32(Ctx);
while (NumElems--) {
Segment.Functions.push_back(readVaruint32(Ctx));
}
ElemSegments.push_back(Segment);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Elem section ended prematurely",
object_error::parse_failed);
return Error::success();
}
Error WasmObjectFile::parseDataSection(ReadContext &Ctx) {
DataSection = Sections.size();
uint32_t Count = readVaruint32(Ctx);
DataSegments.reserve(Count);
while (Count--) {
WasmSegment Segment;
Segment.Data.MemoryIndex = readVaruint32(Ctx);
if (Error Err = readInitExpr(Segment.Data.Offset, Ctx))
return Err;
uint32_t Size = readVaruint32(Ctx);
if (Size > (size_t)(Ctx.End - Ctx.Ptr))
return make_error<GenericBinaryError>("Invalid segment size",
object_error::parse_failed);
Segment.Data.Content = ArrayRef<uint8_t>(Ctx.Ptr, Size);
// The rest of these Data fields are set later, when reading in the linking
// metadata section.
Segment.Data.Alignment = 0;
Segment.Data.Flags = 0;
Segment.Data.Comdat = UINT32_MAX;
Segment.SectionOffset = Ctx.Ptr - Ctx.Start;
Ctx.Ptr += Size;
DataSegments.push_back(Segment);
}
if (Ctx.Ptr != Ctx.End)
return make_error<GenericBinaryError>("Data section ended prematurely",
object_error::parse_failed);
return Error::success();
}
const uint8_t *WasmObjectFile::getPtr(size_t Offset) const {
return reinterpret_cast<const uint8_t *>(getData().data() + Offset);
}
const wasm::WasmObjectHeader &WasmObjectFile::getHeader() const {
return Header;
}
void WasmObjectFile::moveSymbolNext(DataRefImpl &Symb) const { Symb.d.a++; }
uint32_t WasmObjectFile::getSymbolFlags(DataRefImpl Symb) const {
uint32_t Result = SymbolRef::SF_None;
const WasmSymbol &Sym = getWasmSymbol(Symb);
LLVM_DEBUG(dbgs() << "getSymbolFlags: ptr=" << &Sym << " " << Sym << "\n");
if (Sym.isBindingWeak())
Result |= SymbolRef::SF_Weak;
if (!Sym.isBindingLocal())
Result |= SymbolRef::SF_Global;
if (Sym.isHidden())
Result |= SymbolRef::SF_Hidden;
if (!Sym.isDefined())
Result |= SymbolRef::SF_Undefined;
if (Sym.isTypeFunction())
Result |= SymbolRef::SF_Executable;
return Result;
}
basic_symbol_iterator WasmObjectFile::symbol_begin() const {
DataRefImpl Ref;
Ref.d.a = 0;
return BasicSymbolRef(Ref, this);
}
basic_symbol_iterator WasmObjectFile::symbol_end() const {
DataRefImpl Ref;
Ref.d.a = Symbols.size();
return BasicSymbolRef(Ref, this);
}
const WasmSymbol &WasmObjectFile::getWasmSymbol(const DataRefImpl &Symb) const {
return Symbols[Symb.d.a];
}
const WasmSymbol &WasmObjectFile::getWasmSymbol(const SymbolRef &Symb) const {
return getWasmSymbol(Symb.getRawDataRefImpl());
}
Expected<StringRef> WasmObjectFile::getSymbolName(DataRefImpl Symb) const {
return getWasmSymbol(Symb).Info.Name;
}
Expected<uint64_t> WasmObjectFile::getSymbolAddress(DataRefImpl Symb) const {
return getSymbolValue(Symb);
}
uint64_t WasmObjectFile::getWasmSymbolValue(const WasmSymbol &Sym) const {
switch (Sym.Info.Kind) {
case wasm::WASM_SYMBOL_TYPE_FUNCTION:
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
case wasm::WASM_SYMBOL_TYPE_EVENT:
return Sym.Info.ElementIndex;
case wasm::WASM_SYMBOL_TYPE_DATA: {
// The value of a data symbol is the segment offset, plus the symbol
// offset within the segment.
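// For example, a data symbol at DataRef.Offset 16 within a segment placed
// at i32.const 1024 reports a value of 1040.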
uint32_t SegmentIndex = Sym.Info.DataRef.Segment;
const wasm::WasmDataSegment &Segment = DataSegments[SegmentIndex].Data;
assert(Segment.Offset.Opcode == wasm::WASM_OPCODE_I32_CONST);
return Segment.Offset.Value.Int32 + Sym.Info.DataRef.Offset;
}
case wasm::WASM_SYMBOL_TYPE_SECTION:
return 0;
}
llvm_unreachable("invalid symbol type");
}
uint64_t WasmObjectFile::getSymbolValueImpl(DataRefImpl Symb) const {
return getWasmSymbolValue(getWasmSymbol(Symb));
}
uint32_t WasmObjectFile::getSymbolAlignment(DataRefImpl Symb) const {
llvm_unreachable("not yet implemented");
return 0;
}
uint64_t WasmObjectFile::getCommonSymbolSizeImpl(DataRefImpl Symb) const {
llvm_unreachable("not yet implemented");
return 0;
}
Expected<SymbolRef::Type>
WasmObjectFile::getSymbolType(DataRefImpl Symb) const {
const WasmSymbol &Sym = getWasmSymbol(Symb);
switch (Sym.Info.Kind) {
case wasm::WASM_SYMBOL_TYPE_FUNCTION:
return SymbolRef::ST_Function;
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
return SymbolRef::ST_Other;
case wasm::WASM_SYMBOL_TYPE_DATA:
return SymbolRef::ST_Data;
case wasm::WASM_SYMBOL_TYPE_SECTION:
return SymbolRef::ST_Debug;
case wasm::WASM_SYMBOL_TYPE_EVENT:
return SymbolRef::ST_Other;
}
llvm_unreachable("Unknown WasmSymbol::SymbolType");
return SymbolRef::ST_Other;
}
Expected<section_iterator>
WasmObjectFile::getSymbolSection(DataRefImpl Symb) const {
const WasmSymbol &Sym = getWasmSymbol(Symb);
if (Sym.isUndefined())
return section_end();
DataRefImpl Ref;
switch (Sym.Info.Kind) {
case wasm::WASM_SYMBOL_TYPE_FUNCTION:
Ref.d.a = CodeSection;
break;
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
Ref.d.a = GlobalSection;
break;
case wasm::WASM_SYMBOL_TYPE_DATA:
Ref.d.a = DataSection;
break;
case wasm::WASM_SYMBOL_TYPE_SECTION:
Ref.d.a = Sym.Info.ElementIndex;
break;
case wasm::WASM_SYMBOL_TYPE_EVENT:
Ref.d.a = EventSection;
break;
default:
llvm_unreachable("Unknown WasmSymbol::SymbolType");
}
return section_iterator(SectionRef(Ref, this));
}
void WasmObjectFile::moveSectionNext(DataRefImpl &Sec) const { Sec.d.a++; }
std::error_code WasmObjectFile::getSectionName(DataRefImpl Sec,
StringRef &Res) const {
const WasmSection &S = Sections[Sec.d.a];
#define ECase(X) \
case wasm::WASM_SEC_##X: \
Res = #X; \
break
switch (S.Type) {
ECase(TYPE);
ECase(IMPORT);
ECase(FUNCTION);
ECase(TABLE);
ECase(MEMORY);
ECase(GLOBAL);
ECase(EVENT);
ECase(EXPORT);
ECase(START);
ECase(ELEM);
ECase(CODE);
ECase(DATA);
case wasm::WASM_SEC_CUSTOM:
Res = S.Name;
break;
default:
return object_error::invalid_section_index;
}
#undef ECase
return std::error_code();
}
uint64_t WasmObjectFile::getSectionAddress(DataRefImpl Sec) const { return 0; }
uint64_t WasmObjectFile::getSectionIndex(DataRefImpl Sec) const {
return Sec.d.a;
}
uint64_t WasmObjectFile::getSectionSize(DataRefImpl Sec) const {
const WasmSection &S = Sections[Sec.d.a];
return S.Content.size();
}
std::error_code WasmObjectFile::getSectionContents(DataRefImpl Sec,
StringRef &Res) const {
const WasmSection &S = Sections[Sec.d.a];
// This will never fail since wasm sections can never be empty (user-sections
// must have a name and non-user sections each have a defined structure).
Res = StringRef(reinterpret_cast<const char *>(S.Content.data()),
S.Content.size());
return std::error_code();
}
uint64_t WasmObjectFile::getSectionAlignment(DataRefImpl Sec) const {
return 1;
}
bool WasmObjectFile::isSectionCompressed(DataRefImpl Sec) const {
return false;
}
bool WasmObjectFile::isSectionText(DataRefImpl Sec) const {
return getWasmSection(Sec).Type == wasm::WASM_SEC_CODE;
}
bool WasmObjectFile::isSectionData(DataRefImpl Sec) const {
return getWasmSection(Sec).Type == wasm::WASM_SEC_DATA;
}
bool WasmObjectFile::isSectionBSS(DataRefImpl Sec) const { return false; }
bool WasmObjectFile::isSectionVirtual(DataRefImpl Sec) const { return false; }
bool WasmObjectFile::isSectionBitcode(DataRefImpl Sec) const { return false; }
relocation_iterator WasmObjectFile::section_rel_begin(DataRefImpl Ref) const {
DataRefImpl RelocRef;
RelocRef.d.a = Ref.d.a;
RelocRef.d.b = 0;
return relocation_iterator(RelocationRef(RelocRef, this));
}
relocation_iterator WasmObjectFile::section_rel_end(DataRefImpl Ref) const {
const WasmSection &Sec = getWasmSection(Ref);
DataRefImpl RelocRef;
RelocRef.d.a = Ref.d.a;
RelocRef.d.b = Sec.Relocations.size();
return relocation_iterator(RelocationRef(RelocRef, this));
}
void WasmObjectFile::moveRelocationNext(DataRefImpl &Rel) const { Rel.d.b++; }
uint64_t WasmObjectFile::getRelocationOffset(DataRefImpl Ref) const {
const wasm::WasmRelocation &Rel = getWasmRelocation(Ref);
return Rel.Offset;
}
symbol_iterator WasmObjectFile::getRelocationSymbol(DataRefImpl Ref) const {
const wasm::WasmRelocation &Rel = getWasmRelocation(Ref);
if (Rel.Type == wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB)
return symbol_end();
DataRefImpl Sym;
Sym.d.a = Rel.Index;
Sym.d.b = 0;
return symbol_iterator(SymbolRef(Sym, this));
}
uint64_t WasmObjectFile::getRelocationType(DataRefImpl Ref) const {
const wasm::WasmRelocation &Rel = getWasmRelocation(Ref);
return Rel.Type;
}
void WasmObjectFile::getRelocationTypeName(
DataRefImpl Ref, SmallVectorImpl<char> &Result) const {
const wasm::WasmRelocation &Rel = getWasmRelocation(Ref);
StringRef Res = "Unknown";
#define WASM_RELOC(name, value) \
case wasm::name: \
Res = #name; \
break;
switch (Rel.Type) {
#include "llvm/BinaryFormat/WasmRelocs.def"
}
#undef WASM_RELOC
Result.append(Res.begin(), Res.end());
}
section_iterator WasmObjectFile::section_begin() const {
DataRefImpl Ref;
Ref.d.a = 0;
return section_iterator(SectionRef(Ref, this));
}
section_iterator WasmObjectFile::section_end() const {
DataRefImpl Ref;
Ref.d.a = Sections.size();
return section_iterator(SectionRef(Ref, this));
}
uint8_t WasmObjectFile::getBytesInAddress() const { return 4; }
StringRef WasmObjectFile::getFileFormatName() const { return "WASM"; }
Triple::ArchType WasmObjectFile::getArch() const { return Triple::wasm32; }
SubtargetFeatures WasmObjectFile::getFeatures() const {
return SubtargetFeatures();
}
bool WasmObjectFile::isRelocatableObject() const { return HasLinkingSection; }
bool WasmObjectFile::isSharedObject() const { return HasDylinkSection; }
const WasmSection &WasmObjectFile::getWasmSection(DataRefImpl Ref) const {
assert(Ref.d.a < Sections.size());
return Sections[Ref.d.a];
}
const WasmSection &
WasmObjectFile::getWasmSection(const SectionRef &Section) const {
return getWasmSection(Section.getRawDataRefImpl());
}
const wasm::WasmRelocation &
WasmObjectFile::getWasmRelocation(const RelocationRef &Ref) const {
return getWasmRelocation(Ref.getRawDataRefImpl());
}
const wasm::WasmRelocation &
WasmObjectFile::getWasmRelocation(DataRefImpl Ref) const {
assert(Ref.d.a < Sections.size());
const WasmSection &Sec = Sections[Ref.d.a];
assert(Ref.d.b < Sec.Relocations.size());
return Sec.Relocations[Ref.d.b];
}
int WasmSectionOrderChecker::getSectionOrder(unsigned ID,
StringRef CustomSectionName) {
switch (ID) {
case wasm::WASM_SEC_CUSTOM:
return StringSwitch<unsigned>(CustomSectionName)
.Case("dylink", WASM_SEC_ORDER_DYLINK)
.Case("linking", WASM_SEC_ORDER_LINKING)
.StartsWith("reloc.", WASM_SEC_ORDER_RELOC)
.Case("name", WASM_SEC_ORDER_NAME)
.Case("producers", WASM_SEC_ORDER_PRODUCERS)
.Default(-1);
case wasm::WASM_SEC_TYPE:
return WASM_SEC_ORDER_TYPE;
case wasm::WASM_SEC_IMPORT:
return WASM_SEC_ORDER_IMPORT;
case wasm::WASM_SEC_FUNCTION:
return WASM_SEC_ORDER_FUNCTION;
case wasm::WASM_SEC_TABLE:
return WASM_SEC_ORDER_TABLE;
case wasm::WASM_SEC_MEMORY:
return WASM_SEC_ORDER_MEMORY;
case wasm::WASM_SEC_GLOBAL:
return WASM_SEC_ORDER_GLOBAL;
case wasm::WASM_SEC_EXPORT:
return WASM_SEC_ORDER_EXPORT;
case wasm::WASM_SEC_START:
return WASM_SEC_ORDER_START;
case wasm::WASM_SEC_ELEM:
return WASM_SEC_ORDER_ELEM;
case wasm::WASM_SEC_CODE:
return WASM_SEC_ORDER_CODE;
case wasm::WASM_SEC_DATA:
return WASM_SEC_ORDER_DATA;
case wasm::WASM_SEC_DATACOUNT:
return WASM_SEC_ORDER_DATACOUNT;
case wasm::WASM_SEC_EVENT:
return WASM_SEC_ORDER_EVENT;
default:
llvm_unreachable("invalid section");
}
}
bool WasmSectionOrderChecker::isValidSectionOrder(unsigned ID,
StringRef CustomSectionName) {
int Order = getSectionOrder(ID, CustomSectionName);
if (Order == -1) // Skip unknown sections
return true;
// There can be multiple "reloc." sections. Otherwise there shouldn't be any
// duplicate section orders.
bool IsValid = (LastOrder == Order && Order == WASM_SEC_ORDER_RELOC) ||
LastOrder < Order;
LastOrder = Order;
return IsValid;
}
Index: vendor/llvm/dist-release_80/lib/Support/Unix/Threading.inc
===================================================================
--- vendor/llvm/dist-release_80/lib/Support/Unix/Threading.inc (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Support/Unix/Threading.inc (revision 344166)
@@ -1,214 +1,220 @@
//===- Unix/Threading.inc - Unix Threading Implementation ----- -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides the Unix specific implementation of Threading functions.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#if defined(__APPLE__)
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#endif
#include <pthread.h>
#if defined(__FreeBSD__) || defined(__OpenBSD__)
#include <pthread_np.h> // For pthread_getthreadid_np() / pthread_set_name_np()
#endif
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <errno.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <unistd.h>
#endif
#if defined(__NetBSD__)
#include <lwp.h> // For _lwp_self()
#endif
#if defined(__linux__)
#include <sys/syscall.h> // For syscall codes
#include <unistd.h> // For syscall()
#endif
namespace {
struct ThreadInfo {
void(*UserFn)(void *);
void *UserData;
};
}
static void *ExecuteOnThread_Dispatch(void *Arg) {
ThreadInfo *TI = reinterpret_cast<ThreadInfo*>(Arg);
TI->UserFn(TI->UserData);
return nullptr;
}
void llvm::llvm_execute_on_thread(void(*Fn)(void*), void *UserData,
unsigned RequestedStackSize) {
ThreadInfo Info = { Fn, UserData };
pthread_attr_t Attr;
pthread_t Thread;
// Construct the attributes object.
if (::pthread_attr_init(&Attr) != 0)
return;
// Set the requested stack size, if given.
if (RequestedStackSize != 0) {
if (::pthread_attr_setstacksize(&Attr, RequestedStackSize) != 0)
goto error;
}
// Construct and execute the thread.
if (::pthread_create(&Thread, &Attr, ExecuteOnThread_Dispatch, &Info) != 0)
goto error;
// Wait for the thread and clean up.
::pthread_join(Thread, nullptr);
error:
::pthread_attr_destroy(&Attr);
}
uint64_t llvm::get_threadid() {
#if defined(__APPLE__)
// Calling "mach_thread_self()" bumps the reference count on the thread
// port, so we need to deallocate it. mach_task_self() doesn't bump the ref
// count.
thread_port_t Self = mach_thread_self();
mach_port_deallocate(mach_task_self(), Self);
return Self;
#elif defined(__FreeBSD__)
return uint64_t(pthread_getthreadid_np());
#elif defined(__NetBSD__)
return uint64_t(_lwp_self());
#elif defined(__ANDROID__)
return uint64_t(gettid());
#elif defined(__linux__)
return uint64_t(syscall(SYS_gettid));
#else
return uint64_t(pthread_self());
#endif
}
static constexpr uint32_t get_max_thread_name_length_impl() {
#if defined(__NetBSD__)
return PTHREAD_MAX_NAMELEN_NP;
#elif defined(__APPLE__)
return 64;
#elif defined(__linux__)
#if HAVE_PTHREAD_SETNAME_NP
return 16;
#else
return 0;
#endif
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
return 16;
#elif defined(__OpenBSD__)
return 32;
#else
return 0;
#endif
}
uint32_t llvm::get_max_thread_name_length() {
return get_max_thread_name_length_impl();
}
void llvm::set_thread_name(const Twine &Name) {
// Make sure the input is null terminated.
SmallString<64> Storage;
StringRef NameStr = Name.toNullTerminatedStringRef(Storage);
// Truncate from the beginning, not the end, if the specified name is too
// long. For one, this ensures that the resulting string is still null
// terminated, but additionally the end of a long thread name will usually
// be more unique than the beginning, since a common pattern is for similar
// threads to share a common prefix.
// Note that the name length includes the null terminator.
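// For example, with a 16 byte limit (15 characters plus the terminator) a
// name like "llvm-worker-thread-42" would be truncated to "orker-thread-42".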
if (get_max_thread_name_length() > 0)
NameStr = NameStr.take_back(get_max_thread_name_length() - 1);
(void)NameStr;
#if defined(__linux__)
#if (defined(__GLIBC__) && defined(_GNU_SOURCE)) || defined(__ANDROID__)
#if HAVE_PTHREAD_SETNAME_NP
::pthread_setname_np(::pthread_self(), NameStr.data());
#endif
#endif
#elif defined(__FreeBSD__) || defined(__OpenBSD__)
::pthread_set_name_np(::pthread_self(), NameStr.data());
#elif defined(__NetBSD__)
::pthread_setname_np(::pthread_self(), "%s",
const_cast<char *>(NameStr.data()));
#elif defined(__APPLE__)
::pthread_setname_np(NameStr.data());
#endif
}
void llvm::get_thread_name(SmallVectorImpl<char> &Name) {
Name.clear();
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
int pid = ::getpid();
uint64_t tid = get_threadid();
struct kinfo_proc *kp = nullptr, *nkp;
size_t len = 0;
int error;
int ctl[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID | KERN_PROC_INC_THREAD,
(int)pid };
while (1) {
error = sysctl(ctl, 4, kp, &len, nullptr, 0);
if (kp == nullptr || (error != 0 && errno == ENOMEM)) {
// Add extra space in case threads are added before next call.
len += sizeof(*kp) + len / 10;
nkp = (struct kinfo_proc *)::realloc(kp, len);
if (nkp == nullptr) {
free(kp);
return;
}
kp = nkp;
continue;
}
if (error != 0)
len = 0;
break;
}
for (size_t i = 0; i < len / sizeof(*kp); i++) {
if (kp[i].ki_tid == (lwpid_t)tid) {
Name.append(kp[i].ki_tdname, kp[i].ki_tdname + strlen(kp[i].ki_tdname));
break;
}
}
free(kp);
return;
#elif defined(__NetBSD__)
constexpr uint32_t len = get_max_thread_name_length_impl();
char buf[len];
::pthread_getname_np(::pthread_self(), buf, len);
Name.append(buf, buf + strlen(buf));
+#elif defined(__OpenBSD__)
+ constexpr uint32_t len = get_max_thread_name_length_impl();
+ char buf[len];
+ ::pthread_get_name_np(::pthread_self(), buf, len);
+
+ Name.append(buf, buf + strlen(buf));
#elif defined(__linux__)
#if HAVE_PTHREAD_GETNAME_NP
constexpr uint32_t len = get_max_thread_name_length_impl();
char Buffer[len] = {'\0'}; // FIXME: working around MSan false positive.
if (0 == ::pthread_getname_np(::pthread_self(), Buffer, len))
Name.append(Buffer, Buffer + strlen(Buffer));
#endif
#endif
}
Index: vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64InstrInfo.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64InstrInfo.cpp (revision 344166)
@@ -1,5541 +1,5582 @@
//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"
static cl::opt<unsigned> TBZDisplacementBits(
"aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));
static cl::opt<unsigned> CBZDisplacementBits(
"aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));
static cl::opt<unsigned>
BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
cl::desc("Restrict range of Bcc instructions (DEBUG)"));
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
: AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP,
AArch64::CATCHRET),
RI(STI.getTargetTriple()), Subtarget(STI) {}
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
const MachineBasicBlock &MBB = *MI.getParent();
const MachineFunction *MF = MBB.getParent();
const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
if (MI.getOpcode() == AArch64::INLINEASM)
return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
// FIXME: We currently only handle pseudoinstructions that don't get expanded
// before the assembly printer.
unsigned NumBytes = 0;
const MCInstrDesc &Desc = MI.getDesc();
switch (Desc.getOpcode()) {
default:
// Anything not explicitly designated otherwise is a normal 4-byte insn.
NumBytes = 4;
break;
case TargetOpcode::DBG_VALUE:
case TargetOpcode::EH_LABEL:
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
NumBytes = 0;
break;
case TargetOpcode::STACKMAP:
// The upper bound for a stackmap intrinsic is the full length of its shadow
NumBytes = StackMapOpers(&MI).getNumPatchBytes();
assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
break;
case TargetOpcode::PATCHPOINT:
// The size of the patchpoint intrinsic is the number of bytes requested
NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
break;
case AArch64::TLSDESC_CALLSEQ:
// This gets lowered to an instruction sequence which takes 16 bytes
NumBytes = 16;
break;
case AArch64::JumpTableDest32:
case AArch64::JumpTableDest16:
case AArch64::JumpTableDest8:
NumBytes = 12;
break;
case AArch64::SPACE:
NumBytes = MI.getOperand(1).getImm();
break;
}
return NumBytes;
}
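// parseCondBranch encodes the branch condition into Cond as either {cc} for
// Bcc, {-1, opcode, reg} for CB[N]Z, or {-1, opcode, reg, bit} for TB[N]Z;
// reverseBranchCondition(), instantiateCondBranch() and insertSelect() below
// decode the same layout.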
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
SmallVectorImpl<MachineOperand> &Cond) {
// Block ends with fall-through condbranch.
switch (LastInst->getOpcode()) {
default:
llvm_unreachable("Unknown branch instruction?");
case AArch64::Bcc:
Target = LastInst->getOperand(1).getMBB();
Cond.push_back(LastInst->getOperand(0));
break;
case AArch64::CBZW:
case AArch64::CBZX:
case AArch64::CBNZW:
case AArch64::CBNZX:
Target = LastInst->getOperand(1).getMBB();
Cond.push_back(MachineOperand::CreateImm(-1));
Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
Cond.push_back(LastInst->getOperand(0));
break;
case AArch64::TBZW:
case AArch64::TBZX:
case AArch64::TBNZW:
case AArch64::TBNZX:
Target = LastInst->getOperand(2).getMBB();
Cond.push_back(MachineOperand::CreateImm(-1));
Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
Cond.push_back(LastInst->getOperand(0));
Cond.push_back(LastInst->getOperand(1));
}
}
static unsigned getBranchDisplacementBits(unsigned Opc) {
switch (Opc) {
default:
llvm_unreachable("unexpected opcode!");
case AArch64::B:
return 64;
case AArch64::TBNZW:
case AArch64::TBZW:
case AArch64::TBNZX:
case AArch64::TBZX:
return TBZDisplacementBits;
case AArch64::CBNZW:
case AArch64::CBZW:
case AArch64::CBNZX:
case AArch64::CBZX:
return CBZDisplacementBits;
case AArch64::Bcc:
return BCCDisplacementBits;
}
}
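// The displacement is a signed offset counted in 4-byte instruction words, so
// e.g. the 19-bit Bcc/CB[N]Z encodings reach roughly +/-1 MiB and the 14-bit
// TB[N]Z encodings roughly +/-32 KiB from the branch.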
bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
int64_t BrOffset) const {
unsigned Bits = getBranchDisplacementBits(BranchOp);
assert(Bits >= 3 && "max branch displacement must be enough to jump "
"over conditional branch expansion");
return isIntN(Bits, BrOffset / 4);
}
MachineBasicBlock *
AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default:
llvm_unreachable("unexpected opcode!");
case AArch64::B:
return MI.getOperand(0).getMBB();
case AArch64::TBZW:
case AArch64::TBNZW:
case AArch64::TBZX:
case AArch64::TBNZX:
return MI.getOperand(2).getMBB();
case AArch64::CBZW:
case AArch64::CBNZW:
case AArch64::CBZX:
case AArch64::CBNZX:
case AArch64::Bcc:
return MI.getOperand(1).getMBB();
}
}
// Branch analysis.
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
if (I == MBB.end())
return false;
if (!isUnpredicatedTerminator(*I))
return false;
// Get the last instruction in the block.
MachineInstr *LastInst = &*I;
// If there is only one terminator instruction, process it.
unsigned LastOpc = LastInst->getOpcode();
if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
if (isUncondBranchOpcode(LastOpc)) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
if (isCondBranchOpcode(LastOpc)) {
// Block ends with fall-through condbranch.
parseCondBranch(LastInst, TBB, Cond);
return false;
}
return true; // Can't handle indirect branch.
}
// Get the instruction before it if it is a terminator.
MachineInstr *SecondLastInst = &*I;
unsigned SecondLastOpc = SecondLastInst->getOpcode();
// If AllowModify is true and the block ends with two or more unconditional
// branches, delete all but the first unconditional branch.
if (AllowModify && isUncondBranchOpcode(LastOpc)) {
while (isUncondBranchOpcode(SecondLastOpc)) {
LastInst->eraseFromParent();
LastInst = SecondLastInst;
LastOpc = LastInst->getOpcode();
if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
// Return now; the only remaining terminator is an unconditional branch.
TBB = LastInst->getOperand(0).getMBB();
return false;
} else {
SecondLastInst = &*I;
SecondLastOpc = SecondLastInst->getOpcode();
}
}
}
// If there are three terminators, we don't know what sort of block this is.
if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
return true;
// If the block ends with a B and a Bcc, handle it.
if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
parseCondBranch(SecondLastInst, TBB, Cond);
FBB = LastInst->getOperand(0).getMBB();
return false;
}
// If the block ends with two unconditional branches, handle it. The second
// one is not executed, so remove it.
if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
TBB = SecondLastInst->getOperand(0).getMBB();
I = LastInst;
if (AllowModify)
I->eraseFromParent();
return false;
}
// ...likewise if it ends with an indirect branch followed by an unconditional
// branch.
if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
I = LastInst;
if (AllowModify)
I->eraseFromParent();
return true;
}
// Otherwise, can't handle this.
return true;
}
bool AArch64InstrInfo::reverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const {
if (Cond[0].getImm() != -1) {
// Regular Bcc
AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
} else {
// Folded compare-and-branch
switch (Cond[1].getImm()) {
default:
llvm_unreachable("Unknown conditional branch!");
case AArch64::CBZW:
Cond[1].setImm(AArch64::CBNZW);
break;
case AArch64::CBNZW:
Cond[1].setImm(AArch64::CBZW);
break;
case AArch64::CBZX:
Cond[1].setImm(AArch64::CBNZX);
break;
case AArch64::CBNZX:
Cond[1].setImm(AArch64::CBZX);
break;
case AArch64::TBZW:
Cond[1].setImm(AArch64::TBNZW);
break;
case AArch64::TBNZW:
Cond[1].setImm(AArch64::TBZW);
break;
case AArch64::TBZX:
Cond[1].setImm(AArch64::TBNZX);
break;
case AArch64::TBNZX:
Cond[1].setImm(AArch64::TBZX);
break;
}
}
return false;
}
unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
if (I == MBB.end())
return 0;
if (!isUncondBranchOpcode(I->getOpcode()) &&
!isCondBranchOpcode(I->getOpcode()))
return 0;
// Remove the branch.
I->eraseFromParent();
I = MBB.end();
if (I == MBB.begin()) {
if (BytesRemoved)
*BytesRemoved = 4;
return 1;
}
--I;
if (!isCondBranchOpcode(I->getOpcode())) {
if (BytesRemoved)
*BytesRemoved = 4;
return 1;
}
// Remove the branch.
I->eraseFromParent();
if (BytesRemoved)
*BytesRemoved = 8;
return 2;
}
void AArch64InstrInfo::instantiateCondBranch(
MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
ArrayRef<MachineOperand> Cond) const {
if (Cond[0].getImm() != -1) {
// Regular Bcc
BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
} else {
// Folded compare-and-branch
// Note that we use add() instead of addReg() to keep the operand flags.
const MachineInstrBuilder MIB =
BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
if (Cond.size() > 3)
MIB.addImm(Cond[3].getImm());
MIB.addMBB(TBB);
}
}
unsigned AArch64InstrInfo::insertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
// Shouldn't be a fall through.
assert(TBB && "insertBranch must not be told to insert a fallthrough");
if (!FBB) {
if (Cond.empty()) // Unconditional branch?
BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
else
instantiateCondBranch(MBB, DL, TBB, Cond);
if (BytesAdded)
*BytesAdded = 4;
return 1;
}
// Two-way conditional branch.
instantiateCondBranch(MBB, DL, TBB, Cond);
BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
if (BytesAdded)
*BytesAdded = 8;
return 2;
}
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
while (TargetRegisterInfo::isVirtualRegister(VReg)) {
const MachineInstr *DefMI = MRI.getVRegDef(VReg);
if (!DefMI->isFullCopy())
return VReg;
VReg = DefMI->getOperand(1).getReg();
}
return VReg;
}
// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
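// For example, a value defined by "add w1, w0, #1" can be selected with a
// CSINC that increments w0 directly, while "orn"/"sub" from the zero register
// fold into CSINV/CSNEG in the same way.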
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
unsigned *NewVReg = nullptr) {
VReg = removeCopies(MRI, VReg);
if (!TargetRegisterInfo::isVirtualRegister(VReg))
return 0;
bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
const MachineInstr *DefMI = MRI.getVRegDef(VReg);
unsigned Opc = 0;
unsigned SrcOpNum = 0;
switch (DefMI->getOpcode()) {
case AArch64::ADDSXri:
case AArch64::ADDSWri:
// if NZCV is used, do not fold.
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
return 0;
// fall-through to ADDXri and ADDWri.
LLVM_FALLTHROUGH;
case AArch64::ADDXri:
case AArch64::ADDWri:
// add x, 1 -> csinc.
if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
DefMI->getOperand(3).getImm() != 0)
return 0;
SrcOpNum = 1;
Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
break;
case AArch64::ORNXrr:
case AArch64::ORNWrr: {
// not x -> csinv, represented as orn dst, xzr, src.
unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
return 0;
SrcOpNum = 2;
Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
break;
}
case AArch64::SUBSXrr:
case AArch64::SUBSWrr:
// if NZCV is used, do not fold.
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
return 0;
// fall-through to SUBXrr and SUBWrr.
LLVM_FALLTHROUGH;
case AArch64::SUBXrr:
case AArch64::SUBWrr: {
// neg x -> csneg, represented as sub dst, xzr, src.
unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
return 0;
SrcOpNum = 2;
Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
break;
}
default:
return 0;
}
assert(Opc && SrcOpNum && "Missing parameters");
if (NewVReg)
*NewVReg = DefMI->getOperand(SrcOpNum).getReg();
return Opc;
}
bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles, int &TrueCycles,
int &FalseCycles) const {
// Check register classes.
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *RC =
RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
if (!RC)
return false;
// Expanding cbz/tbz requires an extra cycle of latency on the condition.
unsigned ExtraCondLat = Cond.size() != 1;
// GPRs are handled by csel.
// FIXME: Fold in x+1, -x, and ~x when applicable.
if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
// Single-cycle csel, csinc, csinv, and csneg.
CondCycles = 1 + ExtraCondLat;
TrueCycles = FalseCycles = 1;
if (canFoldIntoCSel(MRI, TrueReg))
TrueCycles = 0;
else if (canFoldIntoCSel(MRI, FalseReg))
FalseCycles = 0;
return true;
}
// Scalar floating point is handled by fcsel.
// FIXME: Form fabs, fmin, and fmax when applicable.
if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
AArch64::FPR32RegClass.hasSubClassEq(RC)) {
CondCycles = 5 + ExtraCondLat;
TrueCycles = FalseCycles = 2;
return true;
}
// Can't do vectors.
return false;
}
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
// Parse the condition code, see parseCondBranch() above.
AArch64CC::CondCode CC;
switch (Cond.size()) {
default:
llvm_unreachable("Unknown condition opcode in Cond");
case 1: // b.cc
CC = AArch64CC::CondCode(Cond[0].getImm());
break;
case 3: { // cbz/cbnz
// We must insert a compare against 0.
bool Is64Bit;
switch (Cond[1].getImm()) {
default:
llvm_unreachable("Unknown branch opcode in Cond");
case AArch64::CBZW:
Is64Bit = false;
CC = AArch64CC::EQ;
break;
case AArch64::CBZX:
Is64Bit = true;
CC = AArch64CC::EQ;
break;
case AArch64::CBNZW:
Is64Bit = false;
CC = AArch64CC::NE;
break;
case AArch64::CBNZX:
Is64Bit = true;
CC = AArch64CC::NE;
break;
}
unsigned SrcReg = Cond[2].getReg();
if (Is64Bit) {
// cmp reg, #0 is actually subs xzr, reg, #0.
MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
.addReg(SrcReg)
.addImm(0)
.addImm(0);
} else {
MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
.addReg(SrcReg)
.addImm(0)
.addImm(0);
}
break;
}
case 4: { // tbz/tbnz
// We must insert a tst instruction.
switch (Cond[1].getImm()) {
default:
llvm_unreachable("Unknown branch opcode in Cond");
case AArch64::TBZW:
case AArch64::TBZX:
CC = AArch64CC::EQ;
break;
case AArch64::TBNZW:
case AArch64::TBNZX:
CC = AArch64CC::NE;
break;
}
// cmp reg, #foo is actually ands xzr, reg, #1<<foo.
if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
.addReg(Cond[2].getReg())
.addImm(
AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
else
BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
.addReg(Cond[2].getReg())
.addImm(
AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
break;
}
}
unsigned Opc = 0;
const TargetRegisterClass *RC = nullptr;
bool TryFold = false;
if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
RC = &AArch64::GPR64RegClass;
Opc = AArch64::CSELXr;
TryFold = true;
} else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
RC = &AArch64::GPR32RegClass;
Opc = AArch64::CSELWr;
TryFold = true;
} else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
RC = &AArch64::FPR64RegClass;
Opc = AArch64::FCSELDrrr;
} else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
RC = &AArch64::FPR32RegClass;
Opc = AArch64::FCSELSrrr;
}
assert(RC && "Unsupported regclass");
// Try folding simple instructions into the csel.
if (TryFold) {
unsigned NewVReg = 0;
unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
if (FoldedOpc) {
// The folded opcodes csinc, csinv and csneg apply the operation to
// FalseReg, so we need to invert the condition.
CC = AArch64CC::getInvertedCondCode(CC);
TrueReg = FalseReg;
} else
FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);
// Fold the operation. Leave any dead instructions for DCE to clean up.
if (FoldedOpc) {
FalseReg = NewVReg;
Opc = FoldedOpc;
// This extends the live range of NewVReg.
MRI.clearKillFlags(NewVReg);
}
}
// Pull all virtual registers into the appropriate class.
MRI.constrainRegClass(TrueReg, RC);
MRI.constrainRegClass(FalseReg, RC);
// Insert the csel.
BuildMI(MBB, I, DL, get(Opc), DstReg)
.addReg(TrueReg)
.addReg(FalseReg)
.addImm(CC);
}
/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
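/// For example, 0x00ff00ff is a valid 32-bit logical immediate (a repeating
/// 16-bit element of eight set bits) and can be materialized as a single ORR
/// from the zero register, whereas 0x12345678 is not.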
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
uint64_t Imm = MI.getOperand(1).getImm();
uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
uint64_t Encoding;
return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
if (!Subtarget.hasCustomCheapAsMoveHandling())
return MI.isAsCheapAsAMove();
const unsigned Opcode = MI.getOpcode();
// Firstly, check cases gated by features.
if (Subtarget.hasZeroCycleZeroingFP()) {
if (Opcode == AArch64::FMOVH0 ||
Opcode == AArch64::FMOVS0 ||
Opcode == AArch64::FMOVD0)
return true;
}
if (Subtarget.hasZeroCycleZeroingGP()) {
if (Opcode == TargetOpcode::COPY &&
(MI.getOperand(1).getReg() == AArch64::WZR ||
MI.getOperand(1).getReg() == AArch64::XZR))
return true;
}
// Secondly, check cases specific to sub-targets.
if (Subtarget.hasExynosCheapAsMoveHandling()) {
if (isExynosCheapAsMove(MI))
return true;
return MI.isAsCheapAsAMove();
}
// Finally, check generic cases.
switch (Opcode) {
default:
return false;
// add/sub on register without shift
case AArch64::ADDWri:
case AArch64::ADDXri:
case AArch64::SUBWri:
case AArch64::SUBXri:
return (MI.getOperand(3).getImm() == 0);
// logical ops on immediate
case AArch64::ANDWri:
case AArch64::ANDXri:
case AArch64::EORWri:
case AArch64::EORXri:
case AArch64::ORRWri:
case AArch64::ORRXri:
return true;
// logical ops on register without shift
case AArch64::ANDWrr:
case AArch64::ANDXrr:
case AArch64::BICWrr:
case AArch64::BICXrr:
case AArch64::EONWrr:
case AArch64::EONXrr:
case AArch64::EORWrr:
case AArch64::EORXrr:
case AArch64::ORNWrr:
case AArch64::ORNXrr:
case AArch64::ORRWrr:
case AArch64::ORRXrr:
return true;
// If MOVi32imm or MOVi64imm can be expanded into ORRWri or
// ORRXri, it is as cheap as MOV
case AArch64::MOVi32imm:
return canBeExpandedToORR(MI, 32);
case AArch64::MOVi64imm:
return canBeExpandedToORR(MI, 64);
}
llvm_unreachable("Unknown opcode to check as cheap as a move!");
}
bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
return false;
case AArch64::ADDWrs:
case AArch64::ADDXrs:
case AArch64::ADDSWrs:
case AArch64::ADDSXrs: {
unsigned Imm = MI.getOperand(3).getImm();
unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
if (ShiftVal == 0)
return true;
return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;
}
case AArch64::ADDWrx:
case AArch64::ADDXrx:
case AArch64::ADDXrx64:
case AArch64::ADDSWrx:
case AArch64::ADDSXrx:
case AArch64::ADDSXrx64: {
unsigned Imm = MI.getOperand(3).getImm();
switch (AArch64_AM::getArithExtendType(Imm)) {
default:
return false;
case AArch64_AM::UXTB:
case AArch64_AM::UXTH:
case AArch64_AM::UXTW:
case AArch64_AM::UXTX:
return AArch64_AM::getArithShiftValue(Imm) <= 4;
}
}
case AArch64::SUBWrs:
case AArch64::SUBSWrs: {
unsigned Imm = MI.getOperand(3).getImm();
unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
return ShiftVal == 0 ||
(AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);
}
case AArch64::SUBXrs:
case AArch64::SUBSXrs: {
unsigned Imm = MI.getOperand(3).getImm();
unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
return ShiftVal == 0 ||
(AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);
}
case AArch64::SUBWrx:
case AArch64::SUBXrx:
case AArch64::SUBXrx64:
case AArch64::SUBSWrx:
case AArch64::SUBSXrx:
case AArch64::SUBSXrx64: {
unsigned Imm = MI.getOperand(3).getImm();
switch (AArch64_AM::getArithExtendType(Imm)) {
default:
return false;
case AArch64_AM::UXTB:
case AArch64_AM::UXTH:
case AArch64_AM::UXTW:
case AArch64_AM::UXTX:
return AArch64_AM::getArithShiftValue(Imm) == 0;
}
}
case AArch64::LDRBBroW:
case AArch64::LDRBBroX:
case AArch64::LDRBroW:
case AArch64::LDRBroX:
case AArch64::LDRDroW:
case AArch64::LDRDroX:
case AArch64::LDRHHroW:
case AArch64::LDRHHroX:
case AArch64::LDRHroW:
case AArch64::LDRHroX:
case AArch64::LDRQroW:
case AArch64::LDRQroX:
case AArch64::LDRSBWroW:
case AArch64::LDRSBWroX:
case AArch64::LDRSBXroW:
case AArch64::LDRSBXroX:
case AArch64::LDRSHWroW:
case AArch64::LDRSHWroX:
case AArch64::LDRSHXroW:
case AArch64::LDRSHXroX:
case AArch64::LDRSWroW:
case AArch64::LDRSWroX:
case AArch64::LDRSroW:
case AArch64::LDRSroX:
case AArch64::LDRWroW:
case AArch64::LDRWroX:
case AArch64::LDRXroW:
case AArch64::LDRXroX:
case AArch64::PRFMroW:
case AArch64::PRFMroX:
case AArch64::STRBBroW:
case AArch64::STRBBroX:
case AArch64::STRBroW:
case AArch64::STRBroX:
case AArch64::STRDroW:
case AArch64::STRDroX:
case AArch64::STRHHroW:
case AArch64::STRHHroX:
case AArch64::STRHroW:
case AArch64::STRHroX:
case AArch64::STRQroW:
case AArch64::STRQroX:
case AArch64::STRSroW:
case AArch64::STRSroX:
case AArch64::STRWroW:
case AArch64::STRWroX:
case AArch64::STRXroW:
case AArch64::STRXroX: {
unsigned IsSigned = MI.getOperand(3).getImm();
return !IsSigned;
}
}
}
bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {
unsigned Opc = MI.getOpcode();
switch (Opc) {
default:
return false;
case AArch64::SEH_StackAlloc:
case AArch64::SEH_SaveFPLR:
case AArch64::SEH_SaveFPLR_X:
case AArch64::SEH_SaveReg:
case AArch64::SEH_SaveReg_X:
case AArch64::SEH_SaveRegP:
case AArch64::SEH_SaveRegP_X:
case AArch64::SEH_SaveFReg:
case AArch64::SEH_SaveFReg_X:
case AArch64::SEH_SaveFRegP:
case AArch64::SEH_SaveFRegP_X:
case AArch64::SEH_SetFP:
case AArch64::SEH_AddFP:
case AArch64::SEH_Nop:
case AArch64::SEH_PrologEnd:
case AArch64::SEH_EpilogStart:
case AArch64::SEH_EpilogEnd:
return true;
}
}
bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const {
switch (MI.getOpcode()) {
default:
return false;
case AArch64::SBFMXri: // aka sxtw
case AArch64::UBFMXri: // aka uxtw
// Check for the 32 -> 64 bit extension case, these instructions can do
// much more.
if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
return false;
// This is a signed or unsigned 32 -> 64 bit extension.
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
SubIdx = AArch64::sub_32;
return true;
}
}
bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
const TargetRegisterInfo *TRI = &getRegisterInfo();
MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
int64_t OffsetA = 0, OffsetB = 0;
unsigned WidthA = 0, WidthB = 0;
assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
return false;
// Retrieve the base, the offset from the base, and the width. Width
// is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
// the bases are identical, and the offset of the lower memory access plus
// its width does not overlap the offset of the higher memory access,
// then the memory accesses are disjoint.
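// For example, an 8-byte store at [x0, #0] and a 4-byte load at [x0, #8]
// are trivially disjoint because 0 + 8 <= 8.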
if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
if (BaseOpA->isIdenticalTo(*BaseOpB)) {
int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowOffset + LowWidth <= HighOffset)
return true;
}
}
return false;
}
bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const {
if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
return true;
switch (MI.getOpcode()) {
case AArch64::HINT:
// CSDB hints are scheduling barriers.
if (MI.getOperand(0).getImm() == 0x14)
return true;
break;
case AArch64::DSB:
case AArch64::ISB:
// DSB and ISB also are scheduling barriers.
return true;
default:;
}
return isSEHInstruction(MI);
}
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const {
// The first operand can be a frame index where we'd normally expect a
// register.
assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands");
if (!MI.getOperand(1).isReg())
return false;
switch (MI.getOpcode()) {
default:
break;
case AArch64::SUBSWrr:
case AArch64::SUBSWrs:
case AArch64::SUBSWrx:
case AArch64::SUBSXrr:
case AArch64::SUBSXrs:
case AArch64::SUBSXrx:
case AArch64::ADDSWrr:
case AArch64::ADDSWrs:
case AArch64::ADDSWrx:
case AArch64::ADDSXrr:
case AArch64::ADDSXrs:
case AArch64::ADDSXrx:
// Replace SUBSWrr with SUBWrr if NZCV is not used.
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = MI.getOperand(2).getReg();
CmpMask = ~0;
CmpValue = 0;
return true;
case AArch64::SUBSWri:
case AArch64::ADDSWri:
case AArch64::SUBSXri:
case AArch64::ADDSXri:
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
// FIXME: In order to convert CmpValue to 0 or 1
CmpValue = MI.getOperand(2).getImm() != 0;
return true;
case AArch64::ANDSWri:
case AArch64::ANDSXri:
// ANDS does not use the same encoding scheme as the other xxxS
// instructions.
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
// FIXME: The return value type of decodeLogicalImmediate is uint64_t,
// while the type of CmpValue is int. When converting uint64_t to int,
// the high 32 bits of the uint64_t will be lost.
// In fact this caused a bug in spec2006-483.xalancbmk.
// CmpValue is only used to compare with zero in optimizeCompareInstr.
CmpValue = AArch64_AM::decodeLogicalImmediate(
MI.getOperand(2).getImm(),
MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
return true;
}
return false;
}
static bool UpdateOperandRegClass(MachineInstr &Instr) {
MachineBasicBlock *MBB = Instr.getParent();
assert(MBB && "Can't get MachineBasicBlock here");
MachineFunction *MF = MBB->getParent();
assert(MF && "Can't get MachineFunction here");
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
MachineRegisterInfo *MRI = &MF->getRegInfo();
for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
++OpIdx) {
MachineOperand &MO = Instr.getOperand(OpIdx);
const TargetRegisterClass *OpRegCstraints =
Instr.getRegClassConstraint(OpIdx, TII, TRI);
// If there's no constraint, there's nothing to do.
if (!OpRegCstraints)
continue;
// If the operand is a frame index, there's nothing to do here.
// A frame index operand will resolve correctly during PEI.
if (MO.isFI())
continue;
assert(MO.isReg() &&
"Operand has register constraints without being a register!");
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (!OpRegCstraints->contains(Reg))
return false;
} else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
!MRI->constrainRegClass(Reg, OpRegCstraints))
return false;
}
return true;
}
/// Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
// Don't convert all compare instructions, because for some the zero register
// encoding becomes the sp register.
bool MIDefinesZeroReg = false;
if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
MIDefinesZeroReg = true;
switch (MI.getOpcode()) {
default:
return MI.getOpcode();
case AArch64::ADDSWrr:
return AArch64::ADDWrr;
case AArch64::ADDSWri:
return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
case AArch64::ADDSWrs:
return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
case AArch64::ADDSWrx:
return AArch64::ADDWrx;
case AArch64::ADDSXrr:
return AArch64::ADDXrr;
case AArch64::ADDSXri:
return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
case AArch64::ADDSXrs:
return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
case AArch64::ADDSXrx:
return AArch64::ADDXrx;
case AArch64::SUBSWrr:
return AArch64::SUBWrr;
case AArch64::SUBSWri:
return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
case AArch64::SUBSWrs:
return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
case AArch64::SUBSWrx:
return AArch64::SUBWrx;
case AArch64::SUBSXrr:
return AArch64::SUBXrr;
case AArch64::SUBSXri:
return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
case AArch64::SUBSXrs:
return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
case AArch64::SUBSXrx:
return AArch64::SUBXrx;
}
}
enum AccessKind { AK_Write = 0x01, AK_Read = 0x10, AK_All = 0x11 };
/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed the condition
/// flags are accessed on the path.
static bool areCFlagsAccessedBetweenInstrs(
MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
// Early exit if To is at the beginning of the BB.
if (To == To->getParent()->begin())
return true;
// Check whether the instructions are in the same basic block
// If not, assume the condition flags might get modified somewhere.
if (To->getParent() != From->getParent())
return true;
// From must be above To.
assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
[From](MachineInstr &MI) {
return MI.getIterator() == From;
}) != To->getParent()->rend());
// We iterate backward starting from \p To until we hit \p From.
for (--To; To != From; --To) {
const MachineInstr &Instr = *To;
if (((AccessToCheck & AK_Write) &&
Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
return true;
}
return false;
}
/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It can be a true compare
/// instruction when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if above there is an instruction producing a needed
/// condition code or an instruction which can be converted into such an
/// instruction.
/// Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
int CmpValue, const MachineRegisterInfo *MRI) const {
assert(CmpInstr.getParent());
assert(MRI);
// Replace SUBSWrr with SUBWrr if NZCV is not used.
int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
if (DeadNZCVIdx != -1) {
if (CmpInstr.definesRegister(AArch64::WZR) ||
CmpInstr.definesRegister(AArch64::XZR)) {
CmpInstr.eraseFromParent();
return true;
}
unsigned Opc = CmpInstr.getOpcode();
unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr);
if (NewOpc == Opc)
return false;
const MCInstrDesc &MCID = get(NewOpc);
CmpInstr.setDesc(MCID);
CmpInstr.RemoveOperand(DeadNZCVIdx);
bool succeeded = UpdateOperandRegClass(CmpInstr);
(void)succeeded;
assert(succeeded && "Some operands reg class are incompatible!");
return true;
}
// Continue only if we have a "ri" where immediate is zero.
// FIXME: CmpValue has already been converted to 0 or 1 in the analyzeCompare
// function.
assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
if (CmpValue != 0 || SrcReg2 != 0)
return false;
// CmpInstr is a compare instruction if its destination register is not used.
if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
return false;
return substituteCmpToZero(CmpInstr, SrcReg, MRI);
}
/// Get the opcode of the S version of Instr.
/// If Instr is already an S version, its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have an S
/// version or we are not interested in it.
static unsigned sForm(MachineInstr &Instr) {
switch (Instr.getOpcode()) {
default:
return AArch64::INSTRUCTION_LIST_END;
case AArch64::ADDSWrr:
case AArch64::ADDSWri:
case AArch64::ADDSXrr:
case AArch64::ADDSXri:
case AArch64::SUBSWrr:
case AArch64::SUBSWri:
case AArch64::SUBSXrr:
case AArch64::SUBSXri:
return Instr.getOpcode();
case AArch64::ADDWrr:
return AArch64::ADDSWrr;
case AArch64::ADDWri:
return AArch64::ADDSWri;
case AArch64::ADDXrr:
return AArch64::ADDSXrr;
case AArch64::ADDXri:
return AArch64::ADDSXri;
case AArch64::ADCWr:
return AArch64::ADCSWr;
case AArch64::ADCXr:
return AArch64::ADCSXr;
case AArch64::SUBWrr:
return AArch64::SUBSWrr;
case AArch64::SUBWri:
return AArch64::SUBSWri;
case AArch64::SUBXrr:
return AArch64::SUBSXrr;
case AArch64::SUBXri:
return AArch64::SUBSXri;
case AArch64::SBCWr:
return AArch64::SBCSWr;
case AArch64::SBCXr:
return AArch64::SBCSXr;
case AArch64::ANDWri:
return AArch64::ANDSWri;
case AArch64::ANDXri:
return AArch64::ANDSXri;
}
}
/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
for (auto *BB : MBB->successors())
if (BB->isLiveIn(AArch64::NZCV))
return true;
return false;
}
namespace {
struct UsedNZCV {
bool N = false;
bool Z = false;
bool C = false;
bool V = false;
UsedNZCV() = default;
UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
this->N |= UsedFlags.N;
this->Z |= UsedFlags.Z;
this->C |= UsedFlags.C;
this->V |= UsedFlags.V;
return *this;
}
};
} // end anonymous namespace
/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
switch (Instr.getOpcode()) {
default:
return AArch64CC::Invalid;
case AArch64::Bcc: {
int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
assert(Idx >= 2);
return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
}
case AArch64::CSINVWr:
case AArch64::CSINVXr:
case AArch64::CSINCWr:
case AArch64::CSINCXr:
case AArch64::CSELWr:
case AArch64::CSELXr:
case AArch64::CSNEGWr:
case AArch64::CSNEGXr:
case AArch64::FCSELSrrr:
case AArch64::FCSELDrrr: {
int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
assert(Idx >= 1);
return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
}
}
}
static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
assert(CC != AArch64CC::Invalid);
UsedNZCV UsedFlags;
switch (CC) {
default:
break;
case AArch64CC::EQ: // Z set
case AArch64CC::NE: // Z clear
UsedFlags.Z = true;
break;
case AArch64CC::HI: // Z clear and C set
case AArch64CC::LS: // Z set or C clear
UsedFlags.Z = true;
LLVM_FALLTHROUGH;
case AArch64CC::HS: // C set
case AArch64CC::LO: // C clear
UsedFlags.C = true;
break;
case AArch64CC::MI: // N set
case AArch64CC::PL: // N clear
UsedFlags.N = true;
break;
case AArch64CC::VS: // V set
case AArch64CC::VC: // V clear
UsedFlags.V = true;
break;
case AArch64CC::GT: // Z clear, N and V the same
case AArch64CC::LE: // Z set, N and V differ
UsedFlags.Z = true;
LLVM_FALLTHROUGH;
case AArch64CC::GE: // N and V the same
case AArch64CC::LT: // N and V differ
UsedFlags.N = true;
UsedFlags.V = true;
break;
}
return UsedFlags;
}
static bool isADDSRegImm(unsigned Opcode) {
return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
}
static bool isSUBSRegImm(unsigned Opcode) {
return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}
/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
/// - and, MI and CmpInstr are from the same MachineBB
/// - and, condition flags are not alive in successors of the CmpInstr parent
/// - and, if MI opcode is the S form there must be no defs of flags between
/// MI and CmpInstr
/// or if MI opcode is not the S form there must be neither defs of flags
/// nor uses of flags between MI and CmpInstr.
/// - and C/V flags are not used after CmpInstr
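/// For example, "subs wzr, w0, #0" immediately after "add w0, w1, w2" can be
/// removed once the add is rewritten to adds, provided only the N and Z flags
/// are consumed afterwards (e.g. by a following b.eq).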
static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
const TargetRegisterInfo *TRI) {
assert(MI);
assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
assert(CmpInstr);
const unsigned CmpOpcode = CmpInstr->getOpcode();
if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
return false;
if (MI->getParent() != CmpInstr->getParent())
return false;
if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
return false;
AccessKind AccessToCheck = AK_Write;
if (sForm(*MI) != MI->getOpcode())
AccessToCheck = AK_All;
if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
return false;
UsedNZCV NZCVUsedAfterCmp;
for (auto I = std::next(CmpInstr->getIterator()),
E = CmpInstr->getParent()->instr_end();
I != E; ++I) {
const MachineInstr &Instr = *I;
if (Instr.readsRegister(AArch64::NZCV, TRI)) {
AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
return false;
NZCVUsedAfterCmp |= getUsedNZCV(CC);
}
if (Instr.modifiesRegister(AArch64::NZCV, TRI))
break;
}
return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
}
/// Substitute an instruction comparing to zero with another instruction
/// which produces needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(
MachineInstr &CmpInstr, unsigned SrcReg,
const MachineRegisterInfo *MRI) const {
assert(MRI);
// Get the unique definition of SrcReg.
MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
if (!MI)
return false;
const TargetRegisterInfo *TRI = &getRegisterInfo();
unsigned NewOpc = sForm(*MI);
if (NewOpc == AArch64::INSTRUCTION_LIST_END)
return false;
if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
return false;
// Update the instruction to set NZCV.
MI->setDesc(get(NewOpc));
CmpInstr.eraseFromParent();
bool succeeded = UpdateOperandRegClass(*MI);
(void)succeeded;
assert(succeeded && "Some operands reg class are incompatible!");
MI->addRegisterDefined(AArch64::NZCV, TRI);
return true;
}
bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD &&
MI.getOpcode() != AArch64::CATCHRET)
return false;
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
if (MI.getOpcode() == AArch64::CATCHRET) {
// Skip to the first instruction before the epilog.
const TargetInstrInfo *TII =
MBB.getParent()->getSubtarget().getInstrInfo();
MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
auto MBBI = MachineBasicBlock::iterator(MI);
MachineBasicBlock::iterator FirstEpilogSEH = std::prev(MBBI);
while (FirstEpilogSEH->getFlag(MachineInstr::FrameDestroy) &&
FirstEpilogSEH != MBB.begin())
FirstEpilogSEH = std::prev(FirstEpilogSEH);
if (FirstEpilogSEH != MBB.begin())
FirstEpilogSEH = std::next(FirstEpilogSEH);
BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADRP))
.addReg(AArch64::X0, RegState::Define)
.addMBB(TargetMBB);
BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADDXri))
.addReg(AArch64::X0, RegState::Define)
.addReg(AArch64::X0)
.addMBB(TargetMBB)
.addImm(0);
return true;
}
unsigned Reg = MI.getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MI.memoperands_begin())->getValue());
const TargetMachine &TM = MBB.getParent()->getTarget();
unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
const unsigned char MO_NC = AArch64II::MO_NC;
if ((OpFlags & AArch64II::MO_GOT) != 0) {
BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
.addGlobalAddress(GV, 0, OpFlags);
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill)
.addImm(0)
.addMemOperand(*MI.memoperands_begin());
} else if (TM.getCodeModel() == CodeModel::Large) {
BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
.addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC)
.addImm(0);
BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC)
.addImm(16);
BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC)
.addImm(32);
BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, AArch64II::MO_G3)
.addImm(48);
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill)
.addImm(0)
.addMemOperand(*MI.memoperands_begin());
} else if (TM.getCodeModel() == CodeModel::Tiny) {
BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg)
.addGlobalAddress(GV, 0, OpFlags);
} else {
BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
.addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, LoFlags)
.addMemOperand(*MI.memoperands_begin());
}
MBB.erase(MI);
return true;
}
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
break;
case AArch64::MOVZWi:
case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
assert(MI.getDesc().getNumOperands() == 3 &&
MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
return true;
}
break;
case AArch64::ANDWri: // and Rd, Rzr, #imm
return MI.getOperand(1).getReg() == AArch64::WZR;
case AArch64::ANDXri:
return MI.getOperand(1).getReg() == AArch64::XZR;
case TargetOpcode::COPY:
return MI.getOperand(1).getReg() == AArch64::WZR;
}
return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
break;
case TargetOpcode::COPY: {
// GPR32 copies will be lowered to ORRXrs
unsigned DstReg = MI.getOperand(0).getReg();
return (AArch64::GPR32RegClass.contains(DstReg) ||
AArch64::GPR64RegClass.contains(DstReg));
}
case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
if (MI.getOperand(1).getReg() == AArch64::XZR) {
assert(MI.getDesc().getNumOperands() == 4 &&
MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
return true;
}
break;
case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
if (MI.getOperand(2).getImm() == 0) {
assert(MI.getDesc().getNumOperands() == 4 &&
MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
return true;
}
break;
}
return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
break;
case TargetOpcode::COPY: {
// FPR64 copies will be lowered to ORR.16b
unsigned DstReg = MI.getOperand(0).getReg();
return (AArch64::FPR64RegClass.contains(DstReg) ||
AArch64::FPR128RegClass.contains(DstReg));
}
case AArch64::ORRv16i8:
if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
"invalid ORRv16i8 operands");
return true;
}
break;
}
return false;
}
unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
switch (MI.getOpcode()) {
default:
break;
case AArch64::LDRWui:
case AArch64::LDRXui:
case AArch64::LDRBui:
case AArch64::LDRHui:
case AArch64::LDRSui:
case AArch64::LDRDui:
case AArch64::LDRQui:
if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
break;
}
return 0;
}
unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
switch (MI.getOpcode()) {
default:
break;
case AArch64::STRWui:
case AArch64::STRXui:
case AArch64::STRBui:
case AArch64::STRHui:
case AArch64::STRSui:
case AArch64::STRDui:
case AArch64::STRQui:
if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
break;
}
return 0;
}
/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) {
return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
return MMO->getFlags() & MOSuppressPair;
});
}
/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) {
if (MI.memoperands_empty())
return;
(*MI.memoperands_begin())->setFlags(MOSuppressPair);
}
/// Check all MachineMemOperands for a hint that the load/store is strided.
bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) {
return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
return MMO->getFlags() & MOStridedAccess;
});
}
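// Return true if \p Opc is one of the unscaled (LDUR/STUR) load/store opcodes.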
bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) {
switch (Opc) {
default:
return false;
case AArch64::STURSi:
case AArch64::STURDi:
case AArch64::STURQi:
case AArch64::STURBBi:
case AArch64::STURHHi:
case AArch64::STURWi:
case AArch64::STURXi:
case AArch64::LDURSi:
case AArch64::LDURDi:
case AArch64::LDURQi:
case AArch64::LDURWi:
case AArch64::LDURXi:
case AArch64::LDURSWi:
case AArch64::LDURHHi:
case AArch64::LDURBBi:
case AArch64::LDURSBWi:
case AArch64::LDURSHWi:
return true;
}
}
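// Return true if MI uses one of the scaled or unscaled load/store opcodes
// that can be combined into an ldp/stp pair.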
bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
return false;
// Scaled instructions.
case AArch64::STRSui:
case AArch64::STRDui:
case AArch64::STRQui:
case AArch64::STRXui:
case AArch64::STRWui:
case AArch64::LDRSui:
case AArch64::LDRDui:
case AArch64::LDRQui:
case AArch64::LDRXui:
case AArch64::LDRWui:
case AArch64::LDRSWui:
// Unscaled instructions.
case AArch64::STURSi:
case AArch64::STURDi:
case AArch64::STURQi:
case AArch64::STURWi:
case AArch64::STURXi:
case AArch64::LDURSi:
case AArch64::LDURDi:
case AArch64::LDURQi:
case AArch64::LDURWi:
case AArch64::LDURXi:
case AArch64::LDURSWi:
return true;
}
}
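// Return the flag-setting (ADDS/SUBS/ANDS/BICS) form of \p Opc and report in
// \p Is64Bit whether the operation is a 64-bit one.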
unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc,
bool &Is64Bit) {
switch (Opc) {
default:
llvm_unreachable("Opcode has no flag setting equivalent!");
// 32-bit cases:
case AArch64::ADDWri:
Is64Bit = false;
return AArch64::ADDSWri;
case AArch64::ADDWrr:
Is64Bit = false;
return AArch64::ADDSWrr;
case AArch64::ADDWrs:
Is64Bit = false;
return AArch64::ADDSWrs;
case AArch64::ADDWrx:
Is64Bit = false;
return AArch64::ADDSWrx;
case AArch64::ANDWri:
Is64Bit = false;
return AArch64::ANDSWri;
case AArch64::ANDWrr:
Is64Bit = false;
return AArch64::ANDSWrr;
case AArch64::ANDWrs:
Is64Bit = false;
return AArch64::ANDSWrs;
case AArch64::BICWrr:
Is64Bit = false;
return AArch64::BICSWrr;
case AArch64::BICWrs:
Is64Bit = false;
return AArch64::BICSWrs;
case AArch64::SUBWri:
Is64Bit = false;
return AArch64::SUBSWri;
case AArch64::SUBWrr:
Is64Bit = false;
return AArch64::SUBSWrr;
case AArch64::SUBWrs:
Is64Bit = false;
return AArch64::SUBSWrs;
case AArch64::SUBWrx:
Is64Bit = false;
return AArch64::SUBSWrx;
// 64-bit cases:
case AArch64::ADDXri:
Is64Bit = true;
return AArch64::ADDSXri;
case AArch64::ADDXrr:
Is64Bit = true;
return AArch64::ADDSXrr;
case AArch64::ADDXrs:
Is64Bit = true;
return AArch64::ADDSXrs;
case AArch64::ADDXrx:
Is64Bit = true;
return AArch64::ADDSXrx;
case AArch64::ANDXri:
Is64Bit = true;
return AArch64::ANDSXri;
case AArch64::ANDXrr:
Is64Bit = true;
return AArch64::ANDSXrr;
case AArch64::ANDXrs:
Is64Bit = true;
return AArch64::ANDSXrs;
case AArch64::BICXrr:
Is64Bit = true;
return AArch64::BICSXrr;
case AArch64::BICXrs:
Is64Bit = true;
return AArch64::BICSXrs;
case AArch64::SUBXri:
Is64Bit = true;
return AArch64::SUBSXri;
case AArch64::SUBXrr:
Is64Bit = true;
return AArch64::SUBSXrr;
case AArch64::SUBXrs:
Is64Bit = true;
return AArch64::SUBSXrs;
case AArch64::SUBXrx:
Is64Bit = true;
return AArch64::SUBSXrx;
}
}
// Is this a candidate for ld/st merging or pairing? For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
// If this is a volatile load/store, don't mess with it.
if (MI.hasOrderedMemoryRef())
return false;
// Make sure this is a reg/fi+imm (as opposed to an address reloc).
assert((MI.getOperand(1).isReg() || MI.getOperand(1).isFI()) &&
"Expected a reg or frame index operand.");
if (!MI.getOperand(2).isImm())
return false;
// Can't merge/pair if the instruction modifies the base register.
// e.g., ldr x0, [x0]
// This case will never occur with an FI base.
if (MI.getOperand(1).isReg()) {
unsigned BaseReg = MI.getOperand(1).getReg();
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (MI.modifiesRegister(BaseReg, TRI))
return false;
}
// Check if this load/store has a hint to avoid pair formation.
// MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
if (isLdStPairSuppressed(MI))
return false;
// On some CPUs quad load/store pairs are slower than two single load/stores.
if (Subtarget.isPaired128Slow()) {
switch (MI.getOpcode()) {
default:
break;
case AArch64::LDURQi:
case AArch64::STURQi:
case AArch64::LDRQui:
case AArch64::STRQui:
return false;
}
}
return true;
}
bool AArch64InstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const {
unsigned Width;
return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
}
bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
unsigned &Width, const TargetRegisterInfo *TRI) const {
assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
// Handle only loads/stores with base register followed by immediate offset.
if (LdSt.getNumExplicitOperands() == 3) {
// Non-paired instruction (e.g., ldr x1, [x0, #8]).
if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
!LdSt.getOperand(2).isImm())
return false;
} else if (LdSt.getNumExplicitOperands() == 4) {
// Paired instruction (e.g., ldp x1, x2, [x0, #8]).
if (!LdSt.getOperand(1).isReg() ||
(!LdSt.getOperand(2).isReg() && !LdSt.getOperand(2).isFI()) ||
!LdSt.getOperand(3).isImm())
return false;
} else
return false;
// Get the scaling factor for the instruction and set the width for the
// instruction.
unsigned Scale = 0;
int64_t Dummy1, Dummy2;
// If this returns false, then it's an instruction we don't want to handle.
if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2))
return false;
// Compute the offset. Offset is calculated as the immediate operand
// multiplied by the scaling factor. Unscaled instructions have scaling factor
// set to 1.
if (LdSt.getNumExplicitOperands() == 3) {
BaseOp = &LdSt.getOperand(1);
Offset = LdSt.getOperand(2).getImm() * Scale;
} else {
assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
BaseOp = &LdSt.getOperand(2);
Offset = LdSt.getOperand(3).getImm() * Scale;
}
assert((BaseOp->isReg() || BaseOp->isFI()) &&
"getMemOperandWithOffset only supports base "
"operands of type register or frame index.");
return true;
}
MachineOperand &
AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const {
assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1);
assert(OfsOp.isImm() && "Offset operand wasn't immediate.");
return OfsOp;
}
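// Fill in the scale, access width and legal signed immediate offset range for
// \p Opcode. Returns false for opcodes that are not handled.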
bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, unsigned &Scale,
unsigned &Width, int64_t &MinOffset,
int64_t &MaxOffset) const {
switch (Opcode) {
// Not a memory operation or something we want to handle.
default:
Scale = Width = 0;
MinOffset = MaxOffset = 0;
return false;
case AArch64::STRWpost:
case AArch64::LDRWpost:
Width = 32;
Scale = 4;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::LDURQi:
case AArch64::STURQi:
Width = 16;
Scale = 1;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::LDURXi:
case AArch64::LDURDi:
case AArch64::STURXi:
case AArch64::STURDi:
Width = 8;
Scale = 1;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::LDURWi:
case AArch64::LDURSi:
case AArch64::LDURSWi:
case AArch64::STURWi:
case AArch64::STURSi:
Width = 4;
Scale = 1;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::LDURHi:
case AArch64::LDURHHi:
case AArch64::LDURSHXi:
case AArch64::LDURSHWi:
case AArch64::STURHi:
case AArch64::STURHHi:
Width = 2;
Scale = 1;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::LDURBi:
case AArch64::LDURBBi:
case AArch64::LDURSBXi:
case AArch64::LDURSBWi:
case AArch64::STURBi:
case AArch64::STURBBi:
Width = 1;
Scale = 1;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::LDPQi:
case AArch64::LDNPQi:
case AArch64::STPQi:
case AArch64::STNPQi:
Scale = 16;
Width = 32;
MinOffset = -64;
MaxOffset = 63;
break;
case AArch64::LDRQui:
case AArch64::STRQui:
Scale = Width = 16;
MinOffset = 0;
MaxOffset = 4095;
break;
case AArch64::LDPXi:
case AArch64::LDPDi:
case AArch64::LDNPXi:
case AArch64::LDNPDi:
case AArch64::STPXi:
case AArch64::STPDi:
case AArch64::STNPXi:
case AArch64::STNPDi:
Scale = 8;
Width = 16;
MinOffset = -64;
MaxOffset = 63;
break;
case AArch64::LDRXui:
case AArch64::LDRDui:
case AArch64::STRXui:
case AArch64::STRDui:
Scale = Width = 8;
MinOffset = 0;
MaxOffset = 4095;
break;
case AArch64::LDPWi:
case AArch64::LDPSi:
case AArch64::LDNPWi:
case AArch64::LDNPSi:
case AArch64::STPWi:
case AArch64::STPSi:
case AArch64::STNPWi:
case AArch64::STNPSi:
Scale = 4;
Width = 8;
MinOffset = -64;
MaxOffset = 63;
break;
case AArch64::LDRWui:
case AArch64::LDRSui:
case AArch64::LDRSWui:
case AArch64::STRWui:
case AArch64::STRSui:
Scale = Width = 4;
MinOffset = 0;
MaxOffset = 4095;
break;
case AArch64::LDRHui:
case AArch64::LDRHHui:
case AArch64::STRHui:
case AArch64::STRHHui:
Scale = Width = 2;
MinOffset = 0;
MaxOffset = 4095;
break;
case AArch64::LDRBui:
case AArch64::LDRBBui:
case AArch64::STRBui:
case AArch64::STRBBui:
Scale = Width = 1;
MinOffset = 0;
MaxOffset = 4095;
break;
}
return true;
}
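// Return the access size in bytes used to scale the offsets of the unscaled
// load/store opcodes handled below, or 0 for any other opcode.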
static unsigned getOffsetStride(unsigned Opc) {
switch (Opc) {
default:
return 0;
case AArch64::LDURQi:
case AArch64::STURQi:
return 16;
case AArch64::LDURXi:
case AArch64::LDURDi:
case AArch64::STURXi:
case AArch64::STURDi:
return 8;
case AArch64::LDURWi:
case AArch64::LDURSi:
case AArch64::LDURSWi:
case AArch64::STURWi:
case AArch64::STURSi:
return 4;
}
}
// Scale the unscaled offsets. Returns false if the unscaled offset can't be
// scaled.
static bool scaleOffset(unsigned Opc, int64_t &Offset) {
unsigned OffsetStride = getOffsetStride(Opc);
if (OffsetStride == 0)
return false;
// If the byte-offset isn't a multiple of the stride, we can't scale this
// offset.
if (Offset % OffsetStride != 0)
return false;
// Convert the byte-offset used by unscaled into an "element" offset used
// by the scaled pair load/store instructions.
Offset /= OffsetStride;
return true;
}
// Unscale the scaled offsets. Returns false if the scaled offset can't be
// unscaled.
static bool unscaleOffset(unsigned Opc, int64_t &Offset) {
unsigned OffsetStride = getOffsetStride(Opc);
if (OffsetStride == 0)
return false;
// Convert the "element" offset used by scaled pair load/store instructions
// into the byte-offset used by unscaled.
Offset *= OffsetStride;
return true;
}
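// Return true if two load/store opcodes can form a pair: either they are
// identical, or one is the 32-bit zero-extending load and the other the
// corresponding sign-extending load.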
static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
if (FirstOpc == SecondOpc)
return true;
// We can also pair sign-ext and zero-ext instructions.
switch (FirstOpc) {
default:
return false;
case AArch64::LDRWui:
case AArch64::LDURWi:
return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
case AArch64::LDRSWui:
case AArch64::LDURSWi:
return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
}
// These instructions can't be paired based on their opcodes.
return false;
}
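// Decide whether two frame-index based accesses should be clustered. For
// fixed stack objects the combined object and instruction offsets must refer
// to adjacent elements; otherwise the frame indices must be identical.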
static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
int64_t Offset1, unsigned Opcode1, int FI2,
int64_t Offset2, unsigned Opcode2) {
// Accesses through fixed stack object frame indices may access a different
// fixed stack slot. Check that the object offsets + offsets match.
if (MFI.isFixedObjectIndex(FI1) && MFI.isFixedObjectIndex(FI2)) {
int64_t ObjectOffset1 = MFI.getObjectOffset(FI1);
int64_t ObjectOffset2 = MFI.getObjectOffset(FI2);
assert(ObjectOffset1 <= ObjectOffset2 && "Object offsets are not ordered.");
// Get the byte-offset from the object offset.
if (!unscaleOffset(Opcode1, Offset1) || !unscaleOffset(Opcode2, Offset2))
return false;
ObjectOffset1 += Offset1;
ObjectOffset2 += Offset2;
// Get the "element" index in the object.
if (!scaleOffset(Opcode1, ObjectOffset1) ||
!scaleOffset(Opcode2, ObjectOffset2))
return false;
return ObjectOffset1 + 1 == ObjectOffset2;
}
return FI1 == FI2;
}
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOperandWithOffset returns true.
bool AArch64InstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
MachineOperand &BaseOp2,
unsigned NumLoads) const {
MachineInstr &FirstLdSt = *BaseOp1.getParent();
MachineInstr &SecondLdSt = *BaseOp2.getParent();
if (BaseOp1.getType() != BaseOp2.getType())
return false;
assert((BaseOp1.isReg() || BaseOp1.isFI()) &&
"Only base registers and frame indices are supported.");
// Check for both base regs and base FI.
if (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg())
return false;
// Only cluster up to a single pair.
if (NumLoads > 1)
return false;
if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
return false;
// Can we pair these instructions based on their opcodes?
unsigned FirstOpc = FirstLdSt.getOpcode();
unsigned SecondOpc = SecondLdSt.getOpcode();
if (!canPairLdStOpc(FirstOpc, SecondOpc))
return false;
// Can't merge volatiles or load/stores that have a hint to avoid pair
// formation, for example.
if (!isCandidateToMergeOrPair(FirstLdSt) ||
!isCandidateToMergeOrPair(SecondLdSt))
return false;
// isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
return false;
int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
return false;
// Pairwise instructions have a 7-bit signed offset field.
if (Offset1 > 63 || Offset1 < -64)
return false;
// The caller should already have ordered First/SecondLdSt by offset.
// Note: this does not necessarily hold for non-equal frame index bases.
if (BaseOp1.isFI()) {
assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 >= Offset2) &&
"Caller should have ordered offsets.");
const MachineFrameInfo &MFI =
FirstLdSt.getParent()->getParent()->getFrameInfo();
return shouldClusterFI(MFI, BaseOp1.getIndex(), Offset1, FirstOpc,
BaseOp2.getIndex(), Offset2, SecondOpc);
}
assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&
"Caller should have ordered offsets.");
return Offset1 + 1 == Offset2;
}
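// Add \p Reg (or, for a physical register, its \p SubIdx sub-register) to
// \p MIB with the given register state.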
static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
unsigned Reg, unsigned SubIdx,
unsigned State,
const TargetRegisterInfo *TRI) {
if (!SubIdx)
return MIB.addReg(Reg, State);
if (TargetRegisterInfo::isPhysicalRegister(Reg))
return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
return MIB.addReg(Reg, State, SubIdx);
}
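// Return true if copying a register tuple in increasing sub-register order
// would overwrite source registers that have not yet been read.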
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
unsigned NumRegs) {
// We really want the positive remainder mod 32 here, which happens to be
// easily obtainable with a mask.
return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc,
unsigned Opcode,
ArrayRef<unsigned> Indices) const {
assert(Subtarget.hasNEON() && "Unexpected register copy without NEON");
const TargetRegisterInfo *TRI = &getRegisterInfo();
uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
unsigned NumRegs = Indices.size();
int SubReg = 0, End = NumRegs, Incr = 1;
if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
SubReg = NumRegs - 1;
End = -1;
Incr = -1;
}
for (; SubReg != End; SubReg += Incr) {
const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
}
}
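+// Copy a sequential GPR register tuple by emitting, for each sub-register, an
+// ORR of the source sub-register with the zero register (a plain register
+// move). Sequential GPR pairs are encoding-aligned, so the source and
+// destination sub-registers can never overlap and a forward copy is safe.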
+void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ DebugLoc DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc,
+ unsigned Opcode, unsigned ZeroReg,
+ llvm::ArrayRef<unsigned> Indices) const {
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ unsigned NumRegs = Indices.size();
+
+#ifndef NDEBUG
+ uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
+ uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
+ assert(DestEncoding % NumRegs == 0 && SrcEncoding % NumRegs == 0 &&
+ "GPR reg sequences should not be able to overlap");
+#endif
+
+ for (unsigned SubReg = 0; SubReg != NumRegs; ++SubReg) {
+ const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
+ AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
+ MIB.addReg(ZeroReg);
+ AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
+ MIB.addImm(0);
+ }
+}
+
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
if (AArch64::GPR32spRegClass.contains(DestReg) &&
(AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
// If either operand is WSP, expand to ADD #0.
if (Subtarget.hasZeroCycleRegMove()) {
// Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
&AArch64::GPR64spRegClass);
unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
&AArch64::GPR64spRegClass);
// This instruction is reading and writing X registers. This may upset
// the register scavenger and machine verifier, so we need to indicate
// that we are reading an undefined value from SrcRegX, but a proper
// value from SrcReg.
BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
.addReg(SrcRegX, RegState::Undef)
.addImm(0)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
.addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
} else {
BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc))
.addImm(0)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
}
} else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroingGP()) {
BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
.addImm(0)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
} else {
if (Subtarget.hasZeroCycleRegMove()) {
// Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
&AArch64::GPR64spRegClass);
unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
&AArch64::GPR64spRegClass);
// This instruction is reading and writing X registers. This may upset
// the register scavenger and machine verifier, so we need to indicate
// that we are reading an undefined value from SrcRegX, but a proper
// value from SrcReg.
BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
.addReg(AArch64::XZR)
.addReg(SrcRegX, RegState::Undef)
.addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
} else {
// Otherwise, expand to ORR WZR.
BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
.addReg(AArch64::WZR)
.addReg(SrcReg, getKillRegState(KillSrc));
}
}
return;
}
if (AArch64::GPR64spRegClass.contains(DestReg) &&
(AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
// If either operand is SP, expand to ADD #0.
BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc))
.addImm(0)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
} else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroingGP()) {
BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg)
.addImm(0)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
} else {
// Otherwise, expand to ORR XZR.
BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
.addReg(AArch64::XZR)
.addReg(SrcReg, getKillRegState(KillSrc));
}
return;
}
// Copy a DDDD register quad by copying the individual sub-registers.
if (AArch64::DDDDRegClass.contains(DestReg) &&
AArch64::DDDDRegClass.contains(SrcReg)) {
static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
AArch64::dsub2, AArch64::dsub3};
copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
Indices);
return;
}
// Copy a DDD register triple by copying the individual sub-registers.
if (AArch64::DDDRegClass.contains(DestReg) &&
AArch64::DDDRegClass.contains(SrcReg)) {
static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
AArch64::dsub2};
copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
Indices);
return;
}
// Copy a DD register pair by copying the individual sub-registers.
if (AArch64::DDRegClass.contains(DestReg) &&
AArch64::DDRegClass.contains(SrcReg)) {
static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1};
copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
Indices);
return;
}
// Copy a QQQQ register quad by copying the individual sub-registers.
if (AArch64::QQQQRegClass.contains(DestReg) &&
AArch64::QQQQRegClass.contains(SrcReg)) {
static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
AArch64::qsub2, AArch64::qsub3};
copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
Indices);
return;
}
// Copy a QQQ register triple by copying the individual sub-registers.
if (AArch64::QQQRegClass.contains(DestReg) &&
AArch64::QQQRegClass.contains(SrcReg)) {
static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
AArch64::qsub2};
copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
Indices);
return;
}
// Copy a QQ register pair by copying the individual sub-registers.
if (AArch64::QQRegClass.contains(DestReg) &&
AArch64::QQRegClass.contains(SrcReg)) {
static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1};
copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
Indices);
+ return;
+ }
+
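+ // Copy an X register pair by copying the individual sub-registers.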
+ if (AArch64::XSeqPairsClassRegClass.contains(DestReg) &&
+ AArch64::XSeqPairsClassRegClass.contains(SrcReg)) {
+ static const unsigned Indices[] = {AArch64::sube64, AArch64::subo64};
+ copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRXrs,
+ AArch64::XZR, Indices);
+ return;
+ }
+
+ if (AArch64::WSeqPairsClassRegClass.contains(DestReg) &&
+ AArch64::WSeqPairsClassRegClass.contains(SrcReg)) {
+ static const unsigned Indices[] = {AArch64::sube32, AArch64::subo32};
+ copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRWrs,
+ AArch64::WZR, Indices);
return;
}
if (AArch64::FPR128RegClass.contains(DestReg) &&
AArch64::FPR128RegClass.contains(SrcReg)) {
if (Subtarget.hasNEON()) {
BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
.addReg(SrcReg)
.addReg(SrcReg, getKillRegState(KillSrc));
} else {
BuildMI(MBB, I, DL, get(AArch64::STRQpre))
.addReg(AArch64::SP, RegState::Define)
.addReg(SrcReg, getKillRegState(KillSrc))
.addReg(AArch64::SP)
.addImm(-16);
BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
.addReg(AArch64::SP, RegState::Define)
.addReg(DestReg, RegState::Define)
.addReg(AArch64::SP)
.addImm(16);
}
return;
}
if (AArch64::FPR64RegClass.contains(DestReg) &&
AArch64::FPR64RegClass.contains(SrcReg)) {
if (Subtarget.hasNEON()) {
DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
&AArch64::FPR128RegClass);
SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
&AArch64::FPR128RegClass);
BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
.addReg(SrcReg)
.addReg(SrcReg, getKillRegState(KillSrc));
} else {
BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
return;
}
if (AArch64::FPR32RegClass.contains(DestReg) &&
AArch64::FPR32RegClass.contains(SrcReg)) {
if (Subtarget.hasNEON()) {
DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
&AArch64::FPR128RegClass);
SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
&AArch64::FPR128RegClass);
BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
.addReg(SrcReg)
.addReg(SrcReg, getKillRegState(KillSrc));
} else {
BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
return;
}
if (AArch64::FPR16RegClass.contains(DestReg) &&
AArch64::FPR16RegClass.contains(SrcReg)) {
if (Subtarget.hasNEON()) {
DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
&AArch64::FPR128RegClass);
SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
&AArch64::FPR128RegClass);
BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
.addReg(SrcReg)
.addReg(SrcReg, getKillRegState(KillSrc));
} else {
DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
&AArch64::FPR32RegClass);
SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
&AArch64::FPR32RegClass);
BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
return;
}
if (AArch64::FPR8RegClass.contains(DestReg) &&
AArch64::FPR8RegClass.contains(SrcReg)) {
if (Subtarget.hasNEON()) {
DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
&AArch64::FPR128RegClass);
SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
&AArch64::FPR128RegClass);
BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
.addReg(SrcReg)
.addReg(SrcReg, getKillRegState(KillSrc));
} else {
DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
&AArch64::FPR32RegClass);
SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
&AArch64::FPR32RegClass);
BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
return;
}
// Copies between GPR64 and FPR64.
if (AArch64::FPR64RegClass.contains(DestReg) &&
AArch64::GPR64RegClass.contains(SrcReg)) {
BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
return;
}
if (AArch64::GPR64RegClass.contains(DestReg) &&
AArch64::FPR64RegClass.contains(SrcReg)) {
BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
return;
}
// Copies between GPR32 and FPR32.
if (AArch64::FPR32RegClass.contains(DestReg) &&
AArch64::GPR32RegClass.contains(SrcReg)) {
BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
return;
}
if (AArch64::GPR32RegClass.contains(DestReg) &&
AArch64::FPR32RegClass.contains(SrcReg)) {
BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
return;
}
if (DestReg == AArch64::NZCV) {
assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
BuildMI(MBB, I, DL, get(AArch64::MSR))
.addImm(AArch64SysReg::NZCV)
.addReg(SrcReg, getKillRegState(KillSrc))
.addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
return;
}
if (SrcReg == AArch64::NZCV) {
assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
.addImm(AArch64SysReg::NZCV)
.addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
return;
}
llvm_unreachable("unimplemented reg-to-reg copy");
}
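// Emit a store-pair instruction that spills both sub-registers (\p SubIdx0
// and \p SubIdx1) of \p SrcReg to frame index \p FI.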
static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
const MCInstrDesc &MCID,
unsigned SrcReg, bool IsKill,
unsigned SubIdx0, unsigned SubIdx1, int FI,
MachineMemOperand *MMO) {
unsigned SrcReg0 = SrcReg;
unsigned SrcReg1 = SrcReg;
if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0);
SubIdx0 = 0;
SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1);
SubIdx1 = 0;
}
BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
.addReg(SrcReg0, getKillRegState(IsKill), SubIdx0)
.addReg(SrcReg1, getKillRegState(IsKill), SubIdx1)
.addFrameIndex(FI)
.addImm(0)
.addMemOperand(MMO);
}
void AArch64InstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
bool isKill, int FI, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
unsigned Align = MFI.getObjectAlignment(FI);
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
MachineMemOperand *MMO = MF.getMachineMemOperand(
PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
unsigned Opc = 0;
bool Offset = true;
switch (TRI->getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::STRBui;
break;
case 2:
if (AArch64::FPR16RegClass.hasSubClassEq(RC))
Opc = AArch64::STRHui;
break;
case 4:
if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::STRWui;
if (TargetRegisterInfo::isVirtualRegister(SrcReg))
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
else
assert(SrcReg != AArch64::WSP);
} else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
Opc = AArch64::STRSui;
break;
case 8:
if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::STRXui;
if (TargetRegisterInfo::isVirtualRegister(SrcReg))
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
else
assert(SrcReg != AArch64::SP);
} else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
Opc = AArch64::STRDui;
} else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
get(AArch64::STPWi), SrcReg, isKill,
AArch64::sube32, AArch64::subo32, FI, MMO);
return;
}
break;
case 16:
if (AArch64::FPR128RegClass.hasSubClassEq(RC))
Opc = AArch64::STRQui;
else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
Opc = AArch64::ST1Twov1d;
Offset = false;
} else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
get(AArch64::STPXi), SrcReg, isKill,
AArch64::sube64, AArch64::subo64, FI, MMO);
return;
}
break;
case 24:
if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
Opc = AArch64::ST1Threev1d;
Offset = false;
}
break;
case 32:
if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
Opc = AArch64::ST1Fourv1d;
Offset = false;
} else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
Opc = AArch64::ST1Twov2d;
Offset = false;
}
break;
case 48:
if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
Opc = AArch64::ST1Threev2d;
Offset = false;
}
break;
case 64:
if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
Opc = AArch64::ST1Fourv2d;
Offset = false;
}
break;
}
assert(Opc && "Unknown register class");
const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI);
if (Offset)
MI.addImm(0);
MI.addMemOperand(MMO);
}
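// Emit a load-pair instruction that reloads both sub-registers (\p SubIdx0
// and \p SubIdx1) of \p DestReg from frame index \p FI.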
static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
const MCInstrDesc &MCID,
unsigned DestReg, unsigned SubIdx0,
unsigned SubIdx1, int FI,
MachineMemOperand *MMO) {
unsigned DestReg0 = DestReg;
unsigned DestReg1 = DestReg;
bool IsUndef = true;
if (TargetRegisterInfo::isPhysicalRegister(DestReg)) {
DestReg0 = TRI.getSubReg(DestReg, SubIdx0);
SubIdx0 = 0;
DestReg1 = TRI.getSubReg(DestReg, SubIdx1);
SubIdx1 = 0;
IsUndef = false;
}
BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
.addReg(DestReg0, RegState::Define | getUndefRegState(IsUndef), SubIdx0)
.addReg(DestReg1, RegState::Define | getUndefRegState(IsUndef), SubIdx1)
.addFrameIndex(FI)
.addImm(0)
.addMemOperand(MMO);
}
void AArch64InstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
int FI, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
unsigned Align = MFI.getObjectAlignment(FI);
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
MachineMemOperand *MMO = MF.getMachineMemOperand(
PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
unsigned Opc = 0;
bool Offset = true;
switch (TRI->getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::LDRBui;
break;
case 2:
if (AArch64::FPR16RegClass.hasSubClassEq(RC))
Opc = AArch64::LDRHui;
break;
case 4:
if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::LDRWui;
if (TargetRegisterInfo::isVirtualRegister(DestReg))
MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
else
assert(DestReg != AArch64::WSP);
} else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
Opc = AArch64::LDRSui;
break;
case 8:
if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::LDRXui;
if (TargetRegisterInfo::isVirtualRegister(DestReg))
MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
else
assert(DestReg != AArch64::SP);
} else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
Opc = AArch64::LDRDui;
} else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
get(AArch64::LDPWi), DestReg, AArch64::sube32,
AArch64::subo32, FI, MMO);
return;
}
break;
case 16:
if (AArch64::FPR128RegClass.hasSubClassEq(RC))
Opc = AArch64::LDRQui;
else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
Opc = AArch64::LD1Twov1d;
Offset = false;
} else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
get(AArch64::LDPXi), DestReg, AArch64::sube64,
AArch64::subo64, FI, MMO);
return;
}
break;
case 24:
if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
Opc = AArch64::LD1Threev1d;
Offset = false;
}
break;
case 32:
if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
Opc = AArch64::LD1Fourv1d;
Offset = false;
} else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
Opc = AArch64::LD1Twov2d;
Offset = false;
}
break;
case 48:
if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
Opc = AArch64::LD1Threev2d;
Offset = false;
}
break;
case 64:
if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
Opc = AArch64::LD1Fourv2d;
Offset = false;
}
break;
}
assert(Opc && "Unknown register class");
const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
.addReg(DestReg, getDefRegState(true))
.addFrameIndex(FI);
if (Offset)
MI.addImm(0);
MI.addMemOperand(MMO);
}
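// Emit code to add \p Offset to \p SrcReg and place the result in \p DestReg,
// splitting large immediates into 12-bit chunks (shifted by 12 where needed)
// and emitting Windows SEH opcodes when \p NeedsWinCFI is set.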
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg, int Offset,
const TargetInstrInfo *TII,
MachineInstr::MIFlag Flag, bool SetNZCV,
bool NeedsWinCFI) {
if (DestReg == SrcReg && Offset == 0)
return;
assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
"SP increment/decrement not 16-byte aligned");
bool isSub = Offset < 0;
if (isSub)
Offset = -Offset;
// FIXME: If the offset won't fit in 24-bits, compute the offset into a
// scratch register. If DestReg is a virtual register, use it as the
// scratch register; otherwise, create a new virtual register (to be
// replaced by the scavenger at the end of PEI). That case can be optimized
// slightly if DestReg is SP which is always 16-byte aligned, so the scratch
// register can be loaded with offset%8 and the add/sub can use an extending
// instruction with LSL#3.
// Currently the function handles any offsets but generates a poor sequence
// of code.
// assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
unsigned Opc;
if (SetNZCV)
Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
else
Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
const unsigned MaxEncoding = 0xfff;
const unsigned ShiftSize = 12;
const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
while (((unsigned)Offset) >= (1 << ShiftSize)) {
unsigned ThisVal;
if (((unsigned)Offset) > MaxEncodableValue) {
ThisVal = MaxEncodableValue;
} else {
ThisVal = Offset & MaxEncodableValue;
}
assert((ThisVal >> ShiftSize) <= MaxEncoding &&
"Encoding cannot handle value that big");
BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
.addReg(SrcReg)
.addImm(ThisVal >> ShiftSize)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
.setMIFlag(Flag);
if (NeedsWinCFI && SrcReg == AArch64::SP && DestReg == AArch64::SP)
BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
.addImm(ThisVal)
.setMIFlag(Flag);
SrcReg = DestReg;
Offset -= ThisVal;
if (Offset == 0)
return;
}
BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
.addReg(SrcReg)
.addImm(Offset)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
.setMIFlag(Flag);
if (NeedsWinCFI) {
if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) ||
(SrcReg == AArch64::FP && DestReg == AArch64::SP)) {
if (Offset == 0)
BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_SetFP)).
setMIFlag(Flag);
else
BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_AddFP)).
addImm(Offset).setMIFlag(Flag);
} else if (DestReg == AArch64::SP) {
BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc)).
addImm(Offset).setMIFlag(Flag);
}
}
}
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS) const {
// This is a bit of a hack. Consider this instruction:
//
// %0 = COPY %sp; GPR64all:%0
//
// We explicitly chose GPR64all for the virtual register so such a copy might
// be eliminated by RegisterCoalescer. However, that may not be possible, and
// %0 may even spill. We can't spill %sp, and since it is in the GPR64all
// register class, TargetInstrInfo::foldMemoryOperand() is going to try.
//
// To prevent that, we are going to constrain the %0 register class here.
//
// <rdar://problem/11522048>
//
if (MI.isFullCopy()) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
if (SrcReg == AArch64::SP &&
TargetRegisterInfo::isVirtualRegister(DstReg)) {
MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
return nullptr;
}
if (DstReg == AArch64::SP &&
TargetRegisterInfo::isVirtualRegister(SrcReg)) {
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
return nullptr;
}
}
// Handle the case where a copy is being spilled or filled but the source
// and destination register class don't match. For example:
//
// %0 = COPY %xzr; GPR64common:%0
//
// In this case we can still safely fold away the COPY and generate the
// following spill code:
//
// STRXui %xzr, %stack.0
//
// This also eliminates spilled cross register class COPYs (e.g. between x and
// d regs) of the same size. For example:
//
// %0 = COPY %1; GPR64:%0, FPR64:%1
//
// will be filled as
//
// LDRDui %0, fi<#0>
//
// instead of
//
// LDRXui %Temp, fi<#0>
// %0 = FMOV %Temp
//
if (MI.isCopy() && Ops.size() == 1 &&
// Make sure we're only folding the explicit COPY defs/uses.
(Ops[0] == 0 || Ops[0] == 1)) {
bool IsSpill = Ops[0] == 0;
bool IsFill = !IsSpill;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const MachineRegisterInfo &MRI = MF.getRegInfo();
MachineBasicBlock &MBB = *MI.getParent();
const MachineOperand &DstMO = MI.getOperand(0);
const MachineOperand &SrcMO = MI.getOperand(1);
unsigned DstReg = DstMO.getReg();
unsigned SrcReg = SrcMO.getReg();
// This is slightly expensive to compute for physical regs since
// getMinimalPhysRegClass is slow.
auto getRegClass = [&](unsigned Reg) {
return TargetRegisterInfo::isVirtualRegister(Reg)
? MRI.getRegClass(Reg)
: TRI.getMinimalPhysRegClass(Reg);
};
if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
"Mismatched register size in non subreg COPY");
if (IsSpill)
storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
getRegClass(SrcReg), &TRI);
else
loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
getRegClass(DstReg), &TRI);
return &*--InsertPt;
}
// Handle cases like spilling def of:
//
// %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
//
// where the physical register source can be widened and stored to the full
// virtual reg destination stack slot, in this case producing:
//
// STRXui %xzr, %stack.0
//
if (IsSpill && DstMO.isUndef() &&
TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
assert(SrcMO.getSubReg() == 0 &&
"Unexpected subreg on physical register");
const TargetRegisterClass *SpillRC;
unsigned SpillSubreg;
switch (DstMO.getSubReg()) {
default:
SpillRC = nullptr;
break;
case AArch64::sub_32:
case AArch64::ssub:
if (AArch64::GPR32RegClass.contains(SrcReg)) {
SpillRC = &AArch64::GPR64RegClass;
SpillSubreg = AArch64::sub_32;
} else if (AArch64::FPR32RegClass.contains(SrcReg)) {
SpillRC = &AArch64::FPR64RegClass;
SpillSubreg = AArch64::ssub;
} else
SpillRC = nullptr;
break;
case AArch64::dsub:
if (AArch64::FPR64RegClass.contains(SrcReg)) {
SpillRC = &AArch64::FPR128RegClass;
SpillSubreg = AArch64::dsub;
} else
SpillRC = nullptr;
break;
}
if (SpillRC)
if (unsigned WidenedSrcReg =
TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
FrameIndex, SpillRC, &TRI);
return &*--InsertPt;
}
}
// Handle cases like filling use of:
//
// %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
//
// where we can load the full virtual reg source stack slot, into the subreg
// destination, in this case producing:
//
// LDRWui %0:sub_32<def,read-undef>, %stack.0
//
if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
const TargetRegisterClass *FillRC;
switch (DstMO.getSubReg()) {
default:
FillRC = nullptr;
break;
case AArch64::sub_32:
FillRC = &AArch64::GPR32RegClass;
break;
case AArch64::ssub:
FillRC = &AArch64::FPR32RegClass;
break;
case AArch64::dsub:
FillRC = &AArch64::FPR64RegClass;
break;
}
if (FillRC) {
assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
TRI.getRegSizeInBits(*FillRC) &&
"Mismatched regclass size on folded subreg COPY");
loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
MachineInstr &LoadMI = *--InsertPt;
MachineOperand &LoadDst = LoadMI.getOperand(0);
assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
LoadDst.setSubReg(DstMO.getSubReg());
LoadDst.setIsUndef();
return &LoadMI;
}
}
}
// Cannot fold.
return nullptr;
}
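// Check whether the frame offset referenced by \p MI can be encoded in its
// addressing mode. On return \p Offset holds any residual amount that could
// not be folded; the optional out parameters report the emittable offset and
// whether the unscaled opcode variant should be used.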
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
bool *OutUseUnscaledOp,
unsigned *OutUnscaledOp,
int *EmittableOffset) {
int Scale = 1;
bool IsSigned = false;
// The ImmIdx should be changed case by case if it is not 2.
unsigned ImmIdx = 2;
unsigned UnscaledOp = 0;
// Set output values in case of early exit.
if (EmittableOffset)
*EmittableOffset = 0;
if (OutUseUnscaledOp)
*OutUseUnscaledOp = false;
if (OutUnscaledOp)
*OutUnscaledOp = 0;
switch (MI.getOpcode()) {
default:
llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
// Vector spills/fills can't take an immediate offset.
case AArch64::LD1Twov2d:
case AArch64::LD1Threev2d:
case AArch64::LD1Fourv2d:
case AArch64::LD1Twov1d:
case AArch64::LD1Threev1d:
case AArch64::LD1Fourv1d:
case AArch64::ST1Twov2d:
case AArch64::ST1Threev2d:
case AArch64::ST1Fourv2d:
case AArch64::ST1Twov1d:
case AArch64::ST1Threev1d:
case AArch64::ST1Fourv1d:
return AArch64FrameOffsetCannotUpdate;
case AArch64::PRFMui:
Scale = 8;
UnscaledOp = AArch64::PRFUMi;
break;
case AArch64::LDRXui:
Scale = 8;
UnscaledOp = AArch64::LDURXi;
break;
case AArch64::LDRWui:
Scale = 4;
UnscaledOp = AArch64::LDURWi;
break;
case AArch64::LDRBui:
Scale = 1;
UnscaledOp = AArch64::LDURBi;
break;
case AArch64::LDRHui:
Scale = 2;
UnscaledOp = AArch64::LDURHi;
break;
case AArch64::LDRSui:
Scale = 4;
UnscaledOp = AArch64::LDURSi;
break;
case AArch64::LDRDui:
Scale = 8;
UnscaledOp = AArch64::LDURDi;
break;
case AArch64::LDRQui:
Scale = 16;
UnscaledOp = AArch64::LDURQi;
break;
case AArch64::LDRBBui:
Scale = 1;
UnscaledOp = AArch64::LDURBBi;
break;
case AArch64::LDRHHui:
Scale = 2;
UnscaledOp = AArch64::LDURHHi;
break;
case AArch64::LDRSBXui:
Scale = 1;
UnscaledOp = AArch64::LDURSBXi;
break;
case AArch64::LDRSBWui:
Scale = 1;
UnscaledOp = AArch64::LDURSBWi;
break;
case AArch64::LDRSHXui:
Scale = 2;
UnscaledOp = AArch64::LDURSHXi;
break;
case AArch64::LDRSHWui:
Scale = 2;
UnscaledOp = AArch64::LDURSHWi;
break;
case AArch64::LDRSWui:
Scale = 4;
UnscaledOp = AArch64::LDURSWi;
break;
case AArch64::STRXui:
Scale = 8;
UnscaledOp = AArch64::STURXi;
break;
case AArch64::STRWui:
Scale = 4;
UnscaledOp = AArch64::STURWi;
break;
case AArch64::STRBui:
Scale = 1;
UnscaledOp = AArch64::STURBi;
break;
case AArch64::STRHui:
Scale = 2;
UnscaledOp = AArch64::STURHi;
break;
case AArch64::STRSui:
Scale = 4;
UnscaledOp = AArch64::STURSi;
break;
case AArch64::STRDui:
Scale = 8;
UnscaledOp = AArch64::STURDi;
break;
case AArch64::STRQui:
Scale = 16;
UnscaledOp = AArch64::STURQi;
break;
case AArch64::STRBBui:
Scale = 1;
UnscaledOp = AArch64::STURBBi;
break;
case AArch64::STRHHui:
Scale = 2;
UnscaledOp = AArch64::STURHHi;
break;
case AArch64::LDPXi:
case AArch64::LDPDi:
case AArch64::STPXi:
case AArch64::STPDi:
case AArch64::LDNPXi:
case AArch64::LDNPDi:
case AArch64::STNPXi:
case AArch64::STNPDi:
ImmIdx = 3;
IsSigned = true;
Scale = 8;
break;
case AArch64::LDPQi:
case AArch64::STPQi:
case AArch64::LDNPQi:
case AArch64::STNPQi:
ImmIdx = 3;
IsSigned = true;
Scale = 16;
break;
case AArch64::LDPWi:
case AArch64::LDPSi:
case AArch64::STPWi:
case AArch64::STPSi:
case AArch64::LDNPWi:
case AArch64::LDNPSi:
case AArch64::STNPWi:
case AArch64::STNPSi:
ImmIdx = 3;
IsSigned = true;
Scale = 4;
break;
case AArch64::LDURXi:
case AArch64::LDURWi:
case AArch64::LDURBi:
case AArch64::LDURHi:
case AArch64::LDURSi:
case AArch64::LDURDi:
case AArch64::LDURQi:
case AArch64::LDURHHi:
case AArch64::LDURBBi:
case AArch64::LDURSBXi:
case AArch64::LDURSBWi:
case AArch64::LDURSHXi:
case AArch64::LDURSHWi:
case AArch64::LDURSWi:
case AArch64::STURXi:
case AArch64::STURWi:
case AArch64::STURBi:
case AArch64::STURHi:
case AArch64::STURSi:
case AArch64::STURDi:
case AArch64::STURQi:
case AArch64::STURBBi:
case AArch64::STURHHi:
Scale = 1;
break;
}
Offset += MI.getOperand(ImmIdx).getImm() * Scale;
bool useUnscaledOp = false;
// If the offset doesn't match the scale, we rewrite the instruction to
// use the unscaled instruction instead. Likewise, if we have a negative
// offset (and have an unscaled op to use).
if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
useUnscaledOp = true;
// Use an unscaled addressing mode if the instruction has a negative offset
// (or if the instruction is already using an unscaled addressing mode).
unsigned MaskBits;
if (IsSigned) {
// ldp/stp instructions.
MaskBits = 7;
Offset /= Scale;
} else if (UnscaledOp == 0 || useUnscaledOp) {
MaskBits = 9;
IsSigned = true;
Scale = 1;
} else {
MaskBits = 12;
IsSigned = false;
Offset /= Scale;
}
// Attempt to fold address computation.
int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
if (Offset >= MinOff && Offset <= MaxOff) {
if (EmittableOffset)
*EmittableOffset = Offset;
Offset = 0;
} else {
int NewOff = Offset < 0 ? MinOff : MaxOff;
if (EmittableOffset)
*EmittableOffset = NewOff;
Offset = (Offset - NewOff) * Scale;
}
if (OutUseUnscaledOp)
*OutUseUnscaledOp = useUnscaledOp;
if (OutUnscaledOp)
*OutUnscaledOp = UnscaledOp;
return AArch64FrameOffsetCanUpdate |
(Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
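// Rewrite \p MI to reference \p FrameReg plus \p Offset instead of its frame
// index, switching to the unscaled opcode where required. Returns true if the
// offset was folded completely into the instruction.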
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const AArch64InstrInfo *TII) {
unsigned Opcode = MI.getOpcode();
unsigned ImmIdx = FrameRegIdx + 1;
if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
Offset += MI.getOperand(ImmIdx).getImm();
emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
MI.getOperand(0).getReg(), FrameReg, Offset, TII,
MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
MI.eraseFromParent();
Offset = 0;
return true;
}
int NewOffset;
unsigned UnscaledOp;
bool UseUnscaledOp;
int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
&UnscaledOp, &NewOffset);
if (Status & AArch64FrameOffsetCanUpdate) {
if (Status & AArch64FrameOffsetIsLegal)
// Replace the FrameIndex with FrameReg.
MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
if (UseUnscaledOp)
MI.setDesc(TII->get(UnscaledOp));
MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
return Offset == 0;
}
return false;
}
void AArch64InstrInfo::getNoop(MCInst &NopInst) const {
NopInst.setOpcode(AArch64::HINT);
NopInst.addOperand(MCOperand::createImm(0));
}
// AArch64 supports MachineCombiner.
bool AArch64InstrInfo::useMachineCombiner() const { return true; }
// True when Opc sets flags
static bool isCombineInstrSettingFlag(unsigned Opc) {
switch (Opc) {
case AArch64::ADDSWrr:
case AArch64::ADDSWri:
case AArch64::ADDSXrr:
case AArch64::ADDSXri:
case AArch64::SUBSWrr:
case AArch64::SUBSXrr:
// Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
case AArch64::SUBSWri:
case AArch64::SUBSXri:
return true;
default:
break;
}
return false;
}
// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
switch (Opc) {
case AArch64::ADDWrr:
case AArch64::ADDWri:
case AArch64::SUBWrr:
case AArch64::ADDSWrr:
case AArch64::ADDSWri:
case AArch64::SUBSWrr:
// Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
case AArch64::SUBWri:
case AArch64::SUBSWri:
return true;
default:
break;
}
return false;
}
// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
switch (Opc) {
case AArch64::ADDXrr:
case AArch64::ADDXri:
case AArch64::SUBXrr:
case AArch64::ADDSXrr:
case AArch64::ADDSXri:
case AArch64::SUBSXrr:
// Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
case AArch64::SUBXri:
case AArch64::SUBSXri:
return true;
default:
break;
}
return false;
}
// FP Opcodes that can be combined with a FMUL
static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
switch (Inst.getOpcode()) {
default:
break;
case AArch64::FADDSrr:
case AArch64::FADDDrr:
case AArch64::FADDv2f32:
case AArch64::FADDv2f64:
case AArch64::FADDv4f32:
case AArch64::FSUBSrr:
case AArch64::FSUBDrr:
case AArch64::FSUBv2f32:
case AArch64::FSUBv2f64:
case AArch64::FSUBv4f32:
TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
return (Options.UnsafeFPMath ||
Options.AllowFPOpFusion == FPOpFusion::Fast);
}
return false;
}
// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
//
// Utility routine that checks if \param MO is defined by an
// \param CombineOpc instruction in the basic block \param MBB
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
unsigned CombineOpc, unsigned ZeroReg = 0,
bool CheckZeroReg = false) {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineInstr *MI = nullptr;
if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
MI = MRI.getUniqueVRegDef(MO.getReg());
// And it needs to be in the trace (otherwise, it won't have a depth).
if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
return false;
// Must only be used by the user we combine with.
if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
return false;
if (CheckZeroReg) {
assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
// The third input reg must be zero.
if (MI->getOperand(3).getReg() != ZeroReg)
return false;
}
return true;
}
//
// Is \param MO defined by an integer multiply and can be combined?
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
unsigned MulOpc, unsigned ZeroReg) {
return canCombine(MBB, MO, MulOpc, ZeroReg, true);
}
//
// Is \param MO defined by a floating-point multiply and can be combined?
static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
unsigned MulOpc) {
return canCombine(MBB, MO, MulOpc);
}
// TODO: There are many more machine instruction opcodes to match:
// 1. Other data types (integer, vectors)
// 2. Other math / logic operations (xor, or)
// 3. Other forms of the same operation (intrinsics and other variants)
bool AArch64InstrInfo::isAssociativeAndCommutative(
const MachineInstr &Inst) const {
switch (Inst.getOpcode()) {
case AArch64::FADDDrr:
case AArch64::FADDSrr:
case AArch64::FADDv2f32:
case AArch64::FADDv2f64:
case AArch64::FADDv4f32:
case AArch64::FMULDrr:
case AArch64::FMULSrr:
case AArch64::FMULX32:
case AArch64::FMULX64:
case AArch64::FMULXv2f32:
case AArch64::FMULXv2f64:
case AArch64::FMULXv4f32:
case AArch64::FMULv2f32:
case AArch64::FMULv2f64:
case AArch64::FMULv4f32:
return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
default:
return false;
}
}
/// Find instructions that can be turned into madd.
static bool getMaddPatterns(MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns) {
unsigned Opc = Root.getOpcode();
MachineBasicBlock &MBB = *Root.getParent();
bool Found = false;
if (!isCombineInstrCandidate(Opc))
return false;
if (isCombineInstrSettingFlag(Opc)) {
int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
// When NZCV is live bail out.
if (Cmp_NZCV == -1)
return false;
unsigned NewOpc = convertToNonFlagSettingOpc(Root);
// When opcode can't change bail out.
// CHECKME: do we miss any cases for opcode conversion?
if (NewOpc == Opc)
return false;
Opc = NewOpc;
}
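// From here on Opc is the non-flag-setting form (e.g. ADDSXrr has been
// rewritten to ADDXrr), so the switch below only needs to handle those
// opcodes.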
switch (Opc) {
default:
break;
case AArch64::ADDWrr:
assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
"ADDWrr does not have register operands");
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
AArch64::WZR)) {
Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
Found = true;
}
if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
AArch64::WZR)) {
Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
Found = true;
}
break;
case AArch64::ADDXrr:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
AArch64::XZR)) {
Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
Found = true;
}
if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
AArch64::XZR)) {
Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
Found = true;
}
break;
case AArch64::SUBWrr:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
AArch64::WZR)) {
Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
Found = true;
}
if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
AArch64::WZR)) {
Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
Found = true;
}
break;
case AArch64::SUBXrr:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
AArch64::XZR)) {
Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
Found = true;
}
if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
AArch64::XZR)) {
Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
Found = true;
}
break;
case AArch64::ADDWri:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
AArch64::WZR)) {
Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
Found = true;
}
break;
case AArch64::ADDXri:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
AArch64::XZR)) {
Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
Found = true;
}
break;
case AArch64::SUBWri:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
AArch64::WZR)) {
Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
Found = true;
}
break;
case AArch64::SUBXri:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
AArch64::XZR)) {
Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
Found = true;
}
break;
}
return Found;
}
/// Floating-Point Support
/// Find instructions that can be turned into madd.
static bool getFMAPatterns(MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns) {
if (!isCombineInstrCandidateFP(Root))
return false;
MachineBasicBlock &MBB = *Root.getParent();
bool Found = false;
switch (Root.getOpcode()) {
default:
assert(false && "Unsupported FP instruction in combiner\n");
break;
case AArch64::FADDSrr:
assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
"FADDWrr does not have register operands");
if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv1i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv1i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
Found = true;
}
break;
case AArch64::FADDDrr:
if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv1i64_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv1i64_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
Found = true;
}
break;
case AArch64::FADDv2f32:
if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv2i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv2f32)) {
Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv2i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv2f32)) {
Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
Found = true;
}
break;
case AArch64::FADDv2f64:
if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv2i64_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv2f64)) {
Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv2i64_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv2f64)) {
Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
Found = true;
}
break;
case AArch64::FADDv4f32:
if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv4i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv4f32)) {
Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv4i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv4f32)) {
Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
Found = true;
}
break;
case AArch64::FSUBSrr:
if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv1i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULSrr)) {
Patterns.push_back(MachineCombinerPattern::FNMULSUBS_OP1);
Found = true;
}
break;
case AArch64::FSUBDrr:
if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv1i64_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULDrr)) {
Patterns.push_back(MachineCombinerPattern::FNMULSUBD_OP1);
Found = true;
}
break;
case AArch64::FSUBv2f32:
if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv2i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv2f32)) {
Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv2i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP1);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv2f32)) {
Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP1);
Found = true;
}
break;
case AArch64::FSUBv2f64:
if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv2i64_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv2f64)) {
Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv2i64_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP1);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv2f64)) {
Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP1);
Found = true;
}
break;
case AArch64::FSUBv4f32:
if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv4i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(2),
AArch64::FMULv4f32)) {
Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
Found = true;
}
if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv4i32_indexed)) {
Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP1);
Found = true;
} else if (canCombineWithFMUL(MBB, Root.getOperand(1),
AArch64::FMULv4f32)) {
Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP1);
Found = true;
}
break;
}
return Found;
}
/// Return true when a code sequence can improve throughput. It
/// should be called only for instructions in loops.
/// \param Pattern - combiner pattern
bool AArch64InstrInfo::isThroughputPattern(
MachineCombinerPattern Pattern) const {
switch (Pattern) {
default:
break;
case MachineCombinerPattern::FMULADDS_OP1:
case MachineCombinerPattern::FMULADDS_OP2:
case MachineCombinerPattern::FMULSUBS_OP1:
case MachineCombinerPattern::FMULSUBS_OP2:
case MachineCombinerPattern::FMULADDD_OP1:
case MachineCombinerPattern::FMULADDD_OP2:
case MachineCombinerPattern::FMULSUBD_OP1:
case MachineCombinerPattern::FMULSUBD_OP2:
case MachineCombinerPattern::FNMULSUBS_OP1:
case MachineCombinerPattern::FNMULSUBD_OP1:
case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
case MachineCombinerPattern::FMLAv2f32_OP2:
case MachineCombinerPattern::FMLAv2f32_OP1:
case MachineCombinerPattern::FMLAv2f64_OP1:
case MachineCombinerPattern::FMLAv2f64_OP2:
case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
case MachineCombinerPattern::FMLAv4f32_OP1:
case MachineCombinerPattern::FMLAv4f32_OP2:
case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
case MachineCombinerPattern::FMLSv2f32_OP2:
case MachineCombinerPattern::FMLSv2f64_OP2:
case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
case MachineCombinerPattern::FMLSv4f32_OP2:
return true;
} // end switch (Pattern)
return false;
}
/// Return true when there is potentially a faster code sequence for an
/// instruction chain ending in \p Root. All potential patterns are listed in
/// the \p Pattern vector. Pattern should be sorted in priority order since the
/// pattern evaluator stops checking as soon as it finds a faster sequence.
bool AArch64InstrInfo::getMachineCombinerPatterns(
MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
// Integer patterns
if (getMaddPatterns(Root, Patterns))
return true;
// Floating point patterns
if (getFMAPatterns(Root, Patterns))
return true;
return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
}
enum class FMAInstKind { Default, Indexed, Accumulator };
/// genFusedMultiply - Generate fused multiply instructions.
/// This function supports both integer and floating point instructions.
/// A typical example:
/// F|MUL I=A,B,0
/// F|ADD R,I,C
/// ==> F|MADD R,A,B,C
/// \param MF Containing MachineFunction
/// \param MRI Register information
/// \param TII Target information
/// \param Root is the F|ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the F|MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the f|madd instruction
/// \param RC Register class of operands
/// \param kind The kind of FMA instruction (addressing mode) to be generated
/// \param ReplacedAddend is the result register from the instruction
/// replacing the non-combined operand, if any.
static MachineInstr *
genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
unsigned MaddOpc, const TargetRegisterClass *RC,
FMAInstKind kind = FMAInstKind::Default,
const unsigned *ReplacedAddend = nullptr) {
assert(IdxMulOpd == 1 || IdxMulOpd == 2);
unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
unsigned ResultReg = Root.getOperand(0).getReg();
unsigned SrcReg0 = MUL->getOperand(1).getReg();
bool Src0IsKill = MUL->getOperand(1).isKill();
unsigned SrcReg1 = MUL->getOperand(2).getReg();
bool Src1IsKill = MUL->getOperand(2).isKill();
unsigned SrcReg2;
bool Src2IsKill;
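// The addend either comes from Root's other operand or from a register we
// just created to replace it (e.g. the FNEG result used by the FMLS
// patterns).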
if (ReplacedAddend) {
// If we just generated a new addend, we must be its only use.
SrcReg2 = *ReplacedAddend;
Src2IsKill = true;
} else {
SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
}
if (TargetRegisterInfo::isVirtualRegister(ResultReg))
MRI.constrainRegClass(ResultReg, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
MRI.constrainRegClass(SrcReg0, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
MRI.constrainRegClass(SrcReg1, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
MRI.constrainRegClass(SrcReg2, RC);
MachineInstrBuilder MIB;
if (kind == FMAInstKind::Default)
MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
.addReg(SrcReg0, getKillRegState(Src0IsKill))
.addReg(SrcReg1, getKillRegState(Src1IsKill))
.addReg(SrcReg2, getKillRegState(Src2IsKill));
else if (kind == FMAInstKind::Indexed)
MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
.addReg(SrcReg2, getKillRegState(Src2IsKill))
.addReg(SrcReg0, getKillRegState(Src0IsKill))
.addReg(SrcReg1, getKillRegState(Src1IsKill))
.addImm(MUL->getOperand(3).getImm());
else if (kind == FMAInstKind::Accumulator)
MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
.addReg(SrcReg2, getKillRegState(Src2IsKill))
.addReg(SrcReg0, getKillRegState(Src0IsKill))
.addReg(SrcReg1, getKillRegState(Src1IsKill));
else
assert(false && "Invalid FMA instruction kind \n");
// Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
InsInstrs.push_back(MIB);
return MUL;
}
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
/// MUL I=A,B,0
/// ADD R,I,Imm
/// ==> ORR V, ZR, Imm
/// ==> MADD R,A,B,V
/// \param MF Containing MachineFunction
/// \param MRI Register information
/// \param TII Target information
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
/// \param RC Register class of operands
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs,
unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR,
const TargetRegisterClass *RC) {
assert(IdxMulOpd == 1 || IdxMulOpd == 2);
MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
unsigned ResultReg = Root.getOperand(0).getReg();
unsigned SrcReg0 = MUL->getOperand(1).getReg();
bool Src0IsKill = MUL->getOperand(1).isKill();
unsigned SrcReg1 = MUL->getOperand(2).getReg();
bool Src1IsKill = MUL->getOperand(2).isKill();
if (TargetRegisterInfo::isVirtualRegister(ResultReg))
MRI.constrainRegClass(ResultReg, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
MRI.constrainRegClass(SrcReg0, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
MRI.constrainRegClass(SrcReg1, RC);
if (TargetRegisterInfo::isVirtualRegister(VR))
MRI.constrainRegClass(VR, RC);
MachineInstrBuilder MIB =
BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
.addReg(SrcReg0, getKillRegState(Src0IsKill))
.addReg(SrcReg1, getKillRegState(Src1IsKill))
.addReg(VR);
// Insert the MADD
InsInstrs.push_back(MIB);
return MUL;
}
/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence
void AArch64InstrInfo::genAlternativeCodeSequence(
MachineInstr &Root, MachineCombinerPattern Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
MachineBasicBlock &MBB = *Root.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
MachineInstr *MUL;
const TargetRegisterClass *RC;
unsigned Opc;
switch (Pattern) {
default:
// Reassociate instructions.
TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
DelInstrs, InstrIdxForVirtReg);
return;
case MachineCombinerPattern::MULADDW_OP1:
case MachineCombinerPattern::MULADDX_OP1:
// MUL I=A,B,0
// ADD R,I,C
// ==> MADD R,A,B,C
// --- Create(MADD);
if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
Opc = AArch64::MADDWrrr;
RC = &AArch64::GPR32RegClass;
} else {
Opc = AArch64::MADDXrrr;
RC = &AArch64::GPR64RegClass;
}
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
break;
case MachineCombinerPattern::MULADDW_OP2:
case MachineCombinerPattern::MULADDX_OP2:
// MUL I=A,B,0
// ADD R,C,I
// ==> MADD R,A,B,C
// --- Create(MADD);
if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
Opc = AArch64::MADDWrrr;
RC = &AArch64::GPR32RegClass;
} else {
Opc = AArch64::MADDXrrr;
RC = &AArch64::GPR64RegClass;
}
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
break;
case MachineCombinerPattern::MULADDWI_OP1:
case MachineCombinerPattern::MULADDXI_OP1: {
// MUL I=A,B,0
// ADD R,I,Imm
// ==> ORR V, ZR, Imm
// ==> MADD R,A,B,V
// --- Create(MADD);
const TargetRegisterClass *OrrRC;
unsigned BitSize, OrrOpc, ZeroReg;
if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
OrrOpc = AArch64::ORRWri;
OrrRC = &AArch64::GPR32spRegClass;
BitSize = 32;
ZeroReg = AArch64::WZR;
Opc = AArch64::MADDWrrr;
RC = &AArch64::GPR32RegClass;
} else {
OrrOpc = AArch64::ORRXri;
OrrRC = &AArch64::GPR64spRegClass;
BitSize = 64;
ZeroReg = AArch64::XZR;
Opc = AArch64::MADDXrrr;
RC = &AArch64::GPR64RegClass;
}
unsigned NewVR = MRI.createVirtualRegister(OrrRC);
uint64_t Imm = Root.getOperand(2).getImm();
if (Root.getOperand(3).isImm()) {
unsigned Val = Root.getOperand(3).getImm();
Imm = Imm << Val;
}
uint64_t UImm = SignExtend64(Imm, BitSize);
uint64_t Encoding;
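// Only combine if the (possibly shifted) immediate can be materialized with
// a single ORR, i.e. it is a valid logical immediate; otherwise no
// replacement sequence is generated.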
if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
MachineInstrBuilder MIB1 =
BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
.addReg(ZeroReg)
.addImm(Encoding);
InsInstrs.push_back(MIB1);
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
}
break;
}
case MachineCombinerPattern::MULSUBW_OP1:
case MachineCombinerPattern::MULSUBX_OP1: {
// MUL I=A,B,0
// SUB R,I, C
// ==> SUB V, 0, C
// ==> MADD R,A,B,V // = -C + A*B
// --- Create(MADD);
const TargetRegisterClass *SubRC;
unsigned SubOpc, ZeroReg;
if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
SubOpc = AArch64::SUBWrr;
SubRC = &AArch64::GPR32spRegClass;
ZeroReg = AArch64::WZR;
Opc = AArch64::MADDWrrr;
RC = &AArch64::GPR32RegClass;
} else {
SubOpc = AArch64::SUBXrr;
SubRC = &AArch64::GPR64spRegClass;
ZeroReg = AArch64::XZR;
Opc = AArch64::MADDXrrr;
RC = &AArch64::GPR64RegClass;
}
unsigned NewVR = MRI.createVirtualRegister(SubRC);
// SUB NewVR, 0, C
MachineInstrBuilder MIB1 =
BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
.addReg(ZeroReg)
.add(Root.getOperand(2));
InsInstrs.push_back(MIB1);
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
break;
}
case MachineCombinerPattern::MULSUBW_OP2:
case MachineCombinerPattern::MULSUBX_OP2:
// MUL I=A,B,0
// SUB R,C,I
// ==> MSUB R,A,B,C (computes C - A*B)
// --- Create(MSUB);
if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
Opc = AArch64::MSUBWrrr;
RC = &AArch64::GPR32RegClass;
} else {
Opc = AArch64::MSUBXrrr;
RC = &AArch64::GPR64RegClass;
}
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
break;
case MachineCombinerPattern::MULSUBWI_OP1:
case MachineCombinerPattern::MULSUBXI_OP1: {
// MUL I=A,B,0
// SUB R,I, Imm
// ==> ORR V, ZR, -Imm
// ==> MADD R,A,B,V // = -Imm + A*B
// --- Create(MADD);
const TargetRegisterClass *OrrRC;
unsigned BitSize, OrrOpc, ZeroReg;
if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
OrrOpc = AArch64::ORRWri;
OrrRC = &AArch64::GPR32spRegClass;
BitSize = 32;
ZeroReg = AArch64::WZR;
Opc = AArch64::MADDWrrr;
RC = &AArch64::GPR32RegClass;
} else {
OrrOpc = AArch64::ORRXri;
OrrRC = &AArch64::GPR64spRegClass;
BitSize = 64;
ZeroReg = AArch64::XZR;
Opc = AArch64::MADDXrrr;
RC = &AArch64::GPR64RegClass;
}
unsigned NewVR = MRI.createVirtualRegister(OrrRC);
uint64_t Imm = Root.getOperand(2).getImm();
if (Root.getOperand(3).isImm()) {
unsigned Val = Root.getOperand(3).getImm();
Imm = Imm << Val;
}
uint64_t UImm = SignExtend64(-Imm, BitSize);
uint64_t Encoding;
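// As above, the negated immediate must be encodable as a logical immediate
// for ORR; otherwise the original MUL/SUB sequence is left in place.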
if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
MachineInstrBuilder MIB1 =
BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
.addReg(ZeroReg)
.addImm(Encoding);
InsInstrs.push_back(MIB1);
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
}
break;
}
// Floating Point Support
case MachineCombinerPattern::FMULADDS_OP1:
case MachineCombinerPattern::FMULADDD_OP1:
// MUL I=A,B,0
// ADD R,I,C
// ==> MADD R,A,B,C
// --- Create(MADD);
if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
Opc = AArch64::FMADDSrrr;
RC = &AArch64::FPR32RegClass;
} else {
Opc = AArch64::FMADDDrrr;
RC = &AArch64::FPR64RegClass;
}
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
break;
case MachineCombinerPattern::FMULADDS_OP2:
case MachineCombinerPattern::FMULADDD_OP2:
// FMUL I=A,B,0
// FADD R,C,I
// ==> FMADD R,A,B,C
// --- Create(FMADD);
if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
Opc = AArch64::FMADDSrrr;
RC = &AArch64::FPR32RegClass;
} else {
Opc = AArch64::FMADDDrrr;
RC = &AArch64::FPR64RegClass;
}
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
break;
case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
Opc = AArch64::FMLAv1i32_indexed;
RC = &AArch64::FPR32RegClass;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Indexed);
break;
case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
Opc = AArch64::FMLAv1i32_indexed;
RC = &AArch64::FPR32RegClass;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
break;
case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
Opc = AArch64::FMLAv1i64_indexed;
RC = &AArch64::FPR64RegClass;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Indexed);
break;
case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
Opc = AArch64::FMLAv1i64_indexed;
RC = &AArch64::FPR64RegClass;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
break;
case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
case MachineCombinerPattern::FMLAv2f32_OP1:
RC = &AArch64::FPR64RegClass;
if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
Opc = AArch64::FMLAv2i32_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLAv2f32;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
case MachineCombinerPattern::FMLAv2f32_OP2:
RC = &AArch64::FPR64RegClass;
if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
Opc = AArch64::FMLAv2i32_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLAv2f32;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
case MachineCombinerPattern::FMLAv2f64_OP1:
RC = &AArch64::FPR128RegClass;
if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
Opc = AArch64::FMLAv2i64_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLAv2f64;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
case MachineCombinerPattern::FMLAv2f64_OP2:
RC = &AArch64::FPR128RegClass;
if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
Opc = AArch64::FMLAv2i64_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLAv2f64;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
case MachineCombinerPattern::FMLAv4f32_OP1:
RC = &AArch64::FPR128RegClass;
if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
Opc = AArch64::FMLAv4i32_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLAv4f32;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
case MachineCombinerPattern::FMLAv4f32_OP2:
RC = &AArch64::FPR128RegClass;
if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
Opc = AArch64::FMLAv4i32_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLAv4f32;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMULSUBS_OP1:
case MachineCombinerPattern::FMULSUBD_OP1: {
// FMUL I=A,B,0
// FSUB R,I,C
// ==> FNMSUB R,A,B,C // = -C + A*B
// --- Create(FNMSUB);
if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
Opc = AArch64::FNMSUBSrrr;
RC = &AArch64::FPR32RegClass;
} else {
Opc = AArch64::FNMSUBDrrr;
RC = &AArch64::FPR64RegClass;
}
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
break;
}
case MachineCombinerPattern::FNMULSUBS_OP1:
case MachineCombinerPattern::FNMULSUBD_OP1: {
// FNMUL I=A,B,0
// FSUB R,I,C
// ==> FNMADD R,A,B,C // = -A*B - C
// --- Create(FNMADD);
if (Pattern == MachineCombinerPattern::FNMULSUBS_OP1) {
Opc = AArch64::FNMADDSrrr;
RC = &AArch64::FPR32RegClass;
} else {
Opc = AArch64::FNMADDDrrr;
RC = &AArch64::FPR64RegClass;
}
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
break;
}
case MachineCombinerPattern::FMULSUBS_OP2:
case MachineCombinerPattern::FMULSUBD_OP2: {
// FMUL I=A,B,0
// FSUB R,C,I
// ==> FMSUB R,A,B,C (computes C - A*B)
// --- Create(FMSUB);
if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
Opc = AArch64::FMSUBSrrr;
RC = &AArch64::FPR32RegClass;
} else {
Opc = AArch64::FMSUBDrrr;
RC = &AArch64::FPR64RegClass;
}
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
break;
}
case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
Opc = AArch64::FMLSv1i32_indexed;
RC = &AArch64::FPR32RegClass;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
break;
case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
Opc = AArch64::FMLSv1i64_indexed;
RC = &AArch64::FPR64RegClass;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
break;
case MachineCombinerPattern::FMLSv2f32_OP2:
case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
RC = &AArch64::FPR64RegClass;
if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
Opc = AArch64::FMLSv2i32_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLSv2f32;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMLSv2f64_OP2:
case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
RC = &AArch64::FPR128RegClass;
if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
Opc = AArch64::FMLSv2i64_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLSv2f64;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMLSv4f32_OP2:
case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
RC = &AArch64::FPR128RegClass;
if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
Opc = AArch64::FMLSv4i32_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Indexed);
} else {
Opc = AArch64::FMLSv4f32;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
FMAInstKind::Accumulator);
}
break;
case MachineCombinerPattern::FMLSv2f32_OP1:
case MachineCombinerPattern::FMLSv2i32_indexed_OP1: {
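// FSUB R, (FMUL A,B), C cannot use FMLS directly (FMLS subtracts the
// product from the accumulator), so negate the addend and use FMLA instead:
//   FNEG V, C ; FMLA R, A, B, V // = A*B - C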
RC = &AArch64::FPR64RegClass;
unsigned NewVR = MRI.createVirtualRegister(RC);
MachineInstrBuilder MIB1 =
BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f32), NewVR)
.add(Root.getOperand(2));
InsInstrs.push_back(MIB1);
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP1) {
Opc = AArch64::FMLAv2i32_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Indexed, &NewVR);
} else {
Opc = AArch64::FMLAv2f32;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Accumulator, &NewVR);
}
break;
}
case MachineCombinerPattern::FMLSv4f32_OP1:
case MachineCombinerPattern::FMLSv4i32_indexed_OP1: {
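// Same approach as the v2f32 case above: negate the addend with FNEG, then
// use FMLA.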
RC = &AArch64::FPR128RegClass;
unsigned NewVR = MRI.createVirtualRegister(RC);
MachineInstrBuilder MIB1 =
BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv4f32), NewVR)
.add(Root.getOperand(2));
InsInstrs.push_back(MIB1);
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP1) {
Opc = AArch64::FMLAv4i32_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Indexed, &NewVR);
} else {
Opc = AArch64::FMLAv4f32;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Accumulator, &NewVR);
}
break;
}
case MachineCombinerPattern::FMLSv2f64_OP1:
case MachineCombinerPattern::FMLSv2i64_indexed_OP1: {
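// Same approach as above: negate the addend with FNEG, then use FMLA.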
RC = &AArch64::FPR128RegClass;
unsigned NewVR = MRI.createVirtualRegister(RC);
MachineInstrBuilder MIB1 =
BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f64), NewVR)
.add(Root.getOperand(2));
InsInstrs.push_back(MIB1);
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP1) {
Opc = AArch64::FMLAv2i64_indexed;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Indexed, &NewVR);
} else {
Opc = AArch64::FMLAv2f64;
MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
FMAInstKind::Accumulator, &NewVR);
}
break;
}
} // end switch (Pattern)
// Record MUL and ADD/SUB for deletion
DelInstrs.push_back(MUL);
DelInstrs.push_back(&Root);
}
/// Replace csincr-branch sequence by simple conditional branch
///
/// Examples:
/// 1. \code
/// csinc w9, wzr, wzr, <condition code>
/// tbnz w9, #0, 0x44
/// \endcode
/// to
/// \code
/// b.<inverted condition code>
/// \endcode
///
/// 2. \code
/// csinc w9, wzr, wzr, <condition code>
/// tbz w9, #0, 0x44
/// \endcode
/// to
/// \code
/// b.<condition code>
/// \endcode
///
/// Replace compare and branch sequence by TBZ/TBNZ instruction when the
/// compare's constant operand is power of 2.
///
/// Examples:
/// \code
/// and w8, w8, #0x400
/// cbnz w8, L1
/// \endcode
/// to
/// \code
/// tbnz w8, #10, L1
/// \endcode
///
/// \param MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
bool IsNegativeBranch = false;
bool IsTestAndBranch = false;
unsigned TargetBBInMI = 0;
switch (MI.getOpcode()) {
default:
llvm_unreachable("Unknown branch instruction?");
case AArch64::Bcc:
return false;
case AArch64::CBZW:
case AArch64::CBZX:
TargetBBInMI = 1;
break;
case AArch64::CBNZW:
case AArch64::CBNZX:
TargetBBInMI = 1;
IsNegativeBranch = true;
break;
case AArch64::TBZW:
case AArch64::TBZX:
TargetBBInMI = 2;
IsTestAndBranch = true;
break;
case AArch64::TBNZW:
case AArch64::TBNZX:
TargetBBInMI = 2;
IsNegativeBranch = true;
IsTestAndBranch = true;
break;
}
// So we increment a zero register and test for bits other
// than bit 0? Conservatively bail out in case the verifier
// missed this case.
if (IsTestAndBranch && MI.getOperand(1).getImm())
return false;
// Find Definition.
assert(MI.getParent() && "Incomplete machine instruction\n");
MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
unsigned VReg = MI.getOperand(0).getReg();
if (!TargetRegisterInfo::isVirtualRegister(VReg))
return false;
MachineInstr *DefMI = MRI->getVRegDef(VReg);
// Look through COPY instructions to find definition.
while (DefMI->isCopy()) {
unsigned CopyVReg = DefMI->getOperand(1).getReg();
if (!MRI->hasOneNonDBGUse(CopyVReg))
return false;
if (!MRI->hasOneDef(CopyVReg))
return false;
DefMI = MRI->getVRegDef(CopyVReg);
}
switch (DefMI->getOpcode()) {
default:
return false;
// Fold AND into a TBZ/TBNZ if constant operand is power of 2.
case AArch64::ANDWri:
case AArch64::ANDXri: {
if (IsTestAndBranch)
return false;
if (DefMI->getParent() != MBB)
return false;
if (!MRI->hasOneNonDBGUse(VReg))
return false;
bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
if (!isPowerOf2_64(Mask))
return false;
MachineOperand &MO = DefMI->getOperand(1);
unsigned NewReg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(NewReg))
return false;
assert(!MRI->def_empty(NewReg) && "Register must be defined.");
MachineBasicBlock &RefToMBB = *MBB;
MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
DebugLoc DL = MI.getDebugLoc();
unsigned Imm = Log2_64(Mask);
unsigned Opc = (Imm < 32)
? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
: (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
.addReg(NewReg)
.addImm(Imm)
.addMBB(TBB);
// The register now lives on into the new TBZ/TBNZ, so clear the kill flag.
MO.setIsKill(false);
// For bit positions smaller than 32, we must use the 32-bit (W) variant in
// all cases, since the 64-bit variant cannot encode them.
// Therefore, if the input register is 64-bit, we need to take its
// 32-bit sub-register.
if (!Is32Bit && Imm < 32)
NewMI->getOperand(0).setSubReg(AArch64::sub_32);
MI.eraseFromParent();
return true;
}
// Look for CSINC
case AArch64::CSINCWr:
case AArch64::CSINCXr: {
if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
DefMI->getOperand(2).getReg() == AArch64::WZR) &&
!(DefMI->getOperand(1).getReg() == AArch64::XZR &&
DefMI->getOperand(2).getReg() == AArch64::XZR))
return false;
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
return false;
AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
// Convert only when the condition code is not modified between
// the CSINC and the branch. The CC may be used by other
// instructions in between.
if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
return false;
MachineBasicBlock &RefToMBB = *MBB;
MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
DebugLoc DL = MI.getDebugLoc();
if (IsNegativeBranch)
CC = AArch64CC::getInvertedCondCode(CC);
BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
MI.eraseFromParent();
return true;
}
}
}
std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
const unsigned Mask = AArch64II::MO_FRAGMENT;
return std::make_pair(TF & Mask, TF & ~Mask);
}
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
using namespace AArch64II;
static const std::pair<unsigned, const char *> TargetFlags[] = {
{MO_PAGE, "aarch64-page"}, {MO_PAGEOFF, "aarch64-pageoff"},
{MO_G3, "aarch64-g3"}, {MO_G2, "aarch64-g2"},
{MO_G1, "aarch64-g1"}, {MO_G0, "aarch64-g0"},
{MO_HI12, "aarch64-hi12"}};
return makeArrayRef(TargetFlags);
}
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
using namespace AArch64II;
static const std::pair<unsigned, const char *> TargetFlags[] = {
{MO_COFFSTUB, "aarch64-coffstub"},
{MO_GOT, "aarch64-got"}, {MO_NC, "aarch64-nc"},
{MO_S, "aarch64-s"}, {MO_TLS, "aarch64-tls"},
{MO_DLLIMPORT, "aarch64-dllimport"}};
return makeArrayRef(TargetFlags);
}
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const {
static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
{{MOSuppressPair, "aarch64-suppress-pair"},
{MOStridedAccess, "aarch64-strided-access"}};
return makeArrayRef(TargetFlags);
}
/// Constants defining how certain sequences should be outlined.
/// This encompasses how an outlined function should be called, and what kind of
/// frame should be emitted for that outlined function.
///
/// \p MachineOutlinerDefault implies that the function should be called with
/// a save and restore of LR to the stack.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 3 (save + BL + restore)
/// * Frame construction overhead: 1 (ret)
/// * Requires stack fixups? Yes
///
/// \p MachineOutlinerTailCall implies that the function is being created from
/// a sequence of instructions ending in a return.
///
/// That is,
///
/// I1                             OUTLINED_FUNCTION:
/// I2 --> B OUTLINED_FUNCTION     I1
/// RET                            I2
///                                RET
///
/// * Call construction overhead: 1 (B)
/// * Frame construction overhead: 0 (Return included in sequence)
/// * Requires stack fixups? No
///
/// \p MachineOutlinerNoLRSave implies that the function should be called using
/// a BL instruction, but doesn't require LR to be saved and restored. This
/// happens when LR is known to be dead.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3                                I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 1 (BL)
/// * Frame construction overhead: 1 (RET)
/// * Requires stack fixups? No
///
/// \p MachineOutlinerThunk implies that the function is being created from
/// a sequence of instructions ending in a call. The outlined function is
/// called with a BL instruction, and the outlined function tail-calls the
/// original call destination.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// BL f                              I2
///                                   B f
/// * Call construction overhead: 1 (BL)
/// * Frame construction overhead: 0
/// * Requires stack fixups? No
///
/// \p MachineOutlinerRegSave implies that the function should be called with a
/// save and restore of LR to an available register. This allows us to avoid
/// stack fixups. Note that this outlining variant is compatible with the
/// NoLRSave case.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 3 (save + BL + restore)
/// * Frame construction overhead: 1 (ret)
/// * Requires stack fixups? No
enum MachineOutlinerClass {
MachineOutlinerDefault, /// Emit a save, restore, call, and return.
MachineOutlinerTailCall, /// Only emit a branch.
MachineOutlinerNoLRSave, /// Emit a call and return.
MachineOutlinerThunk, /// Emit a call and tail-call.
MachineOutlinerRegSave /// Same as default, but save to a register.
};
enum MachineOutlinerMBBFlags {
LRUnavailableSomewhere = 0x2,
HasCalls = 0x4,
UnsafeRegsDead = 0x8
};
unsigned
AArch64InstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
assert(C.LRUWasSet && "LRU wasn't set?");
MachineFunction *MF = C.getMF();
const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
MF->getSubtarget().getRegisterInfo());
// Check if there is an available register across the sequence that we can
// use.
for (unsigned Reg : AArch64::GPR64RegClass) {
if (!ARI->isReservedReg(*MF, Reg) &&
Reg != AArch64::LR && // LR is not reserved, but don't use it.
Reg != AArch64::X16 && // X16 is not guaranteed to be preserved.
Reg != AArch64::X17 && // Ditto for X17.
C.LRU.available(Reg) && C.UsedInSequence.available(Reg))
return Reg;
}
// No suitable register. Return 0.
return 0u;
}
outliner::OutlinedFunction
AArch64InstrInfo::getOutliningCandidateInfo(
std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
unsigned SequenceSize =
std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
[this](unsigned Sum, const MachineInstr &MI) {
return Sum + getInstSizeInBytes(MI);
});
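// All candidates are identical sequences, so the size computed for the
// first candidate applies to every one of them.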
// Properties about candidate MBBs that hold for all of them.
unsigned FlagsSetInAll = 0xF;
// Compute liveness information for each candidate, and set FlagsSetInAll.
const TargetRegisterInfo &TRI = getRegisterInfo();
std::for_each(RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
[&FlagsSetInAll](outliner::Candidate &C) {
FlagsSetInAll &= C.Flags;
});
// According to the AArch64 Procedure Call Standard, the following are
// undefined on entry/exit from a function call:
//
// * Registers x16, x17, (and thus w16, w17)
// * Condition codes (and thus the NZCV register)
//
// Because of this, we can't outline any sequence of instructions where one
// of these registers is live into/across it. Thus, we need to delete those
// candidates.
auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
// If the unsafe registers in this block are all dead, then we don't need
// to compute liveness here.
if (C.Flags & UnsafeRegsDead)
return false;
C.initLRU(TRI);
LiveRegUnits LRU = C.LRU;
return (!LRU.available(AArch64::W16) || !LRU.available(AArch64::W17) ||
!LRU.available(AArch64::NZCV));
};
// Are there any candidates where those registers are live?
if (!(FlagsSetInAll & UnsafeRegsDead)) {
// Erase every candidate that violates the restrictions above. (It could be
// true that we have viable candidates, so it's not worth bailing out in
// the case that, say, 1 out of 20 candidates violate the restrictions.)
RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
RepeatedSequenceLocs.end(),
CantGuaranteeValueAcrossCall),
RepeatedSequenceLocs.end());
// If the sequence doesn't have enough candidates left, then we're done.
if (RepeatedSequenceLocs.size() < 2)
return outliner::OutlinedFunction();
}
// At this point, we have only "safe" candidates to outline. Figure out
// frame + call instruction information.
unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();
// Helper lambda which sets call information for every candidate.
auto SetCandidateCallInfo =
[&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
for (outliner::Candidate &C : RepeatedSequenceLocs)
C.setCallInfo(CallID, NumBytesForCall);
};
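// Assume the default frame (which ends in a single 4-byte RET) until we
// know more about the candidates.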
unsigned FrameID = MachineOutlinerDefault;
unsigned NumBytesToCreateFrame = 4;
bool HasBTI = any_of(RepeatedSequenceLocs, [](outliner::Candidate &C) {
return C.getMF()->getFunction().hasFnAttribute("branch-target-enforcement");
});
// Returns true if an instruction is safe to fix up, false otherwise.
auto IsSafeToFixup = [this, &TRI](MachineInstr &MI) {
if (MI.isCall())
return true;
if (!MI.modifiesRegister(AArch64::SP, &TRI) &&
!MI.readsRegister(AArch64::SP, &TRI))
return true;
// Any modification of SP will break our code to save/restore LR.
// FIXME: We could handle some instructions which add a constant
// offset to SP, with a bit more work.
if (MI.modifiesRegister(AArch64::SP, &TRI))
return false;
// At this point, we have a stack instruction that we might need to
// fix up. We'll handle it if it's a load or store.
if (MI.mayLoadOrStore()) {
MachineOperand *Base; // Filled with the base operand of MI.
int64_t Offset; // Filled with the offset of MI.
// Does it allow us to offset the base operand and is the base the
// register SP?
if (!getMemOperandWithOffset(MI, Base, Offset, &TRI) || !Base->isReg() ||
Base->getReg() != AArch64::SP)
return false;
// Find the minimum/maximum offset for this instruction and check
// if fixing it up would be in range.
int64_t MinOffset,
MaxOffset; // Unscaled offsets for the instruction.
unsigned Scale; // The scale to multiply the offsets by.
unsigned DummyWidth;
getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset);
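// Saving LR for the outlined frame moves SP down by 16 bytes (keeping the
// stack 16-byte aligned), so every SP-relative offset grows by 16 once the
// code is outlined.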
Offset += 16; // Update the offset to what it would be if we outlined.
if (Offset < MinOffset * Scale || Offset > MaxOffset * Scale)
return false;
// It's in range, so we can outline it.
return true;
}
// FIXME: Add handling for instructions like "add x0, sp, #8".
// We can't fix it up, so don't outline it.
return false;
};
// True if it's possible to fix up each stack instruction in this sequence.
// Important for frames/call variants that modify the stack.
bool AllStackInstrsSafe = std::all_of(
FirstCand.front(), std::next(FirstCand.back()), IsSafeToFixup);
// If the last instruction in any candidate is a terminator, then we should
// tail call all of the candidates.
if (RepeatedSequenceLocs[0].back()->isTerminator()) {
FrameID = MachineOutlinerTailCall;
NumBytesToCreateFrame = 0;
SetCandidateCallInfo(MachineOutlinerTailCall, 4);
}
else if (LastInstrOpcode == AArch64::BL ||
(LastInstrOpcode == AArch64::BLR && !HasBTI)) {
// FIXME: Do we need to check if the code after this uses the value of LR?
FrameID = MachineOutlinerThunk;
NumBytesToCreateFrame = 0;
SetCandidateCallInfo(MachineOutlinerThunk, 4);
}
else {
// We need to decide how to emit calls + frames. We can always emit the same
// frame if we don't need to save to the stack. If we have to save to the
// stack, then we need a different frame.
unsigned NumBytesNoStackCalls = 0;
std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
for (outliner::Candidate &C : RepeatedSequenceLocs) {
C.initLRU(TRI);
// Is LR available? If so, we don't need a save.
if (C.LRU.available(AArch64::LR)) {
NumBytesNoStackCalls += 4;
C.setCallInfo(MachineOutlinerNoLRSave, 4);
CandidatesWithoutStackFixups.push_back(C);
}
// Is an unused register available? If so, we won't modify the stack, so
// we can outline with the same frame type as those that don't save LR.
else if (findRegisterToSaveLRTo(C)) {
NumBytesNoStackCalls += 12;
C.setCallInfo(MachineOutlinerRegSave, 12);
CandidatesWithoutStackFixups.push_back(C);
}
// Is SP used in the sequence at all? If not, we don't have to modify
// the stack, so we are guaranteed to get the same frame.
else if (C.UsedInSequence.available(AArch64::SP)) {
NumBytesNoStackCalls += 12;
C.setCallInfo(MachineOutlinerDefault, 12);
CandidatesWithoutStackFixups.push_back(C);
}
// If we outline this, we need to modify the stack. Pretend we don't
// outline this by saving all of its bytes.
else {
NumBytesNoStackCalls += SequenceSize;
}
}
// If there are no places where we have to save LR, then note that we
// don't have to update the stack. Otherwise, give every candidate the
// default call type, as long as it's safe to do so.
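// A default call (save LR to the stack, BL, restore LR) costs 12 bytes per
// candidate, so prefer the stack-free variants when they are no more
// expensive overall, or when the stack instructions can't be fixed up.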
if (!AllStackInstrsSafe ||
NumBytesNoStackCalls <= RepeatedSequenceLocs.size() * 12) {
RepeatedSequenceLocs = CandidatesWithoutStackFixups;
FrameID = MachineOutlinerNoLRSave;
} else {
SetCandidateCallInfo(MachineOutlinerDefault, 12);
}
// If we dropped all of the candidates, bail out here.
if (RepeatedSequenceLocs.size() < 2) {
RepeatedSequenceLocs.clear();
return outliner::OutlinedFunction();
}
}
// Does every candidate's MBB contain a call? If so, then we might have a call
// in the range.
if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
// Check if the range contains a call. These require a save + restore of the
// link register.
bool ModStackToSaveLR = false;
if (std::any_of(FirstCand.front(), FirstCand.back(),
[](const MachineInstr &MI) { return MI.isCall(); }))
ModStackToSaveLR = true;
// Handle the last instruction separately. If this is a tail call, then the
// last instruction is a call. We don't want to save + restore in this case.
// However, it could be possible that the last instruction is a call without
// it being valid to tail call this sequence. We should consider this as
// well.
else if (FrameID != MachineOutlinerThunk &&
FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
ModStackToSaveLR = true;
if (ModStackToSaveLR) {
// We can't fix up the stack. Bail out.
if (!AllStackInstrsSafe) {
RepeatedSequenceLocs.clear();
return outliner::OutlinedFunction();
}
// Save + restore LR.
NumBytesToCreateFrame += 8;
}
}
return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
NumBytesToCreateFrame, FrameID);
}
bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
const Function &F = MF.getFunction();
// Can F be deduplicated by the linker? If it can, don't outline from it.
if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
// Don't outline from functions with section markings; the program could
// expect that all the code is in the named section.
// FIXME: Allow outlining from multiple functions with the same section
// marking.
if (F.hasSection())
return false;
// Outlining from functions with redzones is unsafe since the outliner may
// modify the stack. Check if hasRedZone is true or unknown; if yes, don't
// outline from it.
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
if (!AFI || AFI->hasRedZone().getValueOr(true))
return false;
// It's safe to outline from MF.
return true;
}
bool AArch64InstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
unsigned &Flags) const {
// Check if LR is available through all of the MBB. If it's not, then set
// a flag.
assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
"Suitable Machine Function for outlining must track liveness");
LiveRegUnits LRU(getRegisterInfo());
std::for_each(MBB.rbegin(), MBB.rend(),
[&LRU](MachineInstr &MI) { LRU.accumulate(MI); });
// Check if each of the unsafe registers are available...
bool W16AvailableInBlock = LRU.available(AArch64::W16);
bool W17AvailableInBlock = LRU.available(AArch64::W17);
bool NZCVAvailableInBlock = LRU.available(AArch64::NZCV);
// If all of these are dead (and not live out), we know we don't have to check
// them later.
if (W16AvailableInBlock && W17AvailableInBlock && NZCVAvailableInBlock)
Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
// Now, add the live outs to the set.
LRU.addLiveOuts(MBB);
// If any of these registers is available in the MBB, but also a live out of
// the block, then we know outlining is unsafe.
if (W16AvailableInBlock && !LRU.available(AArch64::W16))
return false;
if (W17AvailableInBlock && !LRU.available(AArch64::W17))
return false;
if (NZCVAvailableInBlock && !LRU.available(AArch64::NZCV))
return false;
// Check if there's a call inside this MachineBasicBlock. If there is, then
// set a flag.
if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
Flags |= MachineOutlinerMBBFlags::HasCalls;
MachineFunction *MF = MBB.getParent();
// In the event that we outline, we may have to save LR. If there is an
// available register in the MBB, then we'll always save LR there. Check if
// this is true.
bool CanSaveLR = false;
const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
MF->getSubtarget().getRegisterInfo());
// Check if there is an available register across the sequence that we can
// use.
for (unsigned Reg : AArch64::GPR64RegClass) {
if (!ARI->isReservedReg(*MF, Reg) && Reg != AArch64::LR &&
Reg != AArch64::X16 && Reg != AArch64::X17 && LRU.available(Reg)) {
CanSaveLR = true;
break;
}
}
// Check if we have a register we can save LR to, and if LR was used
// somewhere. If both of those things are true, then we need to evaluate the
// safety of outlining stack instructions later.
if (!CanSaveLR && !LRU.available(AArch64::LR))
Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
return true;
}
outliner::InstrType
AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
unsigned Flags) const {
MachineInstr &MI = *MIT;
MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>();
// Don't outline LOHs.
if (FuncInfo->getLOHRelated().count(&MI))
return outliner::InstrType::Illegal;
// Don't allow debug values to impact outlining type.
if (MI.isDebugInstr() || MI.isIndirectDebugValue())
return outliner::InstrType::Invisible;
// At this point, KILL instructions don't really tell us much so we can go
// ahead and skip over them.
if (MI.isKill())
return outliner::InstrType::Invisible;
// Is this a terminator for a basic block?
if (MI.isTerminator()) {
// Is this the end of a function?
if (MI.getParent()->succ_empty())
return outliner::InstrType::Legal;
// It's not, so don't outline it.
return outliner::InstrType::Illegal;
}
// Make sure none of the operands are un-outlinable.
for (const MachineOperand &MOP : MI.operands()) {
if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
MOP.isTargetIndex())
return outliner::InstrType::Illegal;
// If it uses LR or W30 explicitly, then don't touch it.
if (MOP.isReg() && !MOP.isImplicit() &&
(MOP.getReg() == AArch64::LR || MOP.getReg() == AArch64::W30))
return outliner::InstrType::Illegal;
}
// Special cases for instructions that can always be outlined, but would fail
// the later tests, e.g. ADRPs. They are PC-relative, but can always be
// outlined because they don't require a *specific* value to be in LR.
if (MI.getOpcode() == AArch64::ADRP)
return outliner::InstrType::Legal;
// If MI is a call we might be able to outline it. We don't want to outline
// any calls that rely on the position of items on the stack. When we outline
// something containing a call, we have to emit a save and restore of LR in
// the outlined function. Currently, this always happens by saving LR to the
// stack. Thus, if we outline, say, half the parameters for a function call
// plus the call, then we'll break the callee's expectations for the layout
// of the stack.
//
// FIXME: Allow calls to functions which construct a stack frame, as long
// as they don't access arguments on the stack.
// FIXME: Figure out some way to analyze functions defined in other modules.
// We should be able to compute the memory usage based on the IR calling
// convention, even if we can't see the definition.
if (MI.isCall()) {
// Get the function associated with the call. Look at each operand and find
// the one that represents the callee and get its name.
const Function *Callee = nullptr;
for (const MachineOperand &MOP : MI.operands()) {
if (MOP.isGlobal()) {
Callee = dyn_cast<Function>(MOP.getGlobal());
break;
}
}
// Never outline calls to mcount. There isn't any rule that would require
// this, but the Linux kernel's "ftrace" feature depends on it.
if (Callee && Callee->getName() == "\01_mcount")
return outliner::InstrType::Illegal;
// If we don't know anything about the callee, assume it depends on the
// stack layout of the caller. In that case, it's only legal to outline
// as a tail-call. Whitelist the call instructions we know about so we
// don't get unexpected results with call pseudo-instructions.
auto UnknownCallOutlineType = outliner::InstrType::Illegal;
if (MI.getOpcode() == AArch64::BLR || MI.getOpcode() == AArch64::BL)
UnknownCallOutlineType = outliner::InstrType::LegalTerminator;
if (!Callee)
return UnknownCallOutlineType;
// We have a function we have information about. Check if it's something we
// can safely outline.
MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);
// We don't know what's going on with the callee at all. Don't touch it.
if (!CalleeMF)
return UnknownCallOutlineType;
// Check if we know anything about the callee saves on the function. If we
// don't, then don't touch it, since that implies that we haven't
// computed anything about its stack frame yet.
MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
MFI.getNumObjects() > 0)
return UnknownCallOutlineType;
// At this point, we can say that CalleeMF ought to not pass anything on the
// stack. Therefore, we can outline it.
return outliner::InstrType::Legal;
}
// Don't outline positions.
if (MI.isPosition())
return outliner::InstrType::Illegal;
// Don't touch the link register or W30.
if (MI.readsRegister(AArch64::W30, &getRegisterInfo()) ||
MI.modifiesRegister(AArch64::W30, &getRegisterInfo()))
return outliner::InstrType::Illegal;
return outliner::InstrType::Legal;
}
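For readability, here is a summary of the classification implemented above (an editorial illustration, not part of the patch; the names are the outliner enum values used in the code):
// getOutliningType, roughly:
//   LOH-related instrs, CPI/JTI/CFI/FI/TargetIndex operands,
//   explicit LR/W30 uses, positions, non-return terminators       -> Illegal
//   debug values and KILLs                                        -> Invisible
//   returns (terminator in a block with no successors), ADRP,
//   calls to a known callee with an empty stack frame             -> Legal
//   BL/BLR to a callee we cannot analyze                          -> LegalTerminator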
void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
for (MachineInstr &MI : MBB) {
MachineOperand *Base;
unsigned Width;
int64_t Offset;
// Is this a load or store with an immediate offset with SP as the base?
if (!MI.mayLoadOrStore() ||
!getMemOperandWithOffsetWidth(MI, Base, Offset, Width, &RI) ||
(Base->isReg() && Base->getReg() != AArch64::SP))
continue;
// It is, so we have to fix it up.
unsigned Scale;
int64_t Dummy1, Dummy2;
MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI);
assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2);
assert(Scale != 0 && "Unexpected opcode!");
// We've pushed the return address to the stack, so add 16 to the offset.
// This is safe, since we already checked if it would overflow when we
// checked if this instruction was legal to outline.
int64_t NewImm = (Offset + 16) / Scale;
StackOffsetOperand.setImm(NewImm);
}
}
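A small standalone sketch of the immediate rewrite performed by fixupPostOutline above (illustrative only; the helper name is made up): once LR has been spilled with a 16-byte pre-decrement, every SP-relative byte offset in the outlined body grows by 16 before being re-scaled into the instruction's encoded immediate.
#include <cassert>
#include <cstdint>
// e.g. an STRXui at [sp, #8] (Scale = 8) becomes (8 + 16) / 8 == 3,
// i.e. [sp, #24] once the scaled immediate is printed back out.
static int64_t rescaleOutlinedOffset(int64_t ByteOffset, unsigned Scale) {
  assert(Scale != 0 && "Unexpected opcode!");
  return (ByteOffset + 16) / Scale;
}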
void AArch64InstrInfo::buildOutlinedFrame(
MachineBasicBlock &MBB, MachineFunction &MF,
const outliner::OutlinedFunction &OF) const {
// For thunk outlining, rewrite the last instruction from a call to a
// tail-call.
if (OF.FrameConstructionID == MachineOutlinerThunk) {
MachineInstr *Call = &*--MBB.instr_end();
unsigned TailOpcode;
if (Call->getOpcode() == AArch64::BL) {
TailOpcode = AArch64::TCRETURNdi;
} else {
assert(Call->getOpcode() == AArch64::BLR);
TailOpcode = AArch64::TCRETURNriALL;
}
MachineInstr *TC = BuildMI(MF, DebugLoc(), get(TailOpcode))
.add(Call->getOperand(0))
.addImm(0);
MBB.insert(MBB.end(), TC);
Call->eraseFromParent();
}
// Is there a call in the outlined range?
auto IsNonTailCall = [](MachineInstr &MI) {
return MI.isCall() && !MI.isReturn();
};
if (std::any_of(MBB.instr_begin(), MBB.instr_end(), IsNonTailCall)) {
// Fix up the instructions in the range, since we're going to modify the
// stack.
assert(OF.FrameConstructionID != MachineOutlinerDefault &&
"Can only fix up stack references once");
fixupPostOutline(MBB);
// LR has to be a live in so that we can save it.
MBB.addLiveIn(AArch64::LR);
MachineBasicBlock::iterator It = MBB.begin();
MachineBasicBlock::iterator Et = MBB.end();
if (OF.FrameConstructionID == MachineOutlinerTailCall ||
OF.FrameConstructionID == MachineOutlinerThunk)
Et = std::prev(MBB.end());
// Insert a save before the outlined region
MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
.addReg(AArch64::SP, RegState::Define)
.addReg(AArch64::LR)
.addReg(AArch64::SP)
.addImm(-16);
It = MBB.insert(It, STRXpre);
const TargetSubtargetInfo &STI = MF.getSubtarget();
const MCRegisterInfo *MRI = STI.getRegisterInfo();
unsigned DwarfReg = MRI->getDwarfRegNum(AArch64::LR, true);
// Add a CFI saying the stack was moved 16 B down.
int64_t StackPosEntry =
MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 16));
BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
.addCFIIndex(StackPosEntry)
.setMIFlags(MachineInstr::FrameSetup);
// Add a CFI saying that the LR that we want to find is now 16 B higher than
// before.
int64_t LRPosEntry =
MF.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg, 16));
BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
.addCFIIndex(LRPosEntry)
.setMIFlags(MachineInstr::FrameSetup);
// Insert a restore before the terminator for the function.
MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
.addReg(AArch64::SP, RegState::Define)
.addReg(AArch64::LR, RegState::Define)
.addReg(AArch64::SP)
.addImm(16);
Et = MBB.insert(Et, LDRXpost);
}
// If this is a tail call outlined function, then there's already a return.
if (OF.FrameConstructionID == MachineOutlinerTailCall ||
OF.FrameConstructionID == MachineOutlinerThunk)
return;
// It's not a tail call, so we have to insert the return ourselves.
MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET))
.addReg(AArch64::LR, RegState::Undef);
MBB.insert(MBB.end(), ret);
// Did we have to modify the stack by saving the link register?
if (OF.FrameConstructionID != MachineOutlinerDefault)
return;
// We modified the stack.
// Walk over the basic block and fix up all the stack accesses.
fixupPostOutline(MBB);
}
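As an editorial illustration (not emitted verbatim by this patch), the frame built above for an outlined body that itself contains a non-tail call looks roughly like:
//   OUTLINED_FUNCTION_N:            // LR is added as a live-in
//     str  x30, [sp, #-16]!         // STRXpre + CFI for the 16-byte LR save
//     ...                           // body, SP offsets fixed by fixupPostOutline
//     bl   callee
//     ...
//     ldr  x30, [sp], #16           // LDRXpost restore
//     ret                           // appended unless tail-call/thunk framing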
MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
MachineFunction &MF, const outliner::Candidate &C) const {
// Are we tail calling?
if (C.CallConstructionID == MachineOutlinerTailCall) {
// If yes, then we can just branch to the label.
It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::TCRETURNdi))
.addGlobalAddress(M.getNamedValue(MF.getName()))
.addImm(0));
return It;
}
// Are we saving the link register?
if (C.CallConstructionID == MachineOutlinerNoLRSave ||
C.CallConstructionID == MachineOutlinerThunk) {
// No, so just insert the call.
It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
.addGlobalAddress(M.getNamedValue(MF.getName())));
return It;
}
// We want to return the spot where we inserted the call.
MachineBasicBlock::iterator CallPt;
// Instructions for saving and restoring LR around the call instruction we're
// going to insert.
MachineInstr *Save;
MachineInstr *Restore;
// Can we save to a register?
if (C.CallConstructionID == MachineOutlinerRegSave) {
// FIXME: This logic should be sunk into a target-specific interface so that
// we don't have to recompute the register.
unsigned Reg = findRegisterToSaveLRTo(C);
assert(Reg != 0 && "No callee-saved register available?");
// Save and restore LR from that register.
Save = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), Reg)
.addReg(AArch64::XZR)
.addReg(AArch64::LR)
.addImm(0);
Restore = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), AArch64::LR)
.addReg(AArch64::XZR)
.addReg(Reg)
.addImm(0);
} else {
// We have the default case. Save and restore from SP.
Save = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
.addReg(AArch64::SP, RegState::Define)
.addReg(AArch64::LR)
.addReg(AArch64::SP)
.addImm(-16);
Restore = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
.addReg(AArch64::SP, RegState::Define)
.addReg(AArch64::LR, RegState::Define)
.addReg(AArch64::SP)
.addImm(16);
}
It = MBB.insert(It, Save);
It++;
// Insert the call.
It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
.addGlobalAddress(M.getNamedValue(MF.getName())));
CallPt = It;
It++;
It = MBB.insert(It, Restore);
return CallPt;
}
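For reference, the call-site shapes produced by insertOutlinedCall above (illustrative only):
//   MachineOutlinerTailCall   : b  OUTLINED_FUNCTION_N             (TCRETURNdi)
//   MachineOutlinerNoLRSave
//   / MachineOutlinerThunk    : bl OUTLINED_FUNCTION_N
//   MachineOutlinerRegSave    : mov xN, x30 ; bl ... ; mov x30, xN (ORRXrs pair)
//   MachineOutlinerDefault    : str x30, [sp, #-16]! ; bl ... ; ldr x30, [sp], #16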
bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
MachineFunction &MF) const {
return MF.getFunction().optForMinSize();
}
#define GET_INSTRINFO_HELPERS
#include "AArch64GenInstrInfo.inc"
Index: vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64InstrInfo.h
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64InstrInfo.h (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64InstrInfo.h (revision 344166)
@@ -1,366 +1,370 @@
//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"
namespace llvm {
class AArch64Subtarget;
class AArch64TargetMachine;
static const MachineMemOperand::Flags MOSuppressPair =
MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
MachineMemOperand::MOTargetFlag2;
#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
class AArch64InstrInfo final : public AArch64GenInstrInfo {
const AArch64RegisterInfo RI;
const AArch64Subtarget &Subtarget;
public:
explicit AArch64InstrInfo(const AArch64Subtarget &STI);
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
const AArch64RegisterInfo &getRegisterInfo() const { return RI; }
unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
bool isAsCheapAsAMove(const MachineInstr &MI) const override;
bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
unsigned &DstReg, unsigned &SubIdx) const override;
bool
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// Does this instruction set its full destination register to zero?
static bool isGPRZero(const MachineInstr &MI);
/// Does this instruction rename a GPR without modifying bits?
static bool isGPRCopy(const MachineInstr &MI);
/// Does this instruction rename an FPR without modifying bits?
static bool isFPRCopy(const MachineInstr &MI);
/// Return true if pairing the given load or store is hinted to be
/// unprofitable.
static bool isLdStPairSuppressed(const MachineInstr &MI);
/// Return true if the given load or store is a strided memory access.
static bool isStridedAccess(const MachineInstr &MI);
/// Return true if this is an unscaled load/store.
static bool isUnscaledLdSt(unsigned Opc);
static bool isUnscaledLdSt(MachineInstr &MI) {
return isUnscaledLdSt(MI.getOpcode());
}
/// Return true if the given load or store may be paired with another.
static bool isPairableLdStInst(const MachineInstr &MI);
/// Return the opcode that sets flags when possible. The caller is
/// responsible for ensuring the opc has a flag-setting equivalent.
static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);
/// Return true if this is a load/store that can be potentially paired/merged.
bool isCandidateToMergeOrPair(MachineInstr &MI) const;
/// Hint that pairing the given load or store is unprofitable.
static void suppressLdStPair(MachineInstr &MI);
bool getMemOperandWithOffset(MachineInstr &MI, MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
bool getMemOperandWithOffsetWidth(MachineInstr &MI, MachineOperand *&BaseOp,
int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const;
/// Return the immediate offset of the base register in a load/store \p LdSt.
MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;
/// Returns true if opcode \p Opc is a memory operation. If it is, set
/// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
///
/// For unscaled instructions, \p Scale is set to 1.
bool getMemOpInfo(unsigned Opcode, unsigned &Scale, unsigned &Width,
int64_t &MinOffset, int64_t &MaxOffset) const;
bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2,
unsigned NumLoads) const override;
void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc, unsigned Opcode,
llvm::ArrayRef<unsigned> Indices) const;
+ void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc, unsigned Opcode, unsigned ZeroReg,
+ llvm::ArrayRef<unsigned> Indices) const;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, unsigned SrcReg,
bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, unsigned DestReg,
int FrameIndex, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
// This tells target independent code that it is okay to pass instructions
// with subreg operands to foldMemoryOperandImpl.
bool isSubregFoldable() const override { return true; }
using TargetInstrInfo::foldMemoryOperandImpl;
MachineInstr *
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS = nullptr) const override;
/// \returns true if a branch with opcode \p BranchOpc is capable of jumping
/// to a position \p BrOffset bytes away.
bool isBranchOffsetInRange(unsigned BranchOpc,
int64_t BrOffset) const override;
MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify = false) const override;
unsigned removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved = nullptr) const override;
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
const DebugLoc &DL,
int *BytesAdded = nullptr) const override;
bool
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
unsigned, unsigned, int &, int &, int &) const override;
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Cond, unsigned TrueReg,
unsigned FalseReg) const override;
void getNoop(MCInst &NopInst) const override;
bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const override;
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
bool optimizeCondBranch(MachineInstr &MI) const override;
/// Return true when a code sequence can improve throughput. It
/// should be called only for instructions in loops.
/// \param Pattern - combiner pattern
bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in ``Root``. All potential patterns are
/// listed in the ``Patterns`` array.
bool getMachineCombinerPatterns(
MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns) const override;
/// Return true when Inst is associative and commutative so that it can be
/// reassociated.
bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
/// When getMachineCombinerPatterns() finds patterns, this function generates
/// the instructions that could replace the original code sequence
void genAlternativeCodeSequence(
MachineInstr &Root, MachineCombinerPattern Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
/// AArch64 supports MachineCombiner.
bool useMachineCombiner() const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
std::pair<unsigned, unsigned>
decomposeMachineOperandsTargetFlags(unsigned TF) const override;
ArrayRef<std::pair<unsigned, const char *>>
getSerializableDirectMachineOperandTargetFlags() const override;
ArrayRef<std::pair<unsigned, const char *>>
getSerializableBitmaskMachineOperandTargetFlags() const override;
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
getSerializableMachineMemOperandTargetFlags() const override;
bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const override;
outliner::OutlinedFunction getOutliningCandidateInfo(
std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
outliner::InstrType
getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const override;
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
unsigned &Flags) const override;
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
const outliner::OutlinedFunction &OF) const override;
MachineBasicBlock::iterator
insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
MachineBasicBlock::iterator &It, MachineFunction &MF,
const outliner::Candidate &C) const override;
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
/// Returns true if the instruction has a shift by immediate that can be
/// executed in one cycle less.
static bool isFalkorShiftExtFast(const MachineInstr &MI);
/// Return true if the instruction is a SEH instruction used for unwinding
/// on Windows.
static bool isSEHInstruction(const MachineInstr &MI);
#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"
private:
/// Sets the offsets on outlined instructions in \p MBB which use SP
/// so that they will be valid post-outlining.
///
/// \param MBB A \p MachineBasicBlock in an outlined function.
void fixupPostOutline(MachineBasicBlock &MBB) const;
void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
MachineBasicBlock *TBB,
ArrayRef<MachineOperand> Cond) const;
bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
const MachineRegisterInfo *MRI) const;
/// Returns an unused general-purpose register which can be used for
/// constructing an outlined call if one exists. Returns 0 otherwise.
unsigned findRegisterToSaveLRTo(const outliner::Candidate &C) const;
};
/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset. This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
int Offset, const TargetInstrInfo *TII,
MachineInstr::MIFlag = MachineInstr::NoFlags,
bool SetNZCV = false, bool NeedsWinCFI = false);
/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const AArch64InstrInfo *TII);
/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
};
/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operator, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
bool *OutUseUnscaledOp = nullptr,
unsigned *OutUnscaledOp = nullptr,
int *EmittableOffset = nullptr);
static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }
static inline bool isCondBranchOpcode(int Opc) {
switch (Opc) {
case AArch64::Bcc:
case AArch64::CBZW:
case AArch64::CBZX:
case AArch64::CBNZW:
case AArch64::CBNZX:
case AArch64::TBZW:
case AArch64::TBZX:
case AArch64::TBNZW:
case AArch64::TBNZX:
return true;
default:
return false;
}
}
static inline bool isIndirectBranchOpcode(int Opc) {
return Opc == AArch64::BR;
}
// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X) (X) // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 1-bit
// }
namespace AArch64 {
enum ElementSizeType {
ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
ElementSizeB = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
ElementSizeH = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
ElementSizeS = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
ElementSizeD = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};
enum DestructiveInstType {
DestructiveInstTypeMask = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
NotDestructive = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
Destructive = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
};
#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
}
} // end namespace llvm
#endif
Index: vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64RegisterInfo.td
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64RegisterInfo.td (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64RegisterInfo.td (revision 344166)
@@ -1,1119 +1,1121 @@
//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
list<string> altNames = []>
: Register<n, altNames> {
let HWEncoding = enc;
let Namespace = "AArch64";
let SubRegs = subregs;
}
let Namespace = "AArch64" in {
def sub_32 : SubRegIndex<32>;
def bsub : SubRegIndex<8>;
def hsub : SubRegIndex<16>;
def ssub : SubRegIndex<32>;
def dsub : SubRegIndex<32>;
def sube32 : SubRegIndex<32>;
def subo32 : SubRegIndex<32>;
def qhisub : SubRegIndex<64>;
def qsub : SubRegIndex<64>;
def sube64 : SubRegIndex<64>;
def subo64 : SubRegIndex<64>;
// SVE
def zsub : SubRegIndex<128>;
// Note: zsub_hi should never be used directly because it represents
// the scalable part of the SVE vector and cannot be manipulated as a
// subvector in the same way the lower 128 bits can.
def zsub_hi : SubRegIndex<128>;
// Note: Code depends on these having consecutive numbers
def dsub0 : SubRegIndex<64>;
def dsub1 : SubRegIndex<64>;
def dsub2 : SubRegIndex<64>;
def dsub3 : SubRegIndex<64>;
// Note: Code depends on these having consecutive numbers
def qsub0 : SubRegIndex<128>;
def qsub1 : SubRegIndex<128>;
def qsub2 : SubRegIndex<128>;
def qsub3 : SubRegIndex<128>;
}
let Namespace = "AArch64" in {
def vreg : RegAltNameIndex;
def vlist1 : RegAltNameIndex;
}
//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//
def W0 : AArch64Reg<0, "w0" >, DwarfRegNum<[0]>;
def W1 : AArch64Reg<1, "w1" >, DwarfRegNum<[1]>;
def W2 : AArch64Reg<2, "w2" >, DwarfRegNum<[2]>;
def W3 : AArch64Reg<3, "w3" >, DwarfRegNum<[3]>;
def W4 : AArch64Reg<4, "w4" >, DwarfRegNum<[4]>;
def W5 : AArch64Reg<5, "w5" >, DwarfRegNum<[5]>;
def W6 : AArch64Reg<6, "w6" >, DwarfRegNum<[6]>;
def W7 : AArch64Reg<7, "w7" >, DwarfRegNum<[7]>;
def W8 : AArch64Reg<8, "w8" >, DwarfRegNum<[8]>;
def W9 : AArch64Reg<9, "w9" >, DwarfRegNum<[9]>;
def W10 : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11 : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12 : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13 : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14 : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15 : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16 : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17 : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18 : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19 : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20 : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21 : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22 : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23 : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24 : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25 : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26 : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27 : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28 : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29 : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
def W30 : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;
let SubRegIndices = [sub_32] in {
def X0 : AArch64Reg<0, "x0", [W0]>, DwarfRegAlias<W0>;
def X1 : AArch64Reg<1, "x1", [W1]>, DwarfRegAlias<W1>;
def X2 : AArch64Reg<2, "x2", [W2]>, DwarfRegAlias<W2>;
def X3 : AArch64Reg<3, "x3", [W3]>, DwarfRegAlias<W3>;
def X4 : AArch64Reg<4, "x4", [W4]>, DwarfRegAlias<W4>;
def X5 : AArch64Reg<5, "x5", [W5]>, DwarfRegAlias<W5>;
def X6 : AArch64Reg<6, "x6", [W6]>, DwarfRegAlias<W6>;
def X7 : AArch64Reg<7, "x7", [W7]>, DwarfRegAlias<W7>;
def X8 : AArch64Reg<8, "x8", [W8]>, DwarfRegAlias<W8>;
def X9 : AArch64Reg<9, "x9", [W9]>, DwarfRegAlias<W9>;
def X10 : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
def X11 : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
def X12 : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
def X13 : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
def X14 : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
def X15 : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
def X16 : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
def X17 : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
def X18 : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
def X19 : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
def X20 : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
def X21 : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
def X22 : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
def X23 : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
def X24 : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
def X25 : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
def X26 : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
def X27 : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
def X28 : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
def FP : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
def LR : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
def SP : AArch64Reg<31, "sp", [WSP]>, DwarfRegAlias<WSP>;
def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}
// Condition code register.
def NZCV : AArch64Reg<0, "nzcv">;
// First fault status register
def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;
// GPR register classes with the intersections of GPR32/GPR32sp and
// GPR64/GPR64sp for use by the coalescer.
def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
let AltOrders = [(rotl GPR32common, 8)];
let AltOrderSelect = [{ return 1; }];
}
def GPR64common : RegisterClass<"AArch64", [i64], 64,
(add (sequence "X%u", 0, 28), FP, LR)> {
let AltOrders = [(rotl GPR64common, 8)];
let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which exclude SP/WSP.
def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
let AltOrders = [(rotl GPR32, 8)];
let AltOrderSelect = [{ return 1; }];
}
def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
let AltOrders = [(rotl GPR64, 8)];
let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which include SP/WSP.
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
let AltOrders = [(rotl GPR32sp, 8)];
let AltOrderSelect = [{ return 1; }];
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
let AltOrders = [(rotl GPR64sp, 8)];
let AltOrderSelect = [{ return 1; }];
}
def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;
def GPR64spPlus0Operand : AsmOperandClass {
let Name = "GPR64sp0";
let RenderMethod = "addRegOperands";
let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
let ParserMethod = "tryParseGPR64sp0Operand";
}
def GPR64sp0 : RegisterOperand<GPR64sp> {
let ParserMatchClass = GPR64spPlus0Operand;
}
// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
let GIZeroRegister = XZR;
}
// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions; it is only used as a common
// super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;
// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This is for indirect tail calls to store the address of the destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
X22, X23, X24, X25, X26,
X27, X28, FP, LR)>;
// Restricted set of tail call registers, for use when branch target
// enforcement is enabled. These are the only registers which can be used to
// indirectly branch (not call) to the "BTI c" instruction at the start of a
// BTI-protected function.
def rtcGPR64 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;
// GPR register classes for the post-increment amount of vector load/stores.
// These have alternate printing when Rm=31: a constant immediate value equal
// to the total number of bytes transferred is printed instead.
// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a Post-indexed MCInst should have which
// means the aliases don't trigger.
def GPR64pi1 : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2 : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3 : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4 : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6 : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8 : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;
// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
let CopyCost = -1; // Don't allow copying of status registers.
// CCR is not allocatable.
let isAllocatable = 0;
}
//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//
def B0 : AArch64Reg<0, "b0">, DwarfRegNum<[64]>;
def B1 : AArch64Reg<1, "b1">, DwarfRegNum<[65]>;
def B2 : AArch64Reg<2, "b2">, DwarfRegNum<[66]>;
def B3 : AArch64Reg<3, "b3">, DwarfRegNum<[67]>;
def B4 : AArch64Reg<4, "b4">, DwarfRegNum<[68]>;
def B5 : AArch64Reg<5, "b5">, DwarfRegNum<[69]>;
def B6 : AArch64Reg<6, "b6">, DwarfRegNum<[70]>;
def B7 : AArch64Reg<7, "b7">, DwarfRegNum<[71]>;
def B8 : AArch64Reg<8, "b8">, DwarfRegNum<[72]>;
def B9 : AArch64Reg<9, "b9">, DwarfRegNum<[73]>;
def B10 : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11 : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12 : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13 : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14 : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15 : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16 : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17 : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18 : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19 : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20 : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21 : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22 : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23 : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24 : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25 : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26 : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27 : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28 : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29 : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30 : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31 : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;
let SubRegIndices = [bsub] in {
def H0 : AArch64Reg<0, "h0", [B0]>, DwarfRegAlias<B0>;
def H1 : AArch64Reg<1, "h1", [B1]>, DwarfRegAlias<B1>;
def H2 : AArch64Reg<2, "h2", [B2]>, DwarfRegAlias<B2>;
def H3 : AArch64Reg<3, "h3", [B3]>, DwarfRegAlias<B3>;
def H4 : AArch64Reg<4, "h4", [B4]>, DwarfRegAlias<B4>;
def H5 : AArch64Reg<5, "h5", [B5]>, DwarfRegAlias<B5>;
def H6 : AArch64Reg<6, "h6", [B6]>, DwarfRegAlias<B6>;
def H7 : AArch64Reg<7, "h7", [B7]>, DwarfRegAlias<B7>;
def H8 : AArch64Reg<8, "h8", [B8]>, DwarfRegAlias<B8>;
def H9 : AArch64Reg<9, "h9", [B9]>, DwarfRegAlias<B9>;
def H10 : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11 : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12 : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13 : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14 : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15 : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16 : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17 : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18 : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19 : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20 : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21 : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22 : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23 : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24 : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25 : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
def H26 : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
def H27 : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
def H28 : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
def H29 : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
def H30 : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
def H31 : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}
let SubRegIndices = [hsub] in {
def S0 : AArch64Reg<0, "s0", [H0]>, DwarfRegAlias<B0>;
def S1 : AArch64Reg<1, "s1", [H1]>, DwarfRegAlias<B1>;
def S2 : AArch64Reg<2, "s2", [H2]>, DwarfRegAlias<B2>;
def S3 : AArch64Reg<3, "s3", [H3]>, DwarfRegAlias<B3>;
def S4 : AArch64Reg<4, "s4", [H4]>, DwarfRegAlias<B4>;
def S5 : AArch64Reg<5, "s5", [H5]>, DwarfRegAlias<B5>;
def S6 : AArch64Reg<6, "s6", [H6]>, DwarfRegAlias<B6>;
def S7 : AArch64Reg<7, "s7", [H7]>, DwarfRegAlias<B7>;
def S8 : AArch64Reg<8, "s8", [H8]>, DwarfRegAlias<B8>;
def S9 : AArch64Reg<9, "s9", [H9]>, DwarfRegAlias<B9>;
def S10 : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
def S11 : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
def S12 : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
def S13 : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
def S14 : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
def S15 : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
def S16 : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
def S17 : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
def S18 : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
def S19 : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
def S20 : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
def S21 : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
def S22 : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
def S23 : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
def S24 : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
def S25 : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
def S26 : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
def S27 : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
def S28 : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
def S29 : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
def S30 : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
def S31 : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
}
let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
def D0 : AArch64Reg<0, "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
def D1 : AArch64Reg<1, "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
def D2 : AArch64Reg<2, "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
def D3 : AArch64Reg<3, "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
def D4 : AArch64Reg<4, "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
def D5 : AArch64Reg<5, "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
def D6 : AArch64Reg<6, "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
def D7 : AArch64Reg<7, "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
def D8 : AArch64Reg<8, "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
def D9 : AArch64Reg<9, "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
def D10 : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
def D11 : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
def D12 : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
def D13 : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
def D14 : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
def D15 : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
def D16 : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
def D17 : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
def D18 : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
def D19 : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
def D20 : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
def D21 : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
def D22 : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
def D23 : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
def D24 : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
def D25 : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
def D26 : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
def D27 : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
def D28 : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
def D29 : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
def D30 : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
def D31 : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}
let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
def Q0 : AArch64Reg<0, "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
def Q1 : AArch64Reg<1, "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
def Q2 : AArch64Reg<2, "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
def Q3 : AArch64Reg<3, "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
def Q4 : AArch64Reg<4, "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
def Q5 : AArch64Reg<5, "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
def Q6 : AArch64Reg<6, "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
def Q7 : AArch64Reg<7, "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
def Q8 : AArch64Reg<8, "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
def Q9 : AArch64Reg<9, "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
def Q10 : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
def Q11 : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
def Q12 : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
def Q13 : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
def Q14 : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
def Q15 : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
def Q16 : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
def Q17 : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
def Q18 : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
def Q19 : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
def Q20 : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
def Q21 : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
def Q22 : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
def Q23 : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
def Q24 : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
def Q25 : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
def Q26 : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
def Q27 : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
def Q28 : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
def Q29 : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30 : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31 : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}
def FPR8 : RegisterClass<"AArch64", [untyped], 8, (sequence "B%u", 0, 31)> {
let Size = 8;
}
def FPR16 : RegisterClass<"AArch64", [f16], 16, (sequence "H%u", 0, 31)> {
let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
v1i64, v4f16],
64, (sequence "D%u", 0, 31)>;
// We don't (yet) have an f128 legal type, so don't use that here. We
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here.
def FPR128 : RegisterClass<"AArch64",
[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
v8f16],
128, (sequence "Q%u", 0, 31)>;
// The lower 16 vector registers. Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16],
128, (trunc FPR128, 16)>;
// Pairs, triples, and quads of 64-bit vector registers.
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
[(rotl FPR64, 0), (rotl FPR64, 1),
(rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
[(rotl FPR64, 0), (rotl FPR64, 1),
(rotl FPR64, 2), (rotl FPR64, 3)]>;
def DD : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
let Size = 128;
}
def DDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
let Size = 192;
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
let Size = 256;
}
// Pairs, triples, and quads of 128-bit vector registers.
def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
[(rotl FPR128, 0), (rotl FPR128, 1),
(rotl FPR128, 2)]>;
def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
[(rotl FPR128, 0), (rotl FPR128, 1),
(rotl FPR128, 2), (rotl FPR128, 3)]>;
def QQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
let Size = 256;
}
def QQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
let Size = 384;
}
def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
let Size = 512;
}
// Vector operand versions of the FP registers. Alternate name printing and
// assembler matching.
def VectorReg64AsmOperand : AsmOperandClass {
let Name = "VectorReg64";
let PredicateMethod = "isNeonVectorReg";
}
def VectorReg128AsmOperand : AsmOperandClass {
let Name = "VectorReg128";
let PredicateMethod = "isNeonVectorReg";
}
def V64 : RegisterOperand<FPR64, "printVRegOperand"> {
let ParserMatchClass = VectorReg64AsmOperand;
}
def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
let ParserMatchClass = VectorReg128AsmOperand;
}
def VectorRegLoAsmOperand : AsmOperandClass {
let Name = "VectorRegLo";
let PredicateMethod = "isNeonVectorRegLo";
}
def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
let ParserMatchClass = VectorRegLoAsmOperand;
}
class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
: AsmOperandClass {
let Name = "TypedVectorList" # count # "_" # lanes # eltsize;
let PredicateMethod
= "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
let RenderMethod = "addVectorListOperands<" # vecty # ", " # count # ">";
}
class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
: RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
# eltsize # "'>">;
multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
// With implicit types (probably on instruction instead). E.g. { v0, v1 }
def _64AsmOperand : AsmOperandClass {
let Name = NAME # "64";
let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
}
def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
}
def _128AsmOperand : AsmOperandClass {
let Name = NAME # "128";
let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
}
def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
}
// 64-bit register lists with explicit type.
// { v0.8b, v1.8b }
def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
}
// { v0.4h, v1.4h }
def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
}
// { v0.2s, v1.2s }
def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
}
// { v0.1d, v1.1d }
def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
}
// 128-bit register lists with explicit type
// { v0.16b, v1.16b }
def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
}
// { v0.8h, v1.8h }
def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
}
// { v0.4s, v1.4s }
def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
}
// { v0.2d, v1.2d }
def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
}
// { v0.b, v1.b }
def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
}
// { v0.h, v1.h }
def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
}
// { v0.s, v1.s }
def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
}
// { v0.d, v1.d }
def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
}
}
defm VecListOne : VectorList<1, FPR64, FPR128>;
defm VecListTwo : VectorList<2, DD, QQ>;
defm VecListThree : VectorList<3, DDD, QQQ>;
defm VecListFour : VectorList<4, DDDD, QQQQ>;
class FPRAsmOperand<string RC> : AsmOperandClass {
let Name = "FPRAsmOperand" # RC;
let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
let RenderMethod = "addRegOperands";
}
// Register operand versions of the scalar FP registers.
def FPR8Op : RegisterOperand<FPR8, "printOperand"> {
let ParserMatchClass = FPRAsmOperand<"FPR8">;
}
def FPR16Op : RegisterOperand<FPR16, "printOperand"> {
let ParserMatchClass = FPRAsmOperand<"FPR16">;
}
def FPR32Op : RegisterOperand<FPR32, "printOperand"> {
let ParserMatchClass = FPRAsmOperand<"FPR32">;
}
def FPR64Op : RegisterOperand<FPR64, "printOperand"> {
let ParserMatchClass = FPRAsmOperand<"FPR64">;
}
def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
let ParserMatchClass = FPRAsmOperand<"FPR128">;
}
//===----------------------------------------------------------------------===//
// ARMv8.1a atomic CASP register operands
-def WSeqPairs : RegisterTuples<[sube32, subo32],
- [(rotl GPR32, 0), (rotl GPR32, 1)]>;
-def XSeqPairs : RegisterTuples<[sube64, subo64],
- [(rotl GPR64, 0), (rotl GPR64, 1)]>;
+def WSeqPairs : RegisterTuples<[sube32, subo32],
+ [(decimate (rotl GPR32, 0), 2),
+ (decimate (rotl GPR32, 1), 2)]>;
+def XSeqPairs : RegisterTuples<[sube64, subo64],
+ [(decimate (rotl GPR64, 0), 2),
+ (decimate (rotl GPR64, 1), 2)]>;
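Editorial note on the change above: decimating the rotated sequences by 2 makes the sequential pairs start only at even-numbered registers, which is what the ARMv8.1a CASP instructions require of their register pairs; the old definition also generated odd-aligned pairs.
//   before: W0_W1, W1_W2, W2_W3, ...  (every register starts a pair)
//   after : W0_W1, W2_W3, W4_W5, ...  (only even registers start a pair)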
def WSeqPairsClass : RegisterClass<"AArch64", [untyped], 32,
(add WSeqPairs)>{
let Size = 64;
}
def XSeqPairsClass : RegisterClass<"AArch64", [untyped], 64,
(add XSeqPairs)>{
let Size = 128;
}
let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
}
def WSeqPairClassOperand :
RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
let ParserMatchClass = WSeqPairsAsmOperandClass;
}
def XSeqPairClassOperand :
RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
let ParserMatchClass = XSeqPairsAsmOperandClass;
}
//===----- END: v8.1a atomic CASP register operands -----------------------===//
// SVE predicate registers
def P0 : AArch64Reg<0, "p0">, DwarfRegNum<[48]>;
def P1 : AArch64Reg<1, "p1">, DwarfRegNum<[49]>;
def P2 : AArch64Reg<2, "p2">, DwarfRegNum<[50]>;
def P3 : AArch64Reg<3, "p3">, DwarfRegNum<[51]>;
def P4 : AArch64Reg<4, "p4">, DwarfRegNum<[52]>;
def P5 : AArch64Reg<5, "p5">, DwarfRegNum<[53]>;
def P6 : AArch64Reg<6, "p6">, DwarfRegNum<[54]>;
def P7 : AArch64Reg<7, "p7">, DwarfRegNum<[55]>;
def P8 : AArch64Reg<8, "p8">, DwarfRegNum<[56]>;
def P9 : AArch64Reg<9, "p9">, DwarfRegNum<[57]>;
def P10 : AArch64Reg<10, "p10">, DwarfRegNum<[58]>;
def P11 : AArch64Reg<11, "p11">, DwarfRegNum<[59]>;
def P12 : AArch64Reg<12, "p12">, DwarfRegNum<[60]>;
def P13 : AArch64Reg<13, "p13">, DwarfRegNum<[61]>;
def P14 : AArch64Reg<14, "p14">, DwarfRegNum<[62]>;
def P15 : AArch64Reg<15, "p15">, DwarfRegNum<[63]>;
// The part of SVE registers that don't overlap Neon registers.
// These are only used as part of clobber lists.
def Z0_HI : AArch64Reg<0, "z0_hi">;
def Z1_HI : AArch64Reg<1, "z1_hi">;
def Z2_HI : AArch64Reg<2, "z2_hi">;
def Z3_HI : AArch64Reg<3, "z3_hi">;
def Z4_HI : AArch64Reg<4, "z4_hi">;
def Z5_HI : AArch64Reg<5, "z5_hi">;
def Z6_HI : AArch64Reg<6, "z6_hi">;
def Z7_HI : AArch64Reg<7, "z7_hi">;
def Z8_HI : AArch64Reg<8, "z8_hi">;
def Z9_HI : AArch64Reg<9, "z9_hi">;
def Z10_HI : AArch64Reg<10, "z10_hi">;
def Z11_HI : AArch64Reg<11, "z11_hi">;
def Z12_HI : AArch64Reg<12, "z12_hi">;
def Z13_HI : AArch64Reg<13, "z13_hi">;
def Z14_HI : AArch64Reg<14, "z14_hi">;
def Z15_HI : AArch64Reg<15, "z15_hi">;
def Z16_HI : AArch64Reg<16, "z16_hi">;
def Z17_HI : AArch64Reg<17, "z17_hi">;
def Z18_HI : AArch64Reg<18, "z18_hi">;
def Z19_HI : AArch64Reg<19, "z19_hi">;
def Z20_HI : AArch64Reg<20, "z20_hi">;
def Z21_HI : AArch64Reg<21, "z21_hi">;
def Z22_HI : AArch64Reg<22, "z22_hi">;
def Z23_HI : AArch64Reg<23, "z23_hi">;
def Z24_HI : AArch64Reg<24, "z24_hi">;
def Z25_HI : AArch64Reg<25, "z25_hi">;
def Z26_HI : AArch64Reg<26, "z26_hi">;
def Z27_HI : AArch64Reg<27, "z27_hi">;
def Z28_HI : AArch64Reg<28, "z28_hi">;
def Z29_HI : AArch64Reg<29, "z29_hi">;
def Z30_HI : AArch64Reg<30, "z30_hi">;
def Z31_HI : AArch64Reg<31, "z31_hi">;
// SVE variable-size vector registers
let SubRegIndices = [zsub,zsub_hi] in {
def Z0 : AArch64Reg<0, "z0", [Q0, Z0_HI]>, DwarfRegNum<[96]>;
def Z1 : AArch64Reg<1, "z1", [Q1, Z1_HI]>, DwarfRegNum<[97]>;
def Z2 : AArch64Reg<2, "z2", [Q2, Z2_HI]>, DwarfRegNum<[98]>;
def Z3 : AArch64Reg<3, "z3", [Q3, Z3_HI]>, DwarfRegNum<[99]>;
def Z4 : AArch64Reg<4, "z4", [Q4, Z4_HI]>, DwarfRegNum<[100]>;
def Z5 : AArch64Reg<5, "z5", [Q5, Z5_HI]>, DwarfRegNum<[101]>;
def Z6 : AArch64Reg<6, "z6", [Q6, Z6_HI]>, DwarfRegNum<[102]>;
def Z7 : AArch64Reg<7, "z7", [Q7, Z7_HI]>, DwarfRegNum<[103]>;
def Z8 : AArch64Reg<8, "z8", [Q8, Z8_HI]>, DwarfRegNum<[104]>;
def Z9 : AArch64Reg<9, "z9", [Q9, Z9_HI]>, DwarfRegNum<[105]>;
def Z10 : AArch64Reg<10, "z10", [Q10, Z10_HI]>, DwarfRegNum<[106]>;
def Z11 : AArch64Reg<11, "z11", [Q11, Z11_HI]>, DwarfRegNum<[107]>;
def Z12 : AArch64Reg<12, "z12", [Q12, Z12_HI]>, DwarfRegNum<[108]>;
def Z13 : AArch64Reg<13, "z13", [Q13, Z13_HI]>, DwarfRegNum<[109]>;
def Z14 : AArch64Reg<14, "z14", [Q14, Z14_HI]>, DwarfRegNum<[110]>;
def Z15 : AArch64Reg<15, "z15", [Q15, Z15_HI]>, DwarfRegNum<[111]>;
def Z16 : AArch64Reg<16, "z16", [Q16, Z16_HI]>, DwarfRegNum<[112]>;
def Z17 : AArch64Reg<17, "z17", [Q17, Z17_HI]>, DwarfRegNum<[113]>;
def Z18 : AArch64Reg<18, "z18", [Q18, Z18_HI]>, DwarfRegNum<[114]>;
def Z19 : AArch64Reg<19, "z19", [Q19, Z19_HI]>, DwarfRegNum<[115]>;
def Z20 : AArch64Reg<20, "z20", [Q20, Z20_HI]>, DwarfRegNum<[116]>;
def Z21 : AArch64Reg<21, "z21", [Q21, Z21_HI]>, DwarfRegNum<[117]>;
def Z22 : AArch64Reg<22, "z22", [Q22, Z22_HI]>, DwarfRegNum<[118]>;
def Z23 : AArch64Reg<23, "z23", [Q23, Z23_HI]>, DwarfRegNum<[119]>;
def Z24 : AArch64Reg<24, "z24", [Q24, Z24_HI]>, DwarfRegNum<[120]>;
def Z25 : AArch64Reg<25, "z25", [Q25, Z25_HI]>, DwarfRegNum<[121]>;
def Z26 : AArch64Reg<26, "z26", [Q26, Z26_HI]>, DwarfRegNum<[122]>;
def Z27 : AArch64Reg<27, "z27", [Q27, Z27_HI]>, DwarfRegNum<[123]>;
def Z28 : AArch64Reg<28, "z28", [Q28, Z28_HI]>, DwarfRegNum<[124]>;
def Z29 : AArch64Reg<29, "z29", [Q29, Z29_HI]>, DwarfRegNum<[125]>;
def Z30 : AArch64Reg<30, "z30", [Q30, Z30_HI]>, DwarfRegNum<[126]>;
def Z31 : AArch64Reg<31, "z31", [Q31, Z31_HI]>, DwarfRegNum<[127]>;
}
// Enum describing the element size for destructive operations.
class ElementSizeEnum<bits<3> val> {
bits<3> Value = val;
}
def ElementSizeNone : ElementSizeEnum<0>;
def ElementSizeB : ElementSizeEnum<1>;
def ElementSizeH : ElementSizeEnum<2>;
def ElementSizeS : ElementSizeEnum<3>;
def ElementSizeD : ElementSizeEnum<4>;
def ElementSizeQ : ElementSizeEnum<5>; // Unused
class SVERegOp <string Suffix, AsmOperandClass C,
ElementSizeEnum Size,
RegisterClass RC> : RegisterOperand<RC> {
ElementSizeEnum ElementSize;
let ElementSize = Size;
let PrintMethod = !if(!eq(Suffix, ""),
"printSVERegOp<>",
"printSVERegOp<'" # Suffix # "'>");
let ParserMatchClass = C;
}
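// Illustrative note: the !if above selects the print method from the suffix,
// so a register operand defined with Suffix = "b" is printed with a ".b"
// qualifier (e.g. "z0.b" or "p0.b"), while an empty suffix prints the bare
// register name.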
class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
//******************************************************************************
// SVE predicate register classes.
class PPRClass<int lastreg> : RegisterClass<
"AArch64",
[ nxv16i1, nxv8i1, nxv4i1, nxv2i1 ], 16,
(sequence "P%u", 0, lastreg)> {
let Size = 16;
}
def PPR : PPRClass<15>;
def PPR_3b : PPRClass<7>; // Restricted 3-bit SVE predicate register class.
class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
let Name = "SVE" # name # "Reg";
let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
# Width # ", " # "AArch64::" # RegClass # "RegClassID>";
let DiagnosticType = "InvalidSVE" # name # "Reg";
let RenderMethod = "addRegOperands";
let ParserMethod = "tryParseSVEPredicateVector";
}
def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR", 0>;
def PPRAsmOp8 : PPRAsmOperand<"PredicateB", "PPR", 8>;
def PPRAsmOp16 : PPRAsmOperand<"PredicateH", "PPR", 16>;
def PPRAsmOp32 : PPRAsmOperand<"PredicateS", "PPR", 32>;
def PPRAsmOp64 : PPRAsmOperand<"PredicateD", "PPR", 64>;
def PPRAny : PPRRegOp<"", PPRAsmOpAny, ElementSizeNone, PPR>;
def PPR8 : PPRRegOp<"b", PPRAsmOp8, ElementSizeB, PPR>;
def PPR16 : PPRRegOp<"h", PPRAsmOp16, ElementSizeH, PPR>;
def PPR32 : PPRRegOp<"s", PPRAsmOp32, ElementSizeS, PPR>;
def PPR64 : PPRRegOp<"d", PPRAsmOp64, ElementSizeD, PPR>;
def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b", 0>;
def PPRAsmOp3b8 : PPRAsmOperand<"Predicate3bB", "PPR_3b", 8>;
def PPRAsmOp3b16 : PPRAsmOperand<"Predicate3bH", "PPR_3b", 16>;
def PPRAsmOp3b32 : PPRAsmOperand<"Predicate3bS", "PPR_3b", 32>;
def PPRAsmOp3b64 : PPRAsmOperand<"Predicate3bD", "PPR_3b", 64>;
def PPR3bAny : PPRRegOp<"", PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;
def PPR3b8 : PPRRegOp<"b", PPRAsmOp3b8, ElementSizeB, PPR_3b>;
def PPR3b16 : PPRRegOp<"h", PPRAsmOp3b16, ElementSizeH, PPR_3b>;
def PPR3b32 : PPRRegOp<"s", PPRAsmOp3b32, ElementSizeS, PPR_3b>;
def PPR3b64 : PPRRegOp<"d", PPRAsmOp3b64, ElementSizeD, PPR_3b>;
//******************************************************************************
// SVE vector register class
def ZPR : RegisterClass<"AArch64",
[nxv16i8, nxv8i16, nxv4i32, nxv2i64,
nxv2f16, nxv4f16, nxv8f16,
nxv1f32, nxv2f32, nxv4f32,
nxv1f64, nxv2f64],
128, (sequence "Z%u", 0, 31)> {
let Size = 128;
}
// SVE restricted 4-bit scalable vector register class
def ZPR_4b : RegisterClass<"AArch64",
[nxv16i8, nxv8i16, nxv4i32, nxv2i64,
nxv2f16, nxv4f16, nxv8f16,
nxv1f32, nxv2f32, nxv4f32,
nxv1f64, nxv2f64],
128, (sequence "Z%u", 0, 15)> {
let Size = 128;
}
// SVE restricted 3-bit scalable vector register class
def ZPR_3b : RegisterClass<"AArch64",
[nxv16i8, nxv8i16, nxv4i32, nxv2i64,
nxv2f16, nxv4f16, nxv8f16,
nxv1f32, nxv2f32, nxv4f32,
nxv1f64, nxv2f64],
128, (sequence "Z%u", 0, 7)> {
let Size = 128;
}
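// Illustrative note: the restricted classes exist because some SVE encodings
// only provide a 4-bit (Z0-Z15) or 3-bit (Z0-Z7) field for a vector operand,
// such as the indexed (element) forms of certain multiply-accumulate
// instructions.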
class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
: AsmOperandClass {
let Name = "SVE" # name # "Reg";
let PredicateMethod = "isSVEDataVectorRegOfWidth<"
# Width # ", AArch64::ZPR"
# RegClassSuffix # "RegClassID>";
let RenderMethod = "addRegOperands";
let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
let ParserMethod = "tryParseSVEDataVector<false, "
# !if(!eq(Width, 0), "false", "true") # ">";
}
def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
def ZPRAsmOp8 : ZPRAsmOperand<"VectorB", 8>;
def ZPRAsmOp16 : ZPRAsmOperand<"VectorH", 16>;
def ZPRAsmOp32 : ZPRAsmOperand<"VectorS", 32>;
def ZPRAsmOp64 : ZPRAsmOperand<"VectorD", 64>;
def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ", 128>;
def ZPRAny : ZPRRegOp<"", ZPRAsmOpAny, ElementSizeNone, ZPR>;
def ZPR8 : ZPRRegOp<"b", ZPRAsmOp8, ElementSizeB, ZPR>;
def ZPR16 : ZPRRegOp<"h", ZPRAsmOp16, ElementSizeH, ZPR>;
def ZPR32 : ZPRRegOp<"s", ZPRAsmOp32, ElementSizeS, ZPR>;
def ZPR64 : ZPRRegOp<"d", ZPRAsmOp64, ElementSizeD, ZPR>;
def ZPR128 : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;
def ZPRAsmOp3b8 : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
def ZPRAsmOp3b16 : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
def ZPRAsmOp3b32 : ZPRAsmOperand<"Vector3bS", 32, "_3b">;
def ZPR3b8 : ZPRRegOp<"b", ZPRAsmOp3b8, ElementSizeB, ZPR_3b>;
def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;
def ZPRAsmOp4b16 : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
def ZPRAsmOp4b32 : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
def ZPRAsmOp4b64 : ZPRAsmOperand<"Vector4bD", 64, "_4b">;
def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;
class FPRasZPR<int Width> : AsmOperandClass{
let Name = "FPR" # Width # "asZPR";
let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
}
class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
let ParserMatchClass = FPRasZPR<Width>;
let PrintMethod = "printZPRasFPR<" # Width # ">";
}
def FPR8asZPR : FPRasZPROperand<8>;
def FPR16asZPR : FPRasZPROperand<16>;
def FPR32asZPR : FPRasZPROperand<32>;
def FPR64asZPR : FPRasZPROperand<64>;
def FPR128asZPR : FPRasZPROperand<128>;
let Namespace = "AArch64" in {
def zsub0 : SubRegIndex<128, -1>;
def zsub1 : SubRegIndex<128, -1>;
def zsub2 : SubRegIndex<128, -1>;
def zsub3 : SubRegIndex<128, -1>;
}
// Pairs, triples, and quads of SVE vector registers.
def ZSeqPairs : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
def ZSeqQuads : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;
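// Note: the rotl operands make these tuples wrap around modulo 32, so
// sequences such as Z31_Z0 (a pair starting at Z31) are also members of the
// tuple classes below.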
def ZPR2 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)> {
let Size = 256;
}
def ZPR3 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
let Size = 384;
}
def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
let Size = 512;
}
class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
let Name = "SVEVectorList" # NumRegs # ElementWidth;
let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
let PredicateMethod =
"isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
}
def Z_b : RegisterOperand<ZPR, "printTypedVectorList<0,'b'>"> {
let ParserMatchClass = ZPRVectorList<8, 1>;
}
def Z_h : RegisterOperand<ZPR, "printTypedVectorList<0,'h'>"> {
let ParserMatchClass = ZPRVectorList<16, 1>;
}
def Z_s : RegisterOperand<ZPR, "printTypedVectorList<0,'s'>"> {
let ParserMatchClass = ZPRVectorList<32, 1>;
}
def Z_d : RegisterOperand<ZPR, "printTypedVectorList<0,'d'>"> {
let ParserMatchClass = ZPRVectorList<64, 1>;
}
def ZZ_b : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
let ParserMatchClass = ZPRVectorList<8, 2>;
}
def ZZ_h : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
let ParserMatchClass = ZPRVectorList<16, 2>;
}
def ZZ_s : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
let ParserMatchClass = ZPRVectorList<32, 2>;
}
def ZZ_d : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
let ParserMatchClass = ZPRVectorList<64, 2>;
}
def ZZZ_b : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
let ParserMatchClass = ZPRVectorList<8, 3>;
}
def ZZZ_h : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
let ParserMatchClass = ZPRVectorList<16, 3>;
}
def ZZZ_s : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
let ParserMatchClass = ZPRVectorList<32, 3>;
}
def ZZZ_d : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
let ParserMatchClass = ZPRVectorList<64, 3>;
}
def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
let ParserMatchClass = ZPRVectorList<8, 4>;
}
def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
let ParserMatchClass = ZPRVectorList<16, 4>;
}
def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
let ParserMatchClass = ZPRVectorList<32, 4>;
}
def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
let ParserMatchClass = ZPRVectorList<64, 4>;
}
class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
# !if(ScaleAlwaysSame, "Only", "");
let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
# RegWidth # ", AArch64::ZPRRegClassID, "
# "AArch64_AM::" # ShiftExtend # ", "
# Scale # ", "
# !if(ScaleAlwaysSame, "true", "false")
# ">";
let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
let RenderMethod = "addRegOperands";
let ParserMethod = "tryParseSVEDataVector<true, true>";
}
class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
int RegWidth, int Scale, string Suffix = "">
: RegisterOperand<ZPR> {
let ParserMatchClass =
!cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
let PrintMethod = "printRegWithShiftExtend<"
# !if(SignExtend, "true", "false") # ", "
# Scale # ", "
# !if(IsLSL, "'x'", "'w'") # ", "
# !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
}
foreach RegWidth = [32, 64] in {
// UXTW(8|16|32|64)
def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
def ZPR#RegWidth#AsmOpndExtUXTW8 : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
def ZPR#RegWidth#AsmOpndExtUXTW16 : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
def ZPR#RegWidth#AsmOpndExtUXTW32 : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
def ZPR#RegWidth#AsmOpndExtUXTW64 : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;
def ZPR#RegWidth#ExtUXTW8Only : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
def ZPR#RegWidth#ExtUXTW8 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
def ZPR#RegWidth#ExtUXTW16 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
def ZPR#RegWidth#ExtUXTW32 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
def ZPR#RegWidth#ExtUXTW64 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;
// SXTW(8|16|32|64)
def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
def ZPR#RegWidth#AsmOpndExtSXTW8 : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
def ZPR#RegWidth#AsmOpndExtSXTW16 : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
def ZPR#RegWidth#AsmOpndExtSXTW32 : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
def ZPR#RegWidth#AsmOpndExtSXTW64 : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;
def ZPR#RegWidth#ExtSXTW8Only : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
def ZPR#RegWidth#ExtSXTW8 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
def ZPR#RegWidth#ExtSXTW16 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
def ZPR#RegWidth#ExtSXTW32 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
def ZPR#RegWidth#ExtSXTW64 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;
// LSL(8|16|32|64)
def ZPR#RegWidth#AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
def ZPR#RegWidth#ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
}
class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
let Name = AsmOperandName # Scale;
let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
let DiagnosticType = "Invalid" # AsmOperandName # Scale;
let RenderMethod = "addRegOperands";
let ParserMethod = "tryParseGPROperand<true>";
}
class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{
let ParserMatchClass = !cast<AsmOperandClass>(Name);
let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
}
foreach Scale = [8, 16, 32, 64] in {
def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;
def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;
}
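// As an illustration, the Scale = 8 iteration above expands to operands such
// as GPR64shifted8 (matching GPR64shiftedAsmOpnd8), whose registers are
// printed via printRegWithShiftExtend<false, 8, 'x', 0>.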
Index: vendor/llvm/dist-release_80/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp (revision 344166)
@@ -1,1876 +1,1876 @@
//===- AArch64Disassembler.cpp - Disassembler for AArch64 -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#include "AArch64Disassembler.h"
#include "AArch64ExternalSymbolizer.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/MC/MCDisassembler/MCRelocationInfo.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include <algorithm>
#include <memory>
using namespace llvm;
#define DEBUG_TYPE "aarch64-disassembler"
// Pull DecodeStatus and its enum values into the global namespace.
using DecodeStatus = MCDisassembler::DecodeStatus;
// Forward declare these because the autogenerated code will reference them.
// Definitions are further down.
static DecodeStatus DecodeFPR128RegisterClass(MCInst &Inst,
unsigned RegNo, uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeFPR128_loRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeFPR64RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeFPR32RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeFPR16RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeFPR8RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeGPR64commonRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeGPR64spRegisterClass(MCInst &Inst,
unsigned RegNo, uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeGPR32spRegisterClass(MCInst &Inst,
unsigned RegNo, uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeQQRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeQQQRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeQQQQRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeDDRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeDDDRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeDDDDRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeZPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeZPR_4bRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeZPR_3bRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeZPR2RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeZPR3RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeZPR4RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodePPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodePPR_3bRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeFixedPointScaleImm32(MCInst &Inst, unsigned Imm,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeFixedPointScaleImm64(MCInst &Inst, unsigned Imm,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodePCRelLabel19(MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMemExtend(MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMRSSystemRegister(MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMSRSystemRegister(MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeThreeAddrSRegInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeMoveImmInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeUnsignedLdStInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeSignedLdStInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodePairLdStInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeAddSubERegInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeLogicalImmInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeModImmInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeModImmTiedInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeAdrInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeAddSubImmShift(MCInst &Inst, uint32_t insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeUnconditionalBranch(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeSystemPStateInstruction(MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeTestAndBranch(MCInst &Inst, uint32_t insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeFMOVLaneInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeVecShiftR64Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeVecShiftR64ImmNarrow(MCInst &Inst, unsigned Imm,
uint64_t Addr,
const void *Decoder);
static DecodeStatus DecodeVecShiftR32Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeVecShiftR32ImmNarrow(MCInst &Inst, unsigned Imm,
uint64_t Addr,
const void *Decoder);
static DecodeStatus DecodeVecShiftR16Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeVecShiftR16ImmNarrow(MCInst &Inst, unsigned Imm,
uint64_t Addr,
const void *Decoder);
static DecodeStatus DecodeVecShiftR8Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeVecShiftL64Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeVecShiftL32Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeVecShiftL16Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeVecShiftL8Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeWSeqPairsClassRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Addr,
const void *Decoder);
static DecodeStatus DecodeXSeqPairsClassRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Addr,
const void *Decoder);
static DecodeStatus DecodeSVELogicalImmInstruction(llvm::MCInst &Inst,
uint32_t insn,
uint64_t Address,
const void *Decoder);
template<int Bits>
static DecodeStatus DecodeSImm(llvm::MCInst &Inst, uint64_t Imm,
uint64_t Address, const void *Decoder);
template <int ElementWidth>
static DecodeStatus DecodeImm8OptLsl(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeSVEIncDecImm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeLoadAllocTagArrayInstruction(MCInst &Inst,
uint32_t insn,
uint64_t address,
const void* Decoder);
static bool Check(DecodeStatus &Out, DecodeStatus In) {
switch (In) {
case MCDisassembler::Success:
// Out stays the same.
return true;
case MCDisassembler::SoftFail:
Out = In;
return true;
case MCDisassembler::Fail:
Out = In;
return false;
}
llvm_unreachable("Invalid DecodeStatus!");
}
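// Check() folds a new DecodeStatus into the accumulated one: Success leaves it
// unchanged, SoftFail is recorded but decoding continues, and Fail is recorded
// and stops decoding.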
#include "AArch64GenDisassemblerTables.inc"
#include "AArch64GenInstrInfo.inc"
#define Success MCDisassembler::Success
#define Fail MCDisassembler::Fail
#define SoftFail MCDisassembler::SoftFail
static MCDisassembler *createAArch64Disassembler(const Target &T,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
return new AArch64Disassembler(STI, Ctx);
}
DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
ArrayRef<uint8_t> Bytes,
uint64_t Address,
raw_ostream &OS,
raw_ostream &CS) const {
CommentStream = &CS;
Size = 0;
// We want to read exactly 4 bytes of data.
if (Bytes.size() < 4)
return Fail;
Size = 4;
// Encoded as a little-endian 32-bit word in the stream.
uint32_t Insn =
(Bytes[3] << 24) | (Bytes[2] << 16) | (Bytes[1] << 8) | (Bytes[0] << 0);
// Calling the auto-generated decoder function.
return decodeInstruction(DecoderTable32, MI, Insn, Address, this, STI);
}
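// Worked example (illustrative): the byte stream 0x41 0x00 0x80 0xd2
// reassembles to the 32-bit word 0xd2800041, which the generated table decodes
// as MOVZ and prints as "mov x1, #2".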
static MCSymbolizer *
createAArch64ExternalSymbolizer(const Triple &TT, LLVMOpInfoCallback GetOpInfo,
LLVMSymbolLookupCallback SymbolLookUp,
void *DisInfo, MCContext *Ctx,
std::unique_ptr<MCRelocationInfo> &&RelInfo) {
return new AArch64ExternalSymbolizer(*Ctx, std::move(RelInfo), GetOpInfo,
SymbolLookUp, DisInfo);
}
extern "C" void LLVMInitializeAArch64Disassembler() {
TargetRegistry::RegisterMCDisassembler(getTheAArch64leTarget(),
createAArch64Disassembler);
TargetRegistry::RegisterMCDisassembler(getTheAArch64beTarget(),
createAArch64Disassembler);
TargetRegistry::RegisterMCSymbolizer(getTheAArch64leTarget(),
createAArch64ExternalSymbolizer);
TargetRegistry::RegisterMCSymbolizer(getTheAArch64beTarget(),
createAArch64ExternalSymbolizer);
TargetRegistry::RegisterMCDisassembler(getTheARM64Target(),
createAArch64Disassembler);
TargetRegistry::RegisterMCSymbolizer(getTheARM64Target(),
createAArch64ExternalSymbolizer);
}
static const unsigned FPR128DecoderTable[] = {
AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
AArch64::Q5, AArch64::Q6, AArch64::Q7, AArch64::Q8, AArch64::Q9,
AArch64::Q10, AArch64::Q11, AArch64::Q12, AArch64::Q13, AArch64::Q14,
AArch64::Q15, AArch64::Q16, AArch64::Q17, AArch64::Q18, AArch64::Q19,
AArch64::Q20, AArch64::Q21, AArch64::Q22, AArch64::Q23, AArch64::Q24,
AArch64::Q25, AArch64::Q26, AArch64::Q27, AArch64::Q28, AArch64::Q29,
AArch64::Q30, AArch64::Q31
};
static DecodeStatus DecodeFPR128RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = FPR128DecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
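// The remaining register-class decoders below follow the same pattern:
// range-check the encoded register number, then map it through a lookup table.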
static DecodeStatus DecodeFPR128_loRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 15)
return Fail;
return DecodeFPR128RegisterClass(Inst, RegNo, Addr, Decoder);
}
static const unsigned FPR64DecoderTable[] = {
AArch64::D0, AArch64::D1, AArch64::D2, AArch64::D3, AArch64::D4,
AArch64::D5, AArch64::D6, AArch64::D7, AArch64::D8, AArch64::D9,
AArch64::D10, AArch64::D11, AArch64::D12, AArch64::D13, AArch64::D14,
AArch64::D15, AArch64::D16, AArch64::D17, AArch64::D18, AArch64::D19,
AArch64::D20, AArch64::D21, AArch64::D22, AArch64::D23, AArch64::D24,
AArch64::D25, AArch64::D26, AArch64::D27, AArch64::D28, AArch64::D29,
AArch64::D30, AArch64::D31
};
static DecodeStatus DecodeFPR64RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = FPR64DecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned FPR32DecoderTable[] = {
AArch64::S0, AArch64::S1, AArch64::S2, AArch64::S3, AArch64::S4,
AArch64::S5, AArch64::S6, AArch64::S7, AArch64::S8, AArch64::S9,
AArch64::S10, AArch64::S11, AArch64::S12, AArch64::S13, AArch64::S14,
AArch64::S15, AArch64::S16, AArch64::S17, AArch64::S18, AArch64::S19,
AArch64::S20, AArch64::S21, AArch64::S22, AArch64::S23, AArch64::S24,
AArch64::S25, AArch64::S26, AArch64::S27, AArch64::S28, AArch64::S29,
AArch64::S30, AArch64::S31
};
static DecodeStatus DecodeFPR32RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = FPR32DecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned FPR16DecoderTable[] = {
AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4,
AArch64::H5, AArch64::H6, AArch64::H7, AArch64::H8, AArch64::H9,
AArch64::H10, AArch64::H11, AArch64::H12, AArch64::H13, AArch64::H14,
AArch64::H15, AArch64::H16, AArch64::H17, AArch64::H18, AArch64::H19,
AArch64::H20, AArch64::H21, AArch64::H22, AArch64::H23, AArch64::H24,
AArch64::H25, AArch64::H26, AArch64::H27, AArch64::H28, AArch64::H29,
AArch64::H30, AArch64::H31
};
static DecodeStatus DecodeFPR16RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = FPR16DecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned FPR8DecoderTable[] = {
AArch64::B0, AArch64::B1, AArch64::B2, AArch64::B3, AArch64::B4,
AArch64::B5, AArch64::B6, AArch64::B7, AArch64::B8, AArch64::B9,
AArch64::B10, AArch64::B11, AArch64::B12, AArch64::B13, AArch64::B14,
AArch64::B15, AArch64::B16, AArch64::B17, AArch64::B18, AArch64::B19,
AArch64::B20, AArch64::B21, AArch64::B22, AArch64::B23, AArch64::B24,
AArch64::B25, AArch64::B26, AArch64::B27, AArch64::B28, AArch64::B29,
AArch64::B30, AArch64::B31
};
static DecodeStatus DecodeFPR8RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = FPR8DecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned GPR64DecoderTable[] = {
AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4,
AArch64::X5, AArch64::X6, AArch64::X7, AArch64::X8, AArch64::X9,
AArch64::X10, AArch64::X11, AArch64::X12, AArch64::X13, AArch64::X14,
AArch64::X15, AArch64::X16, AArch64::X17, AArch64::X18, AArch64::X19,
AArch64::X20, AArch64::X21, AArch64::X22, AArch64::X23, AArch64::X24,
AArch64::X25, AArch64::X26, AArch64::X27, AArch64::X28, AArch64::FP,
AArch64::LR, AArch64::XZR
};
static DecodeStatus DecodeGPR64commonRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 30)
return Fail;
unsigned Register = GPR64DecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = GPR64DecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static DecodeStatus DecodeGPR64spRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = GPR64DecoderTable[RegNo];
if (Register == AArch64::XZR)
Register = AArch64::SP;
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned GPR32DecoderTable[] = {
AArch64::W0, AArch64::W1, AArch64::W2, AArch64::W3, AArch64::W4,
AArch64::W5, AArch64::W6, AArch64::W7, AArch64::W8, AArch64::W9,
AArch64::W10, AArch64::W11, AArch64::W12, AArch64::W13, AArch64::W14,
AArch64::W15, AArch64::W16, AArch64::W17, AArch64::W18, AArch64::W19,
AArch64::W20, AArch64::W21, AArch64::W22, AArch64::W23, AArch64::W24,
AArch64::W25, AArch64::W26, AArch64::W27, AArch64::W28, AArch64::W29,
AArch64::W30, AArch64::WZR
};
static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = GPR32DecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static DecodeStatus DecodeGPR32spRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = GPR32DecoderTable[RegNo];
if (Register == AArch64::WZR)
Register = AArch64::WSP;
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned ZPRDecoderTable[] = {
AArch64::Z0, AArch64::Z1, AArch64::Z2, AArch64::Z3,
AArch64::Z4, AArch64::Z5, AArch64::Z6, AArch64::Z7,
AArch64::Z8, AArch64::Z9, AArch64::Z10, AArch64::Z11,
AArch64::Z12, AArch64::Z13, AArch64::Z14, AArch64::Z15,
AArch64::Z16, AArch64::Z17, AArch64::Z18, AArch64::Z19,
AArch64::Z20, AArch64::Z21, AArch64::Z22, AArch64::Z23,
AArch64::Z24, AArch64::Z25, AArch64::Z26, AArch64::Z27,
AArch64::Z28, AArch64::Z29, AArch64::Z30, AArch64::Z31
};
static DecodeStatus DecodeZPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void* Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = ZPRDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static DecodeStatus DecodeZPR_4bRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder) {
if (RegNo > 15)
return Fail;
return DecodeZPRRegisterClass(Inst, RegNo, Address, Decoder);
}
static DecodeStatus DecodeZPR_3bRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder) {
if (RegNo > 7)
return Fail;
return DecodeZPRRegisterClass(Inst, RegNo, Address, Decoder);
}
static const unsigned ZZDecoderTable[] = {
AArch64::Z0_Z1, AArch64::Z1_Z2, AArch64::Z2_Z3, AArch64::Z3_Z4,
AArch64::Z4_Z5, AArch64::Z5_Z6, AArch64::Z6_Z7, AArch64::Z7_Z8,
AArch64::Z8_Z9, AArch64::Z9_Z10, AArch64::Z10_Z11, AArch64::Z11_Z12,
AArch64::Z12_Z13, AArch64::Z13_Z14, AArch64::Z14_Z15, AArch64::Z15_Z16,
AArch64::Z16_Z17, AArch64::Z17_Z18, AArch64::Z18_Z19, AArch64::Z19_Z20,
AArch64::Z20_Z21, AArch64::Z21_Z22, AArch64::Z22_Z23, AArch64::Z23_Z24,
AArch64::Z24_Z25, AArch64::Z25_Z26, AArch64::Z26_Z27, AArch64::Z27_Z28,
AArch64::Z28_Z29, AArch64::Z29_Z30, AArch64::Z30_Z31, AArch64::Z31_Z0
};
static DecodeStatus DecodeZPR2RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void* Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = ZZDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned ZZZDecoderTable[] = {
AArch64::Z0_Z1_Z2, AArch64::Z1_Z2_Z3, AArch64::Z2_Z3_Z4,
AArch64::Z3_Z4_Z5, AArch64::Z4_Z5_Z6, AArch64::Z5_Z6_Z7,
AArch64::Z6_Z7_Z8, AArch64::Z7_Z8_Z9, AArch64::Z8_Z9_Z10,
AArch64::Z9_Z10_Z11, AArch64::Z10_Z11_Z12, AArch64::Z11_Z12_Z13,
AArch64::Z12_Z13_Z14, AArch64::Z13_Z14_Z15, AArch64::Z14_Z15_Z16,
AArch64::Z15_Z16_Z17, AArch64::Z16_Z17_Z18, AArch64::Z17_Z18_Z19,
AArch64::Z18_Z19_Z20, AArch64::Z19_Z20_Z21, AArch64::Z20_Z21_Z22,
AArch64::Z21_Z22_Z23, AArch64::Z22_Z23_Z24, AArch64::Z23_Z24_Z25,
AArch64::Z24_Z25_Z26, AArch64::Z25_Z26_Z27, AArch64::Z26_Z27_Z28,
AArch64::Z27_Z28_Z29, AArch64::Z28_Z29_Z30, AArch64::Z29_Z30_Z31,
AArch64::Z30_Z31_Z0, AArch64::Z31_Z0_Z1
};
static DecodeStatus DecodeZPR3RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void* Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = ZZZDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned ZZZZDecoderTable[] = {
AArch64::Z0_Z1_Z2_Z3, AArch64::Z1_Z2_Z3_Z4, AArch64::Z2_Z3_Z4_Z5,
AArch64::Z3_Z4_Z5_Z6, AArch64::Z4_Z5_Z6_Z7, AArch64::Z5_Z6_Z7_Z8,
AArch64::Z6_Z7_Z8_Z9, AArch64::Z7_Z8_Z9_Z10, AArch64::Z8_Z9_Z10_Z11,
AArch64::Z9_Z10_Z11_Z12, AArch64::Z10_Z11_Z12_Z13, AArch64::Z11_Z12_Z13_Z14,
AArch64::Z12_Z13_Z14_Z15, AArch64::Z13_Z14_Z15_Z16, AArch64::Z14_Z15_Z16_Z17,
AArch64::Z15_Z16_Z17_Z18, AArch64::Z16_Z17_Z18_Z19, AArch64::Z17_Z18_Z19_Z20,
AArch64::Z18_Z19_Z20_Z21, AArch64::Z19_Z20_Z21_Z22, AArch64::Z20_Z21_Z22_Z23,
AArch64::Z21_Z22_Z23_Z24, AArch64::Z22_Z23_Z24_Z25, AArch64::Z23_Z24_Z25_Z26,
AArch64::Z24_Z25_Z26_Z27, AArch64::Z25_Z26_Z27_Z28, AArch64::Z26_Z27_Z28_Z29,
AArch64::Z27_Z28_Z29_Z30, AArch64::Z28_Z29_Z30_Z31, AArch64::Z29_Z30_Z31_Z0,
AArch64::Z30_Z31_Z0_Z1, AArch64::Z31_Z0_Z1_Z2
};
static DecodeStatus DecodeZPR4RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void* Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = ZZZZDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned PPRDecoderTable[] = {
AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3,
AArch64::P4, AArch64::P5, AArch64::P6, AArch64::P7,
AArch64::P8, AArch64::P9, AArch64::P10, AArch64::P11,
AArch64::P12, AArch64::P13, AArch64::P14, AArch64::P15
};
static DecodeStatus DecodePPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr, const void *Decoder) {
if (RegNo > 15)
return Fail;
unsigned Register = PPRDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static DecodeStatus DecodePPR_3bRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void* Decoder) {
if (RegNo > 7)
return Fail;
// Just reuse the PPR decode table
return DecodePPRRegisterClass(Inst, RegNo, Addr, Decoder);
}
static const unsigned VectorDecoderTable[] = {
AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
AArch64::Q5, AArch64::Q6, AArch64::Q7, AArch64::Q8, AArch64::Q9,
AArch64::Q10, AArch64::Q11, AArch64::Q12, AArch64::Q13, AArch64::Q14,
AArch64::Q15, AArch64::Q16, AArch64::Q17, AArch64::Q18, AArch64::Q19,
AArch64::Q20, AArch64::Q21, AArch64::Q22, AArch64::Q23, AArch64::Q24,
AArch64::Q25, AArch64::Q26, AArch64::Q27, AArch64::Q28, AArch64::Q29,
AArch64::Q30, AArch64::Q31
};
static DecodeStatus DecodeVectorRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = VectorDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned QQDecoderTable[] = {
AArch64::Q0_Q1, AArch64::Q1_Q2, AArch64::Q2_Q3, AArch64::Q3_Q4,
AArch64::Q4_Q5, AArch64::Q5_Q6, AArch64::Q6_Q7, AArch64::Q7_Q8,
AArch64::Q8_Q9, AArch64::Q9_Q10, AArch64::Q10_Q11, AArch64::Q11_Q12,
AArch64::Q12_Q13, AArch64::Q13_Q14, AArch64::Q14_Q15, AArch64::Q15_Q16,
AArch64::Q16_Q17, AArch64::Q17_Q18, AArch64::Q18_Q19, AArch64::Q19_Q20,
AArch64::Q20_Q21, AArch64::Q21_Q22, AArch64::Q22_Q23, AArch64::Q23_Q24,
AArch64::Q24_Q25, AArch64::Q25_Q26, AArch64::Q26_Q27, AArch64::Q27_Q28,
AArch64::Q28_Q29, AArch64::Q29_Q30, AArch64::Q30_Q31, AArch64::Q31_Q0
};
static DecodeStatus DecodeQQRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr, const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = QQDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned QQQDecoderTable[] = {
AArch64::Q0_Q1_Q2, AArch64::Q1_Q2_Q3, AArch64::Q2_Q3_Q4,
AArch64::Q3_Q4_Q5, AArch64::Q4_Q5_Q6, AArch64::Q5_Q6_Q7,
AArch64::Q6_Q7_Q8, AArch64::Q7_Q8_Q9, AArch64::Q8_Q9_Q10,
AArch64::Q9_Q10_Q11, AArch64::Q10_Q11_Q12, AArch64::Q11_Q12_Q13,
AArch64::Q12_Q13_Q14, AArch64::Q13_Q14_Q15, AArch64::Q14_Q15_Q16,
AArch64::Q15_Q16_Q17, AArch64::Q16_Q17_Q18, AArch64::Q17_Q18_Q19,
AArch64::Q18_Q19_Q20, AArch64::Q19_Q20_Q21, AArch64::Q20_Q21_Q22,
AArch64::Q21_Q22_Q23, AArch64::Q22_Q23_Q24, AArch64::Q23_Q24_Q25,
AArch64::Q24_Q25_Q26, AArch64::Q25_Q26_Q27, AArch64::Q26_Q27_Q28,
AArch64::Q27_Q28_Q29, AArch64::Q28_Q29_Q30, AArch64::Q29_Q30_Q31,
AArch64::Q30_Q31_Q0, AArch64::Q31_Q0_Q1
};
static DecodeStatus DecodeQQQRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr, const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = QQQDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned QQQQDecoderTable[] = {
AArch64::Q0_Q1_Q2_Q3, AArch64::Q1_Q2_Q3_Q4, AArch64::Q2_Q3_Q4_Q5,
AArch64::Q3_Q4_Q5_Q6, AArch64::Q4_Q5_Q6_Q7, AArch64::Q5_Q6_Q7_Q8,
AArch64::Q6_Q7_Q8_Q9, AArch64::Q7_Q8_Q9_Q10, AArch64::Q8_Q9_Q10_Q11,
AArch64::Q9_Q10_Q11_Q12, AArch64::Q10_Q11_Q12_Q13, AArch64::Q11_Q12_Q13_Q14,
AArch64::Q12_Q13_Q14_Q15, AArch64::Q13_Q14_Q15_Q16, AArch64::Q14_Q15_Q16_Q17,
AArch64::Q15_Q16_Q17_Q18, AArch64::Q16_Q17_Q18_Q19, AArch64::Q17_Q18_Q19_Q20,
AArch64::Q18_Q19_Q20_Q21, AArch64::Q19_Q20_Q21_Q22, AArch64::Q20_Q21_Q22_Q23,
AArch64::Q21_Q22_Q23_Q24, AArch64::Q22_Q23_Q24_Q25, AArch64::Q23_Q24_Q25_Q26,
AArch64::Q24_Q25_Q26_Q27, AArch64::Q25_Q26_Q27_Q28, AArch64::Q26_Q27_Q28_Q29,
AArch64::Q27_Q28_Q29_Q30, AArch64::Q28_Q29_Q30_Q31, AArch64::Q29_Q30_Q31_Q0,
AArch64::Q30_Q31_Q0_Q1, AArch64::Q31_Q0_Q1_Q2
};
static DecodeStatus DecodeQQQQRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = QQQQDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned DDDecoderTable[] = {
AArch64::D0_D1, AArch64::D1_D2, AArch64::D2_D3, AArch64::D3_D4,
AArch64::D4_D5, AArch64::D5_D6, AArch64::D6_D7, AArch64::D7_D8,
AArch64::D8_D9, AArch64::D9_D10, AArch64::D10_D11, AArch64::D11_D12,
AArch64::D12_D13, AArch64::D13_D14, AArch64::D14_D15, AArch64::D15_D16,
AArch64::D16_D17, AArch64::D17_D18, AArch64::D18_D19, AArch64::D19_D20,
AArch64::D20_D21, AArch64::D21_D22, AArch64::D22_D23, AArch64::D23_D24,
AArch64::D24_D25, AArch64::D25_D26, AArch64::D26_D27, AArch64::D27_D28,
AArch64::D28_D29, AArch64::D29_D30, AArch64::D30_D31, AArch64::D31_D0
};
static DecodeStatus DecodeDDRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr, const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = DDDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned DDDDecoderTable[] = {
AArch64::D0_D1_D2, AArch64::D1_D2_D3, AArch64::D2_D3_D4,
AArch64::D3_D4_D5, AArch64::D4_D5_D6, AArch64::D5_D6_D7,
AArch64::D6_D7_D8, AArch64::D7_D8_D9, AArch64::D8_D9_D10,
AArch64::D9_D10_D11, AArch64::D10_D11_D12, AArch64::D11_D12_D13,
AArch64::D12_D13_D14, AArch64::D13_D14_D15, AArch64::D14_D15_D16,
AArch64::D15_D16_D17, AArch64::D16_D17_D18, AArch64::D17_D18_D19,
AArch64::D18_D19_D20, AArch64::D19_D20_D21, AArch64::D20_D21_D22,
AArch64::D21_D22_D23, AArch64::D22_D23_D24, AArch64::D23_D24_D25,
AArch64::D24_D25_D26, AArch64::D25_D26_D27, AArch64::D26_D27_D28,
AArch64::D27_D28_D29, AArch64::D28_D29_D30, AArch64::D29_D30_D31,
AArch64::D30_D31_D0, AArch64::D31_D0_D1
};
static DecodeStatus DecodeDDDRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr, const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = DDDDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned DDDDDecoderTable[] = {
AArch64::D0_D1_D2_D3, AArch64::D1_D2_D3_D4, AArch64::D2_D3_D4_D5,
AArch64::D3_D4_D5_D6, AArch64::D4_D5_D6_D7, AArch64::D5_D6_D7_D8,
AArch64::D6_D7_D8_D9, AArch64::D7_D8_D9_D10, AArch64::D8_D9_D10_D11,
AArch64::D9_D10_D11_D12, AArch64::D10_D11_D12_D13, AArch64::D11_D12_D13_D14,
AArch64::D12_D13_D14_D15, AArch64::D13_D14_D15_D16, AArch64::D14_D15_D16_D17,
AArch64::D15_D16_D17_D18, AArch64::D16_D17_D18_D19, AArch64::D17_D18_D19_D20,
AArch64::D18_D19_D20_D21, AArch64::D19_D20_D21_D22, AArch64::D20_D21_D22_D23,
AArch64::D21_D22_D23_D24, AArch64::D22_D23_D24_D25, AArch64::D23_D24_D25_D26,
AArch64::D24_D25_D26_D27, AArch64::D25_D26_D27_D28, AArch64::D26_D27_D28_D29,
AArch64::D27_D28_D29_D30, AArch64::D28_D29_D30_D31, AArch64::D29_D30_D31_D0,
AArch64::D30_D31_D0_D1, AArch64::D31_D0_D1_D2
};
static DecodeStatus DecodeDDDDRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = DDDDDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static DecodeStatus DecodeFixedPointScaleImm32(MCInst &Inst, unsigned Imm,
uint64_t Addr,
const void *Decoder) {
// scale{5} is asserted as 1 in tblgen.
Imm |= 0x20;
Inst.addOperand(MCOperand::createImm(64 - Imm));
return Success;
}
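// Worked example (illustrative): #fbits == 32 is encoded as scale == 32, whose
// low five bits are zero; re-asserting bit 5 gives 0x20, and 64 - 0x20 == 32
// recovers the original fbits value.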
static DecodeStatus DecodeFixedPointScaleImm64(MCInst &Inst, unsigned Imm,
uint64_t Addr,
const void *Decoder) {
Inst.addOperand(MCOperand::createImm(64 - Imm));
return Success;
}
static DecodeStatus DecodePCRelLabel19(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
int64_t ImmVal = Imm;
const AArch64Disassembler *Dis =
static_cast<const AArch64Disassembler *>(Decoder);
// Sign-extend 19-bit immediate.
if (ImmVal & (1 << (19 - 1)))
ImmVal |= ~((1LL << 19) - 1);
if (!Dis->tryAddingSymbolicOperand(Inst, ImmVal * 4, Addr,
Inst.getOpcode() != AArch64::LDRXl, 0, 4))
Inst.addOperand(MCOperand::createImm(ImmVal));
return Success;
}
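// Worked example (illustrative): Imm == 0x40000 has bit 18 set, so it
// sign-extends to -0x40000; scaled by the 4-byte instruction size this labels
// a target 0x100000 bytes (1 MiB) behind the current address.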
static DecodeStatus DecodeMemExtend(MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::createImm((Imm >> 1) & 1));
Inst.addOperand(MCOperand::createImm(Imm & 1));
return Success;
}
static DecodeStatus DecodeMRSSystemRegister(MCInst &Inst, unsigned Imm,
uint64_t Address,
const void *Decoder) {
Inst.addOperand(MCOperand::createImm(Imm));
// Every system register in the encoding space is valid with the syntax
// S<op0>_<op1>_<Cn>_<Cm>_<op2>, so decoding system registers always succeeds.
return Success;
}
static DecodeStatus DecodeMSRSystemRegister(MCInst &Inst, unsigned Imm,
uint64_t Address,
const void *Decoder) {
Inst.addOperand(MCOperand::createImm(Imm));
return Success;
}
static DecodeStatus DecodeFMOVLaneInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder) {
// This decoder exists to add the dummy Lane operand to the MCInst, which must
// be 1 in assembly but has no other real manifestation.
unsigned Rd = fieldFromInstruction(Insn, 0, 5);
unsigned Rn = fieldFromInstruction(Insn, 5, 5);
unsigned IsToVec = fieldFromInstruction(Insn, 16, 1);
if (IsToVec) {
DecodeFPR128RegisterClass(Inst, Rd, Address, Decoder);
DecodeGPR64RegisterClass(Inst, Rn, Address, Decoder);
} else {
DecodeGPR64RegisterClass(Inst, Rd, Address, Decoder);
DecodeFPR128RegisterClass(Inst, Rn, Address, Decoder);
}
// Add the lane
Inst.addOperand(MCOperand::createImm(1));
return Success;
}
static DecodeStatus DecodeVecShiftRImm(MCInst &Inst, unsigned Imm,
unsigned Add) {
Inst.addOperand(MCOperand::createImm(Add - Imm));
return Success;
}
static DecodeStatus DecodeVecShiftLImm(MCInst &Inst, unsigned Imm,
unsigned Add) {
Inst.addOperand(MCOperand::createImm((Imm + Add) & (Add - 1)));
return Success;
}
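// Illustrative note: right-shift immediates are stored as (element size -
// shift) in the variable bits of immh:immb, so the shift is recovered as
// Add - Imm; e.g. a 64-bit "sshr #1" reaches the decoder with Imm == 63,
// giving 64 - 63 == 1.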
static DecodeStatus DecodeVecShiftR64Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
return DecodeVecShiftRImm(Inst, Imm, 64);
}
static DecodeStatus DecodeVecShiftR64ImmNarrow(MCInst &Inst, unsigned Imm,
uint64_t Addr,
const void *Decoder) {
return DecodeVecShiftRImm(Inst, Imm | 0x20, 64);
}
static DecodeStatus DecodeVecShiftR32Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
return DecodeVecShiftRImm(Inst, Imm, 32);
}
static DecodeStatus DecodeVecShiftR32ImmNarrow(MCInst &Inst, unsigned Imm,
uint64_t Addr,
const void *Decoder) {
return DecodeVecShiftRImm(Inst, Imm | 0x10, 32);
}
static DecodeStatus DecodeVecShiftR16Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
return DecodeVecShiftRImm(Inst, Imm, 16);
}
static DecodeStatus DecodeVecShiftR16ImmNarrow(MCInst &Inst, unsigned Imm,
uint64_t Addr,
const void *Decoder) {
return DecodeVecShiftRImm(Inst, Imm | 0x8, 16);
}
static DecodeStatus DecodeVecShiftR8Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
return DecodeVecShiftRImm(Inst, Imm, 8);
}
static DecodeStatus DecodeVecShiftL64Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
return DecodeVecShiftLImm(Inst, Imm, 64);
}
static DecodeStatus DecodeVecShiftL32Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
return DecodeVecShiftLImm(Inst, Imm, 32);
}
static DecodeStatus DecodeVecShiftL16Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
return DecodeVecShiftLImm(Inst, Imm, 16);
}
static DecodeStatus DecodeVecShiftL8Imm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
return DecodeVecShiftLImm(Inst, Imm, 8);
}
static DecodeStatus DecodeThreeAddrSRegInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rd = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned Rm = fieldFromInstruction(insn, 16, 5);
unsigned shiftHi = fieldFromInstruction(insn, 22, 2);
unsigned shiftLo = fieldFromInstruction(insn, 10, 6);
unsigned shift = (shiftHi << 6) | shiftLo;
switch (Inst.getOpcode()) {
default:
return Fail;
case AArch64::ADDWrs:
case AArch64::ADDSWrs:
case AArch64::SUBWrs:
case AArch64::SUBSWrs:
// if shift == '11' then ReservedValue()
if (shiftHi == 0x3)
return Fail;
LLVM_FALLTHROUGH;
case AArch64::ANDWrs:
case AArch64::ANDSWrs:
case AArch64::BICWrs:
case AArch64::BICSWrs:
case AArch64::ORRWrs:
case AArch64::ORNWrs:
case AArch64::EORWrs:
case AArch64::EONWrs: {
// if sf == '0' and imm6<5> == '1' then ReservedValue()
if (shiftLo >> 5 == 1)
return Fail;
DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
}
case AArch64::ADDXrs:
case AArch64::ADDSXrs:
case AArch64::SUBXrs:
case AArch64::SUBSXrs:
// if shift == '11' then ReservedValue()
if (shiftHi == 0x3)
return Fail;
LLVM_FALLTHROUGH;
case AArch64::ANDXrs:
case AArch64::ANDSXrs:
case AArch64::BICXrs:
case AArch64::BICSXrs:
case AArch64::ORRXrs:
case AArch64::ORNXrs:
case AArch64::EORXrs:
case AArch64::EONXrs:
DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR64RegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR64RegisterClass(Inst, Rm, Addr, Decoder);
break;
}
Inst.addOperand(MCOperand::createImm(shift));
return Success;
}
static DecodeStatus DecodeMoveImmInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rd = fieldFromInstruction(insn, 0, 5);
unsigned imm = fieldFromInstruction(insn, 5, 16);
unsigned shift = fieldFromInstruction(insn, 21, 2);
shift <<= 4;
switch (Inst.getOpcode()) {
default:
return Fail;
case AArch64::MOVZWi:
case AArch64::MOVNWi:
case AArch64::MOVKWi:
if (shift & (1U << 5))
return Fail;
DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
break;
case AArch64::MOVZXi:
case AArch64::MOVNXi:
case AArch64::MOVKXi:
DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
break;
}
if (Inst.getOpcode() == AArch64::MOVKWi ||
Inst.getOpcode() == AArch64::MOVKXi)
Inst.addOperand(Inst.getOperand(0));
Inst.addOperand(MCOperand::createImm(imm));
Inst.addOperand(MCOperand::createImm(shift));
return Success;
}
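// For MOVK the destination register is appended a second time above because
// the instruction both reads and writes Rd (the operand is tied).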
static DecodeStatus DecodeUnsignedLdStInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rt = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned offset = fieldFromInstruction(insn, 10, 12);
const AArch64Disassembler *Dis =
static_cast<const AArch64Disassembler *>(Decoder);
switch (Inst.getOpcode()) {
default:
return Fail;
case AArch64::PRFMui:
// Rt is an immediate in prefetch.
Inst.addOperand(MCOperand::createImm(Rt));
break;
case AArch64::STRBBui:
case AArch64::LDRBBui:
case AArch64::LDRSBWui:
case AArch64::STRHHui:
case AArch64::LDRHHui:
case AArch64::LDRSHWui:
case AArch64::STRWui:
case AArch64::LDRWui:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDRSBXui:
case AArch64::LDRSHXui:
case AArch64::LDRSWui:
case AArch64::STRXui:
case AArch64::LDRXui:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDRQui:
case AArch64::STRQui:
DecodeFPR128RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDRDui:
case AArch64::STRDui:
DecodeFPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDRSui:
case AArch64::STRSui:
DecodeFPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDRHui:
case AArch64::STRHui:
DecodeFPR16RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDRBui:
case AArch64::STRBui:
DecodeFPR8RegisterClass(Inst, Rt, Addr, Decoder);
break;
}
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
if (!Dis->tryAddingSymbolicOperand(Inst, offset, Addr, Fail, 0, 4))
Inst.addOperand(MCOperand::createImm(offset));
return Success;
}
static DecodeStatus DecodeSignedLdStInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rt = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
int64_t offset = fieldFromInstruction(insn, 12, 9);
// offset is a 9-bit signed immediate, so sign-extend it into the full
// 64-bit value.
if (offset & (1 << (9 - 1)))
offset |= ~((1LL << 9) - 1);
// First operand is always the writeback to the address register, if needed.
switch (Inst.getOpcode()) {
default:
break;
case AArch64::LDRSBWpre:
case AArch64::LDRSHWpre:
case AArch64::STRBBpre:
case AArch64::LDRBBpre:
case AArch64::STRHHpre:
case AArch64::LDRHHpre:
case AArch64::STRWpre:
case AArch64::LDRWpre:
case AArch64::LDRSBWpost:
case AArch64::LDRSHWpost:
case AArch64::STRBBpost:
case AArch64::LDRBBpost:
case AArch64::STRHHpost:
case AArch64::LDRHHpost:
case AArch64::STRWpost:
case AArch64::LDRWpost:
case AArch64::LDRSBXpre:
case AArch64::LDRSHXpre:
case AArch64::STRXpre:
case AArch64::LDRSWpre:
case AArch64::LDRXpre:
case AArch64::LDRSBXpost:
case AArch64::LDRSHXpost:
case AArch64::STRXpost:
case AArch64::LDRSWpost:
case AArch64::LDRXpost:
case AArch64::LDRQpre:
case AArch64::STRQpre:
case AArch64::LDRQpost:
case AArch64::STRQpost:
case AArch64::LDRDpre:
case AArch64::STRDpre:
case AArch64::LDRDpost:
case AArch64::STRDpost:
case AArch64::LDRSpre:
case AArch64::STRSpre:
case AArch64::LDRSpost:
case AArch64::STRSpost:
case AArch64::LDRHpre:
case AArch64::STRHpre:
case AArch64::LDRHpost:
case AArch64::STRHpost:
case AArch64::LDRBpre:
case AArch64::STRBpre:
case AArch64::LDRBpost:
case AArch64::STRBpost:
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
break;
}
switch (Inst.getOpcode()) {
default:
return Fail;
case AArch64::PRFUMi:
// Rt is an immediate in prefetch.
Inst.addOperand(MCOperand::createImm(Rt));
break;
case AArch64::STURBBi:
case AArch64::LDURBBi:
case AArch64::LDURSBWi:
case AArch64::STURHHi:
case AArch64::LDURHHi:
case AArch64::LDURSHWi:
case AArch64::STURWi:
case AArch64::LDURWi:
case AArch64::LDTRSBWi:
case AArch64::LDTRSHWi:
case AArch64::STTRWi:
case AArch64::LDTRWi:
case AArch64::STTRHi:
case AArch64::LDTRHi:
case AArch64::LDTRBi:
case AArch64::STTRBi:
case AArch64::LDRSBWpre:
case AArch64::LDRSHWpre:
case AArch64::STRBBpre:
case AArch64::LDRBBpre:
case AArch64::STRHHpre:
case AArch64::LDRHHpre:
case AArch64::STRWpre:
case AArch64::LDRWpre:
case AArch64::LDRSBWpost:
case AArch64::LDRSHWpost:
case AArch64::STRBBpost:
case AArch64::LDRBBpost:
case AArch64::STRHHpost:
case AArch64::LDRHHpost:
case AArch64::STRWpost:
case AArch64::LDRWpost:
case AArch64::STLURBi:
case AArch64::STLURHi:
case AArch64::STLURWi:
case AArch64::LDAPURBi:
case AArch64::LDAPURSBWi:
case AArch64::LDAPURHi:
case AArch64::LDAPURSHWi:
case AArch64::LDAPURi:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDURSBXi:
case AArch64::LDURSHXi:
case AArch64::LDURSWi:
case AArch64::STURXi:
case AArch64::LDURXi:
case AArch64::LDTRSBXi:
case AArch64::LDTRSHXi:
case AArch64::LDTRSWi:
case AArch64::STTRXi:
case AArch64::LDTRXi:
case AArch64::LDRSBXpre:
case AArch64::LDRSHXpre:
case AArch64::STRXpre:
case AArch64::LDRSWpre:
case AArch64::LDRXpre:
case AArch64::LDRSBXpost:
case AArch64::LDRSHXpost:
case AArch64::STRXpost:
case AArch64::LDRSWpost:
case AArch64::LDRXpost:
case AArch64::LDAPURSWi:
case AArch64::LDAPURSHXi:
case AArch64::LDAPURSBXi:
case AArch64::STLURXi:
case AArch64::LDAPURXi:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDURQi:
case AArch64::STURQi:
case AArch64::LDRQpre:
case AArch64::STRQpre:
case AArch64::LDRQpost:
case AArch64::STRQpost:
DecodeFPR128RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDURDi:
case AArch64::STURDi:
case AArch64::LDRDpre:
case AArch64::STRDpre:
case AArch64::LDRDpost:
case AArch64::STRDpost:
DecodeFPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDURSi:
case AArch64::STURSi:
case AArch64::LDRSpre:
case AArch64::STRSpre:
case AArch64::LDRSpost:
case AArch64::STRSpost:
DecodeFPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDURHi:
case AArch64::STURHi:
case AArch64::LDRHpre:
case AArch64::STRHpre:
case AArch64::LDRHpost:
case AArch64::STRHpost:
DecodeFPR16RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::LDURBi:
case AArch64::STURBi:
case AArch64::LDRBpre:
case AArch64::STRBpre:
case AArch64::LDRBpost:
case AArch64::STRBpost:
DecodeFPR8RegisterClass(Inst, Rt, Addr, Decoder);
break;
}
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
Inst.addOperand(MCOperand::createImm(offset));
bool IsLoad = fieldFromInstruction(insn, 22, 1);
bool IsIndexed = fieldFromInstruction(insn, 10, 2) != 0;
bool IsFP = fieldFromInstruction(insn, 26, 1);
// Cannot write back to a transfer register (but xzr != sp).
if (IsLoad && IsIndexed && !IsFP && Rn != 31 && Rt == Rn)
return SoftFail;
return Success;
}
static DecodeStatus DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rt = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned Rt2 = fieldFromInstruction(insn, 10, 5);
unsigned Rs = fieldFromInstruction(insn, 16, 5);
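// Rs is the exclusive-store status register; Rt2 is the second transfer
// register used by the pair (LDXP/STXP-style) forms.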
unsigned Opcode = Inst.getOpcode();
switch (Opcode) {
default:
return Fail;
case AArch64::STLXRW:
case AArch64::STLXRB:
case AArch64::STLXRH:
case AArch64::STXRW:
case AArch64::STXRB:
case AArch64::STXRH:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
LLVM_FALLTHROUGH;
case AArch64::LDARW:
case AArch64::LDARB:
case AArch64::LDARH:
case AArch64::LDAXRW:
case AArch64::LDAXRB:
case AArch64::LDAXRH:
case AArch64::LDXRW:
case AArch64::LDXRB:
case AArch64::LDXRH:
case AArch64::STLRW:
case AArch64::STLRB:
case AArch64::STLRH:
case AArch64::STLLRW:
case AArch64::STLLRB:
case AArch64::STLLRH:
case AArch64::LDLARW:
case AArch64::LDLARB:
case AArch64::LDLARH:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::STLXRX:
case AArch64::STXRX:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
LLVM_FALLTHROUGH;
case AArch64::LDARX:
case AArch64::LDAXRX:
case AArch64::LDXRX:
case AArch64::STLRX:
case AArch64::LDLARX:
case AArch64::STLLRX:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case AArch64::STLXPW:
case AArch64::STXPW:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
LLVM_FALLTHROUGH;
case AArch64::LDAXPW:
case AArch64::LDXPW:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rt2, Addr, Decoder);
break;
case AArch64::STLXPX:
case AArch64::STXPX:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
LLVM_FALLTHROUGH;
case AArch64::LDAXPX:
case AArch64::LDXPX:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
DecodeGPR64RegisterClass(Inst, Rt2, Addr, Decoder);
break;
}
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
// You shouldn't load to the same register twice in an instruction...
if ((Opcode == AArch64::LDAXPW || Opcode == AArch64::LDXPW ||
Opcode == AArch64::LDAXPX || Opcode == AArch64::LDXPX) &&
Rt == Rt2)
return SoftFail;
return Success;
}
static DecodeStatus DecodePairLdStInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rt = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned Rt2 = fieldFromInstruction(insn, 10, 5);
int64_t offset = fieldFromInstruction(insn, 15, 7);
bool IsLoad = fieldFromInstruction(insn, 22, 1);
// offset is a 7-bit signed immediate, so sign extend it to
// fill the 64-bit offset value.
if (offset & (1 << (7 - 1)))
offset |= ~((1LL << 7) - 1);
unsigned Opcode = Inst.getOpcode();
bool NeedsDisjointWritebackTransfer = false;
// First operand is always writeback of base register.
switch (Opcode) {
default:
break;
case AArch64::LDPXpost:
case AArch64::STPXpost:
case AArch64::LDPSWpost:
case AArch64::LDPXpre:
case AArch64::STPXpre:
case AArch64::LDPSWpre:
case AArch64::LDPWpost:
case AArch64::STPWpost:
case AArch64::LDPWpre:
case AArch64::STPWpre:
case AArch64::LDPQpost:
case AArch64::STPQpost:
case AArch64::LDPQpre:
case AArch64::STPQpre:
case AArch64::LDPDpost:
case AArch64::STPDpost:
case AArch64::LDPDpre:
case AArch64::STPDpre:
case AArch64::LDPSpost:
case AArch64::STPSpost:
case AArch64::LDPSpre:
case AArch64::STPSpre:
case AArch64::STGPpre:
case AArch64::STGPpost:
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
break;
}
switch (Opcode) {
default:
return Fail;
case AArch64::LDPXpost:
case AArch64::STPXpost:
case AArch64::LDPSWpost:
case AArch64::LDPXpre:
case AArch64::STPXpre:
case AArch64::LDPSWpre:
case AArch64::STGPpre:
case AArch64::STGPpost:
NeedsDisjointWritebackTransfer = true;
LLVM_FALLTHROUGH;
case AArch64::LDNPXi:
case AArch64::STNPXi:
case AArch64::LDPXi:
case AArch64::STPXi:
case AArch64::LDPSWi:
case AArch64::STGPi:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
DecodeGPR64RegisterClass(Inst, Rt2, Addr, Decoder);
break;
case AArch64::LDPWpost:
case AArch64::STPWpost:
case AArch64::LDPWpre:
case AArch64::STPWpre:
NeedsDisjointWritebackTransfer = true;
LLVM_FALLTHROUGH;
case AArch64::LDNPWi:
case AArch64::STNPWi:
case AArch64::LDPWi:
case AArch64::STPWi:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rt2, Addr, Decoder);
break;
case AArch64::LDNPQi:
case AArch64::STNPQi:
case AArch64::LDPQpost:
case AArch64::STPQpost:
case AArch64::LDPQi:
case AArch64::STPQi:
case AArch64::LDPQpre:
case AArch64::STPQpre:
DecodeFPR128RegisterClass(Inst, Rt, Addr, Decoder);
DecodeFPR128RegisterClass(Inst, Rt2, Addr, Decoder);
break;
case AArch64::LDNPDi:
case AArch64::STNPDi:
case AArch64::LDPDpost:
case AArch64::STPDpost:
case AArch64::LDPDi:
case AArch64::STPDi:
case AArch64::LDPDpre:
case AArch64::STPDpre:
DecodeFPR64RegisterClass(Inst, Rt, Addr, Decoder);
DecodeFPR64RegisterClass(Inst, Rt2, Addr, Decoder);
break;
case AArch64::LDNPSi:
case AArch64::STNPSi:
case AArch64::LDPSpost:
case AArch64::STPSpost:
case AArch64::LDPSi:
case AArch64::STPSi:
case AArch64::LDPSpre:
case AArch64::STPSpre:
DecodeFPR32RegisterClass(Inst, Rt, Addr, Decoder);
DecodeFPR32RegisterClass(Inst, Rt2, Addr, Decoder);
break;
}
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
Inst.addOperand(MCOperand::createImm(offset));
// You shouldn't load to the same register twice in an instruction...
if (IsLoad && Rt == Rt2)
return SoftFail;
// ... or do any operation that writes-back to a transfer register. But note
// that "stp xzr, xzr, [sp], #4" is fine because xzr and sp are different.
if (NeedsDisjointWritebackTransfer && Rn != 31 && (Rt == Rn || Rt2 == Rn))
return SoftFail;
return Success;
}
static DecodeStatus DecodeAddSubERegInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rd = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned Rm = fieldFromInstruction(insn, 16, 5);
unsigned extend = fieldFromInstruction(insn, 10, 6);
unsigned shift = extend & 0x7;
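// The low three bits of the extend field hold the shift amount; only
// shifts of 0-4 are valid.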
if (shift > 4)
return Fail;
switch (Inst.getOpcode()) {
default:
return Fail;
case AArch64::ADDWrx:
case AArch64::SUBWrx:
DecodeGPR32spRegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR32spRegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
case AArch64::ADDSWrx:
case AArch64::SUBSWrx:
DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR32spRegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
case AArch64::ADDXrx:
case AArch64::SUBXrx:
DecodeGPR64spRegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
case AArch64::ADDSXrx:
case AArch64::SUBSXrx:
DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
case AArch64::ADDXrx64:
case AArch64::SUBXrx64:
DecodeGPR64spRegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR64RegisterClass(Inst, Rm, Addr, Decoder);
break;
case AArch64::SUBSXrx64:
case AArch64::ADDSXrx64:
DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR64RegisterClass(Inst, Rm, Addr, Decoder);
break;
}
Inst.addOperand(MCOperand::createImm(extend));
return Success;
}
static DecodeStatus DecodeLogicalImmInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rd = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned Datasize = fieldFromInstruction(insn, 31, 1);
unsigned imm;
if (Datasize) {
if (Inst.getOpcode() == AArch64::ANDSXri)
DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
else
DecodeGPR64spRegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR64RegisterClass(Inst, Rn, Addr, Decoder);
imm = fieldFromInstruction(insn, 10, 13);
if (!AArch64_AM::isValidDecodeLogicalImmediate(imm, 64))
return Fail;
} else {
if (Inst.getOpcode() == AArch64::ANDSWri)
DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
else
DecodeGPR32spRegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR32RegisterClass(Inst, Rn, Addr, Decoder);
imm = fieldFromInstruction(insn, 10, 12);
if (!AArch64_AM::isValidDecodeLogicalImmediate(imm, 32))
return Fail;
}
Inst.addOperand(MCOperand::createImm(imm));
return Success;
}
static DecodeStatus DecodeModImmInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rd = fieldFromInstruction(insn, 0, 5);
unsigned cmode = fieldFromInstruction(insn, 12, 4);
unsigned imm = fieldFromInstruction(insn, 16, 3) << 5;
imm |= fieldFromInstruction(insn, 5, 5);
if (Inst.getOpcode() == AArch64::MOVID)
DecodeFPR64RegisterClass(Inst, Rd, Addr, Decoder);
else
DecodeVectorRegisterClass(Inst, Rd, Addr, Decoder);
Inst.addOperand(MCOperand::createImm(imm));
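// Add the shift operand implied by cmode: bits [2:1] give the LSL amount
// (0, 8, 16 or 24) for the shifted forms, while for the MSL forms bit 0
// selects between the MSL #8 (0x108) and MSL #16 (0x110) shifter encodings.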
switch (Inst.getOpcode()) {
default:
break;
case AArch64::MOVIv4i16:
case AArch64::MOVIv8i16:
case AArch64::MVNIv4i16:
case AArch64::MVNIv8i16:
case AArch64::MOVIv2i32:
case AArch64::MOVIv4i32:
case AArch64::MVNIv2i32:
case AArch64::MVNIv4i32:
Inst.addOperand(MCOperand::createImm((cmode & 6) << 2));
break;
case AArch64::MOVIv2s_msl:
case AArch64::MOVIv4s_msl:
case AArch64::MVNIv2s_msl:
case AArch64::MVNIv4s_msl:
Inst.addOperand(MCOperand::createImm(cmode & 1 ? 0x110 : 0x108));
break;
}
return Success;
}
static DecodeStatus DecodeModImmTiedInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Rd = fieldFromInstruction(insn, 0, 5);
unsigned cmode = fieldFromInstruction(insn, 12, 4);
unsigned imm = fieldFromInstruction(insn, 16, 3) << 5;
imm |= fieldFromInstruction(insn, 5, 5);
// Tied operands added twice.
DecodeVectorRegisterClass(Inst, Rd, Addr, Decoder);
DecodeVectorRegisterClass(Inst, Rd, Addr, Decoder);
Inst.addOperand(MCOperand::createImm(imm));
Inst.addOperand(MCOperand::createImm((cmode & 6) << 2));
return Success;
}
static DecodeStatus DecodeAdrInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr, const void *Decoder) {
unsigned Rd = fieldFromInstruction(insn, 0, 5);
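// Assemble the 21-bit ADR/ADRP immediate: immhi (bits [23:5]) supplies the
// upper 19 bits and immlo (bits [30:29]) the low two bits.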
int64_t imm = fieldFromInstruction(insn, 5, 19) << 2;
imm |= fieldFromInstruction(insn, 29, 2);
const AArch64Disassembler *Dis =
static_cast<const AArch64Disassembler *>(Decoder);
// Sign-extend the 21-bit immediate.
if (imm & (1 << (21 - 1)))
imm |= ~((1LL << 21) - 1);
DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
if (!Dis->tryAddingSymbolicOperand(Inst, imm, Addr, Fail, 0, 4))
Inst.addOperand(MCOperand::createImm(imm));
return Success;
}
static DecodeStatus DecodeAddSubImmShift(MCInst &Inst, uint32_t insn,
uint64_t Addr, const void *Decoder) {
unsigned Rd = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned Imm = fieldFromInstruction(insn, 10, 14);
unsigned S = fieldFromInstruction(insn, 29, 1);
unsigned Datasize = fieldFromInstruction(insn, 31, 1);
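// The 14-bit field is a 12-bit immediate plus a 2-bit shifter; only
// LSL #0 (0) and LSL #12 (1) are valid shifter values.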
unsigned ShifterVal = (Imm >> 12) & 3;
unsigned ImmVal = Imm & 0xFFF;
const AArch64Disassembler *Dis =
static_cast<const AArch64Disassembler *>(Decoder);
if (ShifterVal != 0 && ShifterVal != 1)
return Fail;
if (Datasize) {
if (Rd == 31 && !S)
DecodeGPR64spRegisterClass(Inst, Rd, Addr, Decoder);
else
DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
} else {
if (Rd == 31 && !S)
DecodeGPR32spRegisterClass(Inst, Rd, Addr, Decoder);
else
DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
DecodeGPR32spRegisterClass(Inst, Rn, Addr, Decoder);
}
if (!Dis->tryAddingSymbolicOperand(Inst, Imm, Addr, Fail, 0, 4))
Inst.addOperand(MCOperand::createImm(ImmVal));
Inst.addOperand(MCOperand::createImm(12 * ShifterVal));
return Success;
}
static DecodeStatus DecodeUnconditionalBranch(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
int64_t imm = fieldFromInstruction(insn, 0, 26);
const AArch64Disassembler *Dis =
static_cast<const AArch64Disassembler *>(Decoder);
// Sign-extend the 26-bit immediate.
if (imm & (1 << (26 - 1)))
imm |= ~((1LL << 26) - 1);
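// The branch target is Addr + imm * 4; the raw operand keeps the word offset.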
if (!Dis->tryAddingSymbolicOperand(Inst, imm * 4, Addr, true, 0, 4))
Inst.addOperand(MCOperand::createImm(imm));
return Success;
}
static DecodeStatus DecodeSystemPStateInstruction(MCInst &Inst, uint32_t insn,
uint64_t Addr,
const void *Decoder) {
uint64_t op1 = fieldFromInstruction(insn, 16, 3);
uint64_t op2 = fieldFromInstruction(insn, 5, 3);
uint64_t crm = fieldFromInstruction(insn, 8, 4);
uint64_t pstate_field = (op1 << 3) | op2;
switch (pstate_field) {
case 0x01: // XAFlag
case 0x02: // AXFlag
return Fail;
}
if ((pstate_field == AArch64PState::PAN ||
pstate_field == AArch64PState::UAO ||
pstate_field == AArch64PState::SSBS) && crm > 1)
return Fail;
Inst.addOperand(MCOperand::createImm(pstate_field));
Inst.addOperand(MCOperand::createImm(crm));
const AArch64Disassembler *Dis =
static_cast<const AArch64Disassembler *>(Decoder);
auto PState = AArch64PState::lookupPStateByEncoding(pstate_field);
if (PState && PState->haveFeatures(Dis->getSubtargetInfo().getFeatureBits()))
return Success;
return Fail;
}
static DecodeStatus DecodeTestAndBranch(MCInst &Inst, uint32_t insn,
uint64_t Addr, const void *Decoder) {
uint64_t Rt = fieldFromInstruction(insn, 0, 5);
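// The tested bit number is b5:b40, i.e. instruction bit 31 supplies bit 5
// and bits [23:19] supply the low five bits.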
uint64_t bit = fieldFromInstruction(insn, 31, 1) << 5;
bit |= fieldFromInstruction(insn, 19, 5);
int64_t dst = fieldFromInstruction(insn, 5, 14);
const AArch64Disassembler *Dis =
static_cast<const AArch64Disassembler *>(Decoder);
// Sign-extend the 14-bit immediate.
if (dst & (1 << (14 - 1)))
dst |= ~((1LL << 14) - 1);
if (fieldFromInstruction(insn, 31, 1) == 0)
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
else
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
Inst.addOperand(MCOperand::createImm(bit));
if (!Dis->tryAddingSymbolicOperand(Inst, dst * 4, Addr, true, 0, 4))
Inst.addOperand(MCOperand::createImm(dst));
return Success;
}
static DecodeStatus DecodeGPRSeqPairsClassRegisterClass(MCInst &Inst,
unsigned RegClassID,
unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
// Register number must be even (see CASP instruction)
if (RegNo & 0x1)
return Fail;
- unsigned Register = AArch64MCRegisterClasses[RegClassID].getRegister(RegNo);
- Inst.addOperand(MCOperand::createReg(Register));
+ unsigned Reg = AArch64MCRegisterClasses[RegClassID].getRegister(RegNo / 2);
+ Inst.addOperand(MCOperand::createReg(Reg));
return Success;
}
static DecodeStatus DecodeWSeqPairsClassRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
return DecodeGPRSeqPairsClassRegisterClass(Inst,
AArch64::WSeqPairsClassRegClassID,
RegNo, Addr, Decoder);
}
static DecodeStatus DecodeXSeqPairsClassRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
return DecodeGPRSeqPairsClassRegisterClass(Inst,
AArch64::XSeqPairsClassRegClassID,
RegNo, Addr, Decoder);
}
static DecodeStatus DecodeSVELogicalImmInstruction(llvm::MCInst &Inst,
uint32_t insn,
uint64_t Addr,
const void *Decoder) {
unsigned Zdn = fieldFromInstruction(insn, 0, 5);
unsigned imm = fieldFromInstruction(insn, 5, 13);
if (!AArch64_AM::isValidDecodeLogicalImmediate(imm, 64))
return Fail;
// The same (tied) operand is added twice to the instruction (DUPM has no
// tied source, so it is added only once).
DecodeZPRRegisterClass(Inst, Zdn, Addr, Decoder);
if (Inst.getOpcode() != AArch64::DUPM_ZI)
DecodeZPRRegisterClass(Inst, Zdn, Addr, Decoder);
Inst.addOperand(MCOperand::createImm(imm));
return Success;
}
template<int Bits>
static DecodeStatus DecodeSImm(llvm::MCInst &Inst, uint64_t Imm,
uint64_t Address, const void *Decoder) {
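// Reject encodings with bits set outside the Bits-wide field.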
if (Imm & ~((1LL << Bits) - 1))
return Fail;
// Imm is a signed immediate, so sign extend it.
if (Imm & (1 << (Bits - 1)))
Imm |= ~((1LL << Bits) - 1);
Inst.addOperand(MCOperand::createImm(Imm));
return Success;
}
// Decode 8-bit signed/unsigned immediate for a given element width.
template <int ElementWidth>
static DecodeStatus DecodeImm8OptLsl(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
unsigned Val = (uint8_t)Imm;
unsigned Shift = (Imm & 0x100) ? 8 : 0;
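// A shifted (LSL #8) immediate is not valid for byte elements.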
if (ElementWidth == 8 && Shift)
return Fail;
Inst.addOperand(MCOperand::createImm(Val));
Inst.addOperand(MCOperand::createImm(Shift));
return Success;
}
// Decode a uimm4 operand whose encoded value 0-15 represents the range 1-16.
static DecodeStatus DecodeSVEIncDecImm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder) {
Inst.addOperand(MCOperand::createImm(Imm + 1));
return Success;
}
static DecodeStatus DecodeLoadAllocTagArrayInstruction(MCInst &Inst,
uint32_t insn,
uint64_t address,
const void* Decoder) {
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned Rt = fieldFromInstruction(insn, 0, 5);
// Outputs
DecodeGPR64spRegisterClass(Inst, Rn, address, Decoder);
DecodeGPR64RegisterClass(Inst, Rt, address, Decoder);
// Input (Rn again)
Inst.addOperand(Inst.getOperand(0));
// Do this check after decoding, since the raw register number for xzr and sp is the same.
if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
return SoftFail;
} else {
return Success;
}
}
Index: vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZISelLowering.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZISelLowering.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZISelLowering.cpp (revision 344166)
@@ -1,7406 +1,7447 @@
//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//
#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>
using namespace llvm;
#define DEBUG_TYPE "systemz-lower"
namespace {
// Represents information about a comparison.
struct Comparison {
Comparison(SDValue Op0In, SDValue Op1In)
: Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
// The operands to the comparison.
SDValue Op0, Op1;
// The opcode that should be used to compare Op0 and Op1.
unsigned Opcode;
// A SystemZICMP value. Only used for integer comparisons.
unsigned ICmpType;
// The mask of CC values that Opcode can produce.
unsigned CCValid;
// The mask of CC values for which the original condition is true.
unsigned CCMask;
};
} // end anonymous namespace
// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
switch (VT.getSimpleVT().SimpleTy) {
case MVT::i32:
return true;
case MVT::i64:
return false;
default:
llvm_unreachable("Unsupported type");
}
}
// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
if (Op.isReg())
Op.setIsKill(false);
return Op;
}
SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
const SystemZSubtarget &STI)
: TargetLowering(TM), Subtarget(STI) {
MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));
// Set up the register classes.
if (Subtarget.hasHighWord())
addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
else
addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
if (Subtarget.hasVector()) {
addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
} else {
addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
}
if (Subtarget.hasVectorEnhancements1())
addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
else
addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
if (Subtarget.hasVector()) {
addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
}
// Compute derived properties from the register classes
computeRegisterProperties(Subtarget.getRegisterInfo());
// Set up special registers.
setStackPointerRegisterToSaveRestore(SystemZ::R15D);
// TODO: It may be better to default to latency-oriented scheduling; however,
// LLVM's current latency-oriented scheduler can't handle physreg definitions
// such as SystemZ's CC, so use the register-pressure scheduler, which can.
setSchedulingPreference(Sched::RegPressure);
setBooleanContents(ZeroOrOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
// Instructions are strings of 2-byte aligned 2-byte values.
setMinFunctionAlignment(2);
// For performance reasons we prefer 16-byte alignment.
setPrefFunctionAlignment(4);
// Handle operations that are handled in a similar way for all types.
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
I <= MVT::LAST_FP_VALUETYPE;
++I) {
MVT VT = MVT::SimpleValueType(I);
if (isTypeLegal(VT)) {
// Lower SET_CC into an IPM-based sequence.
setOperationAction(ISD::SETCC, VT, Custom);
// Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
setOperationAction(ISD::SELECT, VT, Expand);
// Lower SELECT_CC and BR_CC into separate comparisons and branches.
setOperationAction(ISD::SELECT_CC, VT, Custom);
setOperationAction(ISD::BR_CC, VT, Custom);
}
}
// Expand jump table branches as address arithmetic followed by an
// indirect jump.
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
// Expand BRCOND into a BR_CC (see above).
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
// Handle integer types.
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
I <= MVT::LAST_INTEGER_VALUETYPE;
++I) {
MVT VT = MVT::SimpleValueType(I);
if (isTypeLegal(VT)) {
// Expand individual DIV and REMs into DIVREMs.
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Custom);
setOperationAction(ISD::UDIVREM, VT, Custom);
// Support addition/subtraction with overflow.
setOperationAction(ISD::SADDO, VT, Custom);
setOperationAction(ISD::SSUBO, VT, Custom);
// Support addition/subtraction with carry.
setOperationAction(ISD::UADDO, VT, Custom);
setOperationAction(ISD::USUBO, VT, Custom);
// Support carry in as value rather than glue.
setOperationAction(ISD::ADDCARRY, VT, Custom);
setOperationAction(ISD::SUBCARRY, VT, Custom);
// Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
// stores, putting a serialization instruction after the stores.
setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
// Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
// available, or if the operand is constant.
setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
// Use POPCNT on z196 and above.
if (Subtarget.hasPopulationCount())
setOperationAction(ISD::CTPOP, VT, Custom);
else
setOperationAction(ISD::CTPOP, VT, Expand);
// No special instructions for these.
setOperationAction(ISD::CTTZ, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
// Use *MUL_LOHI where possible instead of MULH*.
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::SMUL_LOHI, VT, Custom);
setOperationAction(ISD::UMUL_LOHI, VT, Custom);
// Only z196 and above have native support for conversions to unsigned.
// On z10, promoting to i64 doesn't generate an inexact condition for
// values that are outside the i32 range but in the i64 range, so use
// the default expansion.
if (!Subtarget.hasFPExtension())
setOperationAction(ISD::FP_TO_UINT, VT, Expand);
}
}
// Type legalization will convert 8- and 16-bit atomic operations into
// forms that operate on i32s (but still keeping the original memory VT).
// Lower them into full i32 operations.
setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
// Even though i128 is not a legal type, we still need to custom lower
// the atomic operations in order to exploit SystemZ instructions.
setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
// We can use the CC result of compare-and-swap to implement
// the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
// Traps are legal, as we will convert them to "j .+2".
setOperationAction(ISD::TRAP, MVT::Other, Legal);
// z10 has instructions for signed but not unsigned FP conversion.
// Handle unsigned 32-bit types as signed 64-bit types.
if (!Subtarget.hasFPExtension()) {
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
}
// We have native support for a 64-bit CTLZ, via FLOGR.
setOperationAction(ISD::CTLZ, MVT::i32, Promote);
setOperationAction(ISD::CTLZ, MVT::i64, Legal);
// Give LowerOperation the chance to replace 64-bit ORs with subregs.
setOperationAction(ISD::OR, MVT::i64, Custom);
// FIXME: Can we support these natively?
setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
// We have native instructions for i8, i16 and i32 extensions, but not i1.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
for (MVT VT : MVT::integer_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
}
// Handle the various types of symbolic address.
setOperationAction(ISD::ConstantPool, PtrVT, Custom);
setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
setOperationAction(ISD::BlockAddress, PtrVT, Custom);
setOperationAction(ISD::JumpTable, PtrVT, Custom);
// We need to handle dynamic allocations specially because of the
// 160-byte area at the bottom of the stack.
setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);
// Use custom expanders so that we can force the function to use
// a frame pointer.
setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
// Handle prefetches with PFD or PFDRL.
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
for (MVT VT : MVT::vector_valuetypes()) {
// Assume by default that all vector operations need to be expanded.
for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
if (getOperationAction(Opcode, VT) == Legal)
setOperationAction(Opcode, VT, Expand);
// Likewise all truncating stores and extending loads.
for (MVT InnerVT : MVT::vector_valuetypes()) {
setTruncStoreAction(VT, InnerVT, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
}
if (isTypeLegal(VT)) {
// These operations are legal for anything that can be stored in a
// vector register, even if there is no native support for the format
// as such. In particular, we can do these for v4f32 even though there
// are no specific instructions for that format.
setOperationAction(ISD::LOAD, VT, Legal);
setOperationAction(ISD::STORE, VT, Legal);
setOperationAction(ISD::VSELECT, VT, Legal);
setOperationAction(ISD::BITCAST, VT, Legal);
setOperationAction(ISD::UNDEF, VT, Legal);
// Likewise, except that we need to replace the nodes with something
// more specific.
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
}
}
// Handle integer vector types.
for (MVT VT : MVT::integer_vector_valuetypes()) {
if (isTypeLegal(VT)) {
// These operations have direct equivalents.
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
setOperationAction(ISD::ADD, VT, Legal);
setOperationAction(ISD::SUB, VT, Legal);
if (VT != MVT::v2i64)
setOperationAction(ISD::MUL, VT, Legal);
setOperationAction(ISD::AND, VT, Legal);
setOperationAction(ISD::OR, VT, Legal);
setOperationAction(ISD::XOR, VT, Legal);
if (Subtarget.hasVectorEnhancements1())
setOperationAction(ISD::CTPOP, VT, Legal);
else
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::CTTZ, VT, Legal);
setOperationAction(ISD::CTLZ, VT, Legal);
// Convert a GPR scalar to a vector by inserting it into element 0.
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
// Use a series of unpacks for extensions.
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
// Detect shifts by a scalar amount and convert them into
// V*_BY_SCALAR.
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
setOperationAction(ISD::SRL, VT, Custom);
// At present ROTL isn't matched by DAGCombiner. ROTR should be
// converted into ROTL.
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
// Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
// and inverting the result as necessary.
setOperationAction(ISD::SETCC, VT, Custom);
}
}
if (Subtarget.hasVector()) {
// There should be no need to check for float types other than v2f64
// since <2 x f32> isn't a legal type.
setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
}
// Handle floating-point types.
for (unsigned I = MVT::FIRST_FP_VALUETYPE;
I <= MVT::LAST_FP_VALUETYPE;
++I) {
MVT VT = MVT::SimpleValueType(I);
if (isTypeLegal(VT)) {
// We can use FI for FRINT.
setOperationAction(ISD::FRINT, VT, Legal);
// We can use the extended form of FI for other rounding operations.
if (Subtarget.hasFPExtension()) {
setOperationAction(ISD::FNEARBYINT, VT, Legal);
setOperationAction(ISD::FFLOOR, VT, Legal);
setOperationAction(ISD::FCEIL, VT, Legal);
setOperationAction(ISD::FTRUNC, VT, Legal);
setOperationAction(ISD::FROUND, VT, Legal);
}
// No special instructions for these.
setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FCOS, VT, Expand);
setOperationAction(ISD::FSINCOS, VT, Expand);
setOperationAction(ISD::FREM, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
}
}
// Handle floating-point vector types.
if (Subtarget.hasVector()) {
// Scalar-to-vector conversion is just a subreg.
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
// Some insertions and extractions can be done directly but others
// need to go via integers.
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
// These operations have direct equivalents.
setOperationAction(ISD::FADD, MVT::v2f64, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
setOperationAction(ISD::FMA, MVT::v2f64, Legal);
setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
setOperationAction(ISD::FABS, MVT::v2f64, Legal);
setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
}
// The vector enhancements facility 1 has instructions for these.
if (Subtarget.hasVectorEnhancements1()) {
setOperationAction(ISD::FADD, MVT::v4f32, Legal);
setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
setOperationAction(ISD::FMA, MVT::v4f32, Legal);
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
setOperationAction(ISD::FABS, MVT::v4f32, Legal);
setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);
setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);
setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);
}
// We have fused multiply-addition for f32 and f64 but not f128.
setOperationAction(ISD::FMA, MVT::f32, Legal);
setOperationAction(ISD::FMA, MVT::f64, Legal);
if (Subtarget.hasVectorEnhancements1())
setOperationAction(ISD::FMA, MVT::f128, Legal);
else
setOperationAction(ISD::FMA, MVT::f128, Expand);
// We don't have a copysign instruction on vector registers.
if (Subtarget.hasVectorEnhancements1())
setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
// Needed so that we don't try to implement f128 constant loads using
// a load-and-extend of an f80 constant (in cases where the constant
// would fit in an f80).
for (MVT VT : MVT::fp_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
// We don't have an extending load instruction on vector registers.
if (Subtarget.hasVectorEnhancements1()) {
setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
}
// Floating-point truncation and stores need to be done separately.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
setTruncStoreAction(MVT::f128, MVT::f32, Expand);
setTruncStoreAction(MVT::f128, MVT::f64, Expand);
// We have 64-bit FPR<->GPR moves, but need special handling for
// 32-bit forms.
if (!Subtarget.hasVector()) {
setOperationAction(ISD::BITCAST, MVT::i32, Custom);
setOperationAction(ISD::BITCAST, MVT::f32, Custom);
}
// VASTART and VACOPY need to deal with the SystemZ-specific varargs
// structure, but VAEND is a no-op.
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::VACOPY, MVT::Other, Custom);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
// Codes for which we want to perform some z-specific combinations.
setTargetDAGCombine(ISD::ZERO_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
setTargetDAGCombine(ISD::LOAD);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::FP_ROUND);
setTargetDAGCombine(ISD::FP_EXTEND);
setTargetDAGCombine(ISD::BSWAP);
setTargetDAGCombine(ISD::SDIV);
setTargetDAGCombine(ISD::UDIV);
setTargetDAGCombine(ISD::SREM);
setTargetDAGCombine(ISD::UREM);
// Handle intrinsics.
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
// We want to use MVC in preference to even a single load/store pair.
MaxStoresPerMemcpy = 0;
MaxStoresPerMemcpyOptSize = 0;
// The main memset sequence is a byte store followed by an MVC.
// Two STC or MV..I stores win over that, but the kind of fused stores
// generated by target-independent code don't when the byte value is
// variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
// than "STC;MVC". Handle the choice in target-specific code instead.
MaxStoresPerMemset = 0;
MaxStoresPerMemsetOptSize = 0;
}
EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
LLVMContext &, EVT VT) const {
if (!VT.isVector())
return MVT::i32;
return VT.changeVectorElementTypeToInteger();
}
bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
VT = VT.getScalarType();
if (!VT.isSimple())
return false;
switch (VT.getSimpleVT().SimpleTy) {
case MVT::f32:
case MVT::f64:
return true;
case MVT::f128:
return Subtarget.hasVectorEnhancements1();
default:
break;
}
return false;
}
bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
// We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
return Imm.isZero() || Imm.isNegZero();
}
bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
// We can use CGFI or CLGFI.
return isInt<32>(Imm) || isUInt<32>(Imm);
}
bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
// We can use ALGFI or SLGFI.
return isUInt<32>(Imm) || isUInt<32>(-Imm);
}
bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
unsigned,
unsigned,
bool *Fast) const {
// Unaligned accesses should never be slower than the expanded version.
// We check specifically for aligned accesses in the few cases where
// they are required.
if (Fast)
*Fast = true;
return true;
}
// Information about the addressing mode for a memory access.
struct AddressingMode {
// True if a long displacement is supported.
bool LongDisplacement;
// True if use of index register is supported.
bool IndexReg;
AddressingMode(bool LongDispl, bool IdxReg) :
LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};
// Return the desired addressing mode for a Load whose only use (in the
// same block) is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
Type *Ty) {
// With vector support a Load->Store combination may be combined to either
// an MVC or vector operations and it seems to work best to allow the
// vector addressing mode.
if (HasVector)
return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
// Otherwise only the MVC case is special.
bool MVC = Ty->isIntegerTy(8);
return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}
// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::memset:
case Intrinsic::memmove:
case Intrinsic::memcpy:
return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
}
}
if (isa<LoadInst>(I) && I->hasOneUse()) {
auto *SingleUser = dyn_cast<Instruction>(*I->user_begin());
if (SingleUser->getParent() == I->getParent()) {
if (isa<ICmpInst>(SingleUser)) {
if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
if (C->getBitWidth() <= 64 &&
(isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
// Comparison of memory with 16 bit signed / unsigned immediate
return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
} else if (isa<StoreInst>(SingleUser))
// Load->Store
return getLoadStoreAddrMode(HasVector, I->getType());
}
} else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
// Load->Store
return getLoadStoreAddrMode(HasVector, LoadI->getType());
}
if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
// * Use LDE instead of LE/LEY for z13 to avoid partial register
// dependencies (LDE only supports small offsets).
// * Utilize the vector registers to hold floating point
// values (vector load / store instructions only support small
// offsets).
Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
I->getOperand(0)->getType());
bool IsFPAccess = MemAccessTy->isFloatingPointTy();
bool IsVectorAccess = MemAccessTy->isVectorTy();
// A store of an extracted vector element will be combined into a VSTE type
// instruction.
if (!IsVectorAccess && isa<StoreInst>(I)) {
Value *DataOp = I->getOperand(0);
if (isa<ExtractElementInst>(DataOp))
IsVectorAccess = true;
}
// A load which gets inserted into a vector element will be combined into a
// VLE type instruction.
if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
User *LoadUser = *I->user_begin();
if (isa<InsertElementInst>(LoadUser))
IsVectorAccess = true;
}
if (IsFPAccess || IsVectorAccess)
return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
}
return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}
bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
// Punt on globals for now, although they can be used in limited
// RELATIVE LONG cases.
if (AM.BaseGV)
return false;
// Require a 20-bit signed offset.
if (!isInt<20>(AM.BaseOffs))
return false;
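// By default both a long displacement and an index register are allowed.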
AddressingMode SupportedAM(true, true);
if (I != nullptr)
SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());
if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
return false;
if (!SupportedAM.IndexReg)
// No indexing allowed.
return AM.Scale == 0;
else
// Indexing is OK but no scale factor can be applied.
return AM.Scale == 0 || AM.Scale == 1;
}
bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
return false;
unsigned FromBits = FromType->getPrimitiveSizeInBits();
unsigned ToBits = ToType->getPrimitiveSizeInBits();
return FromBits > ToBits;
}
bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
if (!FromVT.isInteger() || !ToVT.isInteger())
return false;
unsigned FromBits = FromVT.getSizeInBits();
unsigned ToBits = ToVT.getSizeInBits();
return FromBits > ToBits;
}
//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//
TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'a': // Address register
case 'd': // Data register (equivalent to 'r')
case 'f': // Floating-point register
case 'h': // High-part register
case 'r': // General-purpose register
case 'v': // Vector register
return C_RegisterClass;
case 'Q': // Memory with base and unsigned 12-bit displacement
case 'R': // Likewise, plus an index
case 'S': // Memory with base and signed 20-bit displacement
case 'T': // Likewise, plus an index
case 'm': // Equivalent to 'T'.
return C_Memory;
case 'I': // Unsigned 8-bit constant
case 'J': // Unsigned 12-bit constant
case 'K': // Signed 16-bit constant
case 'L': // Signed 20-bit displacement (on all targets we support)
case 'M': // 0x7fffffff
return C_Other;
default:
break;
}
}
return TargetLowering::getConstraintType(Constraint);
}
TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
const char *constraint) const {
ConstraintWeight weight = CW_Invalid;
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
switch (*constraint) {
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
break;
case 'a': // Address register
case 'd': // Data register (equivalent to 'r')
case 'h': // High-part register
case 'r': // General-purpose register
if (CallOperandVal->getType()->isIntegerTy())
weight = CW_Register;
break;
case 'f': // Floating-point register
if (type->isFloatingPointTy())
weight = CW_Register;
break;
case 'v': // Vector register
if ((type->isVectorTy() || type->isFloatingPointTy()) &&
Subtarget.hasVector())
weight = CW_Register;
break;
case 'I': // Unsigned 8-bit constant
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isUInt<8>(C->getZExtValue()))
weight = CW_Constant;
break;
case 'J': // Unsigned 12-bit constant
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isUInt<12>(C->getZExtValue()))
weight = CW_Constant;
break;
case 'K': // Signed 16-bit constant
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isInt<16>(C->getSExtValue()))
weight = CW_Constant;
break;
case 'L': // Signed 20-bit displacement (on all targets we support)
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isInt<20>(C->getSExtValue()))
weight = CW_Constant;
break;
case 'M': // 0x7fffffff
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (C->getZExtValue() == 0x7fffffff)
weight = CW_Constant;
break;
}
return weight;
}
// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. MC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
const unsigned *Map, unsigned Size) {
assert(*(Constraint.end()-1) == '}' && "Missing '}'");
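// The constraint has the form "{tNNN}", so the register number starts at
// index 2.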
if (isdigit(Constraint[2])) {
unsigned Index;
bool Failed =
Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
if (!Failed && Index < Size && Map[Index])
return std::make_pair(Map[Index], RC);
}
return std::make_pair(0U, nullptr);
}
std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
if (Constraint.size() == 1) {
// GCC Constraint Letters
switch (Constraint[0]) {
default: break;
case 'd': // Data register (equivalent to 'r')
case 'r': // General-purpose register
if (VT == MVT::i64)
return std::make_pair(0U, &SystemZ::GR64BitRegClass);
else if (VT == MVT::i128)
return std::make_pair(0U, &SystemZ::GR128BitRegClass);
return std::make_pair(0U, &SystemZ::GR32BitRegClass);
case 'a': // Address register
if (VT == MVT::i64)
return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
else if (VT == MVT::i128)
return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
case 'h': // High-part register (an LLVM extension)
return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
case 'f': // Floating-point register
if (VT == MVT::f64)
return std::make_pair(0U, &SystemZ::FP64BitRegClass);
else if (VT == MVT::f128)
return std::make_pair(0U, &SystemZ::FP128BitRegClass);
return std::make_pair(0U, &SystemZ::FP32BitRegClass);
case 'v': // Vector register
if (Subtarget.hasVector()) {
if (VT == MVT::f32)
return std::make_pair(0U, &SystemZ::VR32BitRegClass);
if (VT == MVT::f64)
return std::make_pair(0U, &SystemZ::VR64BitRegClass);
return std::make_pair(0U, &SystemZ::VR128BitRegClass);
}
break;
}
}
if (Constraint.size() > 0 && Constraint[0] == '{') {
// We need to override the default register parsing for GPRs and FPRs
// because the interpretation depends on VT. The internal names of
// the registers are also different from the external names
// (F0D and F0S instead of F0, etc.).
if (Constraint[1] == 'r') {
if (VT == MVT::i32)
return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
SystemZMC::GR32Regs, 16);
if (VT == MVT::i128)
return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
SystemZMC::GR128Regs, 16);
return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
SystemZMC::GR64Regs, 16);
}
if (Constraint[1] == 'f') {
if (VT == MVT::f32)
return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
SystemZMC::FP32Regs, 16);
if (VT == MVT::f128)
return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
SystemZMC::FP128Regs, 16);
return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
SystemZMC::FP64Regs, 16);
}
if (Constraint[1] == 'v') {
if (VT == MVT::f32)
return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
SystemZMC::VR32Regs, 32);
if (VT == MVT::f64)
return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
SystemZMC::VR64Regs, 32);
return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
SystemZMC::VR128Regs, 32);
}
}
return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
// Only support length 1 constraints for now.
if (Constraint.length() == 1) {
switch (Constraint[0]) {
case 'I': // Unsigned 8-bit constant
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<8>(C->getZExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType()));
return;
case 'J': // Unsigned 12-bit constant
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<12>(C->getZExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType()));
return;
case 'K': // Signed 16-bit constant
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<16>(C->getSExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
Op.getValueType()));
return;
case 'L': // Signed 20-bit displacement (on all targets we support)
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<20>(C->getSExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
Op.getValueType()));
return;
case 'M': // 0x7fffffff
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (C->getZExtValue() == 0x7fffffff)
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType()));
return;
}
}
TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//
#include "SystemZGenCallingConv.inc"
const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
CallingConv::ID) const {
static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
SystemZ::R14D, 0 };
return ScratchRegs;
}
bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
Type *ToType) const {
return isTruncateFree(FromType, ToType);
}
bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
return CI->isTailCall();
}
// We do not yet support 128-bit single-element vector types. If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
if (ArgVT.isVector() && !VT.isVector())
report_fatal_error("Unsupported vector argument or return type");
}
static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
for (unsigned i = 0; i < Ins.size(); ++i)
VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}
static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
for (unsigned i = 0; i < Outs.size(); ++i)
VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}
// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
CCValAssign &VA, SDValue Chain,
SDValue Value) {
// If the argument has been promoted from a smaller type, insert an
// assertion to capture this.
if (VA.getLocInfo() == CCValAssign::SExt)
Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::ZExt)
Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
DAG.getValueType(VA.getValVT()));
if (VA.isExtInLoc())
Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
else if (VA.getLocInfo() == CCValAssign::BCvt) {
// If this is a short vector argument loaded from the stack,
// extend from i64 to full vector size and then bitcast.
assert(VA.getLocVT() == MVT::i64);
assert(VA.getValVT().isVector());
Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
} else
assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
return Value;
}
// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getLocVT(). The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
CCValAssign &VA, SDValue Value) {
switch (VA.getLocInfo()) {
case CCValAssign::SExt:
return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
case CCValAssign::ZExt:
return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
case CCValAssign::AExt:
return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
case CCValAssign::BCvt:
// If this is a short vector argument to be stored to the stack,
// bitcast to v2i64 and then extract first element.
assert(VA.getLocVT() == MVT::i64);
assert(VA.getValVT().isVector());
Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
DAG.getConstant(0, DL, MVT::i32));
case CCValAssign::Full:
return Value;
default:
llvm_unreachable("Unhandled getLocInfo()");
}
}
SDValue SystemZTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
SystemZMachineFunctionInfo *FuncInfo =
MF.getInfo<SystemZMachineFunctionInfo>();
auto *TFL =
static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
EVT PtrVT = getPointerTy(DAG.getDataLayout());
// Detect unsupported vector argument types.
if (Subtarget.hasVector())
VerifyVectorTypes(Ins);
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
unsigned NumFixedGPRs = 0;
unsigned NumFixedFPRs = 0;
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
SDValue ArgValue;
CCValAssign &VA = ArgLocs[I];
EVT LocVT = VA.getLocVT();
if (VA.isRegLoc()) {
// Arguments passed in registers
const TargetRegisterClass *RC;
switch (LocVT.getSimpleVT().SimpleTy) {
default:
// Integers smaller than i64 should be promoted to i64.
llvm_unreachable("Unexpected argument type");
case MVT::i32:
NumFixedGPRs += 1;
RC = &SystemZ::GR32BitRegClass;
break;
case MVT::i64:
NumFixedGPRs += 1;
RC = &SystemZ::GR64BitRegClass;
break;
case MVT::f32:
NumFixedFPRs += 1;
RC = &SystemZ::FP32BitRegClass;
break;
case MVT::f64:
NumFixedFPRs += 1;
RC = &SystemZ::FP64BitRegClass;
break;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
RC = &SystemZ::VR128BitRegClass;
break;
}
unsigned VReg = MRI.createVirtualRegister(RC);
MRI.addLiveIn(VA.getLocReg(), VReg);
ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
} else {
assert(VA.isMemLoc() && "Argument not register or memory");
// Create the frame index object for this incoming parameter.
int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
VA.getLocMemOffset(), true);
// Create the SelectionDAG nodes corresponding to a load
// from this parameter. Unpromoted ints and floats are
// passed as right-justified 8-byte values.
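// For example, an i32 or f32 argument occupies bytes 4 to 7 of its 8-byte
// slot, which is why 4 is added to the slot address below.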
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
DAG.getIntPtrConstant(4, DL));
ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
MachinePointerInfo::getFixedStack(MF, FI));
}
// Convert the value of the argument register into the value that's
// being passed.
if (VA.getLocInfo() == CCValAssign::Indirect) {
InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
MachinePointerInfo()));
// If the original argument was split (e.g. i128), we need
// to load all parts of it here (using the same address).
unsigned ArgIndex = Ins[I].OrigArgIndex;
assert (Ins[I].PartOffset == 0);
while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
CCValAssign &PartVA = ArgLocs[I + 1];
unsigned PartOffset = Ins[I + 1].PartOffset;
SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
DAG.getIntPtrConstant(PartOffset, DL));
InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
MachinePointerInfo()));
++I;
}
} else
InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
}
if (IsVarArg) {
// Save the number of non-varargs registers for later use by va_start, etc.
FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);
// Likewise the address (in the form of a frame index) of where the
// first stack vararg would be. The 1-byte size here is arbitrary.
int64_t StackSize = CCInfo.getNextStackOffset();
FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
// ...and a similar frame index for the caller-allocated save area
// that will be used to store the incoming registers.
int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
FuncInfo->setRegSaveFrameIndex(RegSaveIndex);
// Store the FPR varargs in the reserved frame slots. (We store the
// GPRs as part of the prologue.)
if (NumFixedFPRs < SystemZ::NumArgFPRs) {
SDValue MemOps[SystemZ::NumArgFPRs];
for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
&SystemZ::FP64BitRegClass);
SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
MachinePointerInfo::getFixedStack(MF, FI));
}
// Join the stores, which are independent of one another.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
makeArrayRef(&MemOps[NumFixedFPRs],
SystemZ::NumArgFPRs-NumFixedFPRs));
}
}
return Chain;
}
static bool canUseSiblingCall(const CCState &ArgCCInfo,
SmallVectorImpl<CCValAssign> &ArgLocs,
SmallVectorImpl<ISD::OutputArg> &Outs) {
// Punt if there are any indirect or stack arguments, or if the call
// needs the callee-saved argument register R6, or if the call uses
// the callee-saved register arguments SwiftSelf and SwiftError.
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
CCValAssign &VA = ArgLocs[I];
if (VA.getLocInfo() == CCValAssign::Indirect)
return false;
if (!VA.isRegLoc())
return false;
unsigned Reg = VA.getLocReg();
if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
return false;
if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
return false;
}
return true;
}
SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &DL = CLI.DL;
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &IsTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool IsVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
EVT PtrVT = getPointerTy(MF.getDataLayout());
// Detect unsupported vector argument and return types.
if (Subtarget.hasVector()) {
VerifyVectorTypes(Outs);
VerifyVectorTypes(Ins);
}
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
// We don't support GuaranteedTailCallOpt, only automatically-detected
// sibling calls.
if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
IsTailCall = false;
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = ArgCCInfo.getNextStackOffset();
// Mark the start of the call.
if (!IsTailCall)
Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
// Copy argument values to their designated locations.
SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
SDValue StackPtr;
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
CCValAssign &VA = ArgLocs[I];
SDValue ArgValue = OutVals[I];
if (VA.getLocInfo() == CCValAssign::Indirect) {
// Store the argument in a stack slot and pass its address.
SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
MemOpChains.push_back(
DAG.getStore(Chain, DL, ArgValue, SpillSlot,
MachinePointerInfo::getFixedStack(MF, FI)));
// If the original argument was split (e.g. i128), we need
// to store all parts of it here (and pass just one address).
unsigned ArgIndex = Outs[I].OrigArgIndex;
assert (Outs[I].PartOffset == 0);
while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
SDValue PartValue = OutVals[I + 1];
unsigned PartOffset = Outs[I + 1].PartOffset;
SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
DAG.getIntPtrConstant(PartOffset, DL));
MemOpChains.push_back(
DAG.getStore(Chain, DL, PartValue, Address,
MachinePointerInfo::getFixedStack(MF, FI)));
++I;
}
ArgValue = SpillSlot;
} else
ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);
if (VA.isRegLoc())
// Queue up the argument copies and emit them at the end.
RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
else {
assert(VA.isMemLoc() && "Argument not register or memory");
// Work out the address of the stack slot. Unpromoted ints and
// floats are passed as right-justified 8-byte values.
if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
Offset += 4;
SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
DAG.getIntPtrConstant(Offset, DL));
// Emit the store.
MemOpChains.push_back(
DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
}
}
// Join the stores, which are independent of one another.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
// Accept direct calls by converting symbolic call addresses to the
// associated Target* opcodes. Force %r1 to be used for indirect
// tail calls.
SDValue Glue;
if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
} else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
} else if (IsTailCall) {
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
Glue = Chain.getValue(1);
Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
}
// Build a sequence of copy-to-reg nodes, chained and glued together.
for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
RegsToPass[I].second, Glue);
Glue = Chain.getValue(1);
}
// The first call operand is the chain and the second is the target address.
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
// Add argument registers to the end of the list so that they are
// known live into the call.
for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
Ops.push_back(DAG.getRegister(RegsToPass[I].first,
RegsToPass[I].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
// Glue the call to the argument copies, if any.
if (Glue.getNode())
Ops.push_back(Glue);
// Emit the call.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
if (IsTailCall)
return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
Glue = Chain.getValue(1);
// Mark the end of the call, which is glued to the call itself.
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getConstant(NumBytes, DL, PtrVT, true),
DAG.getConstant(0, DL, PtrVT, true),
Glue, DL);
Glue = Chain.getValue(1);
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RetLocs;
CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
// Copy all of the result registers out of their specified physreg.
for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
CCValAssign &VA = RetLocs[I];
// Copy the value out, gluing the copy to the end of the call sequence.
SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
VA.getLocVT(), Glue);
Chain = RetValue.getValue(1);
Glue = RetValue.getValue(2);
// Convert the value of the return register into the value that's
// being returned.
InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
}
return Chain;
}
bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
// Detect unsupported vector return types.
if (Subtarget.hasVector())
VerifyVectorTypes(Outs);
// Special case that we cannot easily detect in RetCC_SystemZ since
// i128 is not a legal type.
for (auto &Out : Outs)
if (Out.ArgVT == MVT::i128)
return false;
SmallVector<CCValAssign, 16> RetLocs;
CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}
SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
// Detect unsupported vector return types.
if (Subtarget.hasVector())
VerifyVectorTypes(Outs);
// Assign locations to each returned value.
SmallVector<CCValAssign, 16> RetLocs;
CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
// Quick exit for void returns
if (RetLocs.empty())
return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);
// Copy the result values into the output registers.
SDValue Glue;
SmallVector<SDValue, 4> RetOps;
RetOps.push_back(Chain);
for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
CCValAssign &VA = RetLocs[I];
SDValue RetValue = OutVals[I];
// Make the return register live on exit.
assert(VA.isRegLoc() && "Can only return in registers!");
// Promote the value as required.
RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);
// Chain and glue the copies together.
unsigned Reg = VA.getLocReg();
Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
}
// Update chain and glue.
RetOps[0] = Chain;
if (Glue.getNode())
RetOps.push_back(Glue);
return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}
// Return true if Op is an intrinsic node with chain whose only result other
// than the chain is the CC value. Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
unsigned &CCValid) {
unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
switch (Id) {
case Intrinsic::s390_tbegin:
Opcode = SystemZISD::TBEGIN;
CCValid = SystemZ::CCMASK_TBEGIN;
return true;
case Intrinsic::s390_tbegin_nofloat:
Opcode = SystemZISD::TBEGIN_NOFLOAT;
CCValid = SystemZ::CCMASK_TBEGIN;
return true;
case Intrinsic::s390_tend:
Opcode = SystemZISD::TEND;
CCValid = SystemZ::CCMASK_TEND;
return true;
default:
return false;
}
}
// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final result. Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (Id) {
case Intrinsic::s390_vpkshs:
case Intrinsic::s390_vpksfs:
case Intrinsic::s390_vpksgs:
Opcode = SystemZISD::PACKS_CC;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vpklshs:
case Intrinsic::s390_vpklsfs:
case Intrinsic::s390_vpklsgs:
Opcode = SystemZISD::PACKLS_CC;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vceqbs:
case Intrinsic::s390_vceqhs:
case Intrinsic::s390_vceqfs:
case Intrinsic::s390_vceqgs:
Opcode = SystemZISD::VICMPES;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vchbs:
case Intrinsic::s390_vchhs:
case Intrinsic::s390_vchfs:
case Intrinsic::s390_vchgs:
Opcode = SystemZISD::VICMPHS;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vchlbs:
case Intrinsic::s390_vchlhs:
case Intrinsic::s390_vchlfs:
case Intrinsic::s390_vchlgs:
Opcode = SystemZISD::VICMPHLS;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vtm:
Opcode = SystemZISD::VTM;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vfaebs:
case Intrinsic::s390_vfaehs:
case Intrinsic::s390_vfaefs:
Opcode = SystemZISD::VFAE_CC;
CCValid = SystemZ::CCMASK_ANY;
return true;
case Intrinsic::s390_vfaezbs:
case Intrinsic::s390_vfaezhs:
case Intrinsic::s390_vfaezfs:
Opcode = SystemZISD::VFAEZ_CC;
CCValid = SystemZ::CCMASK_ANY;
return true;
case Intrinsic::s390_vfeebs:
case Intrinsic::s390_vfeehs:
case Intrinsic::s390_vfeefs:
Opcode = SystemZISD::VFEE_CC;
CCValid = SystemZ::CCMASK_ANY;
return true;
case Intrinsic::s390_vfeezbs:
case Intrinsic::s390_vfeezhs:
case Intrinsic::s390_vfeezfs:
Opcode = SystemZISD::VFEEZ_CC;
CCValid = SystemZ::CCMASK_ANY;
return true;
case Intrinsic::s390_vfenebs:
case Intrinsic::s390_vfenehs:
case Intrinsic::s390_vfenefs:
Opcode = SystemZISD::VFENE_CC;
CCValid = SystemZ::CCMASK_ANY;
return true;
case Intrinsic::s390_vfenezbs:
case Intrinsic::s390_vfenezhs:
case Intrinsic::s390_vfenezfs:
Opcode = SystemZISD::VFENEZ_CC;
CCValid = SystemZ::CCMASK_ANY;
return true;
case Intrinsic::s390_vistrbs:
case Intrinsic::s390_vistrhs:
case Intrinsic::s390_vistrfs:
Opcode = SystemZISD::VISTR_CC;
CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
return true;
case Intrinsic::s390_vstrcbs:
case Intrinsic::s390_vstrchs:
case Intrinsic::s390_vstrcfs:
Opcode = SystemZISD::VSTRC_CC;
CCValid = SystemZ::CCMASK_ANY;
return true;
case Intrinsic::s390_vstrczbs:
case Intrinsic::s390_vstrczhs:
case Intrinsic::s390_vstrczfs:
Opcode = SystemZISD::VSTRCZ_CC;
CCValid = SystemZ::CCMASK_ANY;
return true;
case Intrinsic::s390_vfcedbs:
case Intrinsic::s390_vfcesbs:
Opcode = SystemZISD::VFCMPES;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vfchdbs:
case Intrinsic::s390_vfchsbs:
Opcode = SystemZISD::VFCMPHS;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vfchedbs:
case Intrinsic::s390_vfchesbs:
Opcode = SystemZISD::VFCMPHES;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_vftcidb:
case Intrinsic::s390_vftcisb:
Opcode = SystemZISD::VFTCI;
CCValid = SystemZ::CCMASK_VCMP;
return true;
case Intrinsic::s390_tdc:
Opcode = SystemZISD::TDC;
CCValid = SystemZ::CCMASK_TDC;
return true;
default:
return false;
}
}
// Emit an intrinsic with chain and an explicit CC register result.
static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
unsigned Opcode) {
// Copy all operands except the intrinsic ID.
unsigned NumOps = Op.getNumOperands();
SmallVector<SDValue, 6> Ops;
Ops.reserve(NumOps - 1);
Ops.push_back(Op.getOperand(0));
for (unsigned I = 2; I < NumOps; ++I)
Ops.push_back(Op.getOperand(I));
assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
SDValue OldChain = SDValue(Op.getNode(), 1);
SDValue NewChain = SDValue(Intr.getNode(), 1);
DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
return Intr.getNode();
}
// Emit an intrinsic with an explicit CC register result.
static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
unsigned Opcode) {
// Copy all operands except the intrinsic ID.
unsigned NumOps = Op.getNumOperands();
SmallVector<SDValue, 6> Ops;
Ops.reserve(NumOps - 1);
for (unsigned I = 1; I < NumOps; ++I)
Ops.push_back(Op.getOperand(I));
SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
return Intr.getNode();
}
// CC is a comparison that will be implemented using an integer or
// floating-point comparison. Return the condition code mask for
// a branch on true. In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones. In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
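// For example, SETLT and SETOLT both map to CCMASK_CMP_LT, while SETULT
// maps to CCMASK_CMP_UO | CCMASK_CMP_LT.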
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
switch (CC) {
default:
llvm_unreachable("Invalid integer condition!");
CONV(EQ);
CONV(NE);
CONV(GT);
CONV(GE);
CONV(LT);
CONV(LE);
case ISD::SETO: return SystemZ::CCMASK_CMP_O;
case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
}
#undef CONV
}
// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
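// For example, 'x > -1' becomes 'x >= 0', and 'x < 1' becomes 'x <= 0'.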
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
if (C.ICmpType == SystemZICMP::UnsignedOnly)
return;
auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
if (!ConstOp1)
return;
int64_t Value = ConstOp1->getSExtValue();
if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
(Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
(Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
(Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
}
}
// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
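// For example, a comparison of an 8-bit zero-extending load against a
// constant in the range 0..255 is within range of the unextended value, so
// any comparison type can be used and CLI becomes selectable.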
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
Comparison &C) {
// For us to make any changes, it must be a comparison between a single-use
// load and a constant.
if (!C.Op0.hasOneUse() ||
C.Op0.getOpcode() != ISD::LOAD ||
C.Op1.getOpcode() != ISD::Constant)
return;
// We must have an 8- or 16-bit load.
auto *Load = cast<LoadSDNode>(C.Op0);
unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
if (NumBits != 8 && NumBits != 16)
return;
// The load must be an extending one and the constant must be within the
// range of the unextended value.
auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
uint64_t Value = ConstOp1->getZExtValue();
uint64_t Mask = (1 << NumBits) - 1;
if (Load->getExtensionType() == ISD::SEXTLOAD) {
// Make sure that ConstOp1 is in range of C.Op0.
int64_t SignedValue = ConstOp1->getSExtValue();
if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
return;
if (C.ICmpType != SystemZICMP::SignedOnly) {
// Unsigned comparison between two sign-extended values is equivalent
// to unsigned comparison between two zero-extended values.
Value &= Mask;
} else if (NumBits == 8) {
// Try to treat the comparison as unsigned, so that we can use CLI.
// Adjust CCMask and Value as necessary.
if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
// Test whether the high bit of the byte is set.
Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
// Test whether the high bit of the byte is clear.
Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
else
// No instruction exists for this combination.
return;
C.ICmpType = SystemZICMP::UnsignedOnly;
}
} else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
if (Value > Mask)
return;
// If the constant is in range, we can use any comparison.
C.ICmpType = SystemZICMP::Any;
} else
return;
// Make sure that the first operand is an i32 of the right extension type.
ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
ISD::SEXTLOAD :
ISD::ZEXTLOAD);
if (C.Op0.getValueType() != MVT::i32 ||
Load->getExtensionType() != ExtType) {
C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
Load->getBasePtr(), Load->getPointerInfo(),
Load->getMemoryVT(), Load->getAlignment(),
Load->getMemOperand()->getFlags());
// Update the chain uses.
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
}
// Make sure that the second operand is an i32 with the right value.
if (C.Op1.getValueType() != MVT::i32 ||
Value != ConstOp1->getZExtValue())
C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
}
// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
if (Load) {
// There are no instructions to compare a register with a memory byte.
if (Load->getMemoryVT() == MVT::i8)
return false;
// Otherwise decide on extension type.
switch (Load->getExtensionType()) {
case ISD::NON_EXTLOAD:
return true;
case ISD::SEXTLOAD:
return ICmpType != SystemZICMP::UnsignedOnly;
case ISD::ZEXTLOAD:
return ICmpType != SystemZICMP::SignedOnly;
default:
break;
}
}
return false;
}
// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
// Leave f128 comparisons alone, since they have no memory forms.
if (C.Op0.getValueType() == MVT::f128)
return false;
// Always keep a floating-point constant second, since comparisons with
// zero can use LOAD TEST and comparisons with other constants make a
// natural memory operand.
if (isa<ConstantFPSDNode>(C.Op1))
return false;
// Never swap comparisons with zero since there are many ways to optimize
// those later.
auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
if (ConstOp1 && ConstOp1->getZExtValue() == 0)
return false;
// Also keep natural memory operands second if the loaded value is
// only used here. Several comparisons have memory forms.
if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
return false;
// Look for cases where C.Op0 is a single-use load and C.Op1 isn't.
// In that case we generally prefer the memory to be second.
if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
// The only exceptions are when the second operand is a constant and
// we can use things like CHHSI.
if (!ConstOp1)
return true;
// The unsigned memory-immediate instructions can handle 16-bit
// unsigned integers.
if (C.ICmpType != SystemZICMP::SignedOnly &&
isUInt<16>(ConstOp1->getZExtValue()))
return false;
// The signed memory-immediate instructions can handle 16-bit
// signed integers.
if (C.ICmpType != SystemZICMP::UnsignedOnly &&
isInt<16>(ConstOp1->getSExtValue()))
return false;
return true;
}
// Try to promote the use of CGFR and CLGFR.
unsigned Opcode0 = C.Op0.getOpcode();
if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
return true;
if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
return true;
if (C.ICmpType != SystemZICMP::SignedOnly &&
Opcode0 == ISD::AND &&
C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
return true;
return false;
}
// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
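// For example, CCMASK_CMP_LT becomes CCMASK_CMP_GT and vice versa, while
// the EQ and UO bits are left unchanged.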
static unsigned reverseCCMask(unsigned CCMask) {
return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
(CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
(CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
(CCMask & SystemZ::CCMASK_CMP_UO));
}
// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed. In that case it's better to compare the
// result of the subtraction against zero.
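// For example, if the DAG already contains (sub X, Y) and C tests X == Y,
// the comparison is rewritten as (sub X, Y) == 0 so that the subtraction's
// CC result can be reused.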
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
Comparison &C) {
if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
C.CCMask == SystemZ::CCMASK_CMP_NE) {
for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
SDNode *N = *I;
if (N->getOpcode() == ISD::SUB &&
((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
(N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
C.Op0 = SDValue(N, 0);
C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
return;
}
}
}
}
// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated. In this case we can use the
// negation to set CC, so avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
if (C1 && C1->isZero()) {
for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
SDNode *N = *I;
if (N->getOpcode() == ISD::FNEG) {
C.Op0 = SDValue(N, 0);
C.CCMask = reverseCCMask(C.CCMask);
return;
}
}
}
}
// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended. In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
// Check for a comparison between (shl X, 32) and 0.
if (C.Op0.getOpcode() == ISD::SHL &&
C.Op0.getValueType() == MVT::i64 &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
if (C1 && C1->getZExtValue() == 32) {
SDValue ShlOp0 = C.Op0.getOperand(0);
// See whether X has any SIGN_EXTEND_INREG uses.
for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
SDNode *N = *I;
if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
C.Op0 = SDValue(N, 0);
return;
}
}
}
}
}
// If C compares the truncation of an extending load, try to compare
// the untruncated value instead. This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
Comparison &C) {
if (C.Op0.getOpcode() == ISD::TRUNCATE &&
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
unsigned Type = L->getExtensionType();
if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
(Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
C.Op0 = C.Op0.getOperand(0);
C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
}
}
}
}
// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!Shift)
return false;
uint64_t Amount = Shift->getZExtValue();
if (Amount >= N.getValueSizeInBits())
return false;
ShiftVal = Amount;
return true;
}
// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands. If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
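// For example, with Mask = 0x8000, an equality comparison of the AND result
// with 0 maps to CCMASK_TM_ALL_0 and an inequality comparison maps to
// CCMASK_TM_SOME_1.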
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
uint64_t Mask, uint64_t CmpVal,
unsigned ICmpType) {
assert(Mask != 0 && "ANDs with zero should have been removed by now");
// Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
!SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
return 0;
// Work out the masks for the lowest and highest bits.
unsigned HighShift = 63 - countLeadingZeros(Mask);
uint64_t High = uint64_t(1) << HighShift;
uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
// Signed ordered comparisons are effectively unsigned if the sign
// bit is dropped.
bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
// Check for equality comparisons with 0, or the equivalent.
if (CmpVal == 0) {
if (CCMask == SystemZ::CCMASK_CMP_EQ)
return SystemZ::CCMASK_TM_ALL_0;
if (CCMask == SystemZ::CCMASK_CMP_NE)
return SystemZ::CCMASK_TM_SOME_1;
}
if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
if (CCMask == SystemZ::CCMASK_CMP_LT)
return SystemZ::CCMASK_TM_ALL_0;
if (CCMask == SystemZ::CCMASK_CMP_GE)
return SystemZ::CCMASK_TM_SOME_1;
}
if (EffectivelyUnsigned && CmpVal < Low) {
if (CCMask == SystemZ::CCMASK_CMP_LE)
return SystemZ::CCMASK_TM_ALL_0;
if (CCMask == SystemZ::CCMASK_CMP_GT)
return SystemZ::CCMASK_TM_SOME_1;
}
// Check for equality comparisons with the mask, or the equivalent.
if (CmpVal == Mask) {
if (CCMask == SystemZ::CCMASK_CMP_EQ)
return SystemZ::CCMASK_TM_ALL_1;
if (CCMask == SystemZ::CCMASK_CMP_NE)
return SystemZ::CCMASK_TM_SOME_0;
}
if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
if (CCMask == SystemZ::CCMASK_CMP_GT)
return SystemZ::CCMASK_TM_ALL_1;
if (CCMask == SystemZ::CCMASK_CMP_LE)
return SystemZ::CCMASK_TM_SOME_0;
}
if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
if (CCMask == SystemZ::CCMASK_CMP_GE)
return SystemZ::CCMASK_TM_ALL_1;
if (CCMask == SystemZ::CCMASK_CMP_LT)
return SystemZ::CCMASK_TM_SOME_0;
}
// Check for ordered comparisons with the top bit.
if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
if (CCMask == SystemZ::CCMASK_CMP_LE)
return SystemZ::CCMASK_TM_MSB_0;
if (CCMask == SystemZ::CCMASK_CMP_GT)
return SystemZ::CCMASK_TM_MSB_1;
}
if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
if (CCMask == SystemZ::CCMASK_CMP_LT)
return SystemZ::CCMASK_TM_MSB_0;
if (CCMask == SystemZ::CCMASK_CMP_GE)
return SystemZ::CCMASK_TM_MSB_1;
}
// If there are just two bits, we can do equality checks for Low and High
// as well.
if (Mask == Low + High) {
if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
return SystemZ::CCMASK_TM_MIXED_MSB_0;
if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
return SystemZ::CCMASK_TM_MIXED_MSB_1;
if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
}
// Looks like we've exhausted our options.
return 0;
}
// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
Comparison &C) {
// Check that we have a comparison with a constant.
auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
if (!ConstOp1)
return;
uint64_t CmpVal = ConstOp1->getZExtValue();
// Check whether the nonconstant input is an AND with a constant mask.
Comparison NewC(C);
uint64_t MaskVal;
ConstantSDNode *Mask = nullptr;
if (C.Op0.getOpcode() == ISD::AND) {
NewC.Op0 = C.Op0.getOperand(0);
NewC.Op1 = C.Op0.getOperand(1);
Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
if (!Mask)
return;
MaskVal = Mask->getZExtValue();
} else {
// There is no instruction to compare with a 64-bit immediate
// so use TMHH instead if possible. We need an unsigned ordered
// comparison with an i64 immediate.
if (NewC.Op0.getValueType() != MVT::i64 ||
NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
NewC.ICmpType == SystemZICMP::SignedOnly)
return;
// Convert LE and GT comparisons into LT and GE.
if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
if (CmpVal == uint64_t(-1))
return;
CmpVal += 1;
NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
}
// If the low N bits of Op1 are zero, then the low N bits of Op0 can
// be masked off without changing the result.
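// For example, CmpVal = 0x100 gives MaskVal = 0xffffffffffffff00, masking
// off only the low eight bits.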
MaskVal = -(CmpVal & -CmpVal);
NewC.ICmpType = SystemZICMP::UnsignedOnly;
}
if (!MaskVal)
return;
// Check whether the combination of mask, comparison value and comparison
// type are suitable.
unsigned BitSize = NewC.Op0.getValueSizeInBits();
unsigned NewCCMask, ShiftVal;
if (NewC.ICmpType != SystemZICMP::SignedOnly &&
NewC.Op0.getOpcode() == ISD::SHL &&
isSimpleShift(NewC.Op0, ShiftVal) &&
(MaskVal >> ShiftVal != 0) &&
((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
(NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
MaskVal >> ShiftVal,
CmpVal >> ShiftVal,
SystemZICMP::Any))) {
NewC.Op0 = NewC.Op0.getOperand(0);
MaskVal >>= ShiftVal;
} else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
NewC.Op0.getOpcode() == ISD::SRL &&
isSimpleShift(NewC.Op0, ShiftVal) &&
(MaskVal << ShiftVal != 0) &&
((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
(NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
MaskVal << ShiftVal,
CmpVal << ShiftVal,
SystemZICMP::UnsignedOnly))) {
NewC.Op0 = NewC.Op0.getOperand(0);
MaskVal <<= ShiftVal;
} else {
NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
NewC.ICmpType);
if (!NewCCMask)
return;
}
// Go ahead and make the change.
C.Opcode = SystemZISD::TM;
C.Op0 = NewC.Op0;
if (Mask && Mask->getZExtValue() == MaskVal)
C.Op1 = SDValue(Mask, 0);
else
C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
C.CCValid = SystemZ::CCMASK_TM;
C.CCMask = NewCCMask;
}
// See whether the comparison argument contains a redundant AND
// and remove it if so. This sometimes happens due to the generic
// BRCOND expansion.
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
Comparison &C) {
if (C.Op0.getOpcode() != ISD::AND)
return;
auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
if (!Mask)
return;
KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
return;
C.Op0 = C.Op0.getOperand(0);
}
// Return a Comparison that tests the condition-code result of intrinsic
// node Call against constant integer CC using comparison code Cond.
// Opcode is the opcode of the SystemZISD operation for the intrinsic
// and CCValid is the set of possible condition-code results.
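// For example, Cond == SETEQ with CC == 0 yields CCMask = 1 << 3, the bit
// for CC 0, which is then restricted to CCValid.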
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
SDValue Call, unsigned CCValid, uint64_t CC,
ISD::CondCode Cond) {
Comparison C(Call, SDValue());
C.Opcode = Opcode;
C.CCValid = CCValid;
if (Cond == ISD::SETEQ)
// bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
else if (Cond == ISD::SETNE)
// ...and the inverse of that.
C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
// bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
// always true for CC>3.
C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
// ...and the inverse of that.
C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
// bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
// always true for CC>3.
C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
// ...and the inverse of that.
C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
else
llvm_unreachable("Unexpected integer comparison type");
C.CCMask &= CCValid;
return C;
}
// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
ISD::CondCode Cond, const SDLoc &DL) {
if (CmpOp1.getOpcode() == ISD::Constant) {
uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
unsigned Opcode, CCValid;
if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
}
Comparison C(CmpOp0, CmpOp1);
C.CCMask = CCMaskForCondCode(Cond);
if (C.Op0.getValueType().isFloatingPoint()) {
C.CCValid = SystemZ::CCMASK_FCMP;
C.Opcode = SystemZISD::FCMP;
adjustForFNeg(C);
} else {
C.CCValid = SystemZ::CCMASK_ICMP;
C.Opcode = SystemZISD::ICMP;
// Choose the type of comparison. Equality and inequality tests can
// use either signed or unsigned comparisons. The choice also doesn't
// matter if both sign bits are known to be clear. In those cases we
// want to give the main isel code the freedom to choose whichever
// form fits best.
if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
C.CCMask == SystemZ::CCMASK_CMP_NE ||
(DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
C.ICmpType = SystemZICMP::Any;
else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
C.ICmpType = SystemZICMP::UnsignedOnly;
else
C.ICmpType = SystemZICMP::SignedOnly;
C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
adjustForRedundantAnd(DAG, DL, C);
adjustZeroCmp(DAG, DL, C);
adjustSubwordCmp(DAG, DL, C);
adjustForSubtraction(DAG, DL, C);
adjustForLTGFR(C);
adjustICmpTruncate(DAG, DL, C);
}
if (shouldSwapCmpOperands(C)) {
std::swap(C.Op0, C.Op1);
C.CCMask = reverseCCMask(C.CCMask);
}
adjustForTestUnderMask(DAG, DL, C);
return C;
}
// Emit the comparison instruction described by C.
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
if (!C.Op1.getNode()) {
SDNode *Node;
switch (C.Op0.getOpcode()) {
case ISD::INTRINSIC_W_CHAIN:
Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
return SDValue(Node, 0);
case ISD::INTRINSIC_WO_CHAIN:
Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
return SDValue(Node, Node->getNumValues() - 1);
default:
llvm_unreachable("Invalid comparison operands");
}
}
if (C.Opcode == SystemZISD::ICMP)
return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
DAG.getConstant(C.ICmpType, DL, MVT::i32));
if (C.Opcode == SystemZISD::TM) {
bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
DAG.getConstant(RegisterOnly, DL, MVT::i32));
}
return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
}
// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits. Extend is the extension type to use. Store the high part
// in Hi and the low part in Lo.
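// For example, with ISD::ZERO_EXTEND, 0xffffffff * 0xffffffff produces the
// 64-bit value 0xfffffffe00000001, so Hi = 0xfffffffe and Lo = 0x00000001.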
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
SDValue Op0, SDValue Op1, SDValue &Hi,
SDValue &Lo) {
Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
DAG.getConstant(32, DL, MVT::i64));
Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
// and Opcode performs the GR128 operation. Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
unsigned Opcode, SDValue Op0, SDValue Op1,
SDValue &Even, SDValue &Odd) {
SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
bool Is32Bit = is32Bit(VT);
Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}
// Return an i32 value that is 1 if the CC value produced by CCReg is
// in the mask CCMask and 0 otherwise. CC is known to have a value
// in CCValid, so other values can be ignored.
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
unsigned CCValid, unsigned CCMask) {
SDValue Ops[] = { DAG.getConstant(1, DL, MVT::i32),
DAG.getConstant(0, DL, MVT::i32),
DAG.getConstant(CCValid, DL, MVT::i32),
DAG.getConstant(CCMask, DL, MVT::i32), CCReg };
return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
}
// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
// be done directly. IsFP is true if CC is for a floating-point rather than
// integer comparison.
static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
switch (CC) {
case ISD::SETOEQ:
case ISD::SETEQ:
return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;
case ISD::SETOGE:
case ISD::SETGE:
return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);
case ISD::SETOGT:
case ISD::SETGT:
return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;
case ISD::SETUGT:
return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;
default:
return 0;
}
}
// Return the SystemZISD vector comparison operation for CC or its inverse,
// or 0 if neither can be done directly. Indicate in Invert whether the
// result is for the inverse of CC. IsFP is true if CC is for a
// floating-point rather than integer comparison.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
bool &Invert) {
if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
Invert = false;
return Opcode;
}
CC = ISD::getSetCCInverse(CC, !IsFP);
if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
Invert = true;
return Opcode;
}
return 0;
}
// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
SDValue Op) {
int Mask[] = { Start, -1, Start + 1, -1 };
Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
}
// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
// producing a result of type VT.
SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
const SDLoc &DL, EVT VT,
SDValue CmpOp0,
SDValue CmpOp1) const {
// There is no hardware support for v4f32 (unless we have the vector
// enhancements facility 1), so extend the vector into two v2f64s
// and compare those.
if (CmpOp0.getValueType() == MVT::v4f32 &&
!Subtarget.hasVectorEnhancements1()) {
SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
}
return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
}
// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
// an integer mask of type VT.
SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
const SDLoc &DL, EVT VT,
ISD::CondCode CC,
SDValue CmpOp0,
SDValue CmpOp1) const {
bool IsFP = CmpOp0.getValueType().isFloatingPoint();
bool Invert = false;
SDValue Cmp;
switch (CC) {
// Handle tests for order using (or (ogt y x) (oge x y)).
case ISD::SETUO:
Invert = true;
LLVM_FALLTHROUGH;
case ISD::SETO: {
assert(IsFP && "Unexpected integer comparison");
SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1);
Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
break;
}
// Handle <> tests using (or (ogt y x) (ogt x y)).
case ISD::SETUEQ:
Invert = true;
LLVM_FALLTHROUGH;
case ISD::SETONE: {
assert(IsFP && "Unexpected integer comparison");
SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1);
Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
break;
}
// Otherwise a single comparison is enough. It doesn't really
// matter whether we try the inversion or the swap first, since
// there are no cases where both work.
default:
if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
else {
CC = ISD::getSetCCSwappedOperands(CC);
if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
else
llvm_unreachable("Unhandled comparison");
}
break;
}
if (Invert) {
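// A BYTE_MASK of 65535 selects all bytes and so materializes an all-ones
// vector; the XOR below therefore inverts every lane of the comparison.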
SDValue Mask = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
DAG.getConstant(65535, DL, MVT::i32));
Mask = DAG.getNode(ISD::BITCAST, DL, VT, Mask);
Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
}
return Cmp;
}
SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
SelectionDAG &DAG) const {
SDValue CmpOp0 = Op.getOperand(0);
SDValue CmpOp1 = Op.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
SDLoc DL(Op);
EVT VT = Op.getValueType();
if (VT.isVector())
return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
SDValue CCReg = emitCmp(DAG, DL, C);
return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
}
SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
SDValue CmpOp0 = Op.getOperand(2);
SDValue CmpOp1 = Op.getOperand(3);
SDValue Dest = Op.getOperand(4);
SDLoc DL(Op);
Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
SDValue CCReg = emitCmp(DAG, DL, C);
return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
}
// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
// allowing Pos and Neg to be wider than CmpOp.
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
return (Neg.getOpcode() == ISD::SUB &&
Neg.getOperand(0).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
Neg.getOperand(1) == Pos &&
(Pos == CmpOp ||
(Pos.getOpcode() == ISD::SIGN_EXTEND &&
Pos.getOperand(0) == CmpOp)));
}
// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
bool IsNegative) {
Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
if (IsNegative)
Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
DAG.getConstant(0, DL, Op.getValueType()), Op);
return Op;
}
SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
SelectionDAG &DAG) const {
SDValue CmpOp0 = Op.getOperand(0);
SDValue CmpOp1 = Op.getOperand(1);
SDValue TrueOp = Op.getOperand(2);
SDValue FalseOp = Op.getOperand(3);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
SDLoc DL(Op);
Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
// Check for absolute and negative-absolute selections, including those
// where the comparison value is sign-extended (for LPGFR and LNGFR).
// This check supplements the one in DAGCombiner.
if (C.Opcode == SystemZISD::ICMP &&
C.CCMask != SystemZ::CCMASK_CMP_EQ &&
C.CCMask != SystemZ::CCMASK_CMP_NE &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
if (isAbsolute(C.Op0, TrueOp, FalseOp))
return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
if (isAbsolute(C.Op0, FalseOp, TrueOp))
return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
}
SDValue CCReg = emitCmp(DAG, DL, C);
SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32),
DAG.getConstant(C.CCMask, DL, MVT::i32), CCReg};
return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
}
SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
SelectionDAG &DAG) const {
SDLoc DL(Node);
const GlobalValue *GV = Node->getGlobal();
int64_t Offset = Node->getOffset();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
CodeModel::Model CM = DAG.getTarget().getCodeModel();
SDValue Result;
if (Subtarget.isPC32DBLSymbol(GV, CM)) {
// Assign anchors at 1<<12 byte boundaries.
uint64_t Anchor = Offset & ~uint64_t(0xfff);
Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
// The offset can be folded into the address if it is aligned to a halfword.
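// For example, Offset = 0x1234 gives Anchor = 0x1000; the remaining 0x234
// is halfword-aligned and is folded into the PCREL_OFFSET node below.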
Offset -= Anchor;
if (Offset != 0 && (Offset & 1) == 0) {
SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
Offset = 0;
}
} else {
Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
// If there was a non-zero offset that we didn't fold, create an explicit
// addition for it.
if (Offset != 0)
Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
DAG.getConstant(Offset, DL, PtrVT));
return Result;
}
SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
SelectionDAG &DAG,
unsigned Opcode,
SDValue GOTOffset) const {
SDLoc DL(Node);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Chain = DAG.getEntryNode();
SDValue Glue;
// __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
Glue = Chain.getValue(1);
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
Glue = Chain.getValue(1);
// The first call operand is the chain and the second is the TLS symbol.
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
Node->getValueType(0),
0, 0));
// Add argument registers to the end of the list so that they are
// known live into the call.
Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
// Add a register mask operand representing the call-preserved registers.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask =
TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
// Glue the call to the argument copies.
Ops.push_back(Glue);
// Emit the call.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
Glue = Chain.getValue(1);
// Copy the return value from %r2.
return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
}
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
SelectionDAG &DAG) const {
SDValue Chain = DAG.getEntryNode();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
// The high part of the thread pointer is in access register 0.
SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
// The low part of the thread pointer is in access register 1.
SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
// Merge them into a single 64-bit address.
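// For example, A0 = 0x00000001 and A1 = 0x23456789 combine to give the
// thread pointer 0x0000000123456789.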
SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
DAG.getConstant(32, DL, PtrVT));
return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
}
SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SelectionDAG &DAG) const {
if (DAG.getTarget().useEmulatedTLS())
return LowerToTLSEmulatedModel(Node, DAG);
SDLoc DL(Node);
const GlobalValue *GV = Node->getGlobal();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
SDValue TP = lowerThreadPointer(DL, DAG);
// Get the offset of GA from the thread pointer, based on the TLS model.
SDValue Offset;
switch (model) {
case TLSModel::GeneralDynamic: {
// Load the GOT offset of the tls_index (module ID / per-symbol offset).
SystemZConstantPoolValue *CPV =
SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
Offset = DAG.getConstantPool(CPV, PtrVT, 8);
Offset = DAG.getLoad(
PtrVT, DL, DAG.getEntryNode(), Offset,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
// Call __tls_get_offset to retrieve the offset.
Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
break;
}
case TLSModel::LocalDynamic: {
// Load the GOT offset of the module ID.
SystemZConstantPoolValue *CPV =
SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
Offset = DAG.getConstantPool(CPV, PtrVT, 8);
Offset = DAG.getLoad(
PtrVT, DL, DAG.getEntryNode(), Offset,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
// Call __tls_get_offset to retrieve the module base offset.
Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
// Note: The SystemZLDCleanupPass will remove redundant computations
// of the module base offset. Count total number of local-dynamic
// accesses to trigger execution of that pass.
SystemZMachineFunctionInfo* MFI =
DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
MFI->incNumLocalDynamicTLSAccesses();
// Add the per-symbol offset.
CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
DTPOffset = DAG.getLoad(
PtrVT, DL, DAG.getEntryNode(), DTPOffset,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
break;
}
case TLSModel::InitialExec: {
// Load the offset from the GOT.
Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
SystemZII::MO_INDNTPOFF);
Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
Offset =
DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
break;
}
case TLSModel::LocalExec: {
// Force the offset into the constant pool and load it from there.
SystemZConstantPoolValue *CPV =
SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
Offset = DAG.getConstantPool(CPV, PtrVT, 8);
Offset = DAG.getLoad(
PtrVT, DL, DAG.getEntryNode(), Offset,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
break;
}
}
// Add the base and offset together.
return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}
SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
SelectionDAG &DAG) const {
SDLoc DL(Node);
const BlockAddress *BA = Node->getBlockAddress();
int64_t Offset = Node->getOffset();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
return Result;
}
SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
SelectionDAG &DAG) const {
SDLoc DL(JT);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
// Use LARL to load the address of the table.
return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}
SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
SelectionDAG &DAG) const {
SDLoc DL(CP);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result;
if (CP->isMachineConstantPoolEntry())
Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
CP->getAlignment());
else
Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
CP->getAlignment(), CP->getOffset());
// Use LARL to load the address of the constant pool entry.
return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}
SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
MFI.setFrameAddressIsTaken(true);
SDLoc DL(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
// If the back chain frame index has not been allocated yet, do so.
SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
int BackChainIdx = FI->getFramePointerSaveIndex();
if (!BackChainIdx) {
// By definition, the frame address is the address of the back chain.
BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
FI->setFramePointerSaveIndex(BackChainIdx);
}
SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);
// FIXME The frontend should detect this case.
if (Depth > 0) {
report_fatal_error("Unsupported stack frame traversal count");
}
return BackChain;
}
SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
MFI.setReturnAddressIsTaken(true);
if (verifyReturnAddressArgumentIsConstant(Op, DAG))
return SDValue();
SDLoc DL(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
// FIXME The frontend should detect this case.
if (Depth > 0) {
report_fatal_error("Unsupported stack frame traversal count");
}
// Return R14D, which has the return address. Mark it an implicit live-in.
unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}
SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
SDValue In = Op.getOperand(0);
EVT InVT = In.getValueType();
EVT ResVT = Op.getValueType();
// Convert loads directly. This is normally done by DAGCombiner,
// but we need this case for bitcasts that are created during lowering
// and which are then lowered themselves.
if (auto *LoadN = dyn_cast<LoadSDNode>(In))
if (ISD::isNormalLoad(LoadN)) {
SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
LoadN->getBasePtr(), LoadN->getMemOperand());
// Update the chain uses.
DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
return NewLoad;
}
if (InVT == MVT::i32 && ResVT == MVT::f32) {
SDValue In64;
if (Subtarget.hasHighWord()) {
SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
MVT::i64);
In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
MVT::i64, SDValue(U64, 0), In);
} else {
In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
DAG.getConstant(32, DL, MVT::i64));
}
SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
DL, MVT::f32, Out64);
}
if (InVT == MVT::f32 && ResVT == MVT::i32) {
SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
MVT::f64, SDValue(U64, 0), In);
SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
if (Subtarget.hasHighWord())
return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
MVT::i32, Out64);
SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
DAG.getConstant(32, DL, MVT::i64));
return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
}
llvm_unreachable("Unexpected bitcast combination");
}
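// Illustrative note on lowerBITCAST above (a sketch): f32 values live in the
// high 32 bits of a 64-bit FPR, so an i32 <-> f32 bitcast goes through the
// high word of a 64-bit register. Without the high-word facility the
// i32 -> f32 direction is roughly
//
//   In64   = (any-extend In to i64) << 32    // move In into bits 63..32
//   Out64  = bitcast In64 to f64
//   result = subreg_h32 of Out64
//
// and the f32 -> i32 direction mirrors it with an SRL by 32 plus a truncate.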
SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
SystemZMachineFunctionInfo *FuncInfo =
MF.getInfo<SystemZMachineFunctionInfo>();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Chain = Op.getOperand(0);
SDValue Addr = Op.getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
SDLoc DL(Op);
// The initial values of each field.
const unsigned NumFields = 4;
SDValue Fields[NumFields] = {
DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
};
// Store each field into its respective slot.
SDValue MemOps[NumFields];
unsigned Offset = 0;
for (unsigned I = 0; I < NumFields; ++I) {
SDValue FieldAddr = Addr;
if (Offset != 0)
FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
DAG.getIntPtrConstant(Offset, DL));
MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
MachinePointerInfo(SV, Offset));
Offset += 8;
}
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
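// For reference, the four fields stored by lowerVASTART above correspond to
// the s390x ELF va_list layout (shown here only as an illustration, assuming
// the standard definition):
//
//   struct __va_list_tag {
//     long __gpr;                 // index of the next GPR argument
//     long __fpr;                 // index of the next FPR argument
//     void *__overflow_arg_area;  // start of the stack argument area
//     void *__reg_save_area;      // start of the register save area
//   };
//
// which is also why lowerVACOPY below copies exactly 32 bytes.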
SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue DstPtr = Op.getOperand(1);
SDValue SrcPtr = Op.getOperand(2);
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
SDLoc DL(Op);
return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
/*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
/*isTailCall*/false,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
MachineFunction &MF = DAG.getMachineFunction();
bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
SDValue Align = Op.getOperand(2);
SDLoc DL(Op);
// If user has set the no alignment function attribute, ignore
// alloca alignments.
uint64_t AlignVal = (RealignOpt ?
dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
unsigned SPReg = getStackPointerRegisterToSaveRestore();
SDValue NeededSpace = Size;
// Get a reference to the stack pointer.
SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
// If we need a backchain, save it now.
SDValue Backchain;
if (StoreBackchain)
Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
// Add extra space for alignment if needed.
if (ExtraAlignSpace)
NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
// Get the new stack pointer value.
SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
// Copy the new stack pointer back.
Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
// The allocated data lives above the 160 bytes allocated for the standard
// frame, plus any outgoing stack arguments. We don't know how much that
// amounts to yet, so emit a special ADJDYNALLOC placeholder.
SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
// Dynamically realign if needed.
if (RequiredAlign > StackAlign) {
Result =
DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
Result =
DAG.getNode(ISD::AND, DL, MVT::i64, Result,
DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
}
if (StoreBackchain)
Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
SDValue Ops[2] = { Result, Chain };
return DAG.getMergeValues(Ops, DL);
}
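// Worked example for the realignment logic above (illustrative numbers,
// assuming the default 8-byte stack alignment): for an alloca of 40 bytes
// with a requested 32-byte alignment,
//
//   ExtraAlignSpace = 32 - 8 = 24
//   NewSP           = OldSP - (40 + 24)
//   Result          = ((NewSP + ADJDYNALLOC) + 24) & ~31
//
// so the returned pointer is 32-byte aligned and at least 40 bytes of the
// over-allocated block lie above it.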
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}
SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc DL(Op);
SDValue Ops[2];
if (is32Bit(VT))
// Just do a normal 64-bit multiplication and extract the results.
// We define this so that it can be used for constant division.
lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
else if (Subtarget.hasMiscellaneousExtensions2())
// SystemZISD::SMUL_LOHI returns the low result in the odd register and
// the high result in the even register. ISD::SMUL_LOHI is defined to
// return the low half first, so the results are in reverse order.
lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
else {
// Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
//
// (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
//
// but using the fact that the upper halves are either all zeros
// or all ones:
//
// (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
//
// and grouping the right terms together since they are quicker than the
// multiplication:
//
// (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
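// One intermediate step that makes the rewrite above easier to follow
// (illustrative): because the inputs are sign-extended, lh is either 0 or
// all ones, so
//
//   (lh * rl) << 64  ==  (lh ? -rl : 0) << 64  ==  -((lh & rl) << 64)
//
// and symmetrically for (ll * rh) << 64, which is what turns the additions
// into the subtractions used below.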
SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
SDValue LL = Op.getOperand(0);
SDValue RL = Op.getOperand(1);
SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
// SystemZISD::UMUL_LOHI returns the low result in the odd register and
// the high result in the even register. ISD::SMUL_LOHI is defined to
// return the low half first, so the results are in reverse order.
lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
LL, RL, Ops[1], Ops[0]);
SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
}
return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc DL(Op);
SDValue Ops[2];
if (is32Bit(VT))
// Just do a normal 64-bit multiplication and extract the results.
// We define this so that it can be used for constant division.
lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
else
// SystemZISD::UMUL_LOHI returns the low result in the odd register and
// the high result in the even register. ISD::UMUL_LOHI is defined to
// return the low half first, so the results are in reverse order.
lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
SelectionDAG &DAG) const {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
EVT VT = Op.getValueType();
SDLoc DL(Op);
// We use DSGF for 32-bit division. This means the first operand must
// always be 64-bit, and the second operand should be 32-bit whenever
// that is possible, to improve performance.
if (is32Bit(VT))
Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
else if (DAG.ComputeNumSignBits(Op1) > 32)
Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
// DSG(F) returns the remainder in the even register and the
// quotient in the odd register.
SDValue Ops[2];
lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc DL(Op);
// DL(G) returns the remainder in the even register and the
// quotient in the odd register.
SDValue Ops[2];
lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
// Get the known-zero masks for each operand.
SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
DAG.computeKnownBits(Ops[1])};
// See if the upper 32 bits of one operand and the lower 32 bits of the
// other are known zero. They are the low and high operands respectively.
uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
Known[1].Zero.getZExtValue() };
unsigned High, Low;
if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
High = 1, Low = 0;
else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
High = 0, Low = 1;
else
return Op;
SDValue LowOp = Ops[Low];
SDValue HighOp = Ops[High];
// If the high part is a constant, we're better off using IILH.
if (HighOp.getOpcode() == ISD::Constant)
return Op;
// If the low part is a constant that is outside the range of LHI,
// then we're better off using IILF.
if (LowOp.getOpcode() == ISD::Constant) {
int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
if (!isInt<16>(Value))
return Op;
}
// Check whether the high part is an AND that doesn't change the
// high 32 bits and just masks out low bits. We can skip it if so.
if (HighOp.getOpcode() == ISD::AND &&
HighOp.getOperand(1).getOpcode() == ISD::Constant) {
SDValue HighOp0 = HighOp.getOperand(0);
uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
HighOp = HighOp0;
}
// Take advantage of the fact that all GR32 operations only change the
// low 32 bits by truncating Low to an i32 and inserting it directly
// using a subreg. The interesting cases are those where the truncation
// can be folded.
SDLoc DL(Op);
SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
MVT::i64, HighOp, Low32);
}
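// Illustrative example of the transformation above (a sketch): given
// (or i64 %x, %y) where %x has its low 32 bits known zero and %y has its
// high 32 bits known zero, HighOp is %x and LowOp is %y, and the OR becomes
// an insertion of trunc(%y) into the low-32-bit subregister (subreg_l32) of
// %x. Since GR32 operations leave the high word untouched, the truncation
// can then fold into whatever 32-bit operation produced %y.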
// Lower SADDO/SSUBO/UADDO/USUBO nodes.
SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
SelectionDAG &DAG) const {
SDNode *N = Op.getNode();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDLoc DL(N);
unsigned BaseOp = 0;
unsigned CCValid = 0;
unsigned CCMask = 0;
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown instruction!");
case ISD::SADDO:
BaseOp = SystemZISD::SADDO;
CCValid = SystemZ::CCMASK_ARITH;
CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
break;
case ISD::SSUBO:
BaseOp = SystemZISD::SSUBO;
CCValid = SystemZ::CCMASK_ARITH;
CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
break;
case ISD::UADDO:
BaseOp = SystemZISD::UADDO;
CCValid = SystemZ::CCMASK_LOGICAL;
CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
break;
case ISD::USUBO:
BaseOp = SystemZISD::USUBO;
CCValid = SystemZ::CCMASK_LOGICAL;
CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
break;
}
SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
if (N->getValueType(1) == MVT::i1)
SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}
// Lower ADDCARRY/SUBCARRY nodes.
SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
SelectionDAG &DAG) const {
SDNode *N = Op.getNode();
MVT VT = N->getSimpleValueType(0);
// Let legalize expand this if it isn't a legal type yet.
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDValue Carry = Op.getOperand(2);
SDLoc DL(N);
unsigned BaseOp = 0;
unsigned CCValid = 0;
unsigned CCMask = 0;
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown instruction!");
case ISD::ADDCARRY:
BaseOp = SystemZISD::ADDCARRY;
CCValid = SystemZ::CCMASK_LOGICAL;
CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
break;
case ISD::SUBCARRY:
BaseOp = SystemZISD::SUBCARRY;
CCValid = SystemZ::CCMASK_LOGICAL;
CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
break;
}
// Set the condition code from the carry flag.
Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
DAG.getConstant(CCValid, DL, MVT::i32),
DAG.getConstant(CCMask, DL, MVT::i32));
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);
SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
if (N->getValueType(1) == MVT::i1)
SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}
SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc DL(Op);
Op = Op.getOperand(0);
// Handle vector types via VPOPCT.
if (VT.isVector()) {
Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
switch (VT.getScalarSizeInBits()) {
case 8:
break;
case 16: {
Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
break;
}
case 32: {
SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
DAG.getConstant(0, DL, MVT::i32));
Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
break;
}
case 64: {
SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
DAG.getConstant(0, DL, MVT::i32));
Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
break;
}
default:
llvm_unreachable("Unexpected type");
}
return Op;
}
// Get the known-zero mask for the operand.
KnownBits Known = DAG.computeKnownBits(Op);
unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
if (NumSignificantBits == 0)
return DAG.getConstant(0, DL, VT);
// Skip known-zero high parts of the operand.
int64_t OrigBitSize = VT.getSizeInBits();
int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
BitSize = std::min(BitSize, OrigBitSize);
// The POPCNT instruction counts the number of bits in each byte.
Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
// Add up per-byte counts in a binary tree. All bits of Op at
// position larger than BitSize remain zero throughout.
for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
if (BitSize != OrigBitSize)
Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
}
// Extract overall result from high byte.
if (BitSize > 8)
Op = DAG.getNode(ISD::SRL, DL, VT, Op,
DAG.getConstant(BitSize - 8, DL, VT));
return Op;
}
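// Worked example of the shift tree above (illustrative), for a 32-bit
// operand whose POPCNT result holds the per-byte counts c3,c2,c1,c0:
//
//   I = 16:  Op += Op << 16   // high byte is now c3 + c1
//   I = 8:   Op += Op << 8    // high byte is now c3 + c2 + c1 + c0
//   SRL 24:  move the total down from the high byte
//
// The AND with (1 << BitSize) - 1 only matters when BitSize was narrowed
// below the original width; it keeps the bits at position BitSize and above
// zero, as required by the invariant stated above.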
SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
FenceSSID == SyncScope::System) {
return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
Op.getOperand(0)),
0);
}
// MEMBARRIER is a compiler barrier; it codegens to a no-op.
return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}
// Op is an atomic load. Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
SelectionDAG &DAG) const {
auto *Node = cast<AtomicSDNode>(Op.getNode());
return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
Node->getChain(), Node->getBasePtr(),
Node->getMemoryVT(), Node->getMemOperand());
}
// Op is an atomic store. Lower it into a normal volatile store.
SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
SelectionDAG &DAG) const {
auto *Node = cast<AtomicSDNode>(Op.getNode());
SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
Node->getBasePtr(), Node->getMemoryVT(),
Node->getMemOperand());
// We have to enforce sequential consistency by performing a
// serialization operation after the store.
if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
MVT::Other, Chain), 0);
return Chain;
}
// Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation. Lower the 8- and
// 16-bit cases into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
SelectionDAG &DAG,
unsigned Opcode) const {
auto *Node = cast<AtomicSDNode>(Op.getNode());
// 32-bit operations need no code outside the main loop.
EVT NarrowVT = Node->getMemoryVT();
EVT WideVT = MVT::i32;
if (NarrowVT == WideVT)
return Op;
int64_t BitSize = NarrowVT.getSizeInBits();
SDValue ChainIn = Node->getChain();
SDValue Addr = Node->getBasePtr();
SDValue Src2 = Node->getVal();
MachineMemOperand *MMO = Node->getMemOperand();
SDLoc DL(Node);
EVT PtrVT = Addr.getValueType();
// Convert atomic subtracts of constants into additions.
if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
Opcode = SystemZISD::ATOMIC_LOADW_ADD;
Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
}
// Get the address of the containing word.
SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
DAG.getConstant(-4, DL, PtrVT));
// Get the number of bits that the word must be rotated left in order
// to bring the field to the top bits of a GR32.
SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
DAG.getConstant(3, DL, PtrVT));
BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
// Get the complementing shift amount, for rotating a field in the top
// bits back to its proper position.
SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
DAG.getConstant(0, DL, WideVT), BitShift);
// Extend the source operand to 32 bits and prepare it for the inner loop.
// ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
// operations require the source to be shifted in advance. (This shift
// can be folded if the source is constant.) For AND and NAND, the lower
// bits must be set, while for other opcodes they should be left clear.
if (Opcode != SystemZISD::ATOMIC_SWAPW)
Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
DAG.getConstant(32 - BitSize, DL, WideVT));
if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
Opcode == SystemZISD::ATOMIC_LOADW_NAND)
Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
// Construct the ATOMIC_LOADW_* node.
SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
DAG.getConstant(BitSize, DL, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
NarrowVT, MMO);
// Rotate the result of the final CS so that the field is in the lower
// bits of a GR32, then truncate it.
SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
DAG.getConstant(BitSize, DL, WideVT));
SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
return DAG.getMergeValues(RetOps, DL);
}
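// Worked example of the addressing scheme above (illustrative), for a
// 16-bit atomic operation at address A with A % 4 == 2:
//
//   AlignedAddr = A & -4             // containing 32-bit word
//   BitShift    = (A * 8) mod 32     // = 16, rotates the halfword to the
//                                    //   top bits of a GR32
//   NegBitShift = -BitShift          // rotates it back afterwards
//
// The compare-and-swap expansion below computes the same triple.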
// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
// operations into additions.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
SelectionDAG &DAG) const {
auto *Node = cast<AtomicSDNode>(Op.getNode());
EVT MemVT = Node->getMemoryVT();
if (MemVT == MVT::i32 || MemVT == MVT::i64) {
// A full-width operation.
assert(Op.getValueType() == MemVT && "Mismatched VTs");
SDValue Src2 = Node->getVal();
SDValue NegSrc2;
SDLoc DL(Src2);
if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
// Use an addition if the operand is constant and either LAA(G) is
// available or the negative value is in the range of A(G)FHI.
int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
NegSrc2 = DAG.getConstant(Value, DL, MemVT);
} else if (Subtarget.hasInterlockedAccess1())
// Use LAA(G) if available.
NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
Src2);
if (NegSrc2.getNode())
return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
Node->getChain(), Node->getBasePtr(), NegSrc2,
Node->getMemOperand());
// Use the node as-is.
return Op;
}
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}
// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
SelectionDAG &DAG) const {
auto *Node = cast<AtomicSDNode>(Op.getNode());
SDValue ChainIn = Node->getOperand(0);
SDValue Addr = Node->getOperand(1);
SDValue CmpVal = Node->getOperand(2);
SDValue SwapVal = Node->getOperand(3);
MachineMemOperand *MMO = Node->getMemOperand();
SDLoc DL(Node);
// We have native support for 32-bit and 64-bit compare and swap, but we
// still need to expand extracting the "success" result from the CC.
EVT NarrowVT = Node->getMemoryVT();
EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
if (NarrowVT == WideVT) {
SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
DL, Tys, Ops, NarrowVT, MMO);
SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
return SDValue();
}
// Convert 8-bit and 16-bit compare and swap to a loop, implemented
// via a fullword ATOMIC_CMP_SWAPW operation.
int64_t BitSize = NarrowVT.getSizeInBits();
EVT PtrVT = Addr.getValueType();
// Get the address of the containing word.
SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
DAG.getConstant(-4, DL, PtrVT));
// Get the number of bits that the word must be rotated left in order
// to bring the field to the top bits of a GR32.
SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
DAG.getConstant(3, DL, PtrVT));
BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
// Get the complementing shift amount, for rotating a field in the top
// bits back to its proper position.
SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
DAG.getConstant(0, DL, WideVT), BitShift);
// Construct the ATOMIC_CMP_SWAPW node.
SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
VTList, Ops, NarrowVT, MMO);
SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
return SDValue();
}
SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
SystemZ::R15D, Op.getValueType());
}
SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
SDValue Chain = Op.getOperand(0);
SDValue NewSP = Op.getOperand(1);
SDValue Backchain;
SDLoc DL(Op);
if (StoreBackchain) {
SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
}
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
if (StoreBackchain)
Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
return Chain;
}
SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
SelectionDAG &DAG) const {
bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
if (!IsData)
// Just preserve the chain.
return Op.getOperand(0);
SDLoc DL(Op);
bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
SDValue Ops[] = {
Op.getOperand(0),
DAG.getConstant(Code, DL, MVT::i32),
Op.getOperand(1)
};
return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
Node->getVTList(), Ops,
Node->getMemoryVT(), Node->getMemOperand());
}
// Convert condition code in CCReg to an i32 value.
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
SDLoc DL(CCReg);
SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}
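// For reference (assuming SystemZ::IPM_CC names the bit position of the CC
// in the IPM result, conventionally 28): IPM deposits the two-bit condition
// code in bits 29..28 of the result register, so the SRL above leaves a
// plain integer in the range 0..3.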
SDValue
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
unsigned Opcode, CCValid;
if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
SDValue CC = getCCResult(DAG, SDValue(Node, 0));
DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
return SDValue();
}
return SDValue();
}
SDValue
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
unsigned Opcode, CCValid;
if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
if (Op->getNumValues() == 1)
return getCCResult(DAG, SDValue(Node, 0));
assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
}
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (Id) {
case Intrinsic::thread_pointer:
return lowerThreadPointer(SDLoc(Op), DAG);
case Intrinsic::s390_vpdi:
return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vperm:
return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vuphb:
case Intrinsic::s390_vuphh:
case Intrinsic::s390_vuphf:
return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
Op.getOperand(1));
case Intrinsic::s390_vuplhb:
case Intrinsic::s390_vuplhh:
case Intrinsic::s390_vuplhf:
return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
Op.getOperand(1));
case Intrinsic::s390_vuplb:
case Intrinsic::s390_vuplhw:
case Intrinsic::s390_vuplf:
return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
Op.getOperand(1));
case Intrinsic::s390_vupllb:
case Intrinsic::s390_vupllh:
case Intrinsic::s390_vupllf:
return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
Op.getOperand(1));
case Intrinsic::s390_vsumb:
case Intrinsic::s390_vsumh:
case Intrinsic::s390_vsumgh:
case Intrinsic::s390_vsumgf:
case Intrinsic::s390_vsumqf:
case Intrinsic::s390_vsumqg:
return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
}
return SDValue();
}
namespace {
// Says that SystemZISD operation Opcode can be used to perform the equivalent
// of a VPERM with permute vector Bytes. If Opcode takes three operands,
// Operand is the constant third operand, otherwise it is the number of
// bytes in each element of the result.
struct Permute {
unsigned Opcode;
unsigned Operand;
unsigned char Bytes[SystemZ::VectorBytes];
};
}
static const Permute PermuteForms[] = {
// VMRHG
{ SystemZISD::MERGE_HIGH, 8,
{ 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
// VMRHF
{ SystemZISD::MERGE_HIGH, 4,
{ 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
// VMRHH
{ SystemZISD::MERGE_HIGH, 2,
{ 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
// VMRHB
{ SystemZISD::MERGE_HIGH, 1,
{ 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
// VMRLG
{ SystemZISD::MERGE_LOW, 8,
{ 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
// VMRLF
{ SystemZISD::MERGE_LOW, 4,
{ 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
// VMRLH
{ SystemZISD::MERGE_LOW, 2,
{ 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
// VMRLB
{ SystemZISD::MERGE_LOW, 1,
{ 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
// VPKG
{ SystemZISD::PACK, 4,
{ 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
// VPKF
{ SystemZISD::PACK, 2,
{ 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
// VPKH
{ SystemZISD::PACK, 1,
{ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
// VPDI V1, V2, 4 (low half of V1, high half of V2)
{ SystemZISD::PERMUTE_DWORDS, 4,
{ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
// VPDI V1, V2, 1 (high half of V1, low half of V2)
{ SystemZISD::PERMUTE_DWORDS, 1,
{ 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
};
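// How to read one entry above (illustrative): in the VMRHG pattern the byte
// selectors 0..7 name bytes of the first VPERM operand and 16..23 name bytes
// of the second, so the pattern is "high doubleword of operand 0 followed by
// high doubleword of operand 1", which is exactly what MERGE_HIGH with
// 8-byte elements produces.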
// Called after matching a vector shuffle against a particular pattern.
// Both the original shuffle and the pattern have two vector operands.
// OpNos[0] is the operand of the original shuffle that should be used for
// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
// OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
// for operands 0 and 1 of the pattern.
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
if (OpNos[0] < 0) {
if (OpNos[1] < 0)
return false;
OpNo0 = OpNo1 = OpNos[1];
} else if (OpNos[1] < 0) {
OpNo0 = OpNo1 = OpNos[0];
} else {
OpNo0 = OpNos[0];
OpNo1 = OpNos[1];
}
return true;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Return true if the VPERM can be implemented using P.
// When returning true set OpNo0 to the VPERM operand that should be
// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
//
// For example, if swapping the VPERM operands allows P to match, OpNo0
// will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
// operand, but rewriting it to use two duplicated operands allows it to
// match P, then OpNo0 and OpNo1 will be the same.
static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
unsigned &OpNo0, unsigned &OpNo1) {
int OpNos[] = { -1, -1 };
for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
int Elt = Bytes[I];
if (Elt >= 0) {
// Make sure that the two permute vectors use the same suboperand
// byte number. Only the operand numbers (the high bits) are
// allowed to differ.
if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
return false;
int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
// Make sure that the operand mappings are consistent with previous
// elements.
if (OpNos[ModelOpNo] == 1 - RealOpNo)
return false;
OpNos[ModelOpNo] = RealOpNo;
}
}
return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}
// As above, but search for a matching permute.
static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
unsigned &OpNo0, unsigned &OpNo1) {
for (auto &P : PermuteForms)
if (matchPermute(Bytes, P, OpNo0, OpNo1))
return &P;
return nullptr;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. This permute is an operand of an outer permute.
// See whether redistributing the -1 bytes gives a shuffle that can be
// implemented using P. If so, set Transform to a VPERM-like permute vector
// that, when applied to the result of P, gives the original permute in Bytes.
static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
const Permute &P,
SmallVectorImpl<int> &Transform) {
unsigned To = 0;
for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
int Elt = Bytes[From];
if (Elt < 0)
// Byte number From of the result is undefined.
Transform[From] = -1;
else {
while (P.Bytes[To] != Elt) {
To += 1;
if (To == SystemZ::VectorBytes)
return false;
}
Transform[From] = To;
}
}
return true;
}
// As above, but search for a matching permute.
static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
SmallVectorImpl<int> &Transform) {
for (auto &P : PermuteForms)
if (matchDoublePermute(Bytes, P, Transform))
return &P;
return nullptr;
}
// Convert the mask of the given shuffle op into a byte-level mask,
// as if it had type vNi8.
static bool getVPermMask(SDValue ShuffleOp,
SmallVectorImpl<int> &Bytes) {
EVT VT = ShuffleOp.getValueType();
unsigned NumElements = VT.getVectorNumElements();
unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
Bytes.resize(NumElements * BytesPerElement, -1);
for (unsigned I = 0; I < NumElements; ++I) {
int Index = VSN->getMaskElt(I);
if (Index >= 0)
for (unsigned J = 0; J < BytesPerElement; ++J)
Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
}
return true;
}
if (SystemZISD::SPLAT == ShuffleOp.getOpcode() &&
isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
unsigned Index = ShuffleOp.getConstantOperandVal(1);
Bytes.resize(NumElements * BytesPerElement, -1);
for (unsigned I = 0; I < NumElements; ++I)
for (unsigned J = 0; J < BytesPerElement; ++J)
Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
return true;
}
return false;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
// the result come from a contiguous sequence of bytes from one input.
// Set Base to the selector for the first byte if so.
static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
unsigned BytesPerElement, int &Base) {
Base = -1;
for (unsigned I = 0; I < BytesPerElement; ++I) {
if (Bytes[Start + I] >= 0) {
unsigned Elem = Bytes[Start + I];
if (Base < 0) {
Base = Elem - I;
// Make sure the bytes would come from one input operand.
if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
return false;
} else if (unsigned(Base) != Elem - I)
return false;
}
}
return true;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Return true if it can be performed using VSLDI.
// When returning true, set StartIndex to the shift amount and OpNo0
// and OpNo1 to the VPERM operands that should be used as the first
// and second shift operand respectively.
static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
unsigned &StartIndex, unsigned &OpNo0,
unsigned &OpNo1) {
int OpNos[] = { -1, -1 };
int Shift = -1;
for (unsigned I = 0; I < 16; ++I) {
int Index = Bytes[I];
if (Index >= 0) {
int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
if (Shift < 0)
Shift = ExpectedShift;
else if (Shift != ExpectedShift)
return false;
// Make sure that the operand mappings are consistent with previous
// elements.
if (OpNos[ModelOpNo] == 1 - RealOpNo)
return false;
OpNos[ModelOpNo] = RealOpNo;
}
}
StartIndex = Shift;
return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}
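// Illustrative example for the matcher above: Bytes = { 1, 2, ..., 15, 16 }
// gives ExpectedShift == 1 for every defined byte, with bytes 1..15 taken
// from operand 0 and byte 16 from operand 1, so the permute can be done with
// a single SHL_DOUBLE (VSLDI) using StartIndex = 1.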
// Create a node that performs P on operands Op0 and Op1, casting the
// operands to the appropriate type. The type of the result is determined by P.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
const Permute &P, SDValue Op0, SDValue Op1) {
// VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
// elements of a PACK are twice as wide as the outputs.
unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
P.Operand);
// Cast both operands to the appropriate type.
MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
SystemZ::VectorBytes / InBytes);
Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
SDValue Op;
if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32);
Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
} else if (P.Opcode == SystemZISD::PACK) {
MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
SystemZ::VectorBytes / P.Operand);
Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
} else {
Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
}
return Op;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Implement it on operands Ops[0] and Ops[1] using
// VSLDI or VPERM.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
SDValue *Ops,
const SmallVectorImpl<int> &Bytes) {
for (unsigned I = 0; I < 2; ++I)
Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);
// First see whether VSLDI can be used.
unsigned StartIndex, OpNo0, OpNo1;
if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32));
// Fall back on VPERM. Construct an SDNode for the permute vector.
SDValue IndexNodes[SystemZ::VectorBytes];
for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
if (Bytes[I] >= 0)
IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
else
IndexNodes[I] = DAG.getUNDEF(MVT::i32);
SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
}
namespace {
// Describes a general N-operand vector shuffle.
struct GeneralShuffle {
GeneralShuffle(EVT vt) : VT(vt) {}
void addUndef();
bool add(SDValue, unsigned);
SDValue getNode(SelectionDAG &, const SDLoc &);
// The operands of the shuffle.
SmallVector<SDValue, SystemZ::VectorBytes> Ops;
// Index I is -1 if byte I of the result is undefined. Otherwise the
// result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
// Bytes[I] / SystemZ::VectorBytes.
SmallVector<int, SystemZ::VectorBytes> Bytes;
// The type of the shuffle result.
EVT VT;
};
}
// Add an extra undefined element to the shuffle.
void GeneralShuffle::addUndef() {
unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
for (unsigned I = 0; I < BytesPerElement; ++I)
Bytes.push_back(-1);
}
// Add an extra element to the shuffle, taking it from element Elem of Op.
// A null Op indicates a vector input whose value will be calculated later;
// there is at most one such input per shuffle and it always has the same
// type as the result. Aborts and returns false if the source vector elements
// of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
// LLVM they become implicitly extended, but this is rare and not optimized.
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
// The source vector can have wider elements than the result,
// either through an explicit TRUNCATE or because of type legalization.
// We want the least significant part.
EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();
// Return false if the source elements are smaller than their destination
// elements.
if (FromBytesPerElement < BytesPerElement)
return false;
unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
(FromBytesPerElement - BytesPerElement));
// Look through things like shuffles and bitcasts.
while (Op.getNode()) {
if (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0);
else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
// See whether the bytes we need come from a contiguous part of one
// operand.
SmallVector<int, SystemZ::VectorBytes> OpBytes;
if (!getVPermMask(Op, OpBytes))
break;
int NewByte;
if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
break;
if (NewByte < 0) {
addUndef();
return true;
}
Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
Byte = unsigned(NewByte) % SystemZ::VectorBytes;
} else if (Op.isUndef()) {
addUndef();
return true;
} else
break;
}
// Make sure that the source of the extraction is in Ops.
unsigned OpNo = 0;
for (; OpNo < Ops.size(); ++OpNo)
if (Ops[OpNo] == Op)
break;
if (OpNo == Ops.size())
Ops.push_back(Op);
// Add the element to Bytes.
unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
for (unsigned I = 0; I < BytesPerElement; ++I)
Bytes.push_back(Base + I);
return true;
}
// Return SDNodes for the completed shuffle.
SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");
if (Ops.size() == 0)
return DAG.getUNDEF(VT);
// Make sure that there are at least two shuffle operands.
if (Ops.size() == 1)
Ops.push_back(DAG.getUNDEF(MVT::v16i8));
// Create a tree of shuffles, deferring root node until after the loop.
// Try to redistribute the undefined elements of non-root nodes so that
// the non-root shuffles match something like a pack or merge, then adjust
// the parent node's permute vector to compensate for the new order.
// Among other things, this copes with vectors like <2 x i16> that were
// padded with undefined elements during type legalization.
//
// In the best case this redistribution will lead to the whole tree
// using packs and merges. It should rarely be a loss in other cases.
unsigned Stride = 1;
for (; Stride * 2 < Ops.size(); Stride *= 2) {
for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
// Create a mask for just these two operands.
SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
if (OpNo == I)
NewBytes[J] = Byte;
else if (OpNo == I + Stride)
NewBytes[J] = SystemZ::VectorBytes + Byte;
else
NewBytes[J] = -1;
}
// See if it would be better to reorganize NewBytes to avoid using VPERM.
SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
// Applying NewBytesMap to Ops[I] gets back to NewBytes.
for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
if (NewBytes[J] >= 0) {
assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
"Invalid double permute");
Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
} else
assert(NewBytesMap[J] < 0 && "Invalid double permute");
}
} else {
// Just use NewBytes on the operands.
Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
if (NewBytes[J] >= 0)
Bytes[J] = I * SystemZ::VectorBytes + J;
}
}
}
// Now we just have 2 inputs. Put the second operand in Ops[1].
if (Stride > 1) {
Ops[1] = Ops[Stride];
for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
if (Bytes[I] >= int(SystemZ::VectorBytes))
Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
}
// Look for an instruction that can do the permute without resorting
// to VPERM.
unsigned OpNo0, OpNo1;
SDValue Op;
if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
else
Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}
// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
static bool isScalarToVector(SDValue Op) {
for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
if (!Op.getOperand(I).isUndef())
return false;
return true;
}
// Return a vector of type VT that contains Value in the first element.
// The other elements don't matter.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
SDValue Value) {
// If we have a constant, replicate it to all elements and let the
// BUILD_VECTOR lowering take care of it.
if (Value.getOpcode() == ISD::Constant ||
Value.getOpcode() == ISD::ConstantFP) {
SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
return DAG.getBuildVector(VT, DL, Ops);
}
if (Value.isUndef())
return DAG.getUNDEF(VT);
return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
}
// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
// element 1. Used for cases in which replication is cheap.
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
SDValue Op0, SDValue Op1) {
if (Op0.isUndef()) {
if (Op1.isUndef())
return DAG.getUNDEF(VT);
return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
}
if (Op1.isUndef())
return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
buildScalarToVector(DAG, DL, VT, Op0),
buildScalarToVector(DAG, DL, VT, Op1));
}
// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
// vector for them.
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
SDValue Op1) {
if (Op0.isUndef() && Op1.isUndef())
return DAG.getUNDEF(MVT::v2i64);
// If one of the two inputs is undefined then replicate the other one,
// in order to avoid using another register unnecessarily.
if (Op0.isUndef())
Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
else if (Op1.isUndef())
Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
else {
Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
}
return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
}
// Try to represent constant BUILD_VECTOR node BVN using a
// SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask
// on success.
static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) {
EVT ElemVT = BVN->getValueType(0).getVectorElementType();
unsigned BytesPerElement = ElemVT.getStoreSize();
for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) {
SDValue Op = BVN->getOperand(I);
if (!Op.isUndef()) {
uint64_t Value;
if (Op.getOpcode() == ISD::Constant)
Value = cast<ConstantSDNode>(Op)->getZExtValue();
else if (Op.getOpcode() == ISD::ConstantFP)
Value = (cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
.getZExtValue());
else
return false;
for (unsigned J = 0; J < BytesPerElement; ++J) {
uint64_t Byte = (Value >> (J * 8)) & 0xff;
if (Byte == 0xff)
Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J);
else if (Byte != 0)
return false;
}
}
}
return true;
}
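// Illustrative example for the matcher above: the v4i32 constant
// < 0xffffffff, 0, 0, 0x000000ff > consists only of 0x00 and 0xff bytes, so
// the loop produces Mask == 0xf001 (earlier vector bytes map to more
// significant mask bits) and the BUILD_VECTOR can be emitted as a single
// BYTE_MASK (VGBM) node.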
// Try to load a vector constant in which BitsPerElement-bit value Value
// is replicated to fill the vector. VT is the type of the resulting
// constant, which may have elements of a different size from BitsPerElement.
// Return the SDValue of the constant on success, otherwise return
// an empty value.
static SDValue tryBuildVectorReplicate(SelectionDAG &DAG,
const SystemZInstrInfo *TII,
const SDLoc &DL, EVT VT, uint64_t Value,
unsigned BitsPerElement) {
// Signed 16-bit values can be replicated using VREPI.
// Mark the constants as opaque or DAGCombiner will convert back to
// BUILD_VECTOR.
int64_t SignedValue = SignExtend64(Value, BitsPerElement);
if (isInt<16>(SignedValue)) {
MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
SystemZ::VectorBits / BitsPerElement);
SDValue Op = DAG.getNode(
SystemZISD::REPLICATE, DL, VecVT,
DAG.getConstant(SignedValue, DL, MVT::i32, false, true /*isOpaque*/));
return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}
// See whether rotating the constant left some N places gives a value that
// is one less than a power of 2 (i.e. all zeros followed by all ones).
// If so we can use VGM.
unsigned Start, End;
if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) {
// isRxSBGMask returns the bit numbers for a full 64-bit value,
// with 0 denoting 1 << 63 and 63 denoting 1. Convert them to
// bit numbers for a BitsPerElement value, so that 0 denotes
// 1 << (BitsPerElement-1).
Start -= 64 - BitsPerElement;
End -= 64 - BitsPerElement;
MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
SystemZ::VectorBits / BitsPerElement);
SDValue Op = DAG.getNode(
SystemZISD::ROTATE_MASK, DL, VecVT,
DAG.getConstant(Start, DL, MVT::i32, false, true /*isOpaque*/),
DAG.getConstant(End, DL, MVT::i32, false, true /*isOpaque*/));
return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}
return SDValue();
}
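// Illustrative examples for the two paths above:
//
//   Value = 0xfffd, BitsPerElement = 16: SignedValue is -3, which fits in
//     16 bits, so the constant becomes a REPLICATE (VREPI) of -3.
//
//   Value = 0x00ffff00, BitsPerElement = 32: rotating it left 24 places
//     gives 0x0000ffff, one less than a power of 2, so the constant can be
//     generated with ROTATE_MASK (VGM) using the Start/End range reported
//     by isRxSBGMask.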
// If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
// better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
// the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
// would benefit from this representation and return it if so.
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
BuildVectorSDNode *BVN) {
EVT VT = BVN->getValueType(0);
unsigned NumElements = VT.getVectorNumElements();
// Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
// on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
// need a BUILD_VECTOR, add an additional placeholder operand for that
// BUILD_VECTOR and store its operands in ResidueOps.
GeneralShuffle GS(VT);
SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
bool FoundOne = false;
for (unsigned I = 0; I < NumElements; ++I) {
SDValue Op = BVN->getOperand(I);
if (Op.getOpcode() == ISD::TRUNCATE)
Op = Op.getOperand(0);
if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op.getOperand(1).getOpcode() == ISD::Constant) {
unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
if (!GS.add(Op.getOperand(0), Elem))
return SDValue();
FoundOne = true;
} else if (Op.isUndef()) {
GS.addUndef();
} else {
if (!GS.add(SDValue(), ResidueOps.size()))
return SDValue();
ResidueOps.push_back(BVN->getOperand(I));
}
}
// Nothing to do if there are no EXTRACT_VECTOR_ELTs.
if (!FoundOne)
return SDValue();
// Create the BUILD_VECTOR for the remaining elements, if any.
if (!ResidueOps.empty()) {
while (ResidueOps.size() < NumElements)
ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
for (auto &Op : GS.Ops) {
if (!Op.getNode()) {
Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
break;
}
}
}
return GS.getNode(DAG, SDLoc(BVN));
}
// Combine GPR scalar values Elems into a vector of type VT.
static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
SmallVectorImpl<SDValue> &Elems) {
// See whether there is a single replicated value.
SDValue Single;
unsigned int NumElements = Elems.size();
unsigned int Count = 0;
for (auto Elem : Elems) {
if (!Elem.isUndef()) {
if (!Single.getNode())
Single = Elem;
else if (Elem != Single) {
Single = SDValue();
break;
}
Count += 1;
}
}
// There are three cases here:
//
// - if the only defined element is a loaded one, the best sequence
// is a replicating load.
//
// - otherwise, if the only defined element is an i64 value, we will
// end up with the same VLVGP sequence regardless of whether we short-cut
// for replication or fall through to the later code.
//
// - otherwise, if the only defined element is an i32 or smaller value,
// we would need 2 instructions to replicate it: VLVGP followed by VREPx.
// This is only a win if the single defined element is used more than once.
// In other cases we're better off using a single VLVGx.
if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD))
return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);
// If all elements are loads, use VLREP/VLEs (below).
bool AllLoads = true;
for (auto Elem : Elems)
if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) {
AllLoads = false;
break;
}
// The best way of building a v2i64 from two i64s is to use VLVGP.
if (VT == MVT::v2i64 && !AllLoads)
return joinDwords(DAG, DL, Elems[0], Elems[1]);
// Use a 64-bit merge high to combine two doubles.
if (VT == MVT::v2f64 && !AllLoads)
return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
// Build v4f32 values directly from the FPRs:
//
//   <Axxx> <Bxxx> <Cxxx> <Dxxx>
//        V             V          VMRHF
//     <ABxx>        <CDxx>
//           V      V               VMRHG
//         <ABCD>
if (VT == MVT::v4f32 && !AllLoads) {
SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
// Avoid unnecessary undefs by reusing the other operand.
if (Op01.isUndef())
Op01 = Op23;
else if (Op23.isUndef())
Op23 = Op01;
// Merging identical replications is a no-op.
if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
return Op01;
Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
DL, MVT::v2i64, Op01, Op23);
return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}
// Collect the constant terms.
SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);
unsigned NumConstants = 0;
for (unsigned I = 0; I < NumElements; ++I) {
SDValue Elem = Elems[I];
if (Elem.getOpcode() == ISD::Constant ||
Elem.getOpcode() == ISD::ConstantFP) {
NumConstants += 1;
Constants[I] = Elem;
Done[I] = true;
}
}
// If there was at least one constant, fill in the other elements of
// Constants with undefs to get a full vector constant and use that
// as the starting point.
SDValue Result;
SDValue ReplicatedVal;
if (NumConstants > 0) {
for (unsigned I = 0; I < NumElements; ++I)
if (!Constants[I].getNode())
Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
Result = DAG.getBuildVector(VT, DL, Constants);
} else {
// Otherwise try to use VLREP or VLVGP to start the sequence in order to
// avoid a false dependency on any previous contents of the vector
// register.
// Use a VLREP if at least one element is a load. Make sure to replicate
// the load with the most elements having its value.
std::map<const SDNode*, unsigned> UseCounts;
SDNode *LoadMaxUses = nullptr;
for (unsigned I = 0; I < NumElements; ++I)
if (Elems[I].getOpcode() == ISD::LOAD &&
cast<LoadSDNode>(Elems[I])->isUnindexed()) {
SDNode *Ld = Elems[I].getNode();
UseCounts[Ld]++;
if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
LoadMaxUses = Ld;
}
if (LoadMaxUses != nullptr) {
ReplicatedVal = SDValue(LoadMaxUses, 0);
Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal);
} else {
// Try to use VLVGP.
unsigned I1 = NumElements / 2 - 1;
unsigned I2 = NumElements - 1;
bool Def1 = !Elems[I1].isUndef();
bool Def2 = !Elems[I2].isUndef();
if (Def1 || Def2) {
SDValue Elem1 = Elems[Def1 ? I1 : I2];
SDValue Elem2 = Elems[Def2 ? I2 : I1];
Result = DAG.getNode(ISD::BITCAST, DL, VT,
joinDwords(DAG, DL, Elem1, Elem2));
Done[I1] = true;
Done[I2] = true;
} else
Result = DAG.getUNDEF(VT);
}
}
// Use VLVGx to insert the other elements.
for (unsigned I = 0; I < NumElements; ++I)
if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
DAG.getConstant(I, DL, MVT::i32));
return Result;
}
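// Lowering examples (illustrative): an all-zero or all-ones constant vector
// is emitted with VECTOR GENERATE BYTE MASK; a constant splat whose element
// value fits the immediate, such as <4 x i32> <7, 7, 7, 7>, is emitted via
// VECTOR REPLICATE IMMEDIATE; other cases fall back to shuffles, a single
// scalar insertion, or the generic buildVector sequence above.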
SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
SDLoc DL(Op);
EVT VT = Op.getValueType();
if (BVN->isConstant()) {
// Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
// preferred way of creating all-zero and all-one vectors so give it
// priority over other methods below.
uint64_t Mask = 0;
if (tryBuildVectorByteMask(BVN, Mask)) {
SDValue Op = DAG.getNode(
SystemZISD::BYTE_MASK, DL, MVT::v16i8,
DAG.getConstant(Mask, DL, MVT::i32, false, true /*isOpaque*/));
return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}
// Try using some form of replication.
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
8, true) &&
SplatBitSize <= 64) {
// First try assuming that any undefined bits above the highest set bit
// and below the lowest set bit are 1s. This increases the likelihood of
// being able to use a sign-extended element value in VECTOR REPLICATE
// IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
uint64_t SplatBitsZ = SplatBits.getZExtValue();
uint64_t SplatUndefZ = SplatUndef.getZExtValue();
uint64_t Lower = (SplatUndefZ
& ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
uint64_t Upper = (SplatUndefZ
& ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
uint64_t Value = SplatBitsZ | Upper | Lower;
SDValue Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value,
SplatBitSize);
if (Op.getNode())
return Op;
// Now try assuming that any undefined bits between the first and
// last defined set bits are set. This increases the chances of
// using a non-wraparound mask.
uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
Value = SplatBitsZ | Middle;
Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, SplatBitSize);
if (Op.getNode())
return Op;
}
// Fall back to loading it from memory.
return SDValue();
}
// See if we should use shuffles to construct the vector from other vectors.
if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
return Res;
// Detect SCALAR_TO_VECTOR conversions.
if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));
// Otherwise use buildVector to build the vector up from GPRs.
unsigned NumElements = Op.getNumOperands();
SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
for (unsigned I = 0; I < NumElements; ++I)
Ops[I] = Op.getOperand(I);
return buildVector(DAG, DL, VT, Ops);
}
SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
SelectionDAG &DAG) const {
auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
SDLoc DL(Op);
EVT VT = Op.getValueType();
unsigned NumElements = VT.getVectorNumElements();
if (VSN->isSplat()) {
SDValue Op0 = Op.getOperand(0);
unsigned Index = VSN->getSplatIndex();
assert(Index < VT.getVectorNumElements() &&
"Splat index should be defined and in first operand");
// See whether the value we're splatting is directly available as a scalar.
if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
Op0.getOpcode() == ISD::BUILD_VECTOR)
return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
// Otherwise keep it as a vector-to-vector operation.
return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
DAG.getConstant(Index, DL, MVT::i32));
}
GeneralShuffle GS(VT);
for (unsigned I = 0; I < NumElements; ++I) {
int Elt = VSN->getMaskElt(I);
if (Elt < 0)
GS.addUndef();
else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
unsigned(Elt) % NumElements))
return SDValue();
}
return GS.getNode(DAG, SDLoc(VSN));
}
SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
// Just insert the scalar into element 0 of an undefined vector.
return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
}
SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
// Handle insertions of floating-point values.
SDLoc DL(Op);
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue Op2 = Op.getOperand(2);
EVT VT = Op.getValueType();
// Insertions into constant indices of a v2f64 can be done using VPDI.
// However, if the inserted value is a bitcast or a constant then it's
// better to use GPRs, as below.
if (VT == MVT::v2f64 &&
Op1.getOpcode() != ISD::BITCAST &&
Op1.getOpcode() != ISD::ConstantFP &&
Op2.getOpcode() == ISD::Constant) {
uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
unsigned Mask = VT.getVectorNumElements() - 1;
if (Index <= Mask)
return Op;
}
// Otherwise bitcast to the equivalent integer form and insert via a GPR.
MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
SDValue
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
// Handle extractions of floating-point values.
SDLoc DL(Op);
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
EVT VT = Op.getValueType();
EVT VecVT = Op0.getValueType();
// Extractions of constant indices can be done directly.
if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
uint64_t Index = CIndexN->getZExtValue();
unsigned Mask = VecVT.getVectorNumElements() - 1;
if (Index <= Mask)
return Op;
}
// Otherwise bitcast to the equivalent integer form and extract via a GPR.
MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
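// Lower a *_EXTEND_VECTOR_INREG node by repeatedly applying the given
// unpack-high operation, doubling the element width on each step until it
// matches the result width. For example (illustrative), going from v16i8
// input elements to v4i32 result elements takes two unpack steps
// (8 -> 16 -> 32 bits).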
SDValue
SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
unsigned UnpackHigh) const {
SDValue PackedOp = Op.getOperand(0);
EVT OutVT = Op.getValueType();
EVT InVT = PackedOp.getValueType();
unsigned ToBits = OutVT.getScalarSizeInBits();
unsigned FromBits = InVT.getScalarSizeInBits();
do {
FromBits *= 2;
EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
SystemZ::VectorBits / FromBits);
PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp);
} while (FromBits != ToBits);
return PackedOp;
}
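// Examples (illustrative): a v4i32 shift left by the constant splat
// <3, 3, 3, 3> is rewritten below into the *_BY_SCALAR form with an i32
// shift amount of 3, and a splat of a variable amount is truncated to i32
// and used the same way.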
SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
unsigned ByScalar) const {
// Look for cases where a vector shift can use the *_BY_SCALAR form.
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDLoc DL(Op);
EVT VT = Op.getValueType();
unsigned ElemBitSize = VT.getScalarSizeInBits();
// See whether the shift vector is a splat represented as BUILD_VECTOR.
if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
// Check for constant splats. Use ElemBitSize as the minimum element
// width and reject splats that need wider elements.
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
ElemBitSize, true) &&
SplatBitSize == ElemBitSize) {
SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
DL, MVT::i32);
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
}
// Check for variable splats.
BitVector UndefElements;
SDValue Splat = BVN->getSplatValue(&UndefElements);
if (Splat) {
// Since i32 is the smallest legal type, we either need a no-op
// or a truncation.
SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
}
}
// See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
// and the shift amount is directly available in a GPR.
if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
if (VSN->isSplat()) {
SDValue VSNOp0 = VSN->getOperand(0);
unsigned Index = VSN->getSplatIndex();
assert(Index < VT.getVectorNumElements() &&
"Splat index should be defined and in first operand");
if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
// Since i32 is the smallest legal type, we either need a no-op
// or a truncation.
SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
VSNOp0.getOperand(Index));
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
}
}
}
// Otherwise just treat the current form as legal.
return Op;
}
SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
case ISD::FRAMEADDR:
return lowerFRAMEADDR(Op, DAG);
case ISD::RETURNADDR:
return lowerRETURNADDR(Op, DAG);
case ISD::BR_CC:
return lowerBR_CC(Op, DAG);
case ISD::SELECT_CC:
return lowerSELECT_CC(Op, DAG);
case ISD::SETCC:
return lowerSETCC(Op, DAG);
case ISD::GlobalAddress:
return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
case ISD::GlobalTLSAddress:
return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
case ISD::BlockAddress:
return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
case ISD::JumpTable:
return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
case ISD::ConstantPool:
return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
case ISD::BITCAST:
return lowerBITCAST(Op, DAG);
case ISD::VASTART:
return lowerVASTART(Op, DAG);
case ISD::VACOPY:
return lowerVACOPY(Op, DAG);
case ISD::DYNAMIC_STACKALLOC:
return lowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::GET_DYNAMIC_AREA_OFFSET:
return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
case ISD::SMUL_LOHI:
return lowerSMUL_LOHI(Op, DAG);
case ISD::UMUL_LOHI:
return lowerUMUL_LOHI(Op, DAG);
case ISD::SDIVREM:
return lowerSDIVREM(Op, DAG);
case ISD::UDIVREM:
return lowerUDIVREM(Op, DAG);
case ISD::SADDO:
case ISD::SSUBO:
case ISD::UADDO:
case ISD::USUBO:
return lowerXALUO(Op, DAG);
case ISD::ADDCARRY:
case ISD::SUBCARRY:
return lowerADDSUBCARRY(Op, DAG);
case ISD::OR:
return lowerOR(Op, DAG);
case ISD::CTPOP:
return lowerCTPOP(Op, DAG);
case ISD::ATOMIC_FENCE:
return lowerATOMIC_FENCE(Op, DAG);
case ISD::ATOMIC_SWAP:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
case ISD::ATOMIC_STORE:
return lowerATOMIC_STORE(Op, DAG);
case ISD::ATOMIC_LOAD:
return lowerATOMIC_LOAD(Op, DAG);
case ISD::ATOMIC_LOAD_ADD:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
case ISD::ATOMIC_LOAD_SUB:
return lowerATOMIC_LOAD_SUB(Op, DAG);
case ISD::ATOMIC_LOAD_AND:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
case ISD::ATOMIC_LOAD_OR:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
case ISD::ATOMIC_LOAD_XOR:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
case ISD::ATOMIC_LOAD_NAND:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
case ISD::ATOMIC_LOAD_MIN:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
case ISD::ATOMIC_LOAD_MAX:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
case ISD::ATOMIC_LOAD_UMIN:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
case ISD::ATOMIC_LOAD_UMAX:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
return lowerATOMIC_CMP_SWAP(Op, DAG);
case ISD::STACKSAVE:
return lowerSTACKSAVE(Op, DAG);
case ISD::STACKRESTORE:
return lowerSTACKRESTORE(Op, DAG);
case ISD::PREFETCH:
return lowerPREFETCH(Op, DAG);
case ISD::INTRINSIC_W_CHAIN:
return lowerINTRINSIC_W_CHAIN(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN:
return lowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::BUILD_VECTOR:
return lowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE:
return lowerVECTOR_SHUFFLE(Op, DAG);
case ISD::SCALAR_TO_VECTOR:
return lowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::INSERT_VECTOR_ELT:
return lowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT:
return lowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::SIGN_EXTEND_VECTOR_INREG:
return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
case ISD::ZERO_EXTEND_VECTOR_INREG:
return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
case ISD::SHL:
return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
case ISD::SRL:
return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
case ISD::SRA:
return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
default:
llvm_unreachable("Unexpected node to lower");
}
}
// Lower operations with invalid operand or result types (currently used
// only for 128-bit integer types).
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
SDLoc DL(In);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
DAG.getIntPtrConstant(0, DL));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
DAG.getIntPtrConstant(1, DL));
SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
MVT::Untyped, Hi, Lo);
return SDValue(Pair, 0);
}
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
SDLoc DL(In);
SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
DL, MVT::i64, In);
SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
DL, MVT::i64, In);
return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
}
void
SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
switch (N->getOpcode()) {
case ISD::ATOMIC_LOAD: {
SDLoc DL(N);
SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128,
DL, Tys, Ops, MVT::i128, MMO);
Results.push_back(lowerGR128ToI128(DAG, Res));
Results.push_back(Res.getValue(1));
break;
}
case ISD::ATOMIC_STORE: {
SDLoc DL(N);
SDVTList Tys = DAG.getVTList(MVT::Other);
SDValue Ops[] = { N->getOperand(0),
lowerI128ToGR128(DAG, N->getOperand(2)),
N->getOperand(1) };
MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128,
DL, Tys, Ops, MVT::i128, MMO);
// We have to enforce sequential consistency by performing a
// serialization operation after the store.
if (cast<AtomicSDNode>(N)->getOrdering() ==
AtomicOrdering::SequentiallyConsistent)
Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL,
MVT::Other, Res), 0);
Results.push_back(Res);
break;
}
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
SDLoc DL(N);
SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
lowerI128ToGR128(DAG, N->getOperand(2)),
lowerI128ToGR128(DAG, N->getOperand(3)) };
MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128,
DL, Tys, Ops, MVT::i128, MMO);
SDValue Success = emitSETCC(DAG, DL, Res.getValue(1),
SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1));
Results.push_back(lowerGR128ToI128(DAG, Res));
Results.push_back(Success);
Results.push_back(Res.getValue(2));
break;
}
default:
llvm_unreachable("Unexpected node to lower");
}
}
void
SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
return LowerOperationWrapper(N, Results, DAG);
}
const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
switch ((SystemZISD::NodeType)Opcode) {
case SystemZISD::FIRST_NUMBER: break;
OPCODE(RET_FLAG);
OPCODE(CALL);
OPCODE(SIBCALL);
OPCODE(TLS_GDCALL);
OPCODE(TLS_LDCALL);
OPCODE(PCREL_WRAPPER);
OPCODE(PCREL_OFFSET);
OPCODE(IABS);
OPCODE(ICMP);
OPCODE(FCMP);
OPCODE(TM);
OPCODE(BR_CCMASK);
OPCODE(SELECT_CCMASK);
OPCODE(ADJDYNALLOC);
OPCODE(POPCNT);
OPCODE(SMUL_LOHI);
OPCODE(UMUL_LOHI);
OPCODE(SDIVREM);
OPCODE(UDIVREM);
OPCODE(SADDO);
OPCODE(SSUBO);
OPCODE(UADDO);
OPCODE(USUBO);
OPCODE(ADDCARRY);
OPCODE(SUBCARRY);
OPCODE(GET_CCMASK);
OPCODE(MVC);
OPCODE(MVC_LOOP);
OPCODE(NC);
OPCODE(NC_LOOP);
OPCODE(OC);
OPCODE(OC_LOOP);
OPCODE(XC);
OPCODE(XC_LOOP);
OPCODE(CLC);
OPCODE(CLC_LOOP);
OPCODE(STPCPY);
OPCODE(STRCMP);
OPCODE(SEARCH_STRING);
OPCODE(IPM);
OPCODE(MEMBARRIER);
OPCODE(TBEGIN);
OPCODE(TBEGIN_NOFLOAT);
OPCODE(TEND);
OPCODE(BYTE_MASK);
OPCODE(ROTATE_MASK);
OPCODE(REPLICATE);
OPCODE(JOIN_DWORDS);
OPCODE(SPLAT);
OPCODE(MERGE_HIGH);
OPCODE(MERGE_LOW);
OPCODE(SHL_DOUBLE);
OPCODE(PERMUTE_DWORDS);
OPCODE(PERMUTE);
OPCODE(PACK);
OPCODE(PACKS_CC);
OPCODE(PACKLS_CC);
OPCODE(UNPACK_HIGH);
OPCODE(UNPACKL_HIGH);
OPCODE(UNPACK_LOW);
OPCODE(UNPACKL_LOW);
OPCODE(VSHL_BY_SCALAR);
OPCODE(VSRL_BY_SCALAR);
OPCODE(VSRA_BY_SCALAR);
OPCODE(VSUM);
OPCODE(VICMPE);
OPCODE(VICMPH);
OPCODE(VICMPHL);
OPCODE(VICMPES);
OPCODE(VICMPHS);
OPCODE(VICMPHLS);
OPCODE(VFCMPE);
OPCODE(VFCMPH);
OPCODE(VFCMPHE);
OPCODE(VFCMPES);
OPCODE(VFCMPHS);
OPCODE(VFCMPHES);
OPCODE(VFTCI);
OPCODE(VEXTEND);
OPCODE(VROUND);
OPCODE(VTM);
OPCODE(VFAE_CC);
OPCODE(VFAEZ_CC);
OPCODE(VFEE_CC);
OPCODE(VFEEZ_CC);
OPCODE(VFENE_CC);
OPCODE(VFENEZ_CC);
OPCODE(VISTR_CC);
OPCODE(VSTRC_CC);
OPCODE(VSTRCZ_CC);
OPCODE(TDC);
OPCODE(ATOMIC_SWAPW);
OPCODE(ATOMIC_LOADW_ADD);
OPCODE(ATOMIC_LOADW_SUB);
OPCODE(ATOMIC_LOADW_AND);
OPCODE(ATOMIC_LOADW_OR);
OPCODE(ATOMIC_LOADW_XOR);
OPCODE(ATOMIC_LOADW_NAND);
OPCODE(ATOMIC_LOADW_MIN);
OPCODE(ATOMIC_LOADW_MAX);
OPCODE(ATOMIC_LOADW_UMIN);
OPCODE(ATOMIC_LOADW_UMAX);
OPCODE(ATOMIC_CMP_SWAPW);
OPCODE(ATOMIC_CMP_SWAP);
OPCODE(ATOMIC_LOAD_128);
OPCODE(ATOMIC_STORE_128);
OPCODE(ATOMIC_CMP_SWAP_128);
OPCODE(LRV);
OPCODE(STRV);
OPCODE(PREFETCH);
}
return nullptr;
#undef OPCODE
}
// Return true if VT is a vector whose elements are a whole number of bytes
// in width. Also check for presence of vector support.
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
if (!Subtarget.hasVector())
return false;
return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
}
// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
// producing a result of type ResVT. Op is a possibly bitcast version
// of the input vector and Index is the index (based on type VecVT) that
// should be extracted. Return the new extraction if a simplification
// was possible or if Force is true.
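// For example (illustrative): extracting an element of a shuffle whose
// permute mask takes the corresponding bytes contiguously from one input
// lets the extraction read directly from that input, and extracting the low
// part of a wider BUILD_VECTOR operand is reduced to a truncation of that
// operand.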
SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
EVT VecVT, SDValue Op,
unsigned Index,
DAGCombinerInfo &DCI,
bool Force) const {
SelectionDAG &DAG = DCI.DAG;
// The number of bytes being extracted.
unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
for (;;) {
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::BITCAST)
// Look through bitcasts.
Op = Op.getOperand(0);
else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
canTreatAsByteVector(Op.getValueType())) {
// Get a VPERM-like permute mask and see whether the bytes covered
// by the extracted element are a contiguous sequence from one
// source operand.
SmallVector<int, SystemZ::VectorBytes> Bytes;
if (!getVPermMask(Op, Bytes))
break;
int First;
if (!getShuffleInput(Bytes, Index * BytesPerElement,
BytesPerElement, First))
break;
if (First < 0)
return DAG.getUNDEF(ResVT);
// Make sure the contiguous sequence starts at a multiple of the
// original element size.
unsigned Byte = unsigned(First) % Bytes.size();
if (Byte % BytesPerElement != 0)
break;
// We can get the extracted value directly from an input.
Index = Byte / BytesPerElement;
Op = Op.getOperand(unsigned(First) / Bytes.size());
Force = true;
} else if (Opcode == ISD::BUILD_VECTOR &&
canTreatAsByteVector(Op.getValueType())) {
// We can only optimize this case if the BUILD_VECTOR elements are
// at least as wide as the extracted value.
EVT OpVT = Op.getValueType();
unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
if (OpBytesPerElement < BytesPerElement)
break;
// Make sure that the least-significant bit of the extracted value
// is the least significant bit of an input.
unsigned End = (Index + 1) * BytesPerElement;
if (End % OpBytesPerElement != 0)
break;
// We're extracting the low part of one operand of the BUILD_VECTOR.
Op = Op.getOperand(End / OpBytesPerElement - 1);
if (!Op.getValueType().isInteger()) {
EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
DCI.AddToWorklist(Op.getNode());
}
EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
if (VT != ResVT) {
DCI.AddToWorklist(Op.getNode());
Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
}
return Op;
} else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
canTreatAsByteVector(Op.getValueType()) &&
canTreatAsByteVector(Op.getOperand(0).getValueType())) {
// Make sure that only the unextended bits are significant.
EVT ExtVT = Op.getValueType();
EVT OpVT = Op.getOperand(0).getValueType();
unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
unsigned Byte = Index * BytesPerElement;
unsigned SubByte = Byte % ExtBytesPerElement;
unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
if (SubByte < MinSubByte ||
SubByte + BytesPerElement > ExtBytesPerElement)
break;
// Get the byte offset of the unextended element
Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
// ...then add the byte offset relative to that element.
Byte += SubByte - MinSubByte;
if (Byte % BytesPerElement != 0)
break;
Op = Op.getOperand(0);
Index = Byte / BytesPerElement;
Force = true;
} else
break;
}
if (Force) {
if (Op.getValueType() != VecVT) {
Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
DCI.AddToWorklist(Op.getNode());
}
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
DAG.getConstant(Index, DL, MVT::i32));
}
return SDValue();
}
// Optimize vector operations in scalar value Op on the basis that Op
// is truncated to TruncVT.
SDValue SystemZTargetLowering::combineTruncateExtract(
const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
// If we have (trunc (extract_vector_elt X, Y)), try to turn it into
// (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
// of type TruncVT.
if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
TruncVT.getSizeInBits() % 8 == 0) {
SDValue Vec = Op.getOperand(0);
EVT VecVT = Vec.getValueType();
if (canTreatAsByteVector(VecVT)) {
if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
unsigned TruncBytes = TruncVT.getStoreSize();
if (BytesPerElement % TruncBytes == 0) {
// Calculate the value of Y' in the above description. We are
// splitting the original elements into Scale equal-sized pieces
// and for truncation purposes want the last (least-significant)
// of these pieces for IndexN. This is easiest to do by calculating
// the start index of the following element and then subtracting 1.
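// For example (illustrative): truncating an i32 element at index Y of a
// v4i32 to i8 gives Scale == 4 and NewIndex == (Y + 1) * 4 - 1, i.e. the
// last of the four i8 pieces of element Y once the vector is viewed as
// v16i8.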
unsigned Scale = BytesPerElement / TruncBytes;
unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
// Defer the creation of the bitcast from X to combineExtract,
// which might be able to optimize the extraction.
VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
VecVT.getStoreSize() / TruncBytes);
EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
}
}
}
}
return SDValue();
}
SDValue SystemZTargetLowering::combineZERO_EXTEND(
SDNode *N, DAGCombinerInfo &DCI) const {
// Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
SelectionDAG &DAG = DCI.DAG;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (TrueOp && FalseOp) {
SDLoc DL(N0);
SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
// If N0 has multiple uses, change other uses as well.
if (!N0.hasOneUse()) {
SDValue TruncSelect =
DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
DCI.CombineTo(N0.getNode(), TruncSelect);
}
return NewSelect;
}
}
return SDValue();
}
SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
SDNode *N, DAGCombinerInfo &DCI) const {
// Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
// and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
// into (select_cc LHS, RHS, -1, 0, COND)
SelectionDAG &DAG = DCI.DAG;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
N0 = N0.getOperand(0);
if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
SDLoc DL(N0);
SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
N0.getOperand(2) };
return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
}
return SDValue();
}
SDValue SystemZTargetLowering::combineSIGN_EXTEND(
SDNode *N, DAGCombinerInfo &DCI) const {
// Convert (sext (ashr (shl X, C1), C2)) to
// (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
// cheap as narrower ones.
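// For example (illustrative): (sext i32 (ashr (shl X, 24), 24)) to i64
// becomes (ashr (shl (anyext X to i64), 56), 56), where Extra below is the
// bit-width difference (64 - 32 = 32) added to both shift amounts.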
SelectionDAG &DAG = DCI.DAG;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
SDValue Inner = N0.getOperand(0);
if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
EVT ShiftVT = N0.getOperand(1).getValueType();
SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
Inner.getOperand(0));
SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
DAG.getConstant(NewShlAmt, SDLoc(Inner),
ShiftVT));
return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
}
}
}
return SDValue();
}
SDValue SystemZTargetLowering::combineMERGE(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
unsigned Opcode = N->getOpcode();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
if (Op0.getOpcode() == ISD::BITCAST)
Op0 = Op0.getOperand(0);
if (Op0.getOpcode() == SystemZISD::BYTE_MASK &&
cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
// (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
// for v4f32.
if (Op1 == N->getOperand(0))
return Op1;
// (z_merge_? 0, X) -> (z_unpackl_? 0, X).
EVT VT = Op1.getValueType();
unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
if (ElemBytes <= 4) {
Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
EVT InVT = VT.changeVectorElementTypeToInteger();
EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
SystemZ::VectorBytes / ElemBytes / 2);
if (VT != InVT) {
Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
DCI.AddToWorklist(Op1.getNode());
}
SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
DCI.AddToWorklist(Op.getNode());
return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
}
return SDValue();
}
SDValue SystemZTargetLowering::combineLOAD(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
EVT LdVT = N->getValueType(0);
if (LdVT.isVector() || LdVT.isInteger())
return SDValue();
// Transform a scalar load that is REPLICATEd as well as having other
// use(s) to the form where the other use(s) use the first element of the
// REPLICATE instead of the load. Otherwise instruction selection will not
// produce a VLREP. Avoid extracting to a GPR, so only do this for floating
// point loads.
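// For example (illustrative): if an f64 load feeds both a REPLICATE and a
// separate scalar use, the scalar use is rewritten to extract element 0 of
// the REPLICATE, letting instruction selection fold the load and splat into
// a single VLREP.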
SDValue Replicate;
SmallVector<SDNode*, 8> OtherUses;
for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
UI != UE; ++UI) {
if (UI->getOpcode() == SystemZISD::REPLICATE) {
if (Replicate)
return SDValue(); // Should never happen
Replicate = SDValue(*UI, 0);
}
else if (UI.getUse().getResNo() == 0)
OtherUses.push_back(*UI);
}
if (!Replicate || OtherUses.empty())
return SDValue();
SDLoc DL(N);
SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT,
Replicate, DAG.getConstant(0, DL, MVT::i32));
// Update uses of the loaded Value while preserving old chains.
for (SDNode *U : OtherUses) {
SmallVector<SDValue, 8> Ops;
for (SDValue Op : U->ops())
Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op);
DAG.UpdateNodeOperands(U, Ops);
}
return SDValue(N, 0);
}
SDValue SystemZTargetLowering::combineSTORE(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
auto *SN = cast<StoreSDNode>(N);
auto &Op1 = N->getOperand(1);
EVT MemVT = SN->getMemoryVT();
// If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
// for the extraction to be done on a vMiN value, so that we can use VSTE.
// If X has wider elements then convert it to:
// (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
if (MemVT.isInteger() && SN->isTruncatingStore()) {
if (SDValue Value =
combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
DCI.AddToWorklist(Value.getNode());
// Rewrite the store with the new form of stored value.
return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
SN->getBasePtr(), SN->getMemoryVT(),
SN->getMemOperand());
}
}
// Combine STORE (BSWAP) into STRVH/STRV/STRVG
if (!SN->isTruncatingStore() &&
Op1.getOpcode() == ISD::BSWAP &&
Op1.getNode()->hasOneUse() &&
(Op1.getValueType() == MVT::i16 ||
Op1.getValueType() == MVT::i32 ||
Op1.getValueType() == MVT::i64)) {
SDValue BSwapOp = Op1.getOperand(0);
if (BSwapOp.getValueType() == MVT::i16)
BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);
SDValue Ops[] = {
N->getOperand(0), BSwapOp, N->getOperand(2)
};
return
DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other),
Ops, MemVT, SN->getMemOperand());
}
return SDValue();
}
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
SDNode *N, DAGCombinerInfo &DCI) const {
if (!Subtarget.hasVector())
return SDValue();
// Try to simplify a vector extraction.
if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
SDValue Op0 = N->getOperand(0);
EVT VecVT = Op0.getValueType();
return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
IndexN->getZExtValue(), DCI, false);
}
return SDValue();
}
SDValue SystemZTargetLowering::combineJOIN_DWORDS(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
// (join_dwords X, X) == (replicate X)
if (N->getOperand(0) == N->getOperand(1))
return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
N->getOperand(0));
return SDValue();
}
SDValue SystemZTargetLowering::combineFP_ROUND(
SDNode *N, DAGCombinerInfo &DCI) const {
// (fpround (extract_vector_elt X 0))
// (fpround (extract_vector_elt X 1)) ->
// (extract_vector_elt (VROUND X) 0)
// (extract_vector_elt (VROUND X) 2)
//
// This is a special case since the target doesn't really support v2f32s.
SelectionDAG &DAG = DCI.DAG;
SDValue Op0 = N->getOperand(0);
if (N->getValueType(0) == MVT::f32 &&
Op0.hasOneUse() &&
Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op0.getOperand(0).getValueType() == MVT::v2f64 &&
Op0.getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
SDValue Vec = Op0.getOperand(0);
for (auto *U : Vec->uses()) {
if (U != Op0.getNode() &&
U->hasOneUse() &&
U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
U->getOperand(0) == Vec &&
U->getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
SDValue OtherRound = SDValue(*U->use_begin(), 0);
if (OtherRound.getOpcode() == ISD::FP_ROUND &&
OtherRound.getOperand(0) == SDValue(U, 0) &&
OtherRound.getValueType() == MVT::f32) {
SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
MVT::v4f32, Vec);
DCI.AddToWorklist(VRound.getNode());
SDValue Extract1 =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
DCI.AddToWorklist(Extract1.getNode());
DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
SDValue Extract0 =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
return Extract0;
}
}
}
}
return SDValue();
}
SDValue SystemZTargetLowering::combineFP_EXTEND(
SDNode *N, DAGCombinerInfo &DCI) const {
// (fpextend (extract_vector_elt X 0))
// (fpextend (extract_vector_elt X 2)) ->
// (extract_vector_elt (VEXTEND X) 0)
// (extract_vector_elt (VEXTEND X) 1)
//
// This is a special case since the target doesn't really support v2f32s.
SelectionDAG &DAG = DCI.DAG;
SDValue Op0 = N->getOperand(0);
if (N->getValueType(0) == MVT::f64 &&
Op0.hasOneUse() &&
Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op0.getOperand(0).getValueType() == MVT::v4f32 &&
Op0.getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
SDValue Vec = Op0.getOperand(0);
for (auto *U : Vec->uses()) {
if (U != Op0.getNode() &&
U->hasOneUse() &&
U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
U->getOperand(0) == Vec &&
U->getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
SDValue OtherExtend = SDValue(*U->use_begin(), 0);
if (OtherExtend.getOpcode() == ISD::FP_EXTEND &&
OtherExtend.getOperand(0) == SDValue(U, 0) &&
OtherExtend.getValueType() == MVT::f64) {
SDValue VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
MVT::v2f64, Vec);
DCI.AddToWorklist(VExtend.getNode());
SDValue Extract1 =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
DCI.AddToWorklist(Extract1.getNode());
DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
SDValue Extract0 =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
return Extract0;
}
}
}
}
return SDValue();
}
SDValue SystemZTargetLowering::combineBSWAP(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
// Combine BSWAP (LOAD) into LRVH/LRV/LRVG
if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
N->getOperand(0).hasOneUse() &&
(N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 ||
N->getValueType(0) == MVT::i64)) {
SDValue Load = N->getOperand(0);
LoadSDNode *LD = cast<LoadSDNode>(Load);
// Create the byte-swapping load.
SDValue Ops[] = {
LD->getChain(), // Chain
LD->getBasePtr() // Ptr
};
EVT LoadVT = N->getValueType(0);
if (LoadVT == MVT::i16)
LoadVT = MVT::i32;
SDValue BSLoad =
DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
DAG.getVTList(LoadVT, MVT::Other),
Ops, LD->getMemoryVT(), LD->getMemOperand());
// If this is an i16 load, insert the truncate.
SDValue ResVal = BSLoad;
if (N->getValueType(0) == MVT::i16)
ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);
// First, combine the bswap away. This makes the value produced by the
// load dead.
DCI.CombineTo(N, ResVal);
// Next, combine the load away, we give it a bogus result value but a real
// chain result. The result value is dead because the bswap is dead.
DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
// Return N so it doesn't get rechecked!
return SDValue(N, 0);
}
return SDValue();
}
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
// We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
// set by the CCReg instruction using the CCValid / CCMask masks,
- // If the CCReg instruction is itself a (ICMP (SELECT_CCMASK)) testing
- // the condition code set by some other instruction, see whether we
- // can directly use that condition code.
- bool Invert = false;
+ // If the CCReg instruction is itself a ICMP testing the condition
+ // code set by some other instruction, see whether we can directly
+ // use that condition code.
- // Verify that we have an appropriate mask for a EQ or NE comparison.
+ // Verify that we have an ICMP against some constant.
if (CCValid != SystemZ::CCMASK_ICMP)
return false;
- if (CCMask == SystemZ::CCMASK_CMP_NE)
- Invert = !Invert;
- else if (CCMask != SystemZ::CCMASK_CMP_EQ)
- return false;
-
- // Verify that we have an ICMP that is the user of a SELECT_CCMASK.
- SDNode *ICmp = CCReg.getNode();
+ auto *ICmp = CCReg.getNode();
if (ICmp->getOpcode() != SystemZISD::ICMP)
return false;
- SDNode *Select = ICmp->getOperand(0).getNode();
- if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
+ auto *CompareLHS = ICmp->getOperand(0).getNode();
+ auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
+ if (!CompareRHS)
return false;
- // Verify that the ICMP compares against one of select values.
- auto *CompareVal = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
- if (!CompareVal)
- return false;
- auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
- if (!TrueVal)
- return false;
- auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
- if (!FalseVal)
- return false;
- if (CompareVal->getZExtValue() == FalseVal->getZExtValue())
- Invert = !Invert;
- else if (CompareVal->getZExtValue() != TrueVal->getZExtValue())
- return false;
+ // Optimize the case where CompareLHS is a SELECT_CCMASK.
+ if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
+ // Verify that we have an appropriate mask for an EQ or NE comparison.
+ bool Invert = false;
+ if (CCMask == SystemZ::CCMASK_CMP_NE)
+ Invert = !Invert;
+ else if (CCMask != SystemZ::CCMASK_CMP_EQ)
+ return false;
- // Compute the effective CC mask for the new branch or select.
- auto *NewCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
- auto *NewCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
- if (!NewCCValid || !NewCCMask)
- return false;
- CCValid = NewCCValid->getZExtValue();
- CCMask = NewCCMask->getZExtValue();
- if (Invert)
- CCMask ^= CCValid;
+ // Verify that the ICMP compares against one of select values.
+ auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
+ if (!TrueVal)
+ return false;
+ auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
+ if (!FalseVal)
+ return false;
+ if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
+ Invert = !Invert;
+ else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
+ return false;
- // Return the updated CCReg link.
- CCReg = Select->getOperand(4);
- return true;
+ // Compute the effective CC mask for the new branch or select.
+ auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
+ auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
+ if (!NewCCValid || !NewCCMask)
+ return false;
+ CCValid = NewCCValid->getZExtValue();
+ CCMask = NewCCMask->getZExtValue();
+ if (Invert)
+ CCMask ^= CCValid;
+
+ // Return the updated CCReg link.
+ CCReg = CompareLHS->getOperand(4);
+ return true;
+ }
+
+ // Optimize the case where CompareRHS is (SRA (SHL (IPM))).
+ if (CompareLHS->getOpcode() == ISD::SRA) {
+ auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
+ if (!SRACount || SRACount->getZExtValue() != 30)
+ return false;
+ auto *SHL = CompareLHS->getOperand(0).getNode();
+ if (SHL->getOpcode() != ISD::SHL)
+ return false;
+ auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
+ if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC)
+ return false;
+ auto *IPM = SHL->getOperand(0).getNode();
+ if (IPM->getOpcode() != SystemZISD::IPM)
+ return false;
+
+ // Avoid introducing CC spills (because SRA would clobber CC).
+ if (!CompareLHS->hasOneUse())
+ return false;
+ // Verify that the ICMP compares against zero.
+ if (CompareRHS->getZExtValue() != 0)
+ return false;
+
+ // Compute the effective CC mask for the new branch or select.
+ switch (CCMask) {
+ case SystemZ::CCMASK_CMP_EQ: break;
+ case SystemZ::CCMASK_CMP_NE: break;
+ case SystemZ::CCMASK_CMP_LT: CCMask = SystemZ::CCMASK_CMP_GT; break;
+ case SystemZ::CCMASK_CMP_GT: CCMask = SystemZ::CCMASK_CMP_LT; break;
+ case SystemZ::CCMASK_CMP_LE: CCMask = SystemZ::CCMASK_CMP_GE; break;
+ case SystemZ::CCMASK_CMP_GE: CCMask = SystemZ::CCMASK_CMP_LE; break;
+ default: return false;
+ }
+
+ // Return the updated CCReg link.
+ CCReg = IPM->getOperand(0);
+ return true;
+ }
+
+ return false;
}
SDValue SystemZTargetLowering::combineBR_CCMASK(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
// Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
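// For example (illustrative): if the CC producer is
// (icmp (select_ccmask 1, 0, SelValid, SelMask, CC), 0) and this branch
// tests for equality, the branch can test CC directly with SelMask inverted,
// since the select yields 0 exactly when its condition is false.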
auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
if (!CCValid || !CCMask)
return SDValue();
int CCValidVal = CCValid->getZExtValue();
int CCMaskVal = CCMask->getZExtValue();
SDValue Chain = N->getOperand(0);
SDValue CCReg = N->getOperand(4);
if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
Chain,
DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32),
DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32),
N->getOperand(3), CCReg);
return SDValue();
}
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
// Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
if (!CCValid || !CCMask)
return SDValue();
int CCValidVal = CCValid->getZExtValue();
int CCMaskVal = CCMask->getZExtValue();
SDValue CCReg = N->getOperand(4);
if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
N->getOperand(0),
N->getOperand(1),
DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32),
DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32),
CCReg);
return SDValue();
}
SDValue SystemZTargetLowering::combineGET_CCMASK(
SDNode *N, DAGCombinerInfo &DCI) const {
// Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible
auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
if (!CCValid || !CCMask)
return SDValue();
int CCValidVal = CCValid->getZExtValue();
int CCMaskVal = CCMask->getZExtValue();
SDValue Select = N->getOperand(0);
if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
return SDValue();
auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
if (!SelectCCValid || !SelectCCMask)
return SDValue();
int SelectCCValidVal = SelectCCValid->getZExtValue();
int SelectCCMaskVal = SelectCCMask->getZExtValue();
auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
if (!TrueVal || !FalseVal)
return SDValue();
if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
;
else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
SelectCCMaskVal ^= SelectCCValidVal;
else
return SDValue();
if (SelectCCValidVal & ~CCValidVal)
return SDValue();
if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
return SDValue();
return Select->getOperand(4);
}
SDValue SystemZTargetLowering::combineIntDIVREM(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
// In the case where the divisor is a vector of constants a cheaper
// sequence of instructions can replace the divide. BuildSDIV is called to
// do this during DAG combining, but it only succeeds when it can build a
// multiplication node. The only option for SystemZ is ISD::SMUL_LOHI, and
// since it is not Legal but Custom it can only happen before
// legalization. Therefore we must scalarize this early before Combine 1.
// For widened vectors, this is already the result of type legalization.
if (VT.isVector() && isTypeLegal(VT) &&
DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1)))
return DAG.UnrollVectorOp(N);
return SDValue();
}
SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
switch(N->getOpcode()) {
default: break;
case ISD::ZERO_EXTEND: return combineZERO_EXTEND(N, DCI);
case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI);
case ISD::SIGN_EXTEND_INREG: return combineSIGN_EXTEND_INREG(N, DCI);
case SystemZISD::MERGE_HIGH:
case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI);
case ISD::LOAD: return combineLOAD(N, DCI);
case ISD::STORE: return combineSTORE(N, DCI);
case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
case ISD::FP_ROUND: return combineFP_ROUND(N, DCI);
case ISD::FP_EXTEND: return combineFP_EXTEND(N, DCI);
case ISD::BSWAP: return combineBSWAP(N, DCI);
case SystemZISD::BR_CCMASK: return combineBR_CCMASK(N, DCI);
case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
case SystemZISD::GET_CCMASK: return combineGET_CCMASK(N, DCI);
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM: return combineIntDIVREM(N, DCI);
}
return SDValue();
}
// Return the demanded elements for the OpNo source operand of Op. DemandedElts
// are for Op.
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
unsigned OpNo) {
EVT VT = Op.getValueType();
unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1);
APInt SrcDemE;
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (Id) {
case Intrinsic::s390_vpksh: // PACKS
case Intrinsic::s390_vpksf:
case Intrinsic::s390_vpksg:
case Intrinsic::s390_vpkshs: // PACKS_CC
case Intrinsic::s390_vpksfs:
case Intrinsic::s390_vpksgs:
case Intrinsic::s390_vpklsh: // PACKLS
case Intrinsic::s390_vpklsf:
case Intrinsic::s390_vpklsg:
case Intrinsic::s390_vpklshs: // PACKLS_CC
case Intrinsic::s390_vpklsfs:
case Intrinsic::s390_vpklsgs:
// VECTOR PACK truncates the elements of two source vectors into one.
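// For example (illustrative): in this model, a v8i16 result packed from two
// v4i32 sources takes result elements 0-3 from operand 1 and elements 4-7
// from operand 2, so each source keeps only its half of DemandedElts.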
SrcDemE = DemandedElts;
if (OpNo == 2)
SrcDemE.lshrInPlace(NumElts / 2);
SrcDemE = SrcDemE.trunc(NumElts / 2);
break;
// VECTOR UNPACK extends half the elements of the source vector.
case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH
case Intrinsic::s390_vuphh:
case Intrinsic::s390_vuphf:
case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
case Intrinsic::s390_vuplhh:
case Intrinsic::s390_vuplhf:
SrcDemE = APInt(NumElts * 2, 0);
SrcDemE.insertBits(DemandedElts, 0);
break;
case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW
case Intrinsic::s390_vuplhw:
case Intrinsic::s390_vuplf:
case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
case Intrinsic::s390_vupllh:
case Intrinsic::s390_vupllf:
SrcDemE = APInt(NumElts * 2, 0);
SrcDemE.insertBits(DemandedElts, NumElts);
break;
case Intrinsic::s390_vpdi: {
// VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source.
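// As modeled below (illustrative): the mask bit with value 4 chooses between
// elements 0 and 1 of the first source, and the bit with value 1 does the
// same for the second source.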
SrcDemE = APInt(NumElts, 0);
if (!DemandedElts[OpNo - 1])
break;
unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
// Demand input element 0 or 1, given by the mask bit value.
SrcDemE.setBit((Mask & MaskBit)? 1 : 0);
break;
}
case Intrinsic::s390_vsldb: {
// VECTOR SHIFT LEFT DOUBLE BY BYTE
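// For example (illustrative): with FirstIdx == 3 the result is bytes 3-15 of
// operand 1 followed by bytes 0-2 of operand 2, so demanded result bytes
// 0-12 map to operand 1 and bytes 13-15 to operand 2.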
assert(VT == MVT::v16i8 && "Unexpected type.");
unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
unsigned NumSrc0Els = 16 - FirstIdx;
SrcDemE = APInt(NumElts, 0);
if (OpNo == 1) {
APInt DemEls = DemandedElts.trunc(NumSrc0Els);
SrcDemE.insertBits(DemEls, FirstIdx);
} else {
APInt DemEls = DemandedElts.lshr(NumSrc0Els);
SrcDemE.insertBits(DemEls, 0);
}
break;
}
case Intrinsic::s390_vperm:
SrcDemE = APInt(NumElts, 1);
break;
default:
llvm_unreachable("Unhandled intrinsic.");
break;
}
} else {
switch (Opcode) {
case SystemZISD::JOIN_DWORDS:
// Scalar operand.
SrcDemE = APInt(1, 1);
break;
case SystemZISD::SELECT_CCMASK:
SrcDemE = DemandedElts;
break;
default:
llvm_unreachable("Unhandled opcode.");
break;
}
}
return SrcDemE;
}
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG, unsigned Depth,
unsigned OpNo) {
APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
KnownBits LHSKnown =
DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
KnownBits RHSKnown =
DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
Known.Zero = LHSKnown.Zero & RHSKnown.Zero;
Known.One = LHSKnown.One & RHSKnown.One;
}
void
SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
Known.resetAll();
// Intrinsic CC result is returned in the two low bits.
unsigned tmp0, tmp1; // not used
if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
Known.Zero.setBitsFrom(2);
return;
}
EVT VT = Op.getValueType();
if (Op.getResNo() != 0 || VT == MVT::Untyped)
return;
assert (Known.getBitWidth() == VT.getScalarSizeInBits() &&
"KnownBits does not match VT in bitwidth");
assert ((!VT.isVector() ||
(DemandedElts.getBitWidth() == VT.getVectorNumElements())) &&
"DemandedElts does not match VT number of elements");
unsigned BitWidth = Known.getBitWidth();
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
bool IsLogical = false;
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (Id) {
case Intrinsic::s390_vpksh: // PACKS
case Intrinsic::s390_vpksf:
case Intrinsic::s390_vpksg:
case Intrinsic::s390_vpkshs: // PACKS_CC
case Intrinsic::s390_vpksfs:
case Intrinsic::s390_vpksgs:
case Intrinsic::s390_vpklsh: // PACKLS
case Intrinsic::s390_vpklsf:
case Intrinsic::s390_vpklsg:
case Intrinsic::s390_vpklshs: // PACKLS_CC
case Intrinsic::s390_vpklsfs:
case Intrinsic::s390_vpklsgs:
case Intrinsic::s390_vpdi:
case Intrinsic::s390_vsldb:
case Intrinsic::s390_vperm:
computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
break;
case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
case Intrinsic::s390_vuplhh:
case Intrinsic::s390_vuplhf:
case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
case Intrinsic::s390_vupllh:
case Intrinsic::s390_vupllf:
IsLogical = true;
LLVM_FALLTHROUGH;
case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH
case Intrinsic::s390_vuphh:
case Intrinsic::s390_vuphf:
case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW
case Intrinsic::s390_vuplhw:
case Intrinsic::s390_vuplf: {
SDValue SrcOp = Op.getOperand(1);
unsigned SrcBitWidth = SrcOp.getScalarValueSizeInBits();
APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1);
if (IsLogical) {
Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(SrcBitWidth);
} else
Known = Known.sext(BitWidth);
break;
}
default:
break;
}
} else {
switch (Opcode) {
case SystemZISD::JOIN_DWORDS:
case SystemZISD::SELECT_CCMASK:
computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
break;
case SystemZISD::REPLICATE: {
SDValue SrcOp = Op.getOperand(0);
Known = DAG.computeKnownBits(SrcOp, Depth + 1);
if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
break;
}
default:
break;
}
}
// Known has the width of the source operand(s). Adjust if needed to match
// the passed bitwidth.
if (Known.getBitWidth() != BitWidth)
Known = Known.zextOrTrunc(BitWidth);
}
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
const SelectionDAG &DAG, unsigned Depth,
unsigned OpNo) {
APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
if (LHS == 1) return 1; // Early out.
APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
if (RHS == 1) return 1; // Early out.
unsigned Common = std::min(LHS, RHS);
unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
EVT VT = Op.getValueType();
unsigned VTBits = VT.getScalarSizeInBits();
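// If the sources are wider than the result (a PACK), the truncation drops
// SrcExtraBits from the top. For example (illustrative): i32 sources with 20
// known sign bits packed into i16 elements leave 20 - 16 = 4 known sign
// bits.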
if (SrcBitWidth > VTBits) { // PACK
unsigned SrcExtraBits = SrcBitWidth - VTBits;
if (Common > SrcExtraBits)
return (Common - SrcExtraBits);
return 1;
}
assert (SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
return Common;
}
unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
unsigned Depth) const {
if (Op.getResNo() != 0)
return 1;
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (Id) {
case Intrinsic::s390_vpksh: // PACKS
case Intrinsic::s390_vpksf:
case Intrinsic::s390_vpksg:
case Intrinsic::s390_vpkshs: // PACKS_CC
case Intrinsic::s390_vpksfs:
case Intrinsic::s390_vpksgs:
case Intrinsic::s390_vpklsh: // PACKLS
case Intrinsic::s390_vpklsf:
case Intrinsic::s390_vpklsg:
case Intrinsic::s390_vpklshs: // PACKLS_CC
case Intrinsic::s390_vpklsfs:
case Intrinsic::s390_vpklsgs:
case Intrinsic::s390_vpdi:
case Intrinsic::s390_vsldb:
case Intrinsic::s390_vperm:
return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1);
case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH
case Intrinsic::s390_vuphh:
case Intrinsic::s390_vuphf:
case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW
case Intrinsic::s390_vuplhw:
case Intrinsic::s390_vuplf: {
SDValue PackedOp = Op.getOperand(1);
APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1);
unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1);
EVT VT = Op.getValueType();
unsigned VTBits = VT.getScalarSizeInBits();
Tmp += VTBits - PackedOp.getScalarValueSizeInBits();
return Tmp;
}
default:
break;
}
} else {
switch (Opcode) {
case SystemZISD::SELECT_CCMASK:
return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0);
default:
break;
}
}
return 1;
}
//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//
// Create a new basic block after MBB.
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
MachineFunction &MF = *MBB->getParent();
MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
return NewMBB;
}
// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB) {
MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
NewMBB->splice(NewMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
return NewMBB;
}
// Split MBB before MI and return the new block (the one that contains MI).
static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB) {
MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
return NewMBB;
}
// Force base value Base into a register before MI. Return the register.
static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
const SystemZInstrInfo *TII) {
if (Base.isReg())
return Base.getReg();
MachineBasicBlock *MBB = MI.getParent();
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
.add(Base)
.addImm(0)
.addReg(0);
return Reg;
}
// The CC operand of MI might be missing a kill marker because there
// were multiple uses of CC, and ISel didn't know which to mark.
// Figure out whether MI should have had a kill marker.
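// Returns true if CC is neither read again before being clobbered in MBB nor
// live into any successor, i.e. MI could safely have carried a kill flag.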
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
// Scan forward through BB for a use/def of CC.
MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
const MachineInstr& mi = *miI;
if (mi.readsRegister(SystemZ::CC))
return false;
if (mi.definesRegister(SystemZ::CC))
break; // Should have kill-flag - update below.
}
// If we hit the end of the block, check whether CC is live into a
// successor.
if (miI == MBB->end()) {
for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
if ((*SI)->isLiveIn(SystemZ::CC))
return false;
}
return true;
}
// Return true if it is OK for this Select pseudo-opcode to be cascaded
// together with other Select pseudo-opcodes into a single basic-block with
// a conditional jump around it.
static bool isSelectPseudo(MachineInstr &MI) {
switch (MI.getOpcode()) {
case SystemZ::Select32:
case SystemZ::Select64:
case SystemZ::SelectF32:
case SystemZ::SelectF64:
case SystemZ::SelectF128:
case SystemZ::SelectVR32:
case SystemZ::SelectVR64:
case SystemZ::SelectVR128:
return true;
default:
return false;
}
}
// Helper function, which inserts PHI functions into SinkMBB:
// %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consecutive Selects
// in [MIItBegin, MIItEnd) range.
static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin,
MachineBasicBlock::iterator MIItEnd,
MachineBasicBlock *TrueMBB,
MachineBasicBlock *FalseMBB,
MachineBasicBlock *SinkMBB) {
MachineFunction *MF = TrueMBB->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
unsigned CCValid = MIItBegin->getOperand(3).getImm();
unsigned CCMask = MIItBegin->getOperand(4).getImm();
DebugLoc DL = MIItBegin->getDebugLoc();
MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
// As we are creating the PHIs, we have to be careful if there is more than
// one. Later Selects may reference the results of earlier Selects, but later
// PHIs have to reference the individual true/false inputs from earlier PHIs.
// That also means that PHI construction must work forward from earlier to
// later, and that the code must maintain a mapping from each earlier PHI's
// destination register to the registers that went into that PHI.
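// For example, given the cascaded Selects
//   %a = Select %t1, %f1, CC
//   %b = Select %a,  %f2, CC
// the PHI for %b must use %t1 (not %a) on the TrueMBB edge, which is what the
// rewrite-table lookup below provides.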
DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
unsigned DestReg = MIIt->getOperand(0).getReg();
unsigned TrueReg = MIIt->getOperand(1).getReg();
unsigned FalseReg = MIIt->getOperand(2).getReg();
// If this Select uses the opposite condition from the branch we generated,
// swap the true/false operands of the PHI that is going to be generated.
if (MIIt->getOperand(4).getImm() == (CCValid ^ CCMask))
std::swap(TrueReg, FalseReg);
if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
TrueReg = RegRewriteTable[TrueReg].first;
if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
FalseReg = RegRewriteTable[FalseReg].second;
BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
.addReg(TrueReg).addMBB(TrueMBB)
.addReg(FalseReg).addMBB(FalseMBB);
// Add this PHI to the rewrite table.
RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
}
}
// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
MachineBasicBlock *MBB) const {
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
unsigned CCValid = MI.getOperand(3).getImm();
unsigned CCMask = MI.getOperand(4).getImm();
DebugLoc DL = MI.getDebugLoc();
// If we have a sequence of Select* pseudo instructions using the
// same condition code value, we want to expand all of them into
// a single pair of basic blocks using the same condition.
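// Selects whose CC mask is the logical inverse (CCValid ^ CCMask) are also
// absorbed into the group; createPHIsForSelects swaps their PHI operands to
// compensate.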
MachineInstr *LastMI = &MI;
MachineBasicBlock::iterator NextMIIt =
std::next(MachineBasicBlock::iterator(MI));
if (isSelectPseudo(MI))
while (NextMIIt != MBB->end() && isSelectPseudo(*NextMIIt) &&
NextMIIt->getOperand(3).getImm() == CCValid &&
(NextMIIt->getOperand(4).getImm() == CCMask ||
NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask))) {
LastMI = &*NextMIIt;
++NextMIIt;
}
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
// Unless CC was killed in the last Select instruction, mark it as
// live-in to both FalseMBB and JoinMBB.
if (!LastMI->killsRegister(SystemZ::CC) && !checkCCKill(*LastMI, JoinMBB)) {
FalseMBB->addLiveIn(SystemZ::CC);
JoinMBB->addLiveIn(SystemZ::CC);
}
// StartMBB:
// BRC CCMask, JoinMBB
// # fallthrough to FalseMBB
MBB = StartMBB;
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
MBB->addSuccessor(JoinMBB);
MBB->addSuccessor(FalseMBB);
// FalseMBB:
// # fallthrough to JoinMBB
MBB = FalseMBB;
MBB->addSuccessor(JoinMBB);
// JoinMBB:
// %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
// ...
MBB = JoinMBB;
MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
MachineBasicBlock::iterator MIItEnd =
std::next(MachineBasicBlock::iterator(LastMI));
createPHIsForSelects(MIItBegin, MIItEnd, StartMBB, FalseMBB, MBB);
StartMBB->erase(MIItBegin, MIItEnd);
return JoinMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true. If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
MachineBasicBlock *MBB,
unsigned StoreOpcode,
unsigned STOCOpcode,
bool Invert) const {
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
unsigned SrcReg = MI.getOperand(0).getReg();
MachineOperand Base = MI.getOperand(1);
int64_t Disp = MI.getOperand(2).getImm();
unsigned IndexReg = MI.getOperand(3).getReg();
unsigned CCValid = MI.getOperand(4).getImm();
unsigned CCMask = MI.getOperand(5).getImm();
DebugLoc DL = MI.getDebugLoc();
StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
// Use STOCOpcode if possible. We could use different store patterns in
// order to avoid matching the index register, but the performance trade-offs
// might be more complicated in that case.
if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
if (Invert)
CCMask ^= CCValid;
// ISel pattern matching also adds a load memory operand of the same
// address, so take special care to find the storing memory operand.
MachineMemOperand *MMO = nullptr;
for (auto *I : MI.memoperands())
if (I->isStore()) {
MMO = I;
break;
}
BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
.addReg(SrcReg)
.add(Base)
.addImm(Disp)
.addImm(CCValid)
.addImm(CCMask)
.addMemOperand(MMO);
MI.eraseFromParent();
return MBB;
}
// Get the condition needed to branch around the store.
if (!Invert)
CCMask ^= CCValid;
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
// Unless CC was killed in the CondStore instruction, mark it as
// live-in to both FalseMBB and JoinMBB.
if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) {
FalseMBB->addLiveIn(SystemZ::CC);
JoinMBB->addLiveIn(SystemZ::CC);
}
// StartMBB:
// BRC CCMask, JoinMBB
// # fallthrough to FalseMBB
MBB = StartMBB;
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
MBB->addSuccessor(JoinMBB);
MBB->addSuccessor(FalseMBB);
// FalseMBB:
// store %SrcReg, %Disp(%Index,%Base)
// # fallthrough to JoinMBB
MBB = FalseMBB;
BuildMI(MBB, DL, TII->get(StoreOpcode))
.addReg(SrcReg)
.add(Base)
.addImm(Disp)
.addReg(IndexReg);
MBB->addSuccessor(JoinMBB);
MI.eraseFromParent();
return JoinMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands. Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
unsigned BitSize, bool Invert) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
// Extract the operands. Base can be a register or a frame index.
// Src2 can be a register or immediate.
unsigned Dest = MI.getOperand(0).getReg();
MachineOperand Base = earlyUseOperand(MI.getOperand(1));
int64_t Disp = MI.getOperand(2).getImm();
MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
DebugLoc DL = MI.getDebugLoc();
if (IsSubWord)
BitSize = MI.getOperand(6).getImm();
// Subword operations use 32-bit registers.
const TargetRegisterClass *RC = (BitSize <= 32 ?
&SystemZ::GR32BitRegClass :
&SystemZ::GR64BitRegClass);
unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
// Get the right opcodes for the displacement.
LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
assert(LOpcode && CSOpcode && "Displacement out of range");
// Create virtual registers for temporary results.
unsigned OrigVal = MRI.createVirtualRegister(RC);
unsigned OldVal = MRI.createVirtualRegister(RC);
unsigned NewVal = (BinOpcode || IsSubWord ?
MRI.createVirtualRegister(RC) : Src2.getReg());
unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
// Insert a basic block for the main loop.
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
// StartMBB:
// ...
// %OrigVal = L Disp(%Base)
// # fall through to LoopMBB
MBB = StartMBB;
BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
MBB->addSuccessor(LoopMBB);
// LoopMBB:
// %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
// %RotatedOldVal = RLL %OldVal, 0(%BitShift)
// %RotatedNewVal = OP %RotatedOldVal, %Src2
// %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
// %Dest = CS %OldVal, %NewVal, Disp(%Base)
// JNE LoopMBB
// # fall through to DoneMBB
MBB = LoopMBB;
BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
.addReg(OrigVal).addMBB(StartMBB)
.addReg(Dest).addMBB(LoopMBB);
if (IsSubWord)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
.addReg(OldVal).addReg(BitShift).addImm(0);
if (Invert) {
// Perform the operation normally and then invert every bit of the field.
unsigned Tmp = MRI.createVirtualRegister(RC);
BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
if (BitSize <= 32)
// XILF with the upper BitSize bits set.
BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
.addReg(Tmp).addImm(-1U << (32 - BitSize));
else {
// Use LCGR and add -1 to the result, which is more compact than
// an XILF, XILH pair.
unsigned Tmp2 = MRI.createVirtualRegister(RC);
BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
.addReg(Tmp2).addImm(-1);
}
} else if (BinOpcode)
// A simple binary operation.
BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
.addReg(RotatedOldVal)
.add(Src2);
else if (IsSubWord)
// Use RISBG to rotate Src2 into position and use it to replace the
// field in RotatedOldVal.
BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
.addReg(RotatedOldVal).addReg(Src2.getReg())
.addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
if (IsSubWord)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
.addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
.addReg(OldVal)
.addReg(NewVal)
.add(Base)
.addImm(Disp);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
MI.eraseFromParent();
return DoneMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value. KeepOldMask is the BRC condition-code mask
// for when the current field should be kept. BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
unsigned KeepOldMask, unsigned BitSize) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
// Extract the operands. Base can be a register or a frame index.
unsigned Dest = MI.getOperand(0).getReg();
MachineOperand Base = earlyUseOperand(MI.getOperand(1));
int64_t Disp = MI.getOperand(2).getImm();
unsigned Src2 = MI.getOperand(3).getReg();
unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
DebugLoc DL = MI.getDebugLoc();
if (IsSubWord)
BitSize = MI.getOperand(6).getImm();
// Subword operations use 32-bit registers.
const TargetRegisterClass *RC = (BitSize <= 32 ?
&SystemZ::GR32BitRegClass :
&SystemZ::GR64BitRegClass);
unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
// Get the right opcodes for the displacement.
LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
assert(LOpcode && CSOpcode && "Displacement out of range");
// Create virtual registers for temporary results.
unsigned OrigVal = MRI.createVirtualRegister(RC);
unsigned OldVal = MRI.createVirtualRegister(RC);
unsigned NewVal = MRI.createVirtualRegister(RC);
unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
// Insert 3 basic blocks for the loop.
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
// StartMBB:
// ...
// %OrigVal = L Disp(%Base)
// # fall through to LoopMBB
MBB = StartMBB;
BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
MBB->addSuccessor(LoopMBB);
// LoopMBB:
// %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
// %RotatedOldVal = RLL %OldVal, 0(%BitShift)
// CompareOpcode %RotatedOldVal, %Src2
// BRC KeepOldMask, UpdateMBB
MBB = LoopMBB;
BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
.addReg(OrigVal).addMBB(StartMBB)
.addReg(Dest).addMBB(UpdateMBB);
if (IsSubWord)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
.addReg(OldVal).addReg(BitShift).addImm(0);
BuildMI(MBB, DL, TII->get(CompareOpcode))
.addReg(RotatedOldVal).addReg(Src2);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
MBB->addSuccessor(UpdateMBB);
MBB->addSuccessor(UseAltMBB);
// UseAltMBB:
// %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
// # fall through to UpdateMBB
MBB = UseAltMBB;
if (IsSubWord)
BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
.addReg(RotatedOldVal).addReg(Src2)
.addImm(32).addImm(31 + BitSize).addImm(0);
MBB->addSuccessor(UpdateMBB);
// UpdateMBB:
// %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
// [ %RotatedAltVal, UseAltMBB ]
// %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
// %Dest = CS %OldVal, %NewVal, Disp(%Base)
// JNE LoopMBB
// # fall through to DoneMBB
MBB = UpdateMBB;
BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
.addReg(RotatedOldVal).addMBB(LoopMBB)
.addReg(RotatedAltVal).addMBB(UseAltMBB);
if (IsSubWord)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
.addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
.addReg(OldVal)
.addReg(NewVal)
.add(Base)
.addImm(Disp);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
MI.eraseFromParent();
return DoneMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
MachineBasicBlock *MBB) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
// Extract the operands. Base can be a register or a frame index.
unsigned Dest = MI.getOperand(0).getReg();
MachineOperand Base = earlyUseOperand(MI.getOperand(1));
int64_t Disp = MI.getOperand(2).getImm();
unsigned OrigCmpVal = MI.getOperand(3).getReg();
unsigned OrigSwapVal = MI.getOperand(4).getReg();
unsigned BitShift = MI.getOperand(5).getReg();
unsigned NegBitShift = MI.getOperand(6).getReg();
int64_t BitSize = MI.getOperand(7).getImm();
DebugLoc DL = MI.getDebugLoc();
const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
// Get the right opcodes for the displacement.
unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
assert(LOpcode && CSOpcode && "Displacement out of range");
// Create virtual registers for temporary results.
unsigned OrigOldVal = MRI.createVirtualRegister(RC);
unsigned OldVal = MRI.createVirtualRegister(RC);
unsigned CmpVal = MRI.createVirtualRegister(RC);
unsigned SwapVal = MRI.createVirtualRegister(RC);
unsigned StoreVal = MRI.createVirtualRegister(RC);
unsigned RetryOldVal = MRI.createVirtualRegister(RC);
unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
// Insert 2 basic blocks for the loop.
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
// StartMBB:
// ...
// %OrigOldVal = L Disp(%Base)
// # fall through to LoopMBB
MBB = StartMBB;
BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
.add(Base)
.addImm(Disp)
.addReg(0);
MBB->addSuccessor(LoopMBB);
// LoopMBB:
// %OldVal = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
// %CmpVal = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
// %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
// %Dest = RLL %OldVal, BitSize(%BitShift)
// ^^ The low BitSize bits contain the field
// of interest.
// %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
// ^^ Replace the upper 32-BitSize bits of the
// comparison value with those that we loaded,
// so that we can use a full word comparison.
// CR %Dest, %RetryCmpVal
// JNE DoneMBB
// # Fall through to SetMBB
MBB = LoopMBB;
BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
.addReg(OrigOldVal).addMBB(StartMBB)
.addReg(RetryOldVal).addMBB(SetMBB);
BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
.addReg(OrigCmpVal).addMBB(StartMBB)
.addReg(RetryCmpVal).addMBB(SetMBB);
BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
.addReg(OrigSwapVal).addMBB(StartMBB)
.addReg(RetrySwapVal).addMBB(SetMBB);
BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
.addReg(OldVal).addReg(BitShift).addImm(BitSize);
BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
.addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
BuildMI(MBB, DL, TII->get(SystemZ::CR))
.addReg(Dest).addReg(RetryCmpVal);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_ICMP)
.addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
MBB->addSuccessor(DoneMBB);
MBB->addSuccessor(SetMBB);
// SetMBB:
// %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
// ^^ Replace the upper 32-BitSize bits of the new
// value with those that we loaded.
// %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
// ^^ Rotate the new field to its proper position.
// %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
// JNE LoopMBB
// # fall through to DoneMBB
MBB = SetMBB;
BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
.addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
.addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
.addReg(OldVal)
.addReg(StoreVal)
.add(Base)
.addImm(Disp);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
// If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
// to the block after the loop. At this point, CC may have been defined
// either by the CR in LoopMBB or by the CS in SetMBB.
if (!MI.registerDefIsDead(SystemZ::CC))
DoneMBB->addLiveIn(SystemZ::CC);
MI.eraseFromParent();
return DoneMBB;
}
// Emit a move from two GR64s to a GR128.
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
MachineBasicBlock *MBB) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI.getDebugLoc();
unsigned Dest = MI.getOperand(0).getReg();
unsigned Hi = MI.getOperand(1).getReg();
unsigned Lo = MI.getOperand(2).getReg();
unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
.addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
.addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);
MI.eraseFromParent();
return MBB;
}
// Emit an extension from a GR64 to a GR128. ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
MachineBasicBlock *MBB,
bool ClearEven) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI.getDebugLoc();
unsigned Dest = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
if (ClearEven) {
unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
.addImm(0);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
.addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
In128 = NewIn128;
}
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
.addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);
MI.eraseFromParent();
return MBB;
}
MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI.getDebugLoc();
MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
uint64_t DestDisp = MI.getOperand(1).getImm();
MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
uint64_t SrcDisp = MI.getOperand(3).getImm();
uint64_t Length = MI.getOperand(4).getImm();
// When generating more than one CLC, all but the last will need to
// branch to the end when a difference is found.
MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
splitBlockAfter(MI, MBB) : nullptr);
// Check for the loop form, in which operand 5 is the trip count.
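// In the loop form the trip count covers the full 256-byte blocks, and the
// low 8 bits of Length give the leftover bytes that are handled by the
// straight-line code after the loop.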
if (MI.getNumExplicitOperands() > 5) {
bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
uint64_t StartCountReg = MI.getOperand(5).getReg();
uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
forceReg(MI, DestBase, TII));
const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
MRI.createVirtualRegister(RC));
uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
MRI.createVirtualRegister(RC));
RC = &SystemZ::GR64BitRegClass;
uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
uint64_t NextCountReg = MRI.createVirtualRegister(RC);
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
// StartMBB:
// # fall through to LoopMBB
MBB->addSuccessor(LoopMBB);
// LoopMBB:
// %ThisDestReg = phi [ %StartDestReg, StartMBB ],
// [ %NextDestReg, NextMBB ]
// %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
// [ %NextSrcReg, NextMBB ]
// %ThisCountReg = phi [ %StartCountReg, StartMBB ],
// [ %NextCountReg, NextMBB ]
// ( PFD 2, 768+DestDisp(%ThisDestReg) )
// Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
// ( JLH EndMBB )
//
// The prefetch is used only for MVC. The JLH is used only for CLC.
MBB = LoopMBB;
BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
.addReg(StartDestReg).addMBB(StartMBB)
.addReg(NextDestReg).addMBB(NextMBB);
if (!HaveSingleBase)
BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
.addReg(StartSrcReg).addMBB(StartMBB)
.addReg(NextSrcReg).addMBB(NextMBB);
BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
.addReg(StartCountReg).addMBB(StartMBB)
.addReg(NextCountReg).addMBB(NextMBB);
if (Opcode == SystemZ::MVC)
BuildMI(MBB, DL, TII->get(SystemZ::PFD))
.addImm(SystemZ::PFD_WRITE)
.addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
BuildMI(MBB, DL, TII->get(Opcode))
.addReg(ThisDestReg).addImm(DestDisp).addImm(256)
.addReg(ThisSrcReg).addImm(SrcDisp);
if (EndMBB) {
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
.addMBB(EndMBB);
MBB->addSuccessor(EndMBB);
MBB->addSuccessor(NextMBB);
}
// NextMBB:
// %NextDestReg = LA 256(%ThisDestReg)
// %NextSrcReg = LA 256(%ThisSrcReg)
// %NextCountReg = AGHI %ThisCountReg, -1
// CGHI %NextCountReg, 0
// JLH LoopMBB
// # fall through to DoneMBB
//
// The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
MBB = NextMBB;
BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
.addReg(ThisDestReg).addImm(256).addReg(0);
if (!HaveSingleBase)
BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
.addReg(ThisSrcReg).addImm(256).addReg(0);
BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
.addReg(ThisCountReg).addImm(-1);
BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
.addReg(NextCountReg).addImm(0);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
.addMBB(LoopMBB);
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
DestBase = MachineOperand::CreateReg(NextDestReg, false);
SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
Length &= 255;
if (EndMBB && !Length)
// If the loop handled the whole CLC range, DoneMBB will be empty with
// CC live-through into EndMBB, so add it as live-in.
DoneMBB->addLiveIn(SystemZ::CC);
MBB = DoneMBB;
}
// Handle any remaining bytes with straight-line code.
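// Each straight-line MVC/NC/OC/XC/CLC covers at most 256 bytes, so one
// instruction is emitted per remaining 256-byte chunk plus a final partial
// one.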
while (Length > 0) {
uint64_t ThisLength = std::min(Length, uint64_t(256));
// The previous iteration might have created out-of-range displacements.
// Apply them using LAY if so.
if (!isUInt<12>(DestDisp)) {
unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
.add(DestBase)
.addImm(DestDisp)
.addReg(0);
DestBase = MachineOperand::CreateReg(Reg, false);
DestDisp = 0;
}
if (!isUInt<12>(SrcDisp)) {
unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
.add(SrcBase)
.addImm(SrcDisp)
.addReg(0);
SrcBase = MachineOperand::CreateReg(Reg, false);
SrcDisp = 0;
}
BuildMI(*MBB, MI, DL, TII->get(Opcode))
.add(DestBase)
.addImm(DestDisp)
.addImm(ThisLength)
.add(SrcBase)
.addImm(SrcDisp)
.setMemRefs(MI.memoperands());
DestDisp += ThisLength;
SrcDisp += ThisLength;
Length -= ThisLength;
// If there's another CLC to go, branch to the end if a difference
// was found.
if (EndMBB && Length > 0) {
MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
.addMBB(EndMBB);
MBB->addSuccessor(EndMBB);
MBB->addSuccessor(NextMBB);
MBB = NextMBB;
}
}
if (EndMBB) {
MBB->addSuccessor(EndMBB);
MBB = EndMBB;
MBB->addLiveIn(SystemZ::CC);
}
MI.eraseFromParent();
return MBB;
}
// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI.getDebugLoc();
uint64_t End1Reg = MI.getOperand(0).getReg();
uint64_t Start1Reg = MI.getOperand(1).getReg();
uint64_t Start2Reg = MI.getOperand(2).getReg();
uint64_t CharReg = MI.getOperand(3).getReg();
const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
uint64_t This1Reg = MRI.createVirtualRegister(RC);
uint64_t This2Reg = MRI.createVirtualRegister(RC);
uint64_t End2Reg = MRI.createVirtualRegister(RC);
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
// StartMBB:
// # fall through to LoopMBB
MBB->addSuccessor(LoopMBB);
// LoopMBB:
// %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
// %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
// R0L = %CharReg
// %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
// JO LoopMBB
// # fall through to DoneMBB
//
// The load of R0L can be hoisted by post-RA LICM.
MBB = LoopMBB;
BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
.addReg(Start1Reg).addMBB(StartMBB)
.addReg(End1Reg).addMBB(LoopMBB);
BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
.addReg(Start2Reg).addMBB(StartMBB)
.addReg(End2Reg).addMBB(LoopMBB);
BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
BuildMI(MBB, DL, TII->get(Opcode))
.addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
.addReg(This1Reg).addReg(This2Reg);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
.addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
DoneMBB->addLiveIn(SystemZ::CC);
MI.eraseFromParent();
return DoneMBB;
}
// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
bool NoFloat) const {
MachineFunction &MF = *MBB->getParent();
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
// Update opcode.
MI.setDesc(TII->get(Opcode));
// We cannot handle a TBEGIN that clobbers the stack or frame pointer.
// Make sure to add the corresponding GRSM bits if they are missing.
uint64_t Control = MI.getOperand(2).getImm();
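// Each even/odd pair of GPRs shares a single bit in TBEGIN's general register
// save mask, which is why adjacent table entries are identical.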
static const unsigned GPRControlBit[16] = {
0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
};
Control |= GPRControlBit[15];
if (TFI->hasFP(MF))
Control |= GPRControlBit[11];
MI.getOperand(2).setImm(Control);
// Add GPR clobbers.
for (int I = 0; I < 16; I++) {
if ((Control & GPRControlBit[I]) == 0) {
unsigned Reg = SystemZMC::GR64Regs[I];
MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
}
}
// Add FPR/VR clobbers.
if (!NoFloat && (Control & 4) != 0) {
if (Subtarget.hasVector()) {
for (int I = 0; I < 32; I++) {
unsigned Reg = SystemZMC::VR128Regs[I];
MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
}
} else {
for (int I = 0; I < 16; I++) {
unsigned Reg = SystemZMC::FP64Regs[I];
MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
}
}
}
return MBB;
}
MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo *MRI = &MF.getRegInfo();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
DebugLoc DL = MI.getDebugLoc();
unsigned SrcReg = MI.getOperand(0).getReg();
// Create new virtual register of the same class as source.
const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
unsigned DstReg = MRI->createVirtualRegister(RC);
// Replace pseudo with a normal load-and-test that models the def as
// well.
BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
.addReg(SrcReg);
MI.eraseFromParent();
return MBB;
}
MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *MBB) const {
switch (MI.getOpcode()) {
case SystemZ::Select32:
case SystemZ::Select64:
case SystemZ::SelectF32:
case SystemZ::SelectF64:
case SystemZ::SelectF128:
case SystemZ::SelectVR32:
case SystemZ::SelectVR64:
case SystemZ::SelectVR128:
return emitSelect(MI, MBB);
case SystemZ::CondStore8Mux:
return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
case SystemZ::CondStore8MuxInv:
return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
case SystemZ::CondStore16Mux:
return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
case SystemZ::CondStore16MuxInv:
return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
case SystemZ::CondStore32Mux:
return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
case SystemZ::CondStore32MuxInv:
return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
case SystemZ::CondStore8:
return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
case SystemZ::CondStore8Inv:
return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
case SystemZ::CondStore16:
return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
case SystemZ::CondStore16Inv:
return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
case SystemZ::CondStore32:
return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
case SystemZ::CondStore32Inv:
return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
case SystemZ::CondStore64:
return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
case SystemZ::CondStore64Inv:
return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
case SystemZ::CondStoreF32:
return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
case SystemZ::CondStoreF32Inv:
return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
case SystemZ::CondStoreF64:
return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
case SystemZ::CondStoreF64Inv:
return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
case SystemZ::PAIR128:
return emitPair128(MI, MBB);
case SystemZ::AEXT128:
return emitExt128(MI, MBB, false);
case SystemZ::ZEXT128:
return emitExt128(MI, MBB, true);
case SystemZ::ATOMIC_SWAPW:
return emitAtomicLoadBinary(MI, MBB, 0, 0);
case SystemZ::ATOMIC_SWAP_32:
return emitAtomicLoadBinary(MI, MBB, 0, 32);
case SystemZ::ATOMIC_SWAP_64:
return emitAtomicLoadBinary(MI, MBB, 0, 64);
case SystemZ::ATOMIC_LOADW_AR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
case SystemZ::ATOMIC_LOADW_AFI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
case SystemZ::ATOMIC_LOAD_AR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
case SystemZ::ATOMIC_LOAD_AHI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
case SystemZ::ATOMIC_LOAD_AFI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
case SystemZ::ATOMIC_LOAD_AGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
case SystemZ::ATOMIC_LOAD_AGHI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
case SystemZ::ATOMIC_LOAD_AGFI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
case SystemZ::ATOMIC_LOADW_SR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
case SystemZ::ATOMIC_LOAD_SR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
case SystemZ::ATOMIC_LOAD_SGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
case SystemZ::ATOMIC_LOADW_NR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
case SystemZ::ATOMIC_LOADW_NILH:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
case SystemZ::ATOMIC_LOAD_NR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
case SystemZ::ATOMIC_LOAD_NILL:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
case SystemZ::ATOMIC_LOAD_NILH:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
case SystemZ::ATOMIC_LOAD_NILF:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
case SystemZ::ATOMIC_LOAD_NGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
case SystemZ::ATOMIC_LOAD_NILL64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
case SystemZ::ATOMIC_LOAD_NILH64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
case SystemZ::ATOMIC_LOAD_NIHL64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
case SystemZ::ATOMIC_LOAD_NIHH64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
case SystemZ::ATOMIC_LOAD_NILF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
case SystemZ::ATOMIC_LOAD_NIHF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
case SystemZ::ATOMIC_LOADW_OR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
case SystemZ::ATOMIC_LOADW_OILH:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
case SystemZ::ATOMIC_LOAD_OR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
case SystemZ::ATOMIC_LOAD_OILL:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
case SystemZ::ATOMIC_LOAD_OILH:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
case SystemZ::ATOMIC_LOAD_OILF:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
case SystemZ::ATOMIC_LOAD_OGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
case SystemZ::ATOMIC_LOAD_OILL64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
case SystemZ::ATOMIC_LOAD_OILH64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
case SystemZ::ATOMIC_LOAD_OIHL64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
case SystemZ::ATOMIC_LOAD_OIHH64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
case SystemZ::ATOMIC_LOAD_OILF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
case SystemZ::ATOMIC_LOAD_OIHF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
case SystemZ::ATOMIC_LOADW_XR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
case SystemZ::ATOMIC_LOADW_XILF:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
case SystemZ::ATOMIC_LOAD_XR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
case SystemZ::ATOMIC_LOAD_XILF:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
case SystemZ::ATOMIC_LOAD_XGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
case SystemZ::ATOMIC_LOAD_XILF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
case SystemZ::ATOMIC_LOAD_XIHF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
case SystemZ::ATOMIC_LOADW_NRi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
case SystemZ::ATOMIC_LOADW_NILHi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
case SystemZ::ATOMIC_LOAD_NRi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
case SystemZ::ATOMIC_LOAD_NILLi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
case SystemZ::ATOMIC_LOAD_NILHi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
case SystemZ::ATOMIC_LOAD_NILFi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
case SystemZ::ATOMIC_LOAD_NGRi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
case SystemZ::ATOMIC_LOAD_NILL64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
case SystemZ::ATOMIC_LOAD_NILH64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
case SystemZ::ATOMIC_LOAD_NIHL64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
case SystemZ::ATOMIC_LOAD_NIHH64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
case SystemZ::ATOMIC_LOAD_NILF64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
case SystemZ::ATOMIC_LOAD_NIHF64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
case SystemZ::ATOMIC_LOADW_MIN:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
SystemZ::CCMASK_CMP_LE, 0);
case SystemZ::ATOMIC_LOAD_MIN_32:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
SystemZ::CCMASK_CMP_LE, 32);
case SystemZ::ATOMIC_LOAD_MIN_64:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
SystemZ::CCMASK_CMP_LE, 64);
case SystemZ::ATOMIC_LOADW_MAX:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
SystemZ::CCMASK_CMP_GE, 0);
case SystemZ::ATOMIC_LOAD_MAX_32:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
SystemZ::CCMASK_CMP_GE, 32);
case SystemZ::ATOMIC_LOAD_MAX_64:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
SystemZ::CCMASK_CMP_GE, 64);
case SystemZ::ATOMIC_LOADW_UMIN:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
SystemZ::CCMASK_CMP_LE, 0);
case SystemZ::ATOMIC_LOAD_UMIN_32:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
SystemZ::CCMASK_CMP_LE, 32);
case SystemZ::ATOMIC_LOAD_UMIN_64:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
SystemZ::CCMASK_CMP_LE, 64);
case SystemZ::ATOMIC_LOADW_UMAX:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
SystemZ::CCMASK_CMP_GE, 0);
case SystemZ::ATOMIC_LOAD_UMAX_32:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
SystemZ::CCMASK_CMP_GE, 32);
case SystemZ::ATOMIC_LOAD_UMAX_64:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
SystemZ::CCMASK_CMP_GE, 64);
case SystemZ::ATOMIC_CMP_SWAPW:
return emitAtomicCmpSwapW(MI, MBB);
case SystemZ::MVCSequence:
case SystemZ::MVCLoop:
return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
case SystemZ::NCSequence:
case SystemZ::NCLoop:
return emitMemMemWrapper(MI, MBB, SystemZ::NC);
case SystemZ::OCSequence:
case SystemZ::OCLoop:
return emitMemMemWrapper(MI, MBB, SystemZ::OC);
case SystemZ::XCSequence:
case SystemZ::XCLoop:
return emitMemMemWrapper(MI, MBB, SystemZ::XC);
case SystemZ::CLCSequence:
case SystemZ::CLCLoop:
return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
case SystemZ::CLSTLoop:
return emitStringWrapper(MI, MBB, SystemZ::CLST);
case SystemZ::MVSTLoop:
return emitStringWrapper(MI, MBB, SystemZ::MVST);
case SystemZ::SRSTLoop:
return emitStringWrapper(MI, MBB, SystemZ::SRST);
case SystemZ::TBEGIN:
return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
case SystemZ::TBEGIN_nofloat:
return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
case SystemZ::TBEGINC:
return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
case SystemZ::LTEBRCompare_VecPseudo:
return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
case SystemZ::LTDBRCompare_VecPseudo:
return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
case SystemZ::LTXBRCompare_VecPseudo:
return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, MBB);
default:
llvm_unreachable("Unexpected instr type to insert");
}
}
// This is only used by the isel schedulers, and is needed only to prevent
// the compiler from crashing when list-ilp is used.
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
if (VT == MVT::Untyped)
return &SystemZ::ADDR128BitRegClass;
return TargetLowering::getRepRegClassFor(VT);
}
Index: vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZInstrInfo.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZInstrInfo.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZInstrInfo.cpp (revision 344166)
@@ -1,1892 +1,1818 @@
//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "SystemZInstrInfo.h"
#include "MCTargetDesc/SystemZMCTargetDesc.h"
#include "SystemZ.h"
#include "SystemZInstrBuilder.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"
#define DEBUG_TYPE "systemz-II"
STATISTIC(LOCRMuxJumps, "Number of LOCRMux jump-sequences (lower is better)");
// Return a mask with Count low bits set.
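// The shift is split into two steps so that Count == 64 yields all ones
// instead of relying on an undefined full-width shift.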
static uint64_t allOnes(unsigned int Count) {
return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}
// Reg should be a 32-bit GPR. Return true if it is a high register rather
// than a low register.
static bool isHighReg(unsigned int Reg) {
if (SystemZ::GRH32BitRegClass.contains(Reg))
return true;
assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
return false;
}
// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}
SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
: SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
RI(), STI(sti) {
}
// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
unsigned NewOpcode) const {
MachineBasicBlock *MBB = MI->getParent();
MachineFunction &MF = *MBB->getParent();
// Get two load or store instructions. Use the original instruction for one
// of them (arbitrarily the second here) and create a clone for the other.
MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
MBB->insert(MI, EarlierMI);
// Set up the two 64-bit registers and remember super reg and its flags.
MachineOperand &HighRegOp = EarlierMI->getOperand(0);
MachineOperand &LowRegOp = MI->getOperand(0);
unsigned Reg128 = LowRegOp.getReg();
unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
if (MI->mayStore()) {
// Add implicit uses of the super register in case one of the subregs is
// undefined. We could track liveness and skip storing an undefined
// subreg, but this is hopefully rare (discovered with llvm-stress).
// If Reg128 was killed, set kill flag on MI.
unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
}
// The address in the first (high) instruction is already correct.
// Adjust the offset in the second (low) instruction.
MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
MachineOperand &LowOffsetOp = MI->getOperand(2);
LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
// Clear the kill flags on the registers in the first instruction.
if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
EarlierMI->getOperand(0).setIsKill(false);
EarlierMI->getOperand(1).setIsKill(false);
EarlierMI->getOperand(3).setIsKill(false);
// Set the opcodes.
unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
assert(HighOpcode && LowOpcode && "Both offsets should be in range");
EarlierMI->setDesc(get(HighOpcode));
MI->setDesc(get(LowOpcode));
}
// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
MachineBasicBlock *MBB = MI->getParent();
MachineFunction &MF = *MBB->getParent();
MachineFrameInfo &MFFrame = MF.getFrameInfo();
MachineOperand &OffsetMO = MI->getOperand(2);
uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
SystemZMC::CallFrameSize +
OffsetMO.getImm());
unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
assert(NewOpcode && "No support for huge argument lists yet");
MI->setDesc(get(NewOpcode));
OffsetMO.setImm(Offset);
}
// MI is an RI-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand. In those cases,
// MI has the same kind of operand as LowOpcode, so it needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode,
bool ConvertHigh) const {
unsigned Reg = MI.getOperand(0).getReg();
bool IsHigh = isHighReg(Reg);
MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
if (IsHigh && ConvertHigh)
MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}
// MI is a three-operand RIE-style pseudo instruction. Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned LowOpcodeK,
unsigned HighOpcode) const {
unsigned DestReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
bool DestIsHigh = isHighReg(DestReg);
bool SrcIsHigh = isHighReg(SrcReg);
if (!DestIsHigh && !SrcIsHigh)
MI.setDesc(get(LowOpcodeK));
else {
emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
SystemZ::LR, 32, MI.getOperand(1).isKill(),
MI.getOperand(1).isUndef());
MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
MI.getOperand(1).setReg(DestReg);
MI.tieOperands(0, 1);
}
}
// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const {
unsigned Reg = MI.getOperand(0).getReg();
unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
MI.getOperand(2).getImm());
MI.setDesc(get(Opcode));
}
// MI is a load-on-condition pseudo instruction with a single register
// (source or destination) operand. Replace it with LowOpcode if the
// register is a low GR32 and HighOpcode if the register is a high GR32.
void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const {
unsigned Reg = MI.getOperand(0).getReg();
unsigned Opcode = isHighReg(Reg) ? HighOpcode : LowOpcode;
MI.setDesc(get(Opcode));
}
// MI is a load-register-on-condition pseudo instruction. Replace it with
// LowOpcode if source and destination are both low GR32s and HighOpcode if
// source and destination are both high GR32s.
void SystemZInstrInfo::expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const {
unsigned DestReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(2).getReg();
bool DestIsHigh = isHighReg(DestReg);
bool SrcIsHigh = isHighReg(SrcReg);
if (!DestIsHigh && !SrcIsHigh)
MI.setDesc(get(LowOpcode));
else if (DestIsHigh && SrcIsHigh)
MI.setDesc(get(HighOpcode));
else
LOCRMuxJumps++;
// If we were unable to implement the pseudo with a single instruction, we
// need to convert it back into a branch sequence. This cannot be done here
// since the caller of expandPostRAPseudo does not handle changes to the CFG
// correctly. This change is deferred to the SystemZExpandPseudo pass.
}
// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned Size) const {
MachineInstrBuilder MIB =
emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());
// Keep the remaining operands as-is.
for (unsigned I = 2; I < MI.getNumOperands(); ++I)
MIB.add(MI.getOperand(I));
MI.eraseFromParent();
}
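// Expand the LOAD_STACK_GUARD pseudo MI: read access registers %a0 and %a1
// to assemble the 64-bit thread pointer, then rewrite MI into an LG that
// loads the stack-protector canary from offset 40 off that pointer.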
void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
MachineBasicBlock *MBB = MI->getParent();
MachineFunction &MF = *MBB->getParent();
const unsigned Reg64 = MI->getOperand(0).getReg();
const unsigned Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);
// EAR can only load the low subregister, so use a shift for %a0 to produce
// the GR containing %a0 and %a1.
// ear <reg>, %a0
BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
.addReg(SystemZ::A0)
.addReg(Reg64, RegState::ImplicitDefine);
// sllg <reg>, <reg>, 32
BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
.addReg(Reg64)
.addReg(0)
.addImm(32);
// ear <reg>, %a1
BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
.addReg(SystemZ::A1);
// lg <reg>, 40(<reg>)
MI->setDesc(get(SystemZ::LG));
MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
}
// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G. Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
MachineInstrBuilder
SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, unsigned LowLowOpcode,
unsigned Size, bool KillSrc,
bool UndefSrc) const {
unsigned Opcode;
bool DestIsHigh = isHighReg(DestReg);
bool SrcIsHigh = isHighReg(SrcReg);
if (DestIsHigh && SrcIsHigh)
Opcode = SystemZ::RISBHH;
else if (DestIsHigh && !SrcIsHigh)
Opcode = SystemZ::RISBHL;
else if (!DestIsHigh && SrcIsHigh)
Opcode = SystemZ::RISBLH;
else {
return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
}
unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
.addReg(DestReg, RegState::Undef)
.addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
.addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}
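// Commute operands OpIdx1 and OpIdx2 of MI (on a cloned instruction when
// NewMI is set). For the LOC*-style selects this also inverts the condition
// mask (CCMask ^ CCValid) so that swapping the two source operands leaves
// the selected value unchanged.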
MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
if (NewMI)
return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
return MI;
};
switch (MI.getOpcode()) {
case SystemZ::LOCRMux:
case SystemZ::LOCFHR:
case SystemZ::LOCR:
case SystemZ::LOCGR: {
auto &WorkingMI = cloneIfNew(MI);
// Invert condition.
unsigned CCValid = WorkingMI.getOperand(3).getImm();
unsigned CCMask = WorkingMI.getOperand(4).getImm();
WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
default:
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
}
// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
unsigned Flag) {
const MCInstrDesc &MCID = MI.getDesc();
if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
return 0;
}
unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}
unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
int &DestFrameIndex,
int &SrcFrameIndex) const {
// Check for MVC 0(Length,FI1),0(FI2)
const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
MI.getOperand(4).getImm() != 0)
return false;
// Check that Length covers the full slots.
int64_t Length = MI.getOperand(2).getImm();
unsigned FI1 = MI.getOperand(0).getIndex();
unsigned FI2 = MI.getOperand(3).getIndex();
if (MFI.getObjectSize(FI1) != Length ||
MFI.getObjectSize(FI2) != Length)
return false;
DestFrameIndex = FI1;
SrcFrameIndex = FI2;
return true;
}
bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
// Most of the code and comments here are boilerplate.
// Start from the bottom of the block and work up, examining the
// terminator instructions.
MachineBasicBlock::iterator I = MBB.end();
while (I != MBB.begin()) {
--I;
if (I->isDebugInstr())
continue;
// Working from the bottom, when we see a non-terminator instruction, we're
// done.
if (!isUnpredicatedTerminator(*I))
break;
// A terminator that isn't a branch can't easily be handled by this
// analysis.
if (!I->isBranch())
return true;
// Can't handle indirect branches.
SystemZII::Branch Branch(getBranchInfo(*I));
if (!Branch.Target->isMBB())
return true;
// Punt on compound branches.
if (Branch.Type != SystemZII::BranchNormal)
return true;
if (Branch.CCMask == SystemZ::CCMASK_ANY) {
// Handle unconditional branches.
if (!AllowModify) {
TBB = Branch.Target->getMBB();
continue;
}
// If the block has any instructions after a JMP, delete them.
while (std::next(I) != MBB.end())
std::next(I)->eraseFromParent();
Cond.clear();
FBB = nullptr;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
continue;
}
// TBB is used to indicate the unconditional destination.
TBB = Branch.Target->getMBB();
continue;
}
// Working from the bottom, handle the first conditional branch.
if (Cond.empty()) {
// FIXME: add X86-style branch swap
FBB = TBB;
TBB = Branch.Target->getMBB();
Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
continue;
}
// Handle subsequent conditional branches.
assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
// Only handle the case where all conditional branches branch to the same
// destination.
if (TBB != Branch.Target->getMBB())
return true;
// If the conditions are the same, we can leave them alone.
unsigned OldCCValid = Cond[0].getImm();
unsigned OldCCMask = Cond[1].getImm();
if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
continue;
// FIXME: Try combining conditions like X86 does. Should be easy on Z!
return false;
}
return false;
}
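// Remove the branch instructions at the end of MBB, skipping over debug
// instructions, and return the number of branches removed.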
unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
assert(!BytesRemoved && "code size not handled");
// Most of the code and comments here are boilerplate.
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
while (I != MBB.begin()) {
--I;
if (I->isDebugInstr())
continue;
if (!I->isBranch())
break;
if (!getBranchInfo(*I).Target->isMBB())
break;
// Remove the branch.
I->eraseFromParent();
I = MBB.end();
++Count;
}
return Count;
}
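// Reverse the branch condition held in Cond as (CCValid, CCMask). XOR-ing
// the mask with the valid bits flips exactly the CC values that can occur,
// so e.g. "branch if equal" becomes "branch if not equal".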
bool SystemZInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 2 && "Invalid condition");
Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
return false;
}
unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
const DebugLoc &DL,
int *BytesAdded) const {
// In this function we output 32-bit branches, which should always
// have enough range. They can be shortened and relaxed by later code
// in the pipeline, if desired.
// Shouldn't be a fall through.
assert(TBB && "insertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
"SystemZ branch conditions have two components!");
assert(!BytesAdded && "code size not handled");
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
return 1;
}
// Conditional branch.
unsigned Count = 0;
unsigned CCValid = Cond[0].getImm();
unsigned CCMask = Cond[1].getImm();
BuildMI(&MBB, DL, get(SystemZ::BRC))
.addImm(CCValid).addImm(CCMask).addMBB(TBB);
++Count;
if (FBB) {
// Two-way Conditional branch. Insert the second branch.
BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
++Count;
}
return Count;
}
bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask,
int &Value) const {
assert(MI.isCompare() && "Caller should have checked for a comparison");
if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
MI.getOperand(1).isImm()) {
SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
Value = MI.getOperand(1).getImm();
Mask = ~0;
return true;
}
return false;
}
-// If Reg is a virtual register, return its definition, otherwise return null.
-static MachineInstr *getDef(unsigned Reg,
- const MachineRegisterInfo *MRI) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
- return nullptr;
- return MRI->getUniqueVRegDef(Reg);
-}
-
-// Return true if MI is a shift of type Opcode by Imm bits.
-static bool isShift(MachineInstr *MI, unsigned Opcode, int64_t Imm) {
- return (MI->getOpcode() == Opcode &&
- !MI->getOperand(2).getReg() &&
- MI->getOperand(3).getImm() == Imm);
-}
-
-// If the destination of MI has no uses, delete it as dead.
-static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
- if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
- MI->eraseFromParent();
-}
-
-// Compare compares SrcReg against zero. Check whether SrcReg contains
-// the result of an IPM sequence whose input CC survives until Compare,
-// and whether Compare is therefore redundant. Delete it and return
-// true if so.
-static bool removeIPMBasedCompare(MachineInstr &Compare, unsigned SrcReg,
- const MachineRegisterInfo *MRI,
- const TargetRegisterInfo *TRI) {
- MachineInstr *LGFR = nullptr;
- MachineInstr *RLL = getDef(SrcReg, MRI);
- if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
- LGFR = RLL;
- RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
- }
- if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
- return false;
-
- MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
- if (!SRL || !isShift(SRL, SystemZ::SRL, SystemZ::IPM_CC))
- return false;
-
- MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
- if (!IPM || IPM->getOpcode() != SystemZ::IPM)
- return false;
-
- // Check that there are no assignments to CC between the IPM and Compare,
- if (IPM->getParent() != Compare.getParent())
- return false;
- MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare.getIterator();
- for (++MBBI; MBBI != MBBE; ++MBBI) {
- MachineInstr &MI = *MBBI;
- if (MI.modifiesRegister(SystemZ::CC, TRI))
- return false;
- }
-
- Compare.eraseFromParent();
- if (LGFR)
- eraseIfDead(LGFR, MRI);
- eraseIfDead(RLL, MRI);
- eraseIfDead(SRL, MRI);
- eraseIfDead(IPM, MRI);
-
- return true;
-}
-
-bool SystemZInstrInfo::optimizeCompareInstr(
- MachineInstr &Compare, unsigned SrcReg, unsigned SrcReg2, int Mask,
- int Value, const MachineRegisterInfo *MRI) const {
- assert(!SrcReg2 && "Only optimizing constant comparisons so far");
- bool IsLogical = (Compare.getDesc().TSFlags & SystemZII::IsLogical) != 0;
- return Value == 0 && !IsLogical &&
- removeIPMBasedCompare(Compare, SrcReg, MRI, &RI);
-}
-
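// Report whether a select can be emitted for the given condition and
// registers. This requires load/store-on-condition support and a 32- or
// 64-bit GPR register class common to both operands; when a select is
// possible, all three cycle-count outputs are set to 2.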
bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Pred,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles, int &TrueCycles,
int &FalseCycles) const {
// Not all subtargets have LOCR instructions.
if (!STI.hasLoadStoreOnCond())
return false;
if (Pred.size() != 2)
return false;
// Check register classes.
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *RC =
RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
if (!RC)
return false;
// We have LOCR instructions for 32 and 64 bit general purpose registers.
if ((STI.hasLoadStoreOnCond2() &&
SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
CondCycles = 2;
TrueCycles = 2;
FalseCycles = 2;
return true;
}
// Can't do anything else.
return false;
}
void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Pred,
unsigned TrueReg,
unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(Pred.size() == 2 && "Invalid condition");
unsigned CCValid = Pred[0].getImm();
unsigned CCMask = Pred[1].getImm();
unsigned Opc;
if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
if (STI.hasLoadStoreOnCond2())
Opc = SystemZ::LOCRMux;
else {
Opc = SystemZ::LOCR;
MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
unsigned TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
unsigned FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
TrueReg = TReg;
FalseReg = FReg;
}
} else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC))
Opc = SystemZ::LOCGR;
else
llvm_unreachable("Invalid register class");
BuildMI(MBB, I, DL, get(Opc), DstReg)
.addReg(FalseReg).addReg(TrueReg)
.addImm(CCValid).addImm(CCMask);
}
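// Try to fold the immediate loaded by DefMI (LHI, LHIMux or LGHI) into its
// use in UseMI. Only the conditional moves LOCRMux and LOCGR are handled,
// and they are rewritten to their load-immediate-on-condition forms
// (LOCHIMux / LOCGHI), commuting the operands first if necessary.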
bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
unsigned Reg,
MachineRegisterInfo *MRI) const {
unsigned DefOpc = DefMI.getOpcode();
if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
DefOpc != SystemZ::LGHI)
return false;
if (DefMI.getOperand(0).getReg() != Reg)
return false;
int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();
unsigned UseOpc = UseMI.getOpcode();
unsigned NewUseOpc;
unsigned UseIdx;
int CommuteIdx = -1;
switch (UseOpc) {
case SystemZ::LOCRMux:
if (!STI.hasLoadStoreOnCond2())
return false;
NewUseOpc = SystemZ::LOCHIMux;
if (UseMI.getOperand(2).getReg() == Reg)
UseIdx = 2;
else if (UseMI.getOperand(1).getReg() == Reg)
UseIdx = 2, CommuteIdx = 1;
else
return false;
break;
case SystemZ::LOCGR:
if (!STI.hasLoadStoreOnCond2())
return false;
NewUseOpc = SystemZ::LOCGHI;
if (UseMI.getOperand(2).getReg() == Reg)
UseIdx = 2;
else if (UseMI.getOperand(1).getReg() == Reg)
UseIdx = 2, CommuteIdx = 1;
else
return false;
break;
default:
return false;
}
if (CommuteIdx != -1)
if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
return false;
bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
UseMI.setDesc(get(NewUseOpc));
UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
if (DeleteDef)
DefMI.eraseFromParent();
return true;
}
bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
unsigned Opcode = MI.getOpcode();
if (Opcode == SystemZ::Return ||
Opcode == SystemZ::Trap ||
Opcode == SystemZ::CallJG ||
Opcode == SystemZ::CallBR)
return true;
return false;
}
bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
unsigned NumCycles, unsigned ExtraPredCycles,
BranchProbability Probability) const {
// Avoid using conditional returns at the end of a loop (since then
// we'd need to emit an unconditional branch to the beginning anyway,
// making the loop body longer). This doesn't apply for low-probability
// loops (e.g. compare-and-swap retry), so just decide based on branch
// probability instead of looping structure.
// However, since Compare and Trap instructions cost the same as a regular
// Compare instruction, we should allow the if conversion to convert this
// into a Conditional Compare regardless of the branch probability.
if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
MBB.succ_empty() && Probability < BranchProbability(1, 8))
return false;
// For now only convert single instructions.
return NumCycles == 1;
}
bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
unsigned NumCyclesT, unsigned ExtraPredCyclesT,
MachineBasicBlock &FMBB,
unsigned NumCyclesF, unsigned ExtraPredCyclesF,
BranchProbability Probability) const {
// For now avoid converting mutually-exclusive cases.
return false;
}
bool SystemZInstrInfo::
isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
BranchProbability Probability) const {
// For now only duplicate single instructions.
return NumCycles == 1;
}
bool SystemZInstrInfo::PredicateInstruction(
MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
assert(Pred.size() == 2 && "Invalid condition");
unsigned CCValid = Pred[0].getImm();
unsigned CCMask = Pred[1].getImm();
assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
unsigned Opcode = MI.getOpcode();
if (Opcode == SystemZ::Trap) {
MI.setDesc(get(SystemZ::CondTrap));
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
.addImm(CCValid).addImm(CCMask)
.addReg(SystemZ::CC, RegState::Implicit);
return true;
}
if (Opcode == SystemZ::Return) {
MI.setDesc(get(SystemZ::CondReturn));
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
.addImm(CCValid).addImm(CCMask)
.addReg(SystemZ::CC, RegState::Implicit);
return true;
}
if (Opcode == SystemZ::CallJG) {
MachineOperand FirstOp = MI.getOperand(0);
const uint32_t *RegMask = MI.getOperand(1).getRegMask();
MI.RemoveOperand(1);
MI.RemoveOperand(0);
MI.setDesc(get(SystemZ::CallBRCL));
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
.addImm(CCValid)
.addImm(CCMask)
.add(FirstOp)
.addRegMask(RegMask)
.addReg(SystemZ::CC, RegState::Implicit);
return true;
}
if (Opcode == SystemZ::CallBR) {
const uint32_t *RegMask = MI.getOperand(0).getRegMask();
MI.RemoveOperand(0);
MI.setDesc(get(SystemZ::CallBCR));
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
.addImm(CCValid).addImm(CCMask)
.addRegMask(RegMask)
.addReg(SystemZ::CC, RegState::Implicit);
return true;
}
return false;
}
void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
// Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
// super register in case one of the subregs is undefined.
// This handles ADDR128 too.
if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
.addReg(SrcReg, RegState::Implicit);
copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
.addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
return;
}
if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
false);
return;
}
// Move 128-bit floating-point values between VR128 and FP128.
if (SystemZ::VR128BitRegClass.contains(DestReg) &&
SystemZ::FP128BitRegClass.contains(SrcReg)) {
unsigned SrcRegHi =
RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
unsigned SrcRegLo =
RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
.addReg(SrcRegHi, getKillRegState(KillSrc))
.addReg(SrcRegLo, getKillRegState(KillSrc));
return;
}
if (SystemZ::FP128BitRegClass.contains(DestReg) &&
SystemZ::VR128BitRegClass.contains(SrcReg)) {
unsigned DestRegHi =
RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
unsigned DestRegLo =
RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
if (DestRegHi != SrcReg)
copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
.addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
return;
}
// Move CC value from/to a GR32.
if (SrcReg == SystemZ::CC) {
auto MIB = BuildMI(MBB, MBBI, DL, get(SystemZ::IPM), DestReg);
if (KillSrc) {
const MachineFunction *MF = MBB.getParent();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
MIB->addRegisterKilled(SrcReg, TRI);
}
return;
}
if (DestReg == SystemZ::CC) {
BuildMI(MBB, MBBI, DL, get(SystemZ::TMLH))
.addReg(SrcReg, getKillRegState(KillSrc))
.addImm(3 << (SystemZ::IPM_CC - 16));
return;
}
// Everything else needs only one instruction.
unsigned Opcode;
if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::LGR;
else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
// For z13 we prefer LDR over LER to avoid partial register dependencies.
Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::LDR;
else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::LXR;
else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::VLR32;
else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::VLR64;
else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::VLR;
else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::CPYA;
else if (SystemZ::AR32BitRegClass.contains(DestReg) &&
SystemZ::GR32BitRegClass.contains(SrcReg))
Opcode = SystemZ::SAR;
else if (SystemZ::GR32BitRegClass.contains(DestReg) &&
SystemZ::AR32BitRegClass.contains(SrcReg))
Opcode = SystemZ::EAR;
else
llvm_unreachable("Impossible reg-to-reg copy");
BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
void SystemZInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
// Callers may expect a single instruction, so keep 128-bit moves
// together for now and lower them after register allocation.
unsigned LoadOpcode, StoreOpcode;
getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
.addReg(SrcReg, getKillRegState(isKill)),
FrameIdx);
}
void SystemZInstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
int FrameIdx, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
// Callers may expect a single instruction, so keep 128-bit moves
// together for now and lower them after register allocation.
unsigned LoadOpcode, StoreOpcode;
getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
FrameIdx);
}
// Return true if MI is a simple load or store with a 12-bit displacement
// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
const MCInstrDesc &MCID = MI->getDesc();
return ((MCID.TSFlags & Flag) &&
isUInt<12>(MI->getOperand(2).getImm()) &&
MI->getOperand(3).getReg() == 0);
}
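// LogicOp describes an AND-with-immediate operation: the width of the
// register being ANDed and the position and size of the immediate field
// within it. interpretAndImmediate() below maps the NI* "and immediate"
// opcodes onto this form.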
namespace {
struct LogicOp {
LogicOp() = default;
LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
: RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
explicit operator bool() const { return RegSize; }
unsigned RegSize = 0;
unsigned ImmLSB = 0;
unsigned ImmSize = 0;
};
} // end anonymous namespace
static LogicOp interpretAndImmediate(unsigned Opcode) {
switch (Opcode) {
case SystemZ::NILMux: return LogicOp(32, 0, 16);
case SystemZ::NIHMux: return LogicOp(32, 16, 16);
case SystemZ::NILL64: return LogicOp(64, 0, 16);
case SystemZ::NILH64: return LogicOp(64, 16, 16);
case SystemZ::NIHL64: return LogicOp(64, 32, 16);
case SystemZ::NIHH64: return LogicOp(64, 48, 16);
case SystemZ::NIFMux: return LogicOp(32, 0, 32);
case SystemZ::NILF64: return LogicOp(64, 0, 32);
case SystemZ::NIHF64: return LogicOp(64, 32, 32);
default: return LogicOp();
}
}
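// If OldMI's definition of CC is marked dead, propagate the dead flag to
// the CC definition on its replacement NewMI, if NewMI defines CC at all.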
static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
if (OldMI->registerDefIsDead(SystemZ::CC)) {
MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
if (CCDef != nullptr)
CCDef->setIsDead(true);
}
}
// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
MachineInstr *NewMI,
LiveVariables *LV) {
if (LV) {
unsigned NumOps = OldMI->getNumOperands();
for (unsigned I = 1; I < NumOps; ++I) {
MachineOperand &Op = OldMI->getOperand(I);
if (Op.isReg() && Op.isKill())
LV->replaceKillInstruction(Op.getReg(), *OldMI, *NewMI);
}
}
transferDeadCC(OldMI, NewMI);
return NewMI;
}
MachineInstr *SystemZInstrInfo::convertToThreeAddress(
MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned Opcode = MI.getOpcode();
unsigned NumOps = MI.getNumOperands();
// Try to convert something like SLL into SLLK, if supported.
// We prefer to keep the two-operand form where possible both
// because it tends to be shorter and because some instructions
// have memory forms that can be used during spilling.
if (STI.hasDistinctOps()) {
MachineOperand &Dest = MI.getOperand(0);
MachineOperand &Src = MI.getOperand(1);
unsigned DestReg = Dest.getReg();
unsigned SrcReg = Src.getReg();
// AHIMux is only really a three-operand instruction when both operands
// are low registers. Try to constrain both operands to be low if
// possible.
if (Opcode == SystemZ::AHIMux &&
TargetRegisterInfo::isVirtualRegister(DestReg) &&
TargetRegisterInfo::isVirtualRegister(SrcReg) &&
MRI.getRegClass(DestReg)->contains(SystemZ::R1L) &&
MRI.getRegClass(SrcReg)->contains(SystemZ::R1L)) {
MRI.constrainRegClass(DestReg, &SystemZ::GR32BitRegClass);
MRI.constrainRegClass(SrcReg, &SystemZ::GR32BitRegClass);
}
int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
if (ThreeOperandOpcode >= 0) {
// Create three address instruction without adding the implicit
// operands. Those will instead be copied over from the original
// instruction by the loop below.
MachineInstrBuilder MIB(
*MF, MF->CreateMachineInstr(get(ThreeOperandOpcode), MI.getDebugLoc(),
/*NoImplicit=*/true));
MIB.add(Dest);
// Keep the kill state, but drop the tied flag.
MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
// Keep the remaining operands as-is.
for (unsigned I = 2; I < NumOps; ++I)
MIB.add(MI.getOperand(I));
MBB->insert(MI, MIB);
return finishConvertToThreeAddress(&MI, MIB, LV);
}
}
// Try to convert an AND into an RISBG-type instruction.
if (LogicOp And = interpretAndImmediate(Opcode)) {
uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
// AND IMMEDIATE leaves the other bits of the register unchanged.
Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
unsigned Start, End;
if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
unsigned NewOpcode;
if (And.RegSize == 64) {
NewOpcode = SystemZ::RISBG;
// Prefer RISBGN if available, since it does not clobber CC.
if (STI.hasMiscellaneousExtensions())
NewOpcode = SystemZ::RISBGN;
} else {
NewOpcode = SystemZ::RISBMux;
Start &= 31;
End &= 31;
}
MachineOperand &Dest = MI.getOperand(0);
MachineOperand &Src = MI.getOperand(1);
MachineInstrBuilder MIB =
BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
.add(Dest)
.addReg(0)
.addReg(Src.getReg(), getKillRegState(Src.isKill()),
Src.getSubReg())
.addImm(Start)
.addImm(End + 128)
.addImm(0);
return finishConvertToThreeAddress(&MI, MIB, LV);
}
}
return nullptr;
}
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS) const {
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo &MFI = MF.getFrameInfo();
unsigned Size = MFI.getObjectSize(FrameIndex);
unsigned Opcode = MI.getOpcode();
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
if (LIS != nullptr && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
// Check CC liveness, since new instruction introduces a dead
// def of CC.
MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
++CCUnit;
assert(!CCUnit.isValid() && "CC only has one reg unit.");
SlotIndex MISlot =
LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
if (!CCLiveRange.liveAt(MISlot)) {
// LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
MI.getDebugLoc(), get(SystemZ::AGSI))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI.getOperand(2).getImm());
BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
return BuiltMI;
}
}
return nullptr;
}
// All other cases require a single operand.
if (Ops.size() != 1)
return nullptr;
unsigned OpNum = Ops[0];
assert(Size * 8 ==
TRI->getRegSizeInBits(*MF.getRegInfo()
.getRegClass(MI.getOperand(OpNum).getReg())) &&
"Invalid size combination");
if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
isInt<8>(MI.getOperand(2).getImm())) {
// A(G)HI %reg, CONST -> A(G)SI %mem, CONST
Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
MachineInstr *BuiltMI =
BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI.getOperand(2).getImm());
transferDeadCC(&MI, BuiltMI);
return BuiltMI;
}
if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
(Opcode == SystemZ::ALGFI && OpNum == 0 &&
isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
// AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
MachineInstr *BuiltMI =
BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm((int8_t)MI.getOperand(2).getImm());
transferDeadCC(&MI, BuiltMI);
return BuiltMI;
}
if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
(Opcode == SystemZ::SLGFI && OpNum == 0 &&
isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
// SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
MachineInstr *BuiltMI =
BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm((int8_t)-MI.getOperand(2).getImm());
transferDeadCC(&MI, BuiltMI);
return BuiltMI;
}
if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
bool Op0IsGPR = (Opcode == SystemZ::LGDR);
bool Op1IsGPR = (Opcode == SystemZ::LDGR);
// If we're spilling the destination of an LDGR or LGDR, store the
// source register instead.
if (OpNum == 0) {
unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(StoreOpcode))
.add(MI.getOperand(1))
.addFrameIndex(FrameIndex)
.addImm(0)
.addReg(0);
}
// If we're spilling the source of an LDGR or LGDR, load the
// destination register instead.
if (OpNum == 1) {
unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(LoadOpcode))
.add(MI.getOperand(0))
.addFrameIndex(FrameIndex)
.addImm(0)
.addReg(0);
}
}
// Look for cases where the source of a simple store or the destination
// of a simple load is being spilled. Try to use MVC instead.
//
// Although MVC is in practice a fast choice in these cases, it is still
// logically a bytewise copy. This means that we cannot use it if the
// load or store is volatile. We also wouldn't be able to use MVC if
// the two memories partially overlap, but that case cannot occur here,
// because we know that one of the memories is a full frame index.
//
// For performance reasons, we also want to avoid using MVC if the addresses
// might be equal. We don't worry about that case here, because spill slot
// coloring happens later, and because we have special code to remove
// MVCs that turn out to be redundant.
if (OpNum == 0 && MI.hasOneMemOperand()) {
MachineMemOperand *MMO = *MI.memoperands_begin();
if (MMO->getSize() == Size && !MMO->isVolatile()) {
// Handle conversion of loads.
if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(SystemZ::MVC))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(Size)
.add(MI.getOperand(1))
.addImm(MI.getOperand(2).getImm())
.addMemOperand(MMO);
}
// Handle conversion of stores.
if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(SystemZ::MVC))
.add(MI.getOperand(1))
.addImm(MI.getOperand(2).getImm())
.addImm(Size)
.addFrameIndex(FrameIndex)
.addImm(0)
.addMemOperand(MMO);
}
}
}
// If the spilled operand is the final one, try to change <INSN>R
// into <INSN>.
int MemOpcode = SystemZ::getMemOpcode(Opcode);
if (MemOpcode >= 0) {
unsigned NumOps = MI.getNumExplicitOperands();
if (OpNum == NumOps - 1) {
const MCInstrDesc &MemDesc = get(MemOpcode);
uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
assert(AccessBytes != 0 && "Size of access should be known");
assert(AccessBytes <= Size && "Access outside the frame index");
uint64_t Offset = Size - AccessBytes;
MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
MI.getDebugLoc(), get(MemOpcode));
for (unsigned I = 0; I < OpNum; ++I)
MIB.add(MI.getOperand(I));
MIB.addFrameIndex(FrameIndex).addImm(Offset);
if (MemDesc.TSFlags & SystemZII::HasIndex)
MIB.addReg(0);
transferDeadCC(&MI, MIB);
return MIB;
}
}
return nullptr;
}
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS) const {
return nullptr;
}
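// Expand target-specific pseudo instructions after register allocation.
// Returns true if MI was a pseudo expanded here (either rewritten in place
// or split into real instructions), false otherwise.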
bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
switch (MI.getOpcode()) {
case SystemZ::L128:
splitMove(MI, SystemZ::LG);
return true;
case SystemZ::ST128:
splitMove(MI, SystemZ::STG);
return true;
case SystemZ::LX:
splitMove(MI, SystemZ::LD);
return true;
case SystemZ::STX:
splitMove(MI, SystemZ::STD);
return true;
case SystemZ::LBMux:
expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
return true;
case SystemZ::LHMux:
expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
return true;
case SystemZ::LLCRMux:
expandZExtPseudo(MI, SystemZ::LLCR, 8);
return true;
case SystemZ::LLHRMux:
expandZExtPseudo(MI, SystemZ::LLHR, 16);
return true;
case SystemZ::LLCMux:
expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
return true;
case SystemZ::LLHMux:
expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
return true;
case SystemZ::LMux:
expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
return true;
case SystemZ::LOCMux:
expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
return true;
case SystemZ::LOCHIMux:
expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
return true;
case SystemZ::LOCRMux:
expandLOCRPseudo(MI, SystemZ::LOCR, SystemZ::LOCFHR);
return true;
case SystemZ::STCMux:
expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
return true;
case SystemZ::STHMux:
expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
return true;
case SystemZ::STMux:
expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
return true;
case SystemZ::STOCMux:
expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
return true;
case SystemZ::LHIMux:
expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
return true;
case SystemZ::IIFMux:
expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
return true;
case SystemZ::IILMux:
expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
return true;
case SystemZ::IIHMux:
expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
return true;
case SystemZ::NIFMux:
expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
return true;
case SystemZ::NILMux:
expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
return true;
case SystemZ::NIHMux:
expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
return true;
case SystemZ::OIFMux:
expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
return true;
case SystemZ::OILMux:
expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
return true;
case SystemZ::OIHMux:
expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
return true;
case SystemZ::XIFMux:
expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
return true;
case SystemZ::TMLMux:
expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
return true;
case SystemZ::TMHMux:
expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
return true;
case SystemZ::AHIMux:
expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
return true;
case SystemZ::AHIMuxK:
expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
return true;
case SystemZ::AFIMux:
expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
return true;
case SystemZ::CHIMux:
expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
return true;
case SystemZ::CFIMux:
expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
return true;
case SystemZ::CLFIMux:
expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
return true;
case SystemZ::CMux:
expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
return true;
case SystemZ::CLMux:
expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
return true;
case SystemZ::RISBMux: {
bool DestIsHigh = isHighReg(MI.getOperand(0).getReg());
bool SrcIsHigh = isHighReg(MI.getOperand(2).getReg());
if (SrcIsHigh == DestIsHigh)
MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
else {
MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
}
return true;
}
case SystemZ::ADJDYNALLOC:
splitAdjDynAlloc(MI);
return true;
case TargetOpcode::LOAD_STACK_GUARD:
expandLoadStackGuard(&MI);
return true;
default:
return false;
}
}
unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
if (MI.getOpcode() == TargetOpcode::INLINEASM) {
const MachineFunction *MF = MI.getParent()->getParent();
const char *AsmStr = MI.getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
}
return MI.getDesc().getSize();
}
SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
case SystemZ::BR:
case SystemZ::BI:
case SystemZ::J:
case SystemZ::JG:
return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
SystemZ::CCMASK_ANY, &MI.getOperand(0));
case SystemZ::BRC:
case SystemZ::BRCL:
return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
MI.getOperand(1).getImm(), &MI.getOperand(2));
case SystemZ::BRCT:
case SystemZ::BRCTH:
return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
case SystemZ::BRCTG:
return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
case SystemZ::CIJ:
case SystemZ::CRJ:
return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CLIJ:
case SystemZ::CLRJ:
return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CGIJ:
case SystemZ::CGRJ:
return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CLGIJ:
case SystemZ::CLGRJ:
return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
MI.getOperand(2).getImm(), &MI.getOperand(3));
default:
llvm_unreachable("Unrecognized branch opcode");
}
}
void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
unsigned &LoadOpcode,
unsigned &StoreOpcode) const {
if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
LoadOpcode = SystemZ::L;
StoreOpcode = SystemZ::ST;
} else if (RC == &SystemZ::GRH32BitRegClass) {
LoadOpcode = SystemZ::LFH;
StoreOpcode = SystemZ::STFH;
} else if (RC == &SystemZ::GRX32BitRegClass) {
LoadOpcode = SystemZ::LMux;
StoreOpcode = SystemZ::STMux;
} else if (RC == &SystemZ::GR64BitRegClass ||
RC == &SystemZ::ADDR64BitRegClass) {
LoadOpcode = SystemZ::LG;
StoreOpcode = SystemZ::STG;
} else if (RC == &SystemZ::GR128BitRegClass ||
RC == &SystemZ::ADDR128BitRegClass) {
LoadOpcode = SystemZ::L128;
StoreOpcode = SystemZ::ST128;
} else if (RC == &SystemZ::FP32BitRegClass) {
LoadOpcode = SystemZ::LE;
StoreOpcode = SystemZ::STE;
} else if (RC == &SystemZ::FP64BitRegClass) {
LoadOpcode = SystemZ::LD;
StoreOpcode = SystemZ::STD;
} else if (RC == &SystemZ::FP128BitRegClass) {
LoadOpcode = SystemZ::LX;
StoreOpcode = SystemZ::STX;
} else if (RC == &SystemZ::VR32BitRegClass) {
LoadOpcode = SystemZ::VL32;
StoreOpcode = SystemZ::VST32;
} else if (RC == &SystemZ::VR64BitRegClass) {
LoadOpcode = SystemZ::VL64;
StoreOpcode = SystemZ::VST64;
} else if (RC == &SystemZ::VF128BitRegClass ||
RC == &SystemZ::VR128BitRegClass) {
LoadOpcode = SystemZ::VL;
StoreOpcode = SystemZ::VST;
} else
llvm_unreachable("Unsupported regclass to load or store");
}
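// getOpcodeForOffset() below returns an opcode whose displacement field can
// encode Offset. For example, SystemZ::L takes an unsigned 12-bit
// displacement, so for a negative offset such as -4 its signed 20-bit
// counterpart SystemZ::LY is returned instead; a result of 0 means no
// suitable form exists.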
unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
int64_t Offset) const {
const MCInstrDesc &MCID = get(Opcode);
int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
// Get the instruction to use for unsigned 12-bit displacements.
int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
if (Disp12Opcode >= 0)
return Disp12Opcode;
// All address-related instructions can use unsigned 12-bit
// displacements.
return Opcode;
}
if (isInt<20>(Offset) && isInt<20>(Offset2)) {
// Get the instruction to use for signed 20-bit displacements.
int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
if (Disp20Opcode >= 0)
return Disp20Opcode;
// Check whether Opcode allows signed 20-bit displacements.
if (MCID.TSFlags & SystemZII::Has20BitOffset)
return Opcode;
}
return 0;
}
unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
switch (Opcode) {
case SystemZ::L: return SystemZ::LT;
case SystemZ::LY: return SystemZ::LT;
case SystemZ::LG: return SystemZ::LTG;
case SystemZ::LGF: return SystemZ::LTGF;
case SystemZ::LR: return SystemZ::LTR;
case SystemZ::LGFR: return SystemZ::LTGFR;
case SystemZ::LGR: return SystemZ::LTGR;
case SystemZ::LER: return SystemZ::LTEBR;
case SystemZ::LDR: return SystemZ::LTDBR;
case SystemZ::LXR: return SystemZ::LTXBR;
case SystemZ::LCDFR: return SystemZ::LCDBR;
case SystemZ::LPDFR: return SystemZ::LPDBR;
case SystemZ::LNDFR: return SystemZ::LNDBR;
case SystemZ::LCDFR_32: return SystemZ::LCEBR;
case SystemZ::LPDFR_32: return SystemZ::LPEBR;
case SystemZ::LNDFR_32: return SystemZ::LNEBR;
// On zEC12 we prefer to use RISBGN. But if there is a chance to
// actually use the condition code, we may turn it back into RISBG.
// Note that RISBG is not really a "load-and-test" instruction,
// but sets the same condition code values, so is OK to use here.
case SystemZ::RISBGN: return SystemZ::RISBG;
default: return 0;
}
}
// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out. Store the first set bit in LSB and
// the number of set bits in Length if so.
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
unsigned First = findFirstSet(Mask);
uint64_t Top = (Mask >> First) + 1;
if ((Top & -Top) == Top) {
LSB = First;
Length = findFirstSet(Top);
return true;
}
return false;
}
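// Example for the helpers above and below: for Mask = 0x00ff0000 with
// BitSize 32, isStringOfOnes() gives LSB = 16 and Length = 8, so
// isRxSBGMask() reports Start = 63 - 23 = 40 and End = 63 - 16 = 47,
// i.e. the MSB-numbered positions of the first and last selected bits.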
bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
unsigned &Start, unsigned &End) const {
// Reject trivial all-zero masks.
Mask &= allOnes(BitSize);
if (Mask == 0)
return false;
// Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
// the msb and End specifies the index of the lsb.
unsigned LSB, Length;
if (isStringOfOnes(Mask, LSB, Length)) {
Start = 63 - (LSB + Length - 1);
End = 63 - LSB;
return true;
}
// Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
// of the low 1s and End specifies the lsb of the high 1s.
if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
assert(LSB > 0 && "Bottom bit must be set");
assert(LSB + Length < BitSize && "Top bit must be set");
Start = 63 - (LSB - 1);
End = 63 - (LSB + Length);
return true;
}
return false;
}
unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
SystemZII::FusedCompareType Type,
const MachineInstr *MI) const {
switch (Opcode) {
case SystemZ::CHI:
case SystemZ::CGHI:
if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
return 0;
break;
case SystemZ::CLFI:
case SystemZ::CLGFI:
if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
return 0;
break;
case SystemZ::CL:
case SystemZ::CLG:
if (!STI.hasMiscellaneousExtensions())
return 0;
if (!(MI && MI->getOperand(3).getReg() == 0))
return 0;
break;
}
switch (Type) {
case SystemZII::CompareAndBranch:
switch (Opcode) {
case SystemZ::CR:
return SystemZ::CRJ;
case SystemZ::CGR:
return SystemZ::CGRJ;
case SystemZ::CHI:
return SystemZ::CIJ;
case SystemZ::CGHI:
return SystemZ::CGIJ;
case SystemZ::CLR:
return SystemZ::CLRJ;
case SystemZ::CLGR:
return SystemZ::CLGRJ;
case SystemZ::CLFI:
return SystemZ::CLIJ;
case SystemZ::CLGFI:
return SystemZ::CLGIJ;
default:
return 0;
}
case SystemZII::CompareAndReturn:
switch (Opcode) {
case SystemZ::CR:
return SystemZ::CRBReturn;
case SystemZ::CGR:
return SystemZ::CGRBReturn;
case SystemZ::CHI:
return SystemZ::CIBReturn;
case SystemZ::CGHI:
return SystemZ::CGIBReturn;
case SystemZ::CLR:
return SystemZ::CLRBReturn;
case SystemZ::CLGR:
return SystemZ::CLGRBReturn;
case SystemZ::CLFI:
return SystemZ::CLIBReturn;
case SystemZ::CLGFI:
return SystemZ::CLGIBReturn;
default:
return 0;
}
case SystemZII::CompareAndSibcall:
switch (Opcode) {
case SystemZ::CR:
return SystemZ::CRBCall;
case SystemZ::CGR:
return SystemZ::CGRBCall;
case SystemZ::CHI:
return SystemZ::CIBCall;
case SystemZ::CGHI:
return SystemZ::CGIBCall;
case SystemZ::CLR:
return SystemZ::CLRBCall;
case SystemZ::CLGR:
return SystemZ::CLGRBCall;
case SystemZ::CLFI:
return SystemZ::CLIBCall;
case SystemZ::CLGFI:
return SystemZ::CLGIBCall;
default:
return 0;
}
case SystemZII::CompareAndTrap:
switch (Opcode) {
case SystemZ::CR:
return SystemZ::CRT;
case SystemZ::CGR:
return SystemZ::CGRT;
case SystemZ::CHI:
return SystemZ::CIT;
case SystemZ::CGHI:
return SystemZ::CGIT;
case SystemZ::CLR:
return SystemZ::CLRT;
case SystemZ::CLGR:
return SystemZ::CLGRT;
case SystemZ::CLFI:
return SystemZ::CLFIT;
case SystemZ::CLGFI:
return SystemZ::CLGIT;
case SystemZ::CL:
return SystemZ::CLT;
case SystemZ::CLG:
return SystemZ::CLGT;
default:
return 0;
}
}
return 0;
}
unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
if (!STI.hasLoadAndTrap())
return 0;
switch (Opcode) {
case SystemZ::L:
case SystemZ::LY:
return SystemZ::LAT;
case SystemZ::LG:
return SystemZ::LGAT;
case SystemZ::LFH:
return SystemZ::LFHAT;
case SystemZ::LLGF:
return SystemZ::LLGFAT;
case SystemZ::LLGT:
return SystemZ::LLGTAT;
}
return 0;
}
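// Materialize the immediate Value into Reg, picking LGHI for signed 16-bit
// values, LLILL or LLILH when only one of the two low halfwords is set, and
// LGFI otherwise; values that need more than 32 bits are not handled yet
// (see the assertion below).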
void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned Reg, uint64_t Value) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned Opcode;
if (isInt<16>(Value))
Opcode = SystemZ::LGHI;
else if (SystemZ::isImmLL(Value))
Opcode = SystemZ::LLILL;
else if (SystemZ::isImmLH(Value)) {
Opcode = SystemZ::LLILH;
Value >>= 16;
} else {
assert(isInt<32>(Value) && "Huge values not handled yet");
Opcode = SystemZ::LGFI;
}
BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}
bool SystemZInstrInfo::
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA) const {
if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
return false;
// If mem-operands show that the same address Value is used by both
// instructions, check for non-overlapping offsets and widths. Not
// sure if a register based analysis would be an improvement...
MachineMemOperand *MMOa = *MIa.memoperands_begin();
MachineMemOperand *MMOb = *MIb.memoperands_begin();
const Value *VALa = MMOa->getValue();
const Value *VALb = MMOb->getValue();
bool SameVal = (VALa && VALb && (VALa == VALb));
if (!SameVal) {
const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
if (PSVa && PSVb && (PSVa == PSVb))
SameVal = true;
}
if (SameVal) {
int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
int WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowOffset + LowWidth <= HighOffset)
return true;
}
return false;
}
Index: vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZInstrInfo.h
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZInstrInfo.h (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZInstrInfo.h (revision 344166)
@@ -1,326 +1,323 @@
//===-- SystemZInstrInfo.h - SystemZ instruction information ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZINSTRINFO_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZINSTRINFO_H
#include "SystemZ.h"
#include "SystemZRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include <cstdint>
#define GET_INSTRINFO_HEADER
#include "SystemZGenInstrInfo.inc"
namespace llvm {
class SystemZSubtarget;
namespace SystemZII {
enum {
// See comments in SystemZInstrFormats.td.
SimpleBDXLoad = (1 << 0),
SimpleBDXStore = (1 << 1),
Has20BitOffset = (1 << 2),
HasIndex = (1 << 3),
Is128Bit = (1 << 4),
AccessSizeMask = (31 << 5),
AccessSizeShift = 5,
CCValuesMask = (15 << 10),
CCValuesShift = 10,
CompareZeroCCMaskMask = (15 << 14),
CompareZeroCCMaskShift = 14,
CCMaskFirst = (1 << 18),
CCMaskLast = (1 << 19),
IsLogical = (1 << 20)
};
static inline unsigned getAccessSize(unsigned int Flags) {
return (Flags & AccessSizeMask) >> AccessSizeShift;
}
static inline unsigned getCCValues(unsigned int Flags) {
return (Flags & CCValuesMask) >> CCValuesShift;
}
static inline unsigned getCompareZeroCCMask(unsigned int Flags) {
return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift;
}
// SystemZ MachineOperand target flags.
enum {
// Masks out the bits for the access model.
MO_SYMBOL_MODIFIER = (3 << 0),
// @GOT (aka @GOTENT)
MO_GOT = (1 << 0),
// @INDNTPOFF
MO_INDNTPOFF = (2 << 0)
};
// Classifies a branch.
enum BranchType {
// An instruction that branches on the current value of CC.
BranchNormal,
// An instruction that performs a 32-bit signed comparison and branches
// on the result.
BranchC,
// An instruction that performs a 32-bit unsigned comparison and branches
// on the result.
BranchCL,
// An instruction that performs a 64-bit signed comparison and branches
// on the result.
BranchCG,
// An instruction that performs a 64-bit unsigned comparison and branches
// on the result.
BranchCLG,
// An instruction that decrements a 32-bit register and branches if
// the result is nonzero.
BranchCT,
// An instruction that decrements a 64-bit register and branches if
// the result is nonzero.
BranchCTG
};
// Information about a branch instruction.
struct Branch {
// The type of the branch.
BranchType Type;
// CCMASK_<N> is set if CC might be equal to N.
unsigned CCValid;
// CCMASK_<N> is set if the branch should be taken when CC == N.
unsigned CCMask;
// The target of the branch.
const MachineOperand *Target;
Branch(BranchType type, unsigned ccValid, unsigned ccMask,
const MachineOperand *target)
: Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {}
};
// Kinds of fused compares in compare-and-* instructions. Together with type
// of the converted compare, this identifies the compare-and-*
// instruction.
enum FusedCompareType {
// Relative branch - CRJ etc.
CompareAndBranch,
// Indirect branch, used for return - CRBReturn etc.
CompareAndReturn,
// Indirect branch, used for sibcall - CRBCall etc.
CompareAndSibcall,
// Trap
CompareAndTrap
};
} // end namespace SystemZII
class SystemZInstrInfo : public SystemZGenInstrInfo {
const SystemZRegisterInfo RI;
SystemZSubtarget &STI;
void splitMove(MachineBasicBlock::iterator MI, unsigned NewOpcode) const;
void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const;
void expandRIPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned HighOpcode,
bool ConvertHigh) const;
void expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned LowOpcodeK, unsigned HighOpcode) const;
void expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const;
void expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const;
void expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const;
void expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned Size) const;
void expandLoadStackGuard(MachineInstr *MI) const;
MachineInstrBuilder
emitGRX32Move(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
unsigned LowLowOpcode, unsigned Size, bool KillSrc,
bool UndefSrc) const;
virtual void anchor();
protected:
/// Commutes the operands in the given instruction by changing the operands
/// order and/or changing the instruction's opcode and/or the immediate value
/// operand.
///
/// The arguments 'CommuteOpIdx1' and 'CommuteOpIdx2' specify the operands
/// to be commuted.
///
/// Do not call this method for a non-commutable instruction or
/// non-commutable operands.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned CommuteOpIdx1,
unsigned CommuteOpIdx2) const override;
public:
explicit SystemZInstrInfo(SystemZSubtarget &STI);
// Override TargetInstrInfo.
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
int &SrcFrameIndex) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
unsigned removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved = nullptr) const override;
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
const DebugLoc &DL,
int *BytesAdded = nullptr) const override;
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask, int &Value) const override;
- bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
- unsigned SrcReg2, int Mask, int Value,
- const MachineRegisterInfo *MRI) const override;
bool canInsertSelect(const MachineBasicBlock&, ArrayRef<MachineOperand> Cond,
unsigned, unsigned, int&, int&, int&) const override;
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Cond, unsigned TrueReg,
unsigned FalseReg) const override;
bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
MachineRegisterInfo *MRI) const override;
bool isPredicable(const MachineInstr &MI) const override;
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
unsigned ExtraPredCycles,
BranchProbability Probability) const override;
bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
unsigned NumCyclesT, unsigned ExtraPredCyclesT,
MachineBasicBlock &FMBB,
unsigned NumCyclesF, unsigned ExtraPredCyclesF,
BranchProbability Probability) const override;
bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
BranchProbability Probability) const override;
bool PredicateInstruction(MachineInstr &MI,
ArrayRef<MachineOperand> Pred) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineInstr &MI,
LiveVariables *LV) const override;
MachineInstr *
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS = nullptr) const override;
MachineInstr *foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS = nullptr) const override;
bool expandPostRAPseudo(MachineInstr &MBBI) const override;
bool reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
override;
// Return the SystemZRegisterInfo, which this class owns.
const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
// Return the size in bytes of MI.
unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
// Return true if MI is a conditional or unconditional branch.
// When returning true, set Cond to the mask of condition-code
// values on which the instruction will branch, and set Target
// to the operand that contains the branch target. This target
// can be a register or a basic block.
SystemZII::Branch getBranchInfo(const MachineInstr &MI) const;
// Get the load and store opcodes for a given register class.
void getLoadStoreOpcodes(const TargetRegisterClass *RC,
unsigned &LoadOpcode, unsigned &StoreOpcode) const;
// Opcode is the opcode of an instruction that has an address operand,
// and the caller wants to perform that instruction's operation on an
// address that has displacement Offset. Return the opcode of a suitable
// instruction (which might be Opcode itself) or 0 if no such instruction
// exists.
unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset) const;
// If Opcode is a load instruction that has a LOAD AND TEST form,
// return the opcode for the testing form, otherwise return 0.
unsigned getLoadAndTest(unsigned Opcode) const;
// Return true if ROTATE AND ... SELECTED BITS can be used to select bits
// Mask of the R2 operand, given that only the low BitSize bits of Mask are
// significant. Set Start and End to the I3 and I4 operands if so.
bool isRxSBGMask(uint64_t Mask, unsigned BitSize,
unsigned &Start, unsigned &End) const;
// If Opcode is a COMPARE opcode for which an associated fused COMPARE AND *
// operation exists, return the opcode for the latter, otherwise return 0.
// MI, if nonnull, is the compare instruction.
unsigned getFusedCompare(unsigned Opcode,
SystemZII::FusedCompareType Type,
const MachineInstr *MI = nullptr) const;
// If Opcode is a LOAD opcode for which an associated LOAD AND TRAP
// operation exists, return the opcode for the latter, otherwise return 0.
unsigned getLoadAndTrap(unsigned Opcode) const;
// Emit code before MBBI in MI to move immediate value Value into
// physical register Reg.
void loadImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned Reg, uint64_t Value) const;
// Sometimes, it is possible for the target to tell, even without
// aliasing information, that two MIs access different memory
// addresses. This function returns true if two MIs access different
// memory addresses and false otherwise.
bool
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
};
} // end namespace llvm
#endif // LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZINSTRINFO_H
Index: vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp (revision 344166)
@@ -1,274 +1,276 @@
//===-- SystemZSelectionDAGInfo.cpp - SystemZ SelectionDAG Info -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZSelectionDAGInfo class.
//
//===----------------------------------------------------------------------===//
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;
#define DEBUG_TYPE "systemz-selectiondag-info"
// Decide whether it is best to use a loop or straight-line code for
// a block operation of Size bytes with source address Src and destination
// address Dest. Sequence is the opcode to use for straight-line code
// (such as MVC) and Loop is the opcode to use for loops (such as MVC_LOOP).
// Return the chain for the completed operation.
static SDValue emitMemMem(SelectionDAG &DAG, const SDLoc &DL, unsigned Sequence,
unsigned Loop, SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size) {
EVT PtrVT = Src.getValueType();
// The heuristic we use is to prefer loops for anything that would
// require 7 or more MVCs. With these kinds of sizes there isn't
// much to choose between straight-line code and looping code,
// since the time will be dominated by the MVCs themselves.
// However, the loop has 4 or 5 instructions (depending on whether
// the base addresses can be proved equal), so there doesn't seem
// much point using a loop for 5 * 256 bytes or fewer. Anything in
// the range (5 * 256, 6 * 256) will need another instruction after
// the loop, so it doesn't seem worth using a loop then either.
// The next value up, 6 * 256, can be implemented in the same
// number of straight-line MVCs as 6 * 256 - 1.
if (Size > 6 * 256)
return DAG.getNode(Loop, DL, MVT::Other, Chain, Dst, Src,
DAG.getConstant(Size, DL, PtrVT),
DAG.getConstant(Size / 256, DL, PtrVT));
return DAG.getNode(Sequence, DL, MVT::Other, Chain, Dst, Src,
DAG.getConstant(Size, DL, PtrVT));
}
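// Illustrative sketch (not part of the vendor sources): MVC copies at most
// 256 bytes, so the straight-line path above needs one MVC per 256-byte chunk
// and the 6 * 256 threshold corresponds to at most six MVCs.
static unsigned numStraightLineMVCs(uint64_t Size) {
  // e.g. 1536 bytes -> 6 MVCs (straight-line); 1537 bytes -> 7, use MVC_LOOP.
  return (Size + 255) / 256;
}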
SDValue SystemZSelectionDAGInfo::EmitTargetCodeForMemcpy(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool IsVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
if (IsVolatile)
return SDValue();
if (auto *CSize = dyn_cast<ConstantSDNode>(Size))
return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
Chain, Dst, Src, CSize->getZExtValue());
return SDValue();
}
// Handle a memset of 1, 2, 4 or 8 bytes with the operands given by
// Chain, Dst, ByteVal and Size. These cases are expected to use
// MVI, MVHHI, MVHI and MVGHI respectively.
static SDValue memsetStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Dst, uint64_t ByteVal, uint64_t Size,
unsigned Align, MachinePointerInfo DstPtrInfo) {
uint64_t StoreVal = ByteVal;
for (unsigned I = 1; I < Size; ++I)
StoreVal |= ByteVal << (I * 8);
return DAG.getStore(
Chain, DL, DAG.getConstant(StoreVal, DL, MVT::getIntegerVT(Size * 8)),
Dst, DstPtrInfo, Align);
}
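// Illustrative note: the loop above replicates ByteVal across the store
// width, e.g. ByteVal = 0xFF with Size = 8 yields the all-ones 64-bit value
// 0xFFFFFFFFFFFFFFFF. That replication is also why the MVHI and MVGHI forms
// (16-bit sign-extended immediates) are only usable when the byte is all
// zeros or all ones, as noted in EmitTargetCodeForMemset below.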
SDValue SystemZSelectionDAGInfo::EmitTargetCodeForMemset(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dst,
SDValue Byte, SDValue Size, unsigned Align, bool IsVolatile,
MachinePointerInfo DstPtrInfo) const {
EVT PtrVT = Dst.getValueType();
if (IsVolatile)
return SDValue();
if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
uint64_t Bytes = CSize->getZExtValue();
if (Bytes == 0)
return SDValue();
if (auto *CByte = dyn_cast<ConstantSDNode>(Byte)) {
// Handle cases that can be done using at most two of
// MVI, MVHI, MVHHI and MVGHI. The latter two can only be
// used if ByteVal is all zeros or all ones; in other cases,
// we can move at most 2 halfwords.
uint64_t ByteVal = CByte->getZExtValue();
if (ByteVal == 0 || ByteVal == 255 ?
Bytes <= 16 && countPopulation(Bytes) <= 2 :
Bytes <= 4) {
unsigned Size1 = Bytes == 16 ? 8 : 1 << findLastSet(Bytes);
unsigned Size2 = Bytes - Size1;
SDValue Chain1 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size1,
Align, DstPtrInfo);
if (Size2 == 0)
return Chain1;
Dst = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
DAG.getConstant(Size1, DL, PtrVT));
DstPtrInfo = DstPtrInfo.getWithOffset(Size1);
SDValue Chain2 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size2,
std::min(Align, Size1), DstPtrInfo);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
}
} else {
// Handle one and two bytes using STC.
if (Bytes <= 2) {
SDValue Chain1 = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo, Align);
if (Bytes == 1)
return Chain1;
SDValue Dst2 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
DAG.getConstant(1, DL, PtrVT));
SDValue Chain2 =
DAG.getStore(Chain, DL, Byte, Dst2, DstPtrInfo.getWithOffset(1),
/* Alignment = */ 1);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
}
}
assert(Bytes >= 2 && "Should have dealt with 0- and 1-byte cases already");
// Handle the special case of a memset of 0, which can use XC.
auto *CByte = dyn_cast<ConstantSDNode>(Byte);
if (CByte && CByte->getZExtValue() == 0)
return emitMemMem(DAG, DL, SystemZISD::XC, SystemZISD::XC_LOOP,
Chain, Dst, Dst, Bytes);
// Copy the byte to the first location and then use MVC to copy
// it to the rest.
Chain = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo, Align);
SDValue DstPlus1 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
DAG.getConstant(1, DL, PtrVT));
return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
Chain, DstPlus1, Dst, Bytes - 1);
}
return SDValue();
}
// Use CLC to compare [Src1, Src1 + Size) with [Src2, Src2 + Size),
// deciding whether to use a loop or straight-line code.
static SDValue emitCLC(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src1, SDValue Src2, uint64_t Size) {
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
EVT PtrVT = Src1.getValueType();
// A two-CLC sequence is a clear win over a loop, not least because it
// needs only one branch. A three-CLC sequence needs the same number
// of branches as a loop (i.e. 2), but is shorter. That brings us to
// lengths greater than 768 bytes. It seems relatively likely that
// a difference will be found within the first 768 bytes, so we just
// optimize for the smallest number of branch instructions, in order
// to avoid polluting the prediction buffer too much. A loop only ever
// needs 2 branches, whereas a straight-line sequence would need 3 or more.
if (Size > 3 * 256)
return DAG.getNode(SystemZISD::CLC_LOOP, DL, VTs, Chain, Src1, Src2,
DAG.getConstant(Size, DL, PtrVT),
DAG.getConstant(Size / 256, DL, PtrVT));
return DAG.getNode(SystemZISD::CLC, DL, VTs, Chain, Src1, Src2,
DAG.getConstant(Size, DL, PtrVT));
}
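// Illustrative note: as with emitMemMem, each straight-line CLC covers up to
// 256 bytes, so the 3 * 256 threshold above means at most three CLCs are
// emitted before the CLC_LOOP form takes over.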
// Convert the current CC value into an integer that is 0 if CC == 0,
-// less than zero if CC == 1 and greater than zero if CC >= 2.
+// greater than zero if CC == 1 and less than zero if CC >= 2.
// The sequence starts with IPM, which puts CC into bits 29 and 28
// of an integer and clears bits 30 and 31.
static SDValue addIPMSequence(const SDLoc &DL, SDValue CCReg,
SelectionDAG &DAG) {
SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
- SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
- DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
- SDValue ROTL = DAG.getNode(ISD::ROTL, DL, MVT::i32, SRL,
- DAG.getConstant(31, DL, MVT::i32));
- return ROTL;
+ SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, IPM,
+ DAG.getConstant(30 - SystemZ::IPM_CC, DL, MVT::i32));
+ SDValue SRA = DAG.getNode(ISD::SRA, DL, MVT::i32, SHL,
+ DAG.getConstant(30, DL, MVT::i32));
+ return SRA;
}
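// Illustrative sketch (not vendor code) of the integer arithmetic performed
// by the IPM/SHL/SRA sequence above, assuming SystemZ::IPM_CC == 28, i.e.
// that IPM leaves CC in bits 29:28 with bits 31:30 clear.
static int emulateIPMSequence(unsigned CC) {
  uint32_t IPM = CC << 28;         // CC in bits 29:28
  uint32_t SHL = IPM << (30 - 28); // CC moved into bits 31:30
  return (int32_t)SHL >> 30;       // CC=0 -> 0, CC=1 -> 1, CC=2 -> -2, CC=3 -> -1
}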
std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForMemcmp(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src1,
SDValue Src2, SDValue Size, MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const {
if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
uint64_t Bytes = CSize->getZExtValue();
assert(Bytes > 0 && "Caller should have handled 0-size case");
- SDValue CCReg = emitCLC(DAG, DL, Chain, Src1, Src2, Bytes);
+ // Swap operands to invert CC == 1 vs. CC == 2 cases.
+ SDValue CCReg = emitCLC(DAG, DL, Chain, Src2, Src1, Bytes);
Chain = CCReg.getValue(1);
return std::make_pair(addIPMSequence(DL, CCReg, DAG), Chain);
}
return std::make_pair(SDValue(), SDValue());
}
std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForMemchr(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src,
SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const {
// Use SRST to find the character. End is its address on success.
EVT PtrVT = Src.getValueType();
SDVTList VTs = DAG.getVTList(PtrVT, MVT::i32, MVT::Other);
Length = DAG.getZExtOrTrunc(Length, DL, PtrVT);
Char = DAG.getZExtOrTrunc(Char, DL, MVT::i32);
Char = DAG.getNode(ISD::AND, DL, MVT::i32, Char,
DAG.getConstant(255, DL, MVT::i32));
SDValue Limit = DAG.getNode(ISD::ADD, DL, PtrVT, Src, Length);
SDValue End = DAG.getNode(SystemZISD::SEARCH_STRING, DL, VTs, Chain,
Limit, Src, Char);
SDValue CCReg = End.getValue(1);
Chain = End.getValue(2);
// Now select between End and null, depending on whether the character
// was found.
SDValue Ops[] = {End, DAG.getConstant(0, DL, PtrVT),
DAG.getConstant(SystemZ::CCMASK_SRST, DL, MVT::i32),
DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, DL, MVT::i32),
CCReg};
End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, PtrVT, Ops);
return std::make_pair(End, Chain);
}
std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForStrcpy(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest,
SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo,
bool isStpcpy) const {
SDVTList VTs = DAG.getVTList(Dest.getValueType(), MVT::Other);
SDValue EndDest = DAG.getNode(SystemZISD::STPCPY, DL, VTs, Chain, Dest, Src,
DAG.getConstant(0, DL, MVT::i32));
return std::make_pair(isStpcpy ? EndDest : Dest, EndDest.getValue(1));
}
std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForStrcmp(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src1,
SDValue Src2, MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const {
SDVTList VTs = DAG.getVTList(Src1.getValueType(), MVT::i32, MVT::Other);
- SDValue Unused = DAG.getNode(SystemZISD::STRCMP, DL, VTs, Chain, Src1, Src2,
+ // Swap operands to invert CC == 1 vs. CC == 2 cases.
+ SDValue Unused = DAG.getNode(SystemZISD::STRCMP, DL, VTs, Chain, Src2, Src1,
DAG.getConstant(0, DL, MVT::i32));
SDValue CCReg = Unused.getValue(1);
Chain = Unused.getValue(2);
return std::make_pair(addIPMSequence(DL, CCReg, DAG), Chain);
}
// Search from Src for a null character, stopping once Src reaches Limit.
// Return a pair of values, the first being the number of nonnull characters
// and the second being the out chain.
//
// This can be used for strlen by setting Limit to 0.
static std::pair<SDValue, SDValue> getBoundedStrlen(SelectionDAG &DAG,
const SDLoc &DL,
SDValue Chain, SDValue Src,
SDValue Limit) {
EVT PtrVT = Src.getValueType();
SDVTList VTs = DAG.getVTList(PtrVT, MVT::i32, MVT::Other);
SDValue End = DAG.getNode(SystemZISD::SEARCH_STRING, DL, VTs, Chain,
Limit, Src, DAG.getConstant(0, DL, MVT::i32));
Chain = End.getValue(2);
SDValue Len = DAG.getNode(ISD::SUB, DL, PtrVT, End, Src);
return std::make_pair(Len, Chain);
}
std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForStrlen(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src,
MachinePointerInfo SrcPtrInfo) const {
EVT PtrVT = Src.getValueType();
return getBoundedStrlen(DAG, DL, Chain, Src, DAG.getConstant(0, DL, PtrVT));
}
std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForStrnlen(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src,
SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const {
EVT PtrVT = Src.getValueType();
MaxLength = DAG.getZExtOrTrunc(MaxLength, DL, PtrVT);
SDValue Limit = DAG.getNode(ISD::ADD, DL, PtrVT, Src, MaxLength);
return getBoundedStrlen(DAG, DL, Chain, Src, Limit);
}
Index: vendor/llvm/dist-release_80/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp (revision 344166)
@@ -1,146 +1,153 @@
//==-- WebAssemblyTargetStreamer.cpp - WebAssembly Target Streamer Methods --=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines WebAssembly-specific target streamer classes.
/// These are for implementing support for target-specific assembly directives.
///
//===----------------------------------------------------------------------===//
#include "WebAssemblyTargetStreamer.h"
#include "InstPrinter/WebAssemblyInstPrinter.h"
#include "WebAssemblyMCTargetDesc.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionWasm.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolWasm.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
using namespace llvm;
WebAssemblyTargetStreamer::WebAssemblyTargetStreamer(MCStreamer &S)
: MCTargetStreamer(S) {}
void WebAssemblyTargetStreamer::emitValueType(wasm::ValType Type) {
Streamer.EmitIntValue(uint8_t(Type), 1);
}
WebAssemblyTargetAsmStreamer::WebAssemblyTargetAsmStreamer(
MCStreamer &S, formatted_raw_ostream &OS)
: WebAssemblyTargetStreamer(S), OS(OS) {}
WebAssemblyTargetWasmStreamer::WebAssemblyTargetWasmStreamer(MCStreamer &S)
: WebAssemblyTargetStreamer(S) {}
static void printTypes(formatted_raw_ostream &OS,
ArrayRef<wasm::ValType> Types) {
bool First = true;
for (auto Type : Types) {
if (First)
First = false;
else
OS << ", ";
OS << WebAssembly::typeToString(Type);
}
OS << '\n';
}
void WebAssemblyTargetAsmStreamer::emitLocal(ArrayRef<wasm::ValType> Types) {
if (!Types.empty()) {
OS << "\t.local \t";
printTypes(OS, Types);
}
}
void WebAssemblyTargetAsmStreamer::emitEndFunc() { OS << "\t.endfunc\n"; }
void WebAssemblyTargetAsmStreamer::emitSignature(
const wasm::WasmSignature *Sig) {
OS << "(";
emitParamList(Sig);
OS << ") -> (";
emitReturnList(Sig);
OS << ")";
}
void WebAssemblyTargetAsmStreamer::emitParamList(
const wasm::WasmSignature *Sig) {
auto &Params = Sig->Params;
for (auto &Ty : Params) {
if (&Ty != &Params[0])
OS << ", ";
OS << WebAssembly::typeToString(Ty);
}
}
void WebAssemblyTargetAsmStreamer::emitReturnList(
const wasm::WasmSignature *Sig) {
auto &Returns = Sig->Returns;
for (auto &Ty : Returns) {
if (&Ty != &Returns[0])
OS << ", ";
OS << WebAssembly::typeToString(Ty);
}
}
void WebAssemblyTargetAsmStreamer::emitFunctionType(const MCSymbolWasm *Sym) {
assert(Sym->isFunction());
OS << "\t.functype\t" << Sym->getName() << " ";
emitSignature(Sym->getSignature());
OS << "\n";
}
void WebAssemblyTargetAsmStreamer::emitGlobalType(const MCSymbolWasm *Sym) {
assert(Sym->isGlobal());
OS << "\t.globaltype\t" << Sym->getName() << ", "
<< WebAssembly::typeToString(
static_cast<wasm::ValType>(Sym->getGlobalType().Type))
<< '\n';
}
void WebAssemblyTargetAsmStreamer::emitEventType(const MCSymbolWasm *Sym) {
assert(Sym->isEvent());
OS << "\t.eventtype\t" << Sym->getName() << " ";
emitParamList(Sym->getSignature());
OS << "\n";
}
void WebAssemblyTargetAsmStreamer::emitImportModule(const MCSymbolWasm *Sym,
- StringRef ModuleName) {
- OS << "\t.import_module\t" << Sym->getName() << ", " << ModuleName << '\n';
+ StringRef ImportModule) {
+ OS << "\t.import_module\t" << Sym->getName() << ", "
+ << ImportModule << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitImportName(const MCSymbolWasm *Sym,
+ StringRef ImportName) {
+ OS << "\t.import_name\t" << Sym->getName() << ", "
+ << ImportName << '\n';
}
void WebAssemblyTargetAsmStreamer::emitIndIdx(const MCExpr *Value) {
OS << "\t.indidx \t" << *Value << '\n';
}
void WebAssemblyTargetWasmStreamer::emitLocal(ArrayRef<wasm::ValType> Types) {
SmallVector<std::pair<wasm::ValType, uint32_t>, 4> Grouped;
for (auto Type : Types) {
if (Grouped.empty() || Grouped.back().first != Type)
Grouped.push_back(std::make_pair(Type, 1));
else
++Grouped.back().second;
}
Streamer.EmitULEB128IntValue(Grouped.size());
for (auto Pair : Grouped) {
Streamer.EmitULEB128IntValue(Pair.second);
emitValueType(Pair.first);
}
}
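// Illustrative note: emitLocal above run-length encodes the local
// declarations, e.g. {i32, i32, i64, f32, f32, f32} is emitted as a ULEB128
// group count of 3 followed by the pairs (2, i32), (1, i64), (3, f32),
// matching the wasm binary format's "locals" encoding.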
void WebAssemblyTargetWasmStreamer::emitEndFunc() {
llvm_unreachable(".end_func is not needed for direct wasm output");
}
void WebAssemblyTargetWasmStreamer::emitIndIdx(const MCExpr *Value) {
llvm_unreachable(".indidx encoding not yet implemented");
}
Index: vendor/llvm/dist-release_80/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h (revision 344166)
@@ -1,105 +1,112 @@
//==-- WebAssemblyTargetStreamer.h - WebAssembly Target Streamer -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares WebAssembly-specific target streamer classes.
/// These are for implementing support for target-specific assembly directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYTARGETSTREAMER_H
#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYTARGETSTREAMER_H
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/MachineValueType.h"
namespace llvm {
class MCWasmStreamer;
class MCSymbolWasm;
/// WebAssembly-specific streamer interface, to implement support
/// WebAssembly-specific assembly directives.
class WebAssemblyTargetStreamer : public MCTargetStreamer {
public:
explicit WebAssemblyTargetStreamer(MCStreamer &S);
/// .local
virtual void emitLocal(ArrayRef<wasm::ValType> Types) = 0;
/// .endfunc
virtual void emitEndFunc() = 0;
/// .functype
virtual void emitFunctionType(const MCSymbolWasm *Sym) = 0;
/// .indidx
virtual void emitIndIdx(const MCExpr *Value) = 0;
/// .globaltype
virtual void emitGlobalType(const MCSymbolWasm *Sym) = 0;
/// .eventtype
virtual void emitEventType(const MCSymbolWasm *Sym) = 0;
/// .import_module
virtual void emitImportModule(const MCSymbolWasm *Sym,
- StringRef ModuleName) = 0;
+ StringRef ImportModule) = 0;
+ /// .import_name
+ virtual void emitImportName(const MCSymbolWasm *Sym,
+ StringRef ImportName) = 0;
protected:
void emitValueType(wasm::ValType Type);
};
/// This part is for ascii assembly output
class WebAssemblyTargetAsmStreamer final : public WebAssemblyTargetStreamer {
formatted_raw_ostream &OS;
void emitSignature(const wasm::WasmSignature *Sig);
void emitParamList(const wasm::WasmSignature *Sig);
void emitReturnList(const wasm::WasmSignature *Sig);
public:
WebAssemblyTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
void emitLocal(ArrayRef<wasm::ValType> Types) override;
void emitEndFunc() override;
void emitFunctionType(const MCSymbolWasm *Sym) override;
void emitIndIdx(const MCExpr *Value) override;
void emitGlobalType(const MCSymbolWasm *Sym) override;
void emitEventType(const MCSymbolWasm *Sym) override;
- void emitImportModule(const MCSymbolWasm *Sym, StringRef ModuleName) override;
+ void emitImportModule(const MCSymbolWasm *Sym, StringRef ImportModule) override;
+ void emitImportName(const MCSymbolWasm *Sym, StringRef ImportName) override;
};
/// This part is for Wasm object output
class WebAssemblyTargetWasmStreamer final : public WebAssemblyTargetStreamer {
public:
explicit WebAssemblyTargetWasmStreamer(MCStreamer &S);
void emitLocal(ArrayRef<wasm::ValType> Types) override;
void emitEndFunc() override;
void emitFunctionType(const MCSymbolWasm *Sym) override {}
void emitIndIdx(const MCExpr *Value) override;
void emitGlobalType(const MCSymbolWasm *Sym) override {}
void emitEventType(const MCSymbolWasm *Sym) override {}
void emitImportModule(const MCSymbolWasm *Sym,
- StringRef ModuleName) override {}
+ StringRef ImportModule) override {}
+ void emitImportName(const MCSymbolWasm *Sym,
+ StringRef ImportName) override {}
};
/// This part is for null output
class WebAssemblyTargetNullStreamer final : public WebAssemblyTargetStreamer {
public:
explicit WebAssemblyTargetNullStreamer(MCStreamer &S)
: WebAssemblyTargetStreamer(S) {}
void emitLocal(ArrayRef<wasm::ValType>) override {}
void emitEndFunc() override {}
void emitFunctionType(const MCSymbolWasm *) override {}
void emitIndIdx(const MCExpr *) override {}
void emitGlobalType(const MCSymbolWasm *) override {}
void emitEventType(const MCSymbolWasm *) override {}
void emitImportModule(const MCSymbolWasm *, StringRef) override {}
+ void emitImportName(const MCSymbolWasm *, StringRef) override {}
};
} // end namespace llvm
#endif
Index: vendor/llvm/dist-release_80/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp (revision 344166)
@@ -1,335 +1,342 @@
//===-- WebAssemblyAsmPrinter.cpp - WebAssembly LLVM assembly writer ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains a printer that converts from our internal
/// representation of machine-dependent LLVM code to the WebAssembly assembly
/// language.
///
//===----------------------------------------------------------------------===//
#include "WebAssemblyAsmPrinter.h"
#include "InstPrinter/WebAssemblyInstPrinter.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "MCTargetDesc/WebAssemblyTargetStreamer.h"
#include "WebAssembly.h"
#include "WebAssemblyMCInstLower.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblyRegisterInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionWasm.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolWasm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
//===----------------------------------------------------------------------===//
// Helpers.
//===----------------------------------------------------------------------===//
MVT WebAssemblyAsmPrinter::getRegType(unsigned RegNo) const {
const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
const TargetRegisterClass *TRC = MRI->getRegClass(RegNo);
for (MVT T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64, MVT::v16i8, MVT::v8i16,
MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64})
if (TRI->isTypeLegalForClass(*TRC, T))
return T;
LLVM_DEBUG(errs() << "Unknown type for register number: " << RegNo);
llvm_unreachable("Unknown register type");
return MVT::Other;
}
std::string WebAssemblyAsmPrinter::regToString(const MachineOperand &MO) {
unsigned RegNo = MO.getReg();
assert(TargetRegisterInfo::isVirtualRegister(RegNo) &&
"Unlowered physical register encountered during assembly printing");
assert(!MFI->isVRegStackified(RegNo));
unsigned WAReg = MFI->getWAReg(RegNo);
assert(WAReg != WebAssemblyFunctionInfo::UnusedReg);
return '$' + utostr(WAReg);
}
WebAssemblyTargetStreamer *WebAssemblyAsmPrinter::getTargetStreamer() {
MCTargetStreamer *TS = OutStreamer->getTargetStreamer();
return static_cast<WebAssemblyTargetStreamer *>(TS);
}
//===----------------------------------------------------------------------===//
// WebAssemblyAsmPrinter Implementation.
//===----------------------------------------------------------------------===//
void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) {
for (auto &It : OutContext.getSymbols()) {
// Emit a .globaltype and .eventtype declaration.
auto Sym = cast<MCSymbolWasm>(It.getValue());
if (Sym->getType() == wasm::WASM_SYMBOL_TYPE_GLOBAL)
getTargetStreamer()->emitGlobalType(Sym);
else if (Sym->getType() == wasm::WASM_SYMBOL_TYPE_EVENT)
getTargetStreamer()->emitEventType(Sym);
}
for (const auto &F : M) {
// Emit function type info for all undefined functions
if (F.isDeclarationForLinker() && !F.isIntrinsic()) {
SmallVector<MVT, 4> Results;
SmallVector<MVT, 4> Params;
ComputeSignatureVTs(F.getFunctionType(), F, TM, Params, Results);
auto *Sym = cast<MCSymbolWasm>(getSymbol(&F));
Sym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
if (!Sym->getSignature()) {
auto Signature = SignatureFromMVTs(Results, Params);
Sym->setSignature(Signature.get());
addSignature(std::move(Signature));
}
// FIXME: this was originally intended for post-linking and was only used
// for imports that were only called indirectly (i.e. s2wasm could not
// infer the type from a call). With object files it applies to all
// imports, so fix the names and the tests, or rethink how import
// declarations work in asm files.
getTargetStreamer()->emitFunctionType(Sym);
if (TM.getTargetTriple().isOSBinFormatWasm() &&
F.hasFnAttribute("wasm-import-module")) {
StringRef Name =
F.getFnAttribute("wasm-import-module").getValueAsString();
- Sym->setModuleName(Name);
+ Sym->setImportModule(Name);
getTargetStreamer()->emitImportModule(Sym, Name);
+ }
+ if (TM.getTargetTriple().isOSBinFormatWasm() &&
+ F.hasFnAttribute("wasm-import-name")) {
+ StringRef Name =
+ F.getFnAttribute("wasm-import-name").getValueAsString();
+ Sym->setImportName(Name);
+ getTargetStreamer()->emitImportName(Sym, Name);
}
}
}
for (const auto &G : M.globals()) {
if (!G.hasInitializer() && G.hasExternalLinkage()) {
if (G.getValueType()->isSized()) {
uint16_t Size = M.getDataLayout().getTypeAllocSize(G.getValueType());
OutStreamer->emitELFSize(getSymbol(&G),
MCConstantExpr::create(Size, OutContext));
}
}
}
if (const NamedMDNode *Named = M.getNamedMetadata("wasm.custom_sections")) {
for (const Metadata *MD : Named->operands()) {
const MDTuple *Tuple = dyn_cast<MDTuple>(MD);
if (!Tuple || Tuple->getNumOperands() != 2)
continue;
const MDString *Name = dyn_cast<MDString>(Tuple->getOperand(0));
const MDString *Contents = dyn_cast<MDString>(Tuple->getOperand(1));
if (!Name || !Contents)
continue;
OutStreamer->PushSection();
std::string SectionName = (".custom_section." + Name->getString()).str();
MCSectionWasm *mySection =
OutContext.getWasmSection(SectionName, SectionKind::getMetadata());
OutStreamer->SwitchSection(mySection);
OutStreamer->EmitBytes(Contents->getString());
OutStreamer->PopSection();
}
}
}
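// Illustrative note: assuming a wasm object-format triple, a declaration
// carrying both import attributes, e.g.
//   declare i32 @foo(i32) "wasm-import-module"="env" "wasm-import-name"="bar"
// is announced above via the target streamer as:
//   .functype       foo (i32) -> (i32)
//   .import_module  foo, env
//   .import_name    foo, bar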
void WebAssemblyAsmPrinter::EmitConstantPool() {
assert(MF->getConstantPool()->getConstants().empty() &&
"WebAssembly disables constant pools");
}
void WebAssemblyAsmPrinter::EmitJumpTableInfo() {
// Nothing to do; jump tables are incorporated into the instruction stream.
}
void WebAssemblyAsmPrinter::EmitFunctionBodyStart() {
const Function &F = MF->getFunction();
SmallVector<MVT, 1> ResultVTs;
SmallVector<MVT, 4> ParamVTs;
ComputeSignatureVTs(F.getFunctionType(), F, TM, ParamVTs, ResultVTs);
auto Signature = SignatureFromMVTs(ResultVTs, ParamVTs);
auto *WasmSym = cast<MCSymbolWasm>(CurrentFnSym);
WasmSym->setSignature(Signature.get());
addSignature(std::move(Signature));
WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
// FIXME: clean up how params and results are emitted (use signatures)
getTargetStreamer()->emitFunctionType(WasmSym);
// Emit the function index.
if (MDNode *Idx = F.getMetadata("wasm.index")) {
assert(Idx->getNumOperands() == 1);
getTargetStreamer()->emitIndIdx(AsmPrinter::lowerConstant(
cast<ConstantAsMetadata>(Idx->getOperand(0))->getValue()));
}
SmallVector<wasm::ValType, 16> Locals;
ValTypesFromMVTs(MFI->getLocals(), Locals);
getTargetStreamer()->emitLocal(Locals);
AsmPrinter::EmitFunctionBodyStart();
}
void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) {
LLVM_DEBUG(dbgs() << "EmitInstruction: " << *MI << '\n');
switch (MI->getOpcode()) {
case WebAssembly::ARGUMENT_i32:
case WebAssembly::ARGUMENT_i32_S:
case WebAssembly::ARGUMENT_i64:
case WebAssembly::ARGUMENT_i64_S:
case WebAssembly::ARGUMENT_f32:
case WebAssembly::ARGUMENT_f32_S:
case WebAssembly::ARGUMENT_f64:
case WebAssembly::ARGUMENT_f64_S:
case WebAssembly::ARGUMENT_v16i8:
case WebAssembly::ARGUMENT_v16i8_S:
case WebAssembly::ARGUMENT_v8i16:
case WebAssembly::ARGUMENT_v8i16_S:
case WebAssembly::ARGUMENT_v4i32:
case WebAssembly::ARGUMENT_v4i32_S:
case WebAssembly::ARGUMENT_v2i64:
case WebAssembly::ARGUMENT_v2i64_S:
case WebAssembly::ARGUMENT_v4f32:
case WebAssembly::ARGUMENT_v4f32_S:
case WebAssembly::ARGUMENT_v2f64:
case WebAssembly::ARGUMENT_v2f64_S:
// These represent values which are live into the function entry, so there's
// no instruction to emit.
break;
case WebAssembly::FALLTHROUGH_RETURN_I32:
case WebAssembly::FALLTHROUGH_RETURN_I32_S:
case WebAssembly::FALLTHROUGH_RETURN_I64:
case WebAssembly::FALLTHROUGH_RETURN_I64_S:
case WebAssembly::FALLTHROUGH_RETURN_F32:
case WebAssembly::FALLTHROUGH_RETURN_F32_S:
case WebAssembly::FALLTHROUGH_RETURN_F64:
case WebAssembly::FALLTHROUGH_RETURN_F64_S:
case WebAssembly::FALLTHROUGH_RETURN_v16i8:
case WebAssembly::FALLTHROUGH_RETURN_v16i8_S:
case WebAssembly::FALLTHROUGH_RETURN_v8i16:
case WebAssembly::FALLTHROUGH_RETURN_v8i16_S:
case WebAssembly::FALLTHROUGH_RETURN_v4i32:
case WebAssembly::FALLTHROUGH_RETURN_v4i32_S:
case WebAssembly::FALLTHROUGH_RETURN_v2i64:
case WebAssembly::FALLTHROUGH_RETURN_v2i64_S:
case WebAssembly::FALLTHROUGH_RETURN_v4f32:
case WebAssembly::FALLTHROUGH_RETURN_v4f32_S:
case WebAssembly::FALLTHROUGH_RETURN_v2f64:
case WebAssembly::FALLTHROUGH_RETURN_v2f64_S: {
// These instructions represent the implicit return at the end of a
// function body. Always pops one value off the stack.
if (isVerbose()) {
OutStreamer->AddComment("fallthrough-return-value");
OutStreamer->AddBlankLine();
}
break;
}
case WebAssembly::FALLTHROUGH_RETURN_VOID:
case WebAssembly::FALLTHROUGH_RETURN_VOID_S:
// This instruction represents the implicit return at the end of a
// function body with no return value.
if (isVerbose()) {
OutStreamer->AddComment("fallthrough-return-void");
OutStreamer->AddBlankLine();
}
break;
default: {
WebAssemblyMCInstLower MCInstLowering(OutContext, *this);
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
EmitToStreamer(*OutStreamer, TmpInst);
break;
}
}
}
const MCExpr *WebAssemblyAsmPrinter::lowerConstant(const Constant *CV) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
if (GV->getValueType()->isFunctionTy()) {
return MCSymbolRefExpr::create(
getSymbol(GV), MCSymbolRefExpr::VK_WebAssembly_FUNCTION, OutContext);
}
return AsmPrinter::lowerConstant(CV);
}
bool WebAssemblyAsmPrinter::PrintAsmOperand(const MachineInstr *MI,
unsigned OpNo, unsigned AsmVariant,
const char *ExtraCode,
raw_ostream &OS) {
if (AsmVariant != 0)
report_fatal_error("There are no defined alternate asm variants");
// First try the generic code, which knows about modifiers like 'c' and 'n'.
if (!AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, OS))
return false;
if (!ExtraCode) {
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
case MachineOperand::MO_Immediate:
OS << MO.getImm();
return false;
case MachineOperand::MO_Register:
// FIXME: only opcode that still contains registers, as required by
// MachineInstr::getDebugVariable().
assert(MI->getOpcode() == WebAssembly::INLINEASM);
OS << regToString(MO);
return false;
case MachineOperand::MO_GlobalAddress:
getSymbol(MO.getGlobal())->print(OS, MAI);
printOffset(MO.getOffset(), OS);
return false;
case MachineOperand::MO_ExternalSymbol:
GetExternalSymbolSymbol(MO.getSymbolName())->print(OS, MAI);
printOffset(MO.getOffset(), OS);
return false;
case MachineOperand::MO_MachineBasicBlock:
MO.getMBB()->getSymbol()->print(OS, MAI);
return false;
default:
break;
}
}
return true;
}
bool WebAssemblyAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
unsigned OpNo,
unsigned AsmVariant,
const char *ExtraCode,
raw_ostream &OS) {
if (AsmVariant != 0)
report_fatal_error("There are no defined alternate asm variants");
// The current approach to inline asm is that "r" constraints are expressed
// as local indices, rather than values on the operand stack. This simplifies
// using "r" as it eliminates the need to push and pop the values in a
// particular order, however it also makes it impossible to have an "m"
// constraint. So we don't support it.
return AsmPrinter::PrintAsmMemoryOperand(MI, OpNo, AsmVariant, ExtraCode, OS);
}
// Force static initialization.
extern "C" void LLVMInitializeWebAssemblyAsmPrinter() {
RegisterAsmPrinter<WebAssemblyAsmPrinter> X(getTheWebAssemblyTarget32());
RegisterAsmPrinter<WebAssemblyAsmPrinter> Y(getTheWebAssemblyTarget64());
}
Index: vendor/llvm/dist-release_80/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp (revision 344166)
@@ -1,310 +1,322 @@
//===-- WebAssemblyFixFunctionBitcasts.cpp - Fix function bitcasts --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Fix bitcasted functions.
///
/// WebAssembly requires caller and callee signatures to match, however in LLVM,
/// some amount of slop is vaguely permitted. Detect mismatch by looking for
/// bitcasts of functions and rewrite them to use wrapper functions instead.
///
/// This doesn't catch all cases, such as when a function's address is taken in
/// one place and casted in another, but it works for many common cases.
///
/// Note that LLVM already optimizes away function bitcasts in common cases by
/// dropping arguments as needed, so this pass only ends up getting used in less
/// common cases.
///
//===----------------------------------------------------------------------===//
#include "WebAssembly.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "wasm-fix-function-bitcasts"
-static cl::opt<bool>
- TemporaryWorkarounds("wasm-temporary-workarounds",
- cl::desc("Apply certain temporary workarounds"),
- cl::init(true), cl::Hidden);
-
namespace {
class FixFunctionBitcasts final : public ModulePass {
StringRef getPassName() const override {
return "WebAssembly Fix Function Bitcasts";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
ModulePass::getAnalysisUsage(AU);
}
bool runOnModule(Module &M) override;
public:
static char ID;
FixFunctionBitcasts() : ModulePass(ID) {}
};
} // End anonymous namespace
char FixFunctionBitcasts::ID = 0;
INITIALIZE_PASS(FixFunctionBitcasts, DEBUG_TYPE,
"Fix mismatching bitcasts for WebAssembly", false, false)
ModulePass *llvm::createWebAssemblyFixFunctionBitcasts() {
return new FixFunctionBitcasts();
}
// Recursively descend the def-use lists from V to find non-bitcast users of
// bitcasts of V.
static void FindUses(Value *V, Function &F,
SmallVectorImpl<std::pair<Use *, Function *>> &Uses,
SmallPtrSetImpl<Constant *> &ConstantBCs) {
for (Use &U : V->uses()) {
if (BitCastOperator *BC = dyn_cast<BitCastOperator>(U.getUser()))
FindUses(BC, F, Uses, ConstantBCs);
else if (U.get()->getType() != F.getType()) {
CallSite CS(U.getUser());
if (!CS)
// Skip uses that aren't immediately called
continue;
Value *Callee = CS.getCalledValue();
if (Callee != V)
// Skip calls where the function isn't the callee
continue;
if (isa<Constant>(U.get())) {
// Only add constant bitcasts to the list once; they get RAUW'd
auto c = ConstantBCs.insert(cast<Constant>(U.get()));
if (!c.second)
continue;
}
Uses.push_back(std::make_pair(&U, &F));
}
}
}
// Create a wrapper function with type Ty that calls F (which may have a
// different type). Attempt to support common bitcasted function idioms:
// - Call with more arguments than needed: arguments are dropped
// - Call with fewer arguments than needed: arguments are filled in with undef
// - Return value is not needed: drop it
// - Return value needed but not present: supply an undef
//
// If all the argument types are trivially castable to one another (i.e.
// I32 vs pointer type) then we don't create a wrapper at all (return nullptr
// instead).
//
// If there is a type mismatch that we know would result in an invalid wasm
// module then generate a wrapper that contains unreachable (i.e. abort at
// runtime). Such programs are deep into undefined behaviour territory,
// but we choose to fail at runtime rather than generate an invalid module
// or fail at compile time. The reason we delay the error is that we want
// to support CMake, which expects to be able to compile and link programs
// that refer to functions with entirely incorrect signatures (this is how
// CMake detects the existence of a function in a toolchain).
//
// For bitcasts that involve struct types we don't know at this stage if they
// would be equivalent at the wasm level and so we can't know if we need to
// generate a wrapper.
static Function *CreateWrapper(Function *F, FunctionType *Ty) {
Module *M = F->getParent();
Function *Wrapper = Function::Create(Ty, Function::PrivateLinkage,
F->getName() + "_bitcast", M);
BasicBlock *BB = BasicBlock::Create(M->getContext(), "body", Wrapper);
const DataLayout &DL = BB->getModule()->getDataLayout();
// Determine what arguments to pass.
SmallVector<Value *, 4> Args;
Function::arg_iterator AI = Wrapper->arg_begin();
Function::arg_iterator AE = Wrapper->arg_end();
FunctionType::param_iterator PI = F->getFunctionType()->param_begin();
FunctionType::param_iterator PE = F->getFunctionType()->param_end();
bool TypeMismatch = false;
bool WrapperNeeded = false;
Type *ExpectedRtnType = F->getFunctionType()->getReturnType();
Type *RtnType = Ty->getReturnType();
if ((F->getFunctionType()->getNumParams() != Ty->getNumParams()) ||
(F->getFunctionType()->isVarArg() != Ty->isVarArg()) ||
(ExpectedRtnType != RtnType))
WrapperNeeded = true;
for (; AI != AE && PI != PE; ++AI, ++PI) {
Type *ArgType = AI->getType();
Type *ParamType = *PI;
if (ArgType == ParamType) {
Args.push_back(&*AI);
} else {
if (CastInst::isBitOrNoopPointerCastable(ArgType, ParamType, DL)) {
Instruction *PtrCast =
CastInst::CreateBitOrPointerCast(AI, ParamType, "cast");
BB->getInstList().push_back(PtrCast);
Args.push_back(PtrCast);
} else if (ArgType->isStructTy() || ParamType->isStructTy()) {
LLVM_DEBUG(dbgs() << "CreateWrapper: struct param type in bitcast: "
<< F->getName() << "\n");
WrapperNeeded = false;
} else {
LLVM_DEBUG(dbgs() << "CreateWrapper: arg type mismatch calling: "
<< F->getName() << "\n");
LLVM_DEBUG(dbgs() << "Arg[" << Args.size() << "] Expected: "
<< *ParamType << " Got: " << *ArgType << "\n");
TypeMismatch = true;
break;
}
}
}
if (WrapperNeeded && !TypeMismatch) {
for (; PI != PE; ++PI)
Args.push_back(UndefValue::get(*PI));
if (F->isVarArg())
for (; AI != AE; ++AI)
Args.push_back(&*AI);
CallInst *Call = CallInst::Create(F, Args, "", BB);
Type *ExpectedRtnType = F->getFunctionType()->getReturnType();
Type *RtnType = Ty->getReturnType();
// Determine what value to return.
if (RtnType->isVoidTy()) {
ReturnInst::Create(M->getContext(), BB);
} else if (ExpectedRtnType->isVoidTy()) {
LLVM_DEBUG(dbgs() << "Creating dummy return: " << *RtnType << "\n");
ReturnInst::Create(M->getContext(), UndefValue::get(RtnType), BB);
} else if (RtnType == ExpectedRtnType) {
ReturnInst::Create(M->getContext(), Call, BB);
} else if (CastInst::isBitOrNoopPointerCastable(ExpectedRtnType, RtnType,
DL)) {
Instruction *Cast =
CastInst::CreateBitOrPointerCast(Call, RtnType, "cast");
BB->getInstList().push_back(Cast);
ReturnInst::Create(M->getContext(), Cast, BB);
} else if (RtnType->isStructTy() || ExpectedRtnType->isStructTy()) {
LLVM_DEBUG(dbgs() << "CreateWrapper: struct return type in bitcast: "
<< F->getName() << "\n");
WrapperNeeded = false;
} else {
LLVM_DEBUG(dbgs() << "CreateWrapper: return type mismatch calling: "
<< F->getName() << "\n");
LLVM_DEBUG(dbgs() << "Expected: " << *ExpectedRtnType
<< " Got: " << *RtnType << "\n");
TypeMismatch = true;
}
}
if (TypeMismatch) {
// Create a new wrapper that simply contains `unreachable`.
Wrapper->eraseFromParent();
Wrapper = Function::Create(Ty, Function::PrivateLinkage,
F->getName() + "_bitcast_invalid", M);
BasicBlock *BB = BasicBlock::Create(M->getContext(), "body", Wrapper);
new UnreachableInst(M->getContext(), BB);
Wrapper->setName(F->getName() + "_bitcast_invalid");
} else if (!WrapperNeeded) {
LLVM_DEBUG(dbgs() << "CreateWrapper: no wrapper needed: " << F->getName()
<< "\n");
Wrapper->eraseFromParent();
return nullptr;
}
LLVM_DEBUG(dbgs() << "CreateWrapper: " << F->getName() << "\n");
return Wrapper;
}
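// Illustrative note: for a call through a bitcast such as
//   declare i32 @f(i32)
//   call void bitcast (i32 (i32)* @f to void (i32, i32)*)(i32 1, i32 2)
// CreateWrapper produces a private void(i32, i32) "@f_bitcast" that calls @f
// with the surplus argument dropped and the unused i32 result discarded. A
// mismatch that a bitcast cannot bridge (say an i64 argument where an i32 is
// expected) instead produces "@f_bitcast_invalid", whose body is a single
// unreachable, while struct-typed mismatches get no wrapper at all.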
+// Test whether a main function with type FuncTy should be rewritten to have
+// type MainTy.
+bool shouldFixMainFunction(FunctionType *FuncTy, FunctionType *MainTy) {
+ // Only fix the main function if it's the standard zero-arg form. That way,
+ // the standard cases will work as expected, and users will see signature
+ // mismatches from the linker for non-standard cases.
+ return FuncTy->getReturnType() == MainTy->getReturnType() &&
+ FuncTy->getNumParams() == 0 &&
+ !FuncTy->isVarArg();
+}
+
bool FixFunctionBitcasts::runOnModule(Module &M) {
LLVM_DEBUG(dbgs() << "********** Fix Function Bitcasts **********\n");
Function *Main = nullptr;
CallInst *CallMain = nullptr;
SmallVector<std::pair<Use *, Function *>, 0> Uses;
SmallPtrSet<Constant *, 2> ConstantBCs;
// Collect all the places that need wrappers.
for (Function &F : M) {
FindUses(&F, F, Uses, ConstantBCs);
// If we have a "main" function, and its type isn't
// "int main(int argc, char *argv[])", create an artificial call with it
// bitcasted to that type so that we generate a wrapper for it, so that
// the C runtime can call it.
- if (!TemporaryWorkarounds && !F.isDeclaration() && F.getName() == "main") {
+ if (F.getName() == "main") {
Main = &F;
LLVMContext &C = M.getContext();
Type *MainArgTys[] = {Type::getInt32Ty(C),
PointerType::get(Type::getInt8PtrTy(C), 0)};
FunctionType *MainTy = FunctionType::get(Type::getInt32Ty(C), MainArgTys,
/*isVarArg=*/false);
- if (F.getFunctionType() != MainTy) {
+ if (shouldFixMainFunction(F.getFunctionType(), MainTy)) {
LLVM_DEBUG(dbgs() << "Found `main` function with incorrect type: "
<< *F.getFunctionType() << "\n");
Value *Args[] = {UndefValue::get(MainArgTys[0]),
UndefValue::get(MainArgTys[1])};
Value *Casted =
ConstantExpr::getBitCast(Main, PointerType::get(MainTy, 0));
CallMain = CallInst::Create(Casted, Args, "call_main");
Use *UseMain = &CallMain->getOperandUse(2);
Uses.push_back(std::make_pair(UseMain, &F));
}
}
}
DenseMap<std::pair<Function *, FunctionType *>, Function *> Wrappers;
for (auto &UseFunc : Uses) {
Use *U = UseFunc.first;
Function *F = UseFunc.second;
PointerType *PTy = cast<PointerType>(U->get()->getType());
FunctionType *Ty = dyn_cast<FunctionType>(PTy->getElementType());
// If the function is casted to something like i8* as a "generic pointer"
// to be later casted to something else, we can't generate a wrapper for it.
// Just ignore such casts for now.
if (!Ty)
continue;
auto Pair = Wrappers.insert(std::make_pair(std::make_pair(F, Ty), nullptr));
if (Pair.second)
Pair.first->second = CreateWrapper(F, Ty);
Function *Wrapper = Pair.first->second;
if (!Wrapper)
continue;
if (isa<Constant>(U->get()))
U->get()->replaceAllUsesWith(Wrapper);
else
U->set(Wrapper);
}
// If we created a wrapper for main, rename the wrapper so that it's the
// one that gets called from startup.
if (CallMain) {
Main->setName("__original_main");
Function *MainWrapper =
cast<Function>(CallMain->getCalledValue()->stripPointerCasts());
- MainWrapper->setName("main");
- MainWrapper->setLinkage(Main->getLinkage());
- MainWrapper->setVisibility(Main->getVisibility());
- Main->setLinkage(Function::PrivateLinkage);
- Main->setVisibility(Function::DefaultVisibility);
delete CallMain;
+ if (Main->isDeclaration()) {
+ // The wrapper is not needed in this case as we don't need to export
+ // it to anyone else.
+ MainWrapper->eraseFromParent();
+ } else {
+ // Otherwise give the wrapper the same linkage as the original main
+ // function, so that it can be called from the same places.
+ MainWrapper->setName("main");
+ MainWrapper->setLinkage(Main->getLinkage());
+ MainWrapper->setVisibility(Main->getVisibility());
+ }
}
return true;
}
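// Illustrative note on the `main` handling above: a defined zero-argument
// main such as
//   int main(void) { ... }
// is renamed to __original_main and gains a generated wrapper equivalent to
//   int main(int argc, char **argv) { return __original_main(); }
// so that C startup code can always call the two-argument form; non-standard
// signatures are left alone and surface as linker signature mismatches.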
Index: vendor/llvm/dist-release_80/lib/Target/X86/AsmParser/X86AsmParser.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/AsmParser/X86AsmParser.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/AsmParser/X86AsmParser.cpp (revision 344166)
@@ -1,3487 +1,3486 @@
//===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "InstPrinter/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86MCExpr.h"
#include "MCTargetDesc/X86TargetStreamer.h"
#include "X86AsmInstrumentation.h"
#include "X86AsmParserCommon.h"
#include "X86Operand.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <memory>
using namespace llvm;
static bool checkScale(unsigned Scale, StringRef &ErrMsg) {
if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
return true;
}
return false;
}
namespace {
static const char OpPrecedence[] = {
0, // IC_OR
1, // IC_XOR
2, // IC_AND
3, // IC_LSHIFT
3, // IC_RSHIFT
4, // IC_PLUS
4, // IC_MINUS
5, // IC_MULTIPLY
5, // IC_DIVIDE
5, // IC_MOD
6, // IC_NOT
7, // IC_NEG
8, // IC_RPAREN
9, // IC_LPAREN
0, // IC_IMM
0 // IC_REGISTER
};
class X86AsmParser : public MCTargetAsmParser {
ParseInstructionInfo *InstInfo;
std::unique_ptr<X86AsmInstrumentation> Instrumentation;
bool Code16GCC;
private:
SMLoc consumeToken() {
MCAsmParser &Parser = getParser();
SMLoc Result = Parser.getTok().getLoc();
Parser.Lex();
return Result;
}
X86TargetStreamer &getTargetStreamer() {
assert(getParser().getStreamer().getTargetStreamer() &&
"do not have a target streamer");
MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
return static_cast<X86TargetStreamer &>(TS);
}
unsigned MatchInstruction(const OperandVector &Operands, MCInst &Inst,
uint64_t &ErrorInfo, bool matchingInlineAsm,
unsigned VariantID = 0) {
// In Code16GCC mode, match as 32-bit.
if (Code16GCC)
SwitchMode(X86::Mode32Bit);
unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo,
matchingInlineAsm, VariantID);
if (Code16GCC)
SwitchMode(X86::Mode16Bit);
return rv;
}
enum InfixCalculatorTok {
IC_OR = 0,
IC_XOR,
IC_AND,
IC_LSHIFT,
IC_RSHIFT,
IC_PLUS,
IC_MINUS,
IC_MULTIPLY,
IC_DIVIDE,
IC_MOD,
IC_NOT,
IC_NEG,
IC_RPAREN,
IC_LPAREN,
IC_IMM,
IC_REGISTER
};
enum IntelOperatorKind {
IOK_INVALID = 0,
IOK_LENGTH,
IOK_SIZE,
IOK_TYPE,
IOK_OFFSET
};
class InfixCalculator {
typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
SmallVector<InfixCalculatorTok, 4> InfixOperatorStack;
SmallVector<ICToken, 4> PostfixStack;
bool isUnaryOperator(const InfixCalculatorTok Op) {
return Op == IC_NEG || Op == IC_NOT;
}
public:
int64_t popOperand() {
assert (!PostfixStack.empty() && "Popped an empty stack!");
ICToken Op = PostfixStack.pop_back_val();
if (!(Op.first == IC_IMM || Op.first == IC_REGISTER))
return -1; // The invalid Scale value will be caught later by checkScale
return Op.second;
}
void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) {
assert ((Op == IC_IMM || Op == IC_REGISTER) &&
"Unexpected operand!");
PostfixStack.push_back(std::make_pair(Op, Val));
}
void popOperator() { InfixOperatorStack.pop_back(); }
void pushOperator(InfixCalculatorTok Op) {
// Push the new operator if the stack is empty.
if (InfixOperatorStack.empty()) {
InfixOperatorStack.push_back(Op);
return;
}
// Push the new operator if it has a higher precedence than the operator
// on the top of the stack or the operator on the top of the stack is a
// left parentheses.
unsigned Idx = InfixOperatorStack.size() - 1;
InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {
InfixOperatorStack.push_back(Op);
return;
}
// The operator on the top of the stack has higher precedence than the
// new operator.
unsigned ParenCount = 0;
while (1) {
// Nothing to process.
if (InfixOperatorStack.empty())
break;
Idx = InfixOperatorStack.size() - 1;
StackOp = InfixOperatorStack[Idx];
if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
break;
// If we have an even parentheses count and we see a left parentheses,
// then stop processing.
if (!ParenCount && StackOp == IC_LPAREN)
break;
if (StackOp == IC_RPAREN) {
++ParenCount;
InfixOperatorStack.pop_back();
} else if (StackOp == IC_LPAREN) {
--ParenCount;
InfixOperatorStack.pop_back();
} else {
InfixOperatorStack.pop_back();
PostfixStack.push_back(std::make_pair(StackOp, 0));
}
}
// Push the new operator.
InfixOperatorStack.push_back(Op);
}
int64_t execute() {
// Push any remaining operators onto the postfix stack.
while (!InfixOperatorStack.empty()) {
InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val();
if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
PostfixStack.push_back(std::make_pair(StackOp, 0));
}
if (PostfixStack.empty())
return 0;
SmallVector<ICToken, 16> OperandStack;
for (unsigned i = 0, e = PostfixStack.size(); i != e; ++i) {
ICToken Op = PostfixStack[i];
if (Op.first == IC_IMM || Op.first == IC_REGISTER) {
OperandStack.push_back(Op);
} else if (isUnaryOperator(Op.first)) {
assert (OperandStack.size() > 0 && "Too few operands.");
ICToken Operand = OperandStack.pop_back_val();
assert (Operand.first == IC_IMM &&
"Unary operation with a register!");
switch (Op.first) {
default:
report_fatal_error("Unexpected operator!");
break;
case IC_NEG:
OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
break;
case IC_NOT:
OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
break;
}
} else {
assert (OperandStack.size() > 1 && "Too few operands.");
int64_t Val;
ICToken Op2 = OperandStack.pop_back_val();
ICToken Op1 = OperandStack.pop_back_val();
switch (Op.first) {
default:
report_fatal_error("Unexpected operator!");
break;
case IC_PLUS:
Val = Op1.second + Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_MINUS:
Val = Op1.second - Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_MULTIPLY:
assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
"Multiply operation with an immediate and a register!");
Val = Op1.second * Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_DIVIDE:
assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
"Divide operation with an immediate and a register!");
assert (Op2.second != 0 && "Division by zero!");
Val = Op1.second / Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_MOD:
assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
"Modulo operation with an immediate and a register!");
Val = Op1.second % Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_OR:
assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
"Or operation with an immediate and a register!");
Val = Op1.second | Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_XOR:
assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
"Xor operation with an immediate and a register!");
Val = Op1.second ^ Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_AND:
assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
"And operation with an immediate and a register!");
Val = Op1.second & Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_LSHIFT:
assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
"Left shift operation with an immediate and a register!");
Val = Op1.second << Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
case IC_RSHIFT:
assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
"Right shift operation with an immediate and a register!");
Val = Op1.second >> Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
}
}
}
assert (OperandStack.size() == 1 && "Expected a single result.");
return OperandStack.pop_back_val().second;
}
};
enum IntelExprState {
IES_INIT,
IES_OR,
IES_XOR,
IES_AND,
IES_LSHIFT,
IES_RSHIFT,
IES_PLUS,
IES_MINUS,
IES_NOT,
IES_MULTIPLY,
IES_DIVIDE,
IES_MOD,
IES_LBRAC,
IES_RBRAC,
IES_LPAREN,
IES_RPAREN,
IES_REGISTER,
IES_INTEGER,
IES_IDENTIFIER,
IES_ERROR
};
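// State machine driven by the Intel-syntax expression parser. It accumulates
// the base register, index register, scale and displacement pieces of a
// memory operand (or a plain immediate) as individual tokens are consumed.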
class IntelExprStateMachine {
IntelExprState State, PrevState;
unsigned BaseReg, IndexReg, TmpReg, Scale;
int64_t Imm;
const MCExpr *Sym;
StringRef SymName;
InfixCalculator IC;
InlineAsmIdentifierInfo Info;
short BracCount;
bool MemExpr;
public:
IntelExprStateMachine()
: State(IES_INIT), PrevState(IES_ERROR), BaseReg(0), IndexReg(0),
TmpReg(0), Scale(0), Imm(0), Sym(nullptr), BracCount(0),
MemExpr(false) {}
void addImm(int64_t imm) { Imm += imm; }
short getBracCount() { return BracCount; }
bool isMemExpr() { return MemExpr; }
unsigned getBaseReg() { return BaseReg; }
unsigned getIndexReg() { return IndexReg; }
unsigned getScale() { return Scale; }
const MCExpr *getSym() { return Sym; }
StringRef getSymName() { return SymName; }
int64_t getImm() { return Imm + IC.execute(); }
bool isValidEndState() {
return State == IES_RBRAC || State == IES_INTEGER;
}
bool hadError() { return State == IES_ERROR; }
InlineAsmIdentifierInfo &getIdentifierInfo() { return Info; }
void onOr() {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_RPAREN:
case IES_REGISTER:
State = IES_OR;
IC.pushOperator(IC_OR);
break;
}
PrevState = CurrState;
}
void onXor() {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_RPAREN:
case IES_REGISTER:
State = IES_XOR;
IC.pushOperator(IC_XOR);
break;
}
PrevState = CurrState;
}
void onAnd() {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_RPAREN:
case IES_REGISTER:
State = IES_AND;
IC.pushOperator(IC_AND);
break;
}
PrevState = CurrState;
}
void onLShift() {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_RPAREN:
case IES_REGISTER:
State = IES_LSHIFT;
IC.pushOperator(IC_LSHIFT);
break;
}
PrevState = CurrState;
}
void onRShift() {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_RPAREN:
case IES_REGISTER:
State = IES_RSHIFT;
IC.pushOperator(IC_RSHIFT);
break;
}
PrevState = CurrState;
}
bool onPlus(StringRef &ErrMsg) {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_RPAREN:
case IES_REGISTER:
State = IES_PLUS;
IC.pushOperator(IC_PLUS);
if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
// If we already have a BaseReg, then assume this is the IndexReg with
// no explicit scale.
if (!BaseReg) {
BaseReg = TmpReg;
} else {
if (IndexReg) {
ErrMsg = "BaseReg/IndexReg already set!";
return true;
}
IndexReg = TmpReg;
Scale = 0;
}
}
break;
}
PrevState = CurrState;
return false;
}
bool onMinus(StringRef &ErrMsg) {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_OR:
case IES_XOR:
case IES_AND:
case IES_LSHIFT:
case IES_RSHIFT:
case IES_PLUS:
case IES_NOT:
case IES_MULTIPLY:
case IES_DIVIDE:
case IES_MOD:
case IES_LPAREN:
case IES_RPAREN:
case IES_LBRAC:
case IES_RBRAC:
case IES_INTEGER:
case IES_REGISTER:
case IES_INIT:
State = IES_MINUS;
// push minus operator if it is not a negate operator
if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
CurrState == IES_INTEGER || CurrState == IES_RBRAC)
IC.pushOperator(IC_MINUS);
else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
// We have a negate operator for Scale: it's illegal
ErrMsg = "Scale can't be negative";
return true;
} else
IC.pushOperator(IC_NEG);
if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
// If we already have a BaseReg, then assume this is the IndexReg with
// no explicit scale.
if (!BaseReg) {
BaseReg = TmpReg;
} else {
if (IndexReg) {
ErrMsg = "BaseReg/IndexReg already set!";
return true;
}
IndexReg = TmpReg;
Scale = 0;
}
}
break;
}
PrevState = CurrState;
return false;
}
void onNot() {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_OR:
case IES_XOR:
case IES_AND:
case IES_LSHIFT:
case IES_RSHIFT:
case IES_PLUS:
case IES_MINUS:
case IES_NOT:
case IES_MULTIPLY:
case IES_DIVIDE:
case IES_MOD:
case IES_LPAREN:
case IES_LBRAC:
case IES_INIT:
State = IES_NOT;
IC.pushOperator(IC_NOT);
break;
}
PrevState = CurrState;
}
bool onRegister(unsigned Reg, StringRef &ErrMsg) {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_PLUS:
case IES_LPAREN:
case IES_LBRAC:
State = IES_REGISTER;
TmpReg = Reg;
IC.pushOperand(IC_REGISTER);
break;
case IES_MULTIPLY:
// Index Register - Scale * Register
if (PrevState == IES_INTEGER) {
if (IndexReg) {
ErrMsg = "BaseReg/IndexReg already set!";
return true;
}
State = IES_REGISTER;
IndexReg = Reg;
// Get the scale and replace the 'Scale * Register' with '0'.
Scale = IC.popOperand();
if (checkScale(Scale, ErrMsg))
return true;
IC.pushOperand(IC_IMM);
IC.popOperator();
} else {
State = IES_ERROR;
}
break;
}
PrevState = CurrState;
return false;
}
bool onIdentifierExpr(const MCExpr *SymRef, StringRef SymRefName,
const InlineAsmIdentifierInfo &IDInfo,
bool ParsingInlineAsm, StringRef &ErrMsg) {
// InlineAsm: Treat an enum value as an integer
if (ParsingInlineAsm)
if (IDInfo.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
return onInteger(IDInfo.Enum.EnumVal, ErrMsg);
// Treat a symbolic constant like an integer
if (auto *CE = dyn_cast<MCConstantExpr>(SymRef))
return onInteger(CE->getValue(), ErrMsg);
PrevState = State;
bool HasSymbol = Sym != nullptr;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_PLUS:
case IES_MINUS:
case IES_NOT:
case IES_INIT:
case IES_LBRAC:
MemExpr = true;
State = IES_INTEGER;
Sym = SymRef;
SymName = SymRefName;
IC.pushOperand(IC_IMM);
if (ParsingInlineAsm)
Info = IDInfo;
break;
}
if (HasSymbol)
ErrMsg = "cannot use more than one symbol in memory operand";
return HasSymbol;
}
bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_PLUS:
case IES_MINUS:
case IES_NOT:
case IES_OR:
case IES_XOR:
case IES_AND:
case IES_LSHIFT:
case IES_RSHIFT:
case IES_DIVIDE:
case IES_MOD:
case IES_MULTIPLY:
case IES_LPAREN:
case IES_INIT:
case IES_LBRAC:
State = IES_INTEGER;
if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
// Index Register - Register * Scale
if (IndexReg) {
ErrMsg = "BaseReg/IndexReg already set!";
return true;
}
IndexReg = TmpReg;
Scale = TmpInt;
if (checkScale(Scale, ErrMsg))
return true;
// Get the scale and replace the 'Register * Scale' with '0'.
IC.popOperator();
} else {
IC.pushOperand(IC_IMM, TmpInt);
}
break;
}
PrevState = CurrState;
return false;
}
void onStar() {
PrevState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_REGISTER:
case IES_RPAREN:
State = IES_MULTIPLY;
IC.pushOperator(IC_MULTIPLY);
break;
}
}
void onDivide() {
PrevState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_RPAREN:
State = IES_DIVIDE;
IC.pushOperator(IC_DIVIDE);
break;
}
}
void onMod() {
PrevState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_RPAREN:
State = IES_MOD;
IC.pushOperator(IC_MOD);
break;
}
}
bool onLBrac() {
if (BracCount)
return true;
PrevState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_RBRAC:
case IES_INTEGER:
case IES_RPAREN:
State = IES_PLUS;
IC.pushOperator(IC_PLUS);
break;
case IES_INIT:
assert(!BracCount && "BracCount should be zero at the start of parsing");
State = IES_LBRAC;
break;
}
MemExpr = true;
BracCount++;
return false;
}
bool onRBrac() {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_REGISTER:
case IES_RPAREN:
if (BracCount-- != 1)
return true;
State = IES_RBRAC;
if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
// If we already have a BaseReg, then assume this is the IndexReg with
// no explicit scale.
if (!BaseReg) {
BaseReg = TmpReg;
} else {
assert (!IndexReg && "BaseReg/IndexReg already set!");
IndexReg = TmpReg;
Scale = 0;
}
}
break;
}
PrevState = CurrState;
return false;
}
void onLParen() {
IntelExprState CurrState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_PLUS:
case IES_MINUS:
case IES_NOT:
case IES_OR:
case IES_XOR:
case IES_AND:
case IES_LSHIFT:
case IES_RSHIFT:
case IES_MULTIPLY:
case IES_DIVIDE:
case IES_MOD:
case IES_LPAREN:
case IES_INIT:
case IES_LBRAC:
State = IES_LPAREN;
IC.pushOperator(IC_LPAREN);
break;
}
PrevState = CurrState;
}
void onRParen() {
PrevState = State;
switch (State) {
default:
State = IES_ERROR;
break;
case IES_INTEGER:
case IES_REGISTER:
case IES_RPAREN:
State = IES_RPAREN;
IC.pushOperator(IC_RPAREN);
break;
}
}
};
bool Error(SMLoc L, const Twine &Msg, SMRange Range = None,
bool MatchingInlineAsm = false) {
MCAsmParser &Parser = getParser();
if (MatchingInlineAsm) {
if (!getLexer().isAtStartOfStatement())
Parser.eatToEndOfStatement();
return false;
}
return Parser.Error(L, Msg, Range);
}
std::nullptr_t ErrorOperand(SMLoc Loc, StringRef Msg, SMRange R = SMRange()) {
Error(Loc, Msg, R);
return nullptr;
}
std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
bool IsSIReg(unsigned Reg);
unsigned GetSIDIForRegClass(unsigned RegClassID, unsigned Reg, bool IsSIReg);
void
AddDefaultSrcDestOperands(OperandVector &Operands,
std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
bool VerifyAndAdjustOperands(OperandVector &OrigOperands,
OperandVector &FinalOperands);
std::unique_ptr<X86Operand> ParseOperand();
std::unique_ptr<X86Operand> ParseATTOperand();
std::unique_ptr<X86Operand> ParseIntelOperand();
std::unique_ptr<X86Operand> ParseIntelOffsetOfOperator();
bool ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End);
unsigned IdentifyIntelInlineAsmOperator(StringRef Name);
unsigned ParseIntelInlineAsmOperator(unsigned OpKind);
std::unique_ptr<X86Operand> ParseRoundingModeOp(SMLoc Start);
bool ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM);
void RewriteIntelExpression(IntelExprStateMachine &SM, SMLoc Start,
SMLoc End);
bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
bool ParseIntelInlineAsmIdentifier(const MCExpr *&Val, StringRef &Identifier,
InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedOperand, SMLoc &End);
std::unique_ptr<X86Operand> ParseMemOperand(unsigned SegReg,
const MCExpr *&Disp,
const SMLoc &StartLoc,
SMLoc &EndLoc);
bool ParseIntelMemoryOperandSize(unsigned &Size);
std::unique_ptr<X86Operand>
CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg,
unsigned IndexReg, unsigned Scale, SMLoc Start,
SMLoc End, unsigned Size, StringRef Identifier,
const InlineAsmIdentifierInfo &Info);
bool parseDirectiveEven(SMLoc L);
bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
/// CodeView FPO data directives.
bool parseDirectiveFPOProc(SMLoc L);
bool parseDirectiveFPOSetFrame(SMLoc L);
bool parseDirectiveFPOPushReg(SMLoc L);
bool parseDirectiveFPOStackAlloc(SMLoc L);
bool parseDirectiveFPOStackAlign(SMLoc L);
bool parseDirectiveFPOEndPrologue(SMLoc L);
bool parseDirectiveFPOEndProc(SMLoc L);
bool parseDirectiveFPOData(SMLoc L);
bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
bool processInstruction(MCInst &Inst, const OperandVector &Ops);
/// Wrapper around MCStreamer::EmitInstruction(). Possibly adds
/// instrumentation around Inst.
void EmitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out);
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) override;
void MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op, OperandVector &Operands,
MCStreamer &Out, bool MatchingInlineAsm);
bool ErrorMissingFeature(SMLoc IDLoc, uint64_t ErrorInfo,
bool MatchingInlineAsm);
bool MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm);
bool MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm);
bool OmitRegisterFromClobberLists(unsigned RegNo) override;
/// Parses AVX512-specific operand primitives: masked registers ({%k<NUM>}, {z})
/// and memory broadcasting ({1to<NUM>}) primitives, updating the Operands vector
/// if required. Returns false if no parsing errors occurred, true otherwise.
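/// For example: "{%k1}", "{z}" or a broadcast suffix such as "{1to8}"
/// following a memory operand.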
bool HandleAVX512Operand(OperandVector &Operands,
const MCParsedAsmOperand &Op);
bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[X86::Mode64Bit];
}
bool is32BitMode() const {
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[X86::Mode32Bit];
}
bool is16BitMode() const {
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[X86::Mode16Bit];
}
void SwitchMode(unsigned mode) {
MCSubtargetInfo &STI = copySTI();
FeatureBitset AllModes({X86::Mode64Bit, X86::Mode32Bit, X86::Mode16Bit});
FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
uint64_t FB = ComputeAvailableFeatures(
STI.ToggleFeature(OldMode.flip(mode)));
setAvailableFeatures(FB);
assert(FeatureBitset({mode}) == (STI.getFeatureBits() & AllModes));
}
unsigned getPointerWidth() {
if (is16BitMode()) return 16;
if (is32BitMode()) return 32;
if (is64BitMode()) return 64;
llvm_unreachable("invalid mode");
}
bool isParsingIntelSyntax() {
return getParser().getAssemblerDialect();
}
/// @name Auto-generated Matcher Functions
/// {
#define GET_ASSEMBLER_HEADER
#include "X86GenAsmMatcher.inc"
/// }
public:
X86AsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser,
const MCInstrInfo &mii, const MCTargetOptions &Options)
: MCTargetAsmParser(Options, sti, mii), InstInfo(nullptr),
Code16GCC(false) {
Parser.addAliasForDirective(".word", ".2byte");
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Instrumentation.reset(
CreateX86AsmInstrumentation(Options, Parser.getContext(), STI));
}
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
void SetFrameRegister(unsigned RegNo) override;
bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) override;
bool ParseDirective(AsmToken DirectiveID) override;
};
} // end anonymous namespace
/// @name Auto-generated Match Functions
/// {
static unsigned MatchRegisterName(StringRef Name);
/// }
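// Sanity-check the base/index/scale triple of a memory operand; returns true
// and sets ErrMsg on error, false if the combination is acceptable.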
static bool CheckBaseRegAndIndexRegAndScale(unsigned BaseReg, unsigned IndexReg,
unsigned Scale, bool Is64BitMode,
StringRef &ErrMsg) {
// If we have both a base register and an index register make sure they are
// both 64-bit or 32-bit registers.
// To support VSIB, IndexReg can be 128-bit or 256-bit registers.
if (BaseReg != 0 &&
!(BaseReg == X86::RIP || BaseReg == X86::EIP ||
X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) ||
X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) ||
X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg))) {
ErrMsg = "invalid base+index expression";
return true;
}
if (IndexReg != 0 &&
!(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg))) {
ErrMsg = "invalid base+index expression";
return true;
}
if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg != 0) ||
IndexReg == X86::EIP || IndexReg == X86::RIP ||
IndexReg == X86::ESP || IndexReg == X86::RSP) {
ErrMsg = "invalid base+index expression";
return true;
}
// Check for use of invalid 16-bit registers. Only BX/BP/SI/DI are allowed,
// and then only in non-64-bit modes.
if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
(Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
BaseReg != X86::SI && BaseReg != X86::DI))) {
ErrMsg = "invalid 16-bit base register";
return true;
}
if (BaseReg == 0 &&
X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
ErrMsg = "16-bit memory operand may not include only index register";
return true;
}
if (BaseReg != 0 && IndexReg != 0) {
if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
(X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
IndexReg == X86::EIZ)) {
ErrMsg = "base register is 64-bit, but index register is not";
return true;
}
if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
(X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
IndexReg == X86::RIZ)) {
ErrMsg = "base register is 32-bit, but index register is not";
return true;
}
if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
ErrMsg = "base register is 16-bit, but index register is not";
return true;
}
if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
(IndexReg != X86::SI && IndexReg != X86::DI)) {
ErrMsg = "invalid 16-bit base/index register combination";
return true;
}
}
}
// RIP/EIP-relative addressing is only supported in 64-bit mode.
if (!Is64BitMode && BaseReg != 0 &&
(BaseReg == X86::RIP || BaseReg == X86::EIP)) {
ErrMsg = "IP-relative addressing requires 64-bit mode";
return true;
}
return checkScale(Scale, ErrMsg);
}
bool X86AsmParser::ParseRegister(unsigned &RegNo,
SMLoc &StartLoc, SMLoc &EndLoc) {
MCAsmParser &Parser = getParser();
RegNo = 0;
const AsmToken &PercentTok = Parser.getTok();
StartLoc = PercentTok.getLoc();
// If we encounter a %, ignore it. This code handles registers with and
// without the prefix; unprefixed registers can occur in CFI directives.
if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent))
Parser.Lex(); // Eat percent token.
const AsmToken &Tok = Parser.getTok();
EndLoc = Tok.getEndLoc();
if (Tok.isNot(AsmToken::Identifier)) {
if (isParsingIntelSyntax()) return true;
return Error(StartLoc, "invalid register name",
SMRange(StartLoc, EndLoc));
}
RegNo = MatchRegisterName(Tok.getString());
// If the match failed, try the register name as lowercase.
if (RegNo == 0)
RegNo = MatchRegisterName(Tok.getString().lower());
// The "flags" register cannot be referenced directly.
// Treat it as an identifier instead.
if (isParsingInlineAsm() && isParsingIntelSyntax() && RegNo == X86::EFLAGS)
RegNo = 0;
if (!is64BitMode()) {
// FIXME: This should be done using Requires<Not64BitMode> and
// Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also
// checked.
// FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a
// REX prefix.
if (RegNo == X86::RIZ || RegNo == X86::RIP ||
X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
X86II::isX86_64NonExtLowByteReg(RegNo) ||
X86II::isX86_64ExtendedReg(RegNo)) {
StringRef RegName = Tok.getString();
Parser.Lex(); // Eat register name.
return Error(StartLoc,
"register %" + RegName + " is only available in 64-bit mode",
SMRange(StartLoc, EndLoc));
}
}
// Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
- if (RegNo == 0 && (Tok.getString() == "st" || Tok.getString() == "ST")) {
- RegNo = X86::ST0;
+ if (RegNo == X86::ST0) {
Parser.Lex(); // Eat 'st'
// Check to see if we have '(4)' after %st.
if (getLexer().isNot(AsmToken::LParen))
return false;
// Lex the paren.
getParser().Lex();
const AsmToken &IntTok = Parser.getTok();
if (IntTok.isNot(AsmToken::Integer))
return Error(IntTok.getLoc(), "expected stack index");
switch (IntTok.getIntVal()) {
case 0: RegNo = X86::ST0; break;
case 1: RegNo = X86::ST1; break;
case 2: RegNo = X86::ST2; break;
case 3: RegNo = X86::ST3; break;
case 4: RegNo = X86::ST4; break;
case 5: RegNo = X86::ST5; break;
case 6: RegNo = X86::ST6; break;
case 7: RegNo = X86::ST7; break;
default: return Error(IntTok.getLoc(), "invalid stack index");
}
if (getParser().Lex().isNot(AsmToken::RParen))
return Error(Parser.getTok().getLoc(), "expected ')'");
EndLoc = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat ')'
return false;
}
EndLoc = Parser.getTok().getEndLoc();
// If this is "db[0-15]", match it as an alias
// for dr[0-15].
if (RegNo == 0 && Tok.getString().startswith("db")) {
if (Tok.getString().size() == 3) {
switch (Tok.getString()[2]) {
case '0': RegNo = X86::DR0; break;
case '1': RegNo = X86::DR1; break;
case '2': RegNo = X86::DR2; break;
case '3': RegNo = X86::DR3; break;
case '4': RegNo = X86::DR4; break;
case '5': RegNo = X86::DR5; break;
case '6': RegNo = X86::DR6; break;
case '7': RegNo = X86::DR7; break;
case '8': RegNo = X86::DR8; break;
case '9': RegNo = X86::DR9; break;
}
} else if (Tok.getString().size() == 4 && Tok.getString()[2] == '1') {
switch (Tok.getString()[3]) {
case '0': RegNo = X86::DR10; break;
case '1': RegNo = X86::DR11; break;
case '2': RegNo = X86::DR12; break;
case '3': RegNo = X86::DR13; break;
case '4': RegNo = X86::DR14; break;
case '5': RegNo = X86::DR15; break;
}
}
if (RegNo != 0) {
EndLoc = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat it.
return false;
}
}
if (RegNo == 0) {
if (isParsingIntelSyntax()) return true;
return Error(StartLoc, "invalid register name",
SMRange(StartLoc, EndLoc));
}
Parser.Lex(); // Eat identifier token.
return false;
}
void X86AsmParser::SetFrameRegister(unsigned RegNo) {
Instrumentation->SetInitialFrameRegister(RegNo);
}
std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
bool Parse32 = is32BitMode() || Code16GCC;
unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);
const MCExpr *Disp = MCConstantExpr::create(0, getContext());
return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
/*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
Loc, Loc, 0);
}
std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
bool Parse32 = is32BitMode() || Code16GCC;
unsigned Basereg = is64BitMode() ? X86::RDI : (Parse32 ? X86::EDI : X86::DI);
const MCExpr *Disp = MCConstantExpr::create(0, getContext());
return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
/*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
Loc, Loc, 0);
}
bool X86AsmParser::IsSIReg(unsigned Reg) {
switch (Reg) {
default: llvm_unreachable("Only (R|E)SI and (R|E)DI are expected!");
case X86::RSI:
case X86::ESI:
case X86::SI:
return true;
case X86::RDI:
case X86::EDI:
case X86::DI:
return false;
}
}
unsigned X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, unsigned Reg,
bool IsSIReg) {
switch (RegClassID) {
default: llvm_unreachable("Unexpected register class");
case X86::GR64RegClassID:
return IsSIReg ? X86::RSI : X86::RDI;
case X86::GR32RegClassID:
return IsSIReg ? X86::ESI : X86::EDI;
case X86::GR16RegClassID:
return IsSIReg ? X86::SI : X86::DI;
}
}
void X86AsmParser::AddDefaultSrcDestOperands(
OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
if (isParsingIntelSyntax()) {
Operands.push_back(std::move(Dst));
Operands.push_back(std::move(Src));
}
else {
Operands.push_back(std::move(Src));
Operands.push_back(std::move(Dst));
}
}
bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
OperandVector &FinalOperands) {
if (OrigOperands.size() > 1) {
// Check that the sizes match; OrigOperands also contains the instruction name
assert(OrigOperands.size() == FinalOperands.size() + 1 &&
"Operand size mismatch");
SmallVector<std::pair<SMLoc, std::string>, 2> Warnings;
// Verify types match
int RegClassID = -1;
for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
X86Operand &OrigOp = static_cast<X86Operand &>(*OrigOperands[i + 1]);
X86Operand &FinalOp = static_cast<X86Operand &>(*FinalOperands[i]);
if (FinalOp.isReg() &&
(!OrigOp.isReg() || FinalOp.getReg() != OrigOp.getReg()))
// Return false and let a normal complaint about bogus operands happen
return false;
if (FinalOp.isMem()) {
if (!OrigOp.isMem())
// Return false and let a normal complaint about bogus operands happen
return false;
unsigned OrigReg = OrigOp.Mem.BaseReg;
unsigned FinalReg = FinalOp.Mem.BaseReg;
// If we've already encountered a register class, make sure all register
// bases are of the same register class
if (RegClassID != -1 &&
!X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
return Error(OrigOp.getStartLoc(),
"mismatching source and destination index registers");
}
if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
RegClassID = X86::GR64RegClassID;
else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
RegClassID = X86::GR32RegClassID;
else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
RegClassID = X86::GR16RegClassID;
else
// Unexpected register class type
// Return false and let a normal complaint about bogus operands happen
return false;
bool IsSI = IsSIReg(FinalReg);
FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
if (FinalReg != OrigReg) {
std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
Warnings.push_back(std::make_pair(
OrigOp.getStartLoc(),
"memory operand is only for determining the size, " + RegName +
" will be used for the location"));
}
FinalOp.Mem.Size = OrigOp.Mem.Size;
FinalOp.Mem.SegReg = OrigOp.Mem.SegReg;
FinalOp.Mem.BaseReg = FinalReg;
}
}
// Produce warnings only if all the operands passed the adjustment - this prevents
// legal cases like "movsd (%rax), %xmm0" from mistakenly producing warnings
for (auto &WarningMsg : Warnings) {
Warning(WarningMsg.first, WarningMsg.second);
}
// Remove old operands
for (unsigned int i = 0; i < FinalOperands.size(); ++i)
OrigOperands.pop_back();
}
// OrigOperands.append(FinalOperands.begin(), FinalOperands.end());
for (unsigned int i = 0; i < FinalOperands.size(); ++i)
OrigOperands.push_back(std::move(FinalOperands[i]));
return false;
}
std::unique_ptr<X86Operand> X86AsmParser::ParseOperand() {
if (isParsingIntelSyntax())
return ParseIntelOperand();
return ParseATTOperand();
}
std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg,
unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier,
const InlineAsmIdentifierInfo &Info) {
// If we found a decl other than a VarDecl, then assume it is a FuncDecl or
// some other label reference.
if (Info.isKind(InlineAsmIdentifierInfo::IK_Label)) {
// Insert an explicit size if the user didn't have one.
if (!Size) {
Size = getPointerWidth();
InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
/*Len=*/0, Size);
}
// Create an absolute memory reference in order to match against
// instructions taking a PC relative operand.
return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size,
Identifier, Info.Label.Decl);
}
// We either have a direct symbol reference, or an offset from a symbol. The
// parser always puts the symbol on the LHS, so look there for size
// calculation purposes.
unsigned FrontendSize = 0;
void *Decl = nullptr;
bool IsGlobalLV = false;
if (Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
// Size is in terms of bits in this context.
FrontendSize = Info.Var.Type * 8;
Decl = Info.Var.Decl;
IsGlobalLV = Info.Var.IsGlobalLV;
}
// It is very common for MS InlineAsm to use a global variable and one or two
// registers in a memory expression, even though it is then inaccessible via rip/eip.
if (IsGlobalLV && (BaseReg || IndexReg)) {
return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End);
// Otherwise, we set the base register to a non-zero value
// if we don't know the actual value at this time. This is necessary to
// get the matching correct in some cases.
} else {
BaseReg = BaseReg ? BaseReg : 1;
return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
IndexReg, Scale, Start, End, Size, Identifier,
Decl, FrontendSize);
}
}
// Some binary bitwise operators have a named synonym.
// Query a candidate string for being such a named operator,
// and if so, invoke the appropriate handler.
bool X86AsmParser::ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM) {
// A named operator should be either all lowercase or all uppercase, but not a mix.
if (Name.compare(Name.lower()) && Name.compare(Name.upper()))
return false;
if (Name.equals_lower("not"))
SM.onNot();
else if (Name.equals_lower("or"))
SM.onOr();
else if (Name.equals_lower("shl"))
SM.onLShift();
else if (Name.equals_lower("shr"))
SM.onRShift();
else if (Name.equals_lower("xor"))
SM.onXor();
else if (Name.equals_lower("and"))
SM.onAnd();
else if (Name.equals_lower("mod"))
SM.onMod();
else
return false;
return true;
}
bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
StringRef ErrMsg;
AsmToken::TokenKind PrevTK = AsmToken::Error;
bool Done = false;
while (!Done) {
bool UpdateLocLex = true;
AsmToken::TokenKind TK = getLexer().getKind();
switch (TK) {
default:
if ((Done = SM.isValidEndState()))
break;
return Error(Tok.getLoc(), "unknown token in expression");
case AsmToken::EndOfStatement:
Done = true;
break;
case AsmToken::Real:
// DotOperator: [ebx].0
UpdateLocLex = false;
if (ParseIntelDotOperator(SM, End))
return true;
break;
case AsmToken::At:
case AsmToken::String:
case AsmToken::Identifier: {
SMLoc IdentLoc = Tok.getLoc();
StringRef Identifier = Tok.getString();
UpdateLocLex = false;
// Register
unsigned Reg;
if (Tok.is(AsmToken::Identifier) && !ParseRegister(Reg, IdentLoc, End)) {
if (SM.onRegister(Reg, ErrMsg))
return Error(Tok.getLoc(), ErrMsg);
break;
}
// Operator synonym ("not", "or", etc.)
if ((UpdateLocLex = ParseIntelNamedOperator(Identifier, SM)))
break;
// Symbol reference, when parsing assembly content
InlineAsmIdentifierInfo Info;
const MCExpr *Val;
if (!isParsingInlineAsm()) {
if (getParser().parsePrimaryExpr(Val, End)) {
return Error(Tok.getLoc(), "Unexpected identifier!");
} else if (SM.onIdentifierExpr(Val, Identifier, Info, false, ErrMsg)) {
return Error(IdentLoc, ErrMsg);
} else
break;
}
// MS InlineAsm operators (TYPE/LENGTH/SIZE)
if (unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
if (OpKind == IOK_OFFSET)
return Error(IdentLoc, "Dealing OFFSET operator as part of"
"a compound immediate expression is yet to be supported");
if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
if (SM.onInteger(Val, ErrMsg))
return Error(IdentLoc, ErrMsg);
} else
return true;
break;
}
// MS Dot Operator expression
if (Identifier.count('.') && PrevTK == AsmToken::RBrac) {
if (ParseIntelDotOperator(SM, End))
return true;
break;
}
// MS InlineAsm identifier
// Call parseIdentifier() to combine @ with the identifier behind it.
if (TK == AsmToken::At && Parser.parseIdentifier(Identifier))
return Error(IdentLoc, "expected identifier");
if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info, false, End))
return true;
else if (SM.onIdentifierExpr(Val, Identifier, Info, true, ErrMsg))
return Error(IdentLoc, ErrMsg);
break;
}
case AsmToken::Integer: {
// Look for 'b' or 'f' following an Integer as a directional label
SMLoc Loc = getTok().getLoc();
int64_t IntVal = getTok().getIntVal();
End = consumeToken();
UpdateLocLex = false;
if (getLexer().getKind() == AsmToken::Identifier) {
StringRef IDVal = getTok().getString();
if (IDVal == "f" || IDVal == "b") {
MCSymbol *Sym =
getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
const MCExpr *Val =
MCSymbolRefExpr::create(Sym, Variant, getContext());
if (IDVal == "b" && Sym->isUndefined())
return Error(Loc, "invalid reference to undefined symbol");
StringRef Identifier = Sym->getName();
InlineAsmIdentifierInfo Info;
if (SM.onIdentifierExpr(Val, Identifier, Info,
isParsingInlineAsm(), ErrMsg))
return Error(Loc, ErrMsg);
End = consumeToken();
} else {
if (SM.onInteger(IntVal, ErrMsg))
return Error(Loc, ErrMsg);
}
} else {
if (SM.onInteger(IntVal, ErrMsg))
return Error(Loc, ErrMsg);
}
break;
}
case AsmToken::Plus:
if (SM.onPlus(ErrMsg))
return Error(getTok().getLoc(), ErrMsg);
break;
case AsmToken::Minus:
if (SM.onMinus(ErrMsg))
return Error(getTok().getLoc(), ErrMsg);
break;
case AsmToken::Tilde: SM.onNot(); break;
case AsmToken::Star: SM.onStar(); break;
case AsmToken::Slash: SM.onDivide(); break;
case AsmToken::Percent: SM.onMod(); break;
case AsmToken::Pipe: SM.onOr(); break;
case AsmToken::Caret: SM.onXor(); break;
case AsmToken::Amp: SM.onAnd(); break;
case AsmToken::LessLess:
SM.onLShift(); break;
case AsmToken::GreaterGreater:
SM.onRShift(); break;
case AsmToken::LBrac:
if (SM.onLBrac())
return Error(Tok.getLoc(), "unexpected bracket encountered");
break;
case AsmToken::RBrac:
if (SM.onRBrac())
return Error(Tok.getLoc(), "unexpected bracket encountered");
break;
case AsmToken::LParen: SM.onLParen(); break;
case AsmToken::RParen: SM.onRParen(); break;
}
if (SM.hadError())
return Error(Tok.getLoc(), "unknown token in expression");
if (!Done && UpdateLocLex)
End = consumeToken();
PrevTK = TK;
}
return false;
}
void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
SMLoc Start, SMLoc End) {
SMLoc Loc = Start;
unsigned ExprLen = End.getPointer() - Start.getPointer();
// Skip everything before a symbol displacement (if we have one)
if (SM.getSym()) {
StringRef SymName = SM.getSymName();
if (unsigned Len = SymName.data() - Start.getPointer())
InstInfo->AsmRewrites->emplace_back(AOK_Skip, Start, Len);
Loc = SMLoc::getFromPointer(SymName.data() + SymName.size());
ExprLen = End.getPointer() - (SymName.data() + SymName.size());
// If we have only a symbol then there's no need for a complex rewrite;
// simply skip everything after it
if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
if (ExprLen)
InstInfo->AsmRewrites->emplace_back(AOK_Skip, Loc, ExprLen);
return;
}
}
// Build an Intel Expression rewrite
StringRef BaseRegStr;
StringRef IndexRegStr;
if (SM.getBaseReg())
BaseRegStr = X86IntelInstPrinter::getRegisterName(SM.getBaseReg());
if (SM.getIndexReg())
IndexRegStr = X86IntelInstPrinter::getRegisterName(SM.getIndexReg());
// Emit it
IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), SM.getImm(), SM.isMemExpr());
InstInfo->AsmRewrites->emplace_back(Loc, ExprLen, Expr);
}
// Inline assembly may use variable names with namespace alias qualifiers.
bool X86AsmParser::ParseIntelInlineAsmIdentifier(const MCExpr *&Val,
StringRef &Identifier,
InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedOperand,
SMLoc &End) {
MCAsmParser &Parser = getParser();
assert(isParsingInlineAsm() && "Expected to be parsing inline assembly.");
Val = nullptr;
StringRef LineBuf(Identifier.data());
SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);
const AsmToken &Tok = Parser.getTok();
SMLoc Loc = Tok.getLoc();
// Advance the token stream until the end of the current token is
// after the end of what the frontend claimed.
const char *EndPtr = Tok.getLoc().getPointer() + LineBuf.size();
do {
End = Tok.getEndLoc();
getLexer().Lex();
} while (End.getPointer() < EndPtr);
Identifier = LineBuf;
// The frontend should end parsing on an assembler token boundary, unless it
// failed parsing.
assert((End.getPointer() == EndPtr ||
Info.isKind(InlineAsmIdentifierInfo::IK_Invalid)) &&
"frontend claimed part of a token?");
// If the identifier lookup was unsuccessful, assume that we are dealing with
// a label.
if (Info.isKind(InlineAsmIdentifierInfo::IK_Invalid)) {
StringRef InternalName =
SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
Loc, false);
assert(InternalName.size() && "We should have an internal name here.");
// Push a rewrite for replacing the identifier name with the internal name.
InstInfo->AsmRewrites->emplace_back(AOK_Label, Loc, Identifier.size(),
InternalName);
} else if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
return false;
// Create the symbol reference.
MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
Val = MCSymbolRefExpr::create(Sym, Variant, getParser().getContext());
return false;
}
// ParseRoundingModeOp - Parse an AVX-512 rounding mode operand.
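// Accepted forms are "{rn-sae}", "{rd-sae}", "{ru-sae}", "{rz-sae}" and "{sae}".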
std::unique_ptr<X86Operand>
X86AsmParser::ParseRoundingModeOp(SMLoc Start) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
// Eat "{" and mark the current place.
const SMLoc consumedToken = consumeToken();
if (Tok.getIdentifier().startswith("r")){
int rndMode = StringSwitch<int>(Tok.getIdentifier())
.Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
.Case("rd", X86::STATIC_ROUNDING::TO_NEG_INF)
.Case("ru", X86::STATIC_ROUNDING::TO_POS_INF)
.Case("rz", X86::STATIC_ROUNDING::TO_ZERO)
.Default(-1);
if (-1 == rndMode)
return ErrorOperand(Tok.getLoc(), "Invalid rounding mode.");
Parser.Lex(); // Eat "r*" of r*-sae
if (!getLexer().is(AsmToken::Minus))
return ErrorOperand(Tok.getLoc(), "Expected - at this point");
Parser.Lex(); // Eat "-"
Parser.Lex(); // Eat the sae
if (!getLexer().is(AsmToken::RCurly))
return ErrorOperand(Tok.getLoc(), "Expected } at this point");
SMLoc End = Tok.getEndLoc();
Parser.Lex(); // Eat "}"
const MCExpr *RndModeOp =
MCConstantExpr::create(rndMode, Parser.getContext());
return X86Operand::CreateImm(RndModeOp, Start, End);
}
if(Tok.getIdentifier().equals("sae")){
Parser.Lex(); // Eat the sae
if (!getLexer().is(AsmToken::RCurly))
return ErrorOperand(Tok.getLoc(), "Expected } at this point");
Parser.Lex(); // Eat "}"
return X86Operand::CreateToken("{sae}", consumedToken);
}
return ErrorOperand(Tok.getLoc(), "unknown token in expression");
}
/// Parse the '.' operator.
bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End) {
const AsmToken &Tok = getTok();
unsigned Offset;
// Drop the optional '.'.
StringRef DotDispStr = Tok.getString();
if (DotDispStr.startswith("."))
DotDispStr = DotDispStr.drop_front(1);
// .Imm gets lexed as a real.
if (Tok.is(AsmToken::Real)) {
APInt DotDisp;
DotDispStr.getAsInteger(10, DotDisp);
Offset = DotDisp.getZExtValue();
} else if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) {
std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second,
Offset))
return Error(Tok.getLoc(), "Unable to lookup field reference!");
} else
return Error(Tok.getLoc(), "Unexpected token type!");
// Eat the DotExpression and update End
End = SMLoc::getFromPointer(DotDispStr.data());
const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
while (Tok.getLoc().getPointer() < DotExprEndLoc)
Lex();
SM.addImm(Offset);
return false;
}
/// Parse the 'offset' operator. This operator is used to specify the
/// location rather than the content of a variable.
std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOffsetOfOperator() {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
SMLoc OffsetOfLoc = Tok.getLoc();
Parser.Lex(); // Eat offset.
const MCExpr *Val;
InlineAsmIdentifierInfo Info;
SMLoc Start = Tok.getLoc(), End;
StringRef Identifier = Tok.getString();
if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
/*Unevaluated=*/false, End))
return nullptr;
void *Decl = nullptr;
// FIXME: MS evaluates "offset <Constant>" to the underlying integral
if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
return ErrorOperand(Start, "offset operator cannot yet handle constants");
else if (Info.isKind(InlineAsmIdentifierInfo::IK_Var))
Decl = Info.Var.Decl;
// Don't emit the offset operator.
InstInfo->AsmRewrites->emplace_back(AOK_Skip, OffsetOfLoc, 7);
// The offset operator will have an 'r' constraint, thus we need to create
// register operand to ensure proper matching. Just pick a GPR based on
// the size of a pointer.
bool Parse32 = is32BitMode() || Code16GCC;
unsigned RegNo = is64BitMode() ? X86::RBX : (Parse32 ? X86::EBX : X86::BX);
return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true,
OffsetOfLoc, Identifier, Decl);
}
// Query a candidate string for being an Intel assembly operator.
// Report back its kind, or IOK_INVALID if it does not evaluate to a known one.
unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
return StringSwitch<unsigned>(Name)
.Cases("TYPE","type",IOK_TYPE)
.Cases("SIZE","size",IOK_SIZE)
.Cases("LENGTH","length",IOK_LENGTH)
.Cases("OFFSET","offset",IOK_OFFSET)
.Default(IOK_INVALID);
}
/// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators. The LENGTH operator
/// returns the number of elements in an array. It returns the value 1 for
/// non-array variables. The SIZE operator returns the size of a C or C++
/// variable. A variable's size is the product of its LENGTH and TYPE. The
/// TYPE operator returns the size of a C or C++ type or variable. If the
/// variable is an array, TYPE returns the size of a single element.
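/// For example, for a C declaration "int arr[10]" (assuming a 4-byte int),
/// LENGTH arr is 10, TYPE arr is 4 and SIZE arr is 40.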
unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
Parser.Lex(); // Eat operator.
const MCExpr *Val = nullptr;
InlineAsmIdentifierInfo Info;
SMLoc Start = Tok.getLoc(), End;
StringRef Identifier = Tok.getString();
if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
/*Unevaluated=*/true, End))
return 0;
if (!Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
Error(Start, "unable to lookup expression");
return 0;
}
unsigned CVal = 0;
switch(OpKind) {
default: llvm_unreachable("Unexpected operand kind!");
case IOK_LENGTH: CVal = Info.Var.Length; break;
case IOK_SIZE: CVal = Info.Var.Size; break;
case IOK_TYPE: CVal = Info.Var.Type; break;
}
return CVal;
}
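// Parse an optional Intel operand-size prefix such as "dword ptr". On success,
// Size holds the operand width in bits, or 0 if no size prefix was present.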
bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size) {
Size = StringSwitch<unsigned>(getTok().getString())
.Cases("BYTE", "byte", 8)
.Cases("WORD", "word", 16)
.Cases("DWORD", "dword", 32)
.Cases("FLOAT", "float", 32)
.Cases("LONG", "long", 32)
.Cases("FWORD", "fword", 48)
.Cases("DOUBLE", "double", 64)
.Cases("QWORD", "qword", 64)
.Cases("MMWORD","mmword", 64)
.Cases("XWORD", "xword", 80)
.Cases("TBYTE", "tbyte", 80)
.Cases("XMMWORD", "xmmword", 128)
.Cases("YMMWORD", "ymmword", 256)
.Cases("ZMMWORD", "zmmword", 512)
.Default(0);
if (Size) {
const AsmToken &Tok = Lex(); // Eat operand size (e.g., byte, word).
if (!(Tok.getString().equals("PTR") || Tok.getString().equals("ptr")))
return Error(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");
Lex(); // Eat ptr.
}
return false;
}
std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperand() {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
SMLoc Start, End;
// FIXME: Offset operator
// Should be handled as part of immediate expression, as other operators
// Currently, only supported as a stand-alone operand
if (isParsingInlineAsm())
if (IdentifyIntelInlineAsmOperator(Tok.getString()) == IOK_OFFSET)
return ParseIntelOffsetOfOperator();
// Parse optional Size directive.
unsigned Size;
if (ParseIntelMemoryOperandSize(Size))
return nullptr;
bool PtrInOperand = bool(Size);
Start = Tok.getLoc();
// Rounding mode operand.
if (getLexer().is(AsmToken::LCurly))
return ParseRoundingModeOp(Start);
// Register operand.
unsigned RegNo = 0;
if (Tok.is(AsmToken::Identifier) && !ParseRegister(RegNo, Start, End)) {
if (RegNo == X86::RIP)
return ErrorOperand(Start, "rip can only be used as a base register");
// A Register followed by ':' is considered a segment override
if (Tok.isNot(AsmToken::Colon))
return !PtrInOperand ? X86Operand::CreateReg(RegNo, Start, End) :
ErrorOperand(Start, "expected memory operand after 'ptr', "
"found register operand instead");
// An alleged segment override. Check if we have a valid segment register
if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
return ErrorOperand(Start, "invalid segment register");
// Eat ':' and update Start location
Start = Lex().getLoc();
}
// Immediates and Memory
IntelExprStateMachine SM;
if (ParseIntelExpression(SM, End))
return nullptr;
if (isParsingInlineAsm())
RewriteIntelExpression(SM, Start, Tok.getLoc());
int64_t Imm = SM.getImm();
const MCExpr *Disp = SM.getSym();
const MCExpr *ImmDisp = MCConstantExpr::create(Imm, getContext());
if (Disp && Imm)
Disp = MCBinaryExpr::createAdd(Disp, ImmDisp, getContext());
if (!Disp)
Disp = ImmDisp;
// RegNo != 0 specifies a valid segment register,
// and we are parsing a segment override
if (!SM.isMemExpr() && !RegNo)
return X86Operand::CreateImm(Disp, Start, End);
StringRef ErrMsg;
unsigned BaseReg = SM.getBaseReg();
unsigned IndexReg = SM.getIndexReg();
unsigned Scale = SM.getScale();
if (Scale == 0 && BaseReg != X86::ESP && BaseReg != X86::RSP &&
(IndexReg == X86::ESP || IndexReg == X86::RSP))
std::swap(BaseReg, IndexReg);
// If BaseReg is a vector register and IndexReg is not, swap them unless
// Scale was specified in which case it would be an error.
if (Scale == 0 &&
!(X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg)) &&
(X86MCRegisterClasses[X86::VR128XRegClassID].contains(BaseReg) ||
X86MCRegisterClasses[X86::VR256XRegClassID].contains(BaseReg) ||
X86MCRegisterClasses[X86::VR512RegClassID].contains(BaseReg)))
std::swap(BaseReg, IndexReg);
if (Scale != 0 &&
X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg))
return ErrorOperand(Start, "16-bit addresses cannot have a scale");
// If there was no explicit scale specified, change it to 1.
if (Scale == 0)
Scale = 1;
// If this is a 16-bit addressing mode with the base and index in the wrong
// order, swap them so CheckBaseRegAndIndexRegAndScale doesn't fail. It is
// shared with att syntax where order matters.
if ((BaseReg == X86::SI || BaseReg == X86::DI) &&
(IndexReg == X86::BX || IndexReg == X86::BP))
std::swap(BaseReg, IndexReg);
if ((BaseReg || IndexReg) &&
CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
ErrMsg))
return ErrorOperand(Start, ErrMsg);
if (isParsingInlineAsm())
return CreateMemForInlineAsm(RegNo, Disp, BaseReg, IndexReg,
Scale, Start, End, Size, SM.getSymName(),
SM.getIdentifierInfo());
if (!(BaseReg || IndexReg || RegNo))
return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size);
return X86Operand::CreateMem(getPointerWidth(), RegNo, Disp,
BaseReg, IndexReg, Scale, Start, End, Size);
}
std::unique_ptr<X86Operand> X86AsmParser::ParseATTOperand() {
MCAsmParser &Parser = getParser();
switch (getLexer().getKind()) {
case AsmToken::Dollar: {
// $42 or $ID -> immediate.
SMLoc Start = Parser.getTok().getLoc(), End;
Parser.Lex();
const MCExpr *Val;
// This is an immediate, so we should not parse a register. Do a precheck
// for '%' to supersede intra-register parse errors.
SMLoc L = Parser.getTok().getLoc();
if (check(getLexer().is(AsmToken::Percent), L,
"expected immediate expression") ||
getParser().parseExpression(Val, End) ||
check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
return nullptr;
return X86Operand::CreateImm(Val, Start, End);
}
case AsmToken::LCurly: {
SMLoc Start = Parser.getTok().getLoc();
return ParseRoundingModeOp(Start);
}
default: {
// This is a memory operand or a register. We have some parsing complications
// as a '(' may be part of an immediate expression or the addressing mode
// block. This is complicated by the fact that an assembler-level variable
// may refer either to a register or an immediate expression.
SMLoc Loc = Parser.getTok().getLoc(), EndLoc;
const MCExpr *Expr = nullptr;
unsigned Reg = 0;
if (getLexer().isNot(AsmToken::LParen)) {
// No '(' so this is either a displacement expression or a register.
if (Parser.parseExpression(Expr, EndLoc))
return nullptr;
if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
// Segment Register. Reset Expr and copy value to register.
Expr = nullptr;
Reg = RE->getRegNo();
// Sanity check register.
if (Reg == X86::EIZ || Reg == X86::RIZ)
return ErrorOperand(
Loc, "%eiz and %riz can only be used as index registers",
SMRange(Loc, EndLoc));
if (Reg == X86::RIP)
return ErrorOperand(Loc, "%rip can only be used as a base register",
SMRange(Loc, EndLoc));
// Return registers that are not segment prefixes immediately.
if (!Parser.parseOptionalToken(AsmToken::Colon))
return X86Operand::CreateReg(Reg, Loc, EndLoc);
if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
return ErrorOperand(Loc, "invalid segment register");
}
}
// This is a Memory operand.
return ParseMemOperand(Reg, Expr, Loc, EndLoc);
}
}
}
// Returns true on failure, false otherwise.
// If no {z} mark was found, the parser doesn't advance.
bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
const SMLoc &StartLoc) {
MCAsmParser &Parser = getParser();
// Assuming we are just past the '{' mark, query the next token.
// If no {z} mark is found, return false, as no parsing error was
// encountered
if (!(getLexer().is(AsmToken::Identifier) &&
(getLexer().getTok().getIdentifier() == "z")))
return false;
Parser.Lex(); // Eat z
// Query and eat the '}' mark
if (!getLexer().is(AsmToken::RCurly))
return Error(getLexer().getLoc(), "Expected } at this point");
Parser.Lex(); // Eat '}'
// Assign Z the {z} mark operand
Z = X86Operand::CreateToken("{z}", StartLoc);
return false;
}
// Returns true on failure, false otherwise.
bool X86AsmParser::HandleAVX512Operand(OperandVector &Operands,
const MCParsedAsmOperand &Op) {
MCAsmParser &Parser = getParser();
if (getLexer().is(AsmToken::LCurly)) {
// Eat "{" and mark the current place.
const SMLoc consumedToken = consumeToken();
// Distinguish {1to<NUM>} from {%k<NUM>}.
if(getLexer().is(AsmToken::Integer)) {
// Parse memory broadcasting ({1to<NUM>}).
if (getLexer().getTok().getIntVal() != 1)
return TokError("Expected 1to<NUM> at this point");
Parser.Lex(); // Eat "1" of 1to8
if (!getLexer().is(AsmToken::Identifier) ||
!getLexer().getTok().getIdentifier().startswith("to"))
return TokError("Expected 1to<NUM> at this point");
// Recognize only reasonable suffixes.
const char *BroadcastPrimitive =
StringSwitch<const char*>(getLexer().getTok().getIdentifier())
.Case("to2", "{1to2}")
.Case("to4", "{1to4}")
.Case("to8", "{1to8}")
.Case("to16", "{1to16}")
.Default(nullptr);
if (!BroadcastPrimitive)
return TokError("Invalid memory broadcast primitive.");
Parser.Lex(); // Eat "toN" of 1toN
if (!getLexer().is(AsmToken::RCurly))
return TokError("Expected } at this point");
Parser.Lex(); // Eat "}"
Operands.push_back(X86Operand::CreateToken(BroadcastPrimitive,
consumedToken));
// No AVX512-specific primitives can follow
// a memory broadcast, so return.
return false;
} else {
// Parse either {k}{z}, {z}{k}, {k} or {z};
// the last one has no meaning, but GCC accepts it.
// Currently, we're just past a '{' mark.
std::unique_ptr<X86Operand> Z;
if (ParseZ(Z, consumedToken))
return true;
// Reaching here means that parsing the alleged '{z}' mark yielded
// no errors.
// Check whether a {%k<NUM>} mark still needs to be parsed.
if (!Z || getLexer().is(AsmToken::LCurly)) {
SMLoc StartLoc = Z ? consumeToken() : consumedToken;
// Parse an op-mask register mark ({%k<NUM>}), which is now to be
// expected
unsigned RegNo;
SMLoc RegLoc;
if (!ParseRegister(RegNo, RegLoc, StartLoc) &&
X86MCRegisterClasses[X86::VK1RegClassID].contains(RegNo)) {
if (RegNo == X86::K0)
return Error(RegLoc, "Register k0 can't be used as write mask");
if (!getLexer().is(AsmToken::RCurly))
return Error(getLexer().getLoc(), "Expected } at this point");
Operands.push_back(X86Operand::CreateToken("{", StartLoc));
Operands.push_back(
X86Operand::CreateReg(RegNo, StartLoc, StartLoc));
Operands.push_back(X86Operand::CreateToken("}", consumeToken()));
} else
return Error(getLexer().getLoc(),
"Expected an op-mask register at this point");
// A {%k<NUM>} mark was found; check for a following {z}.
if (getLexer().is(AsmToken::LCurly) && !Z) {
// If we found a parsing error, or did not find the (expected) {z} mark,
// report an error
if (ParseZ(Z, consumeToken()) || !Z)
return Error(getLexer().getLoc(),
"Expected a {z} mark at this point");
}
// '{z}' on its own is meaningless and hence should be ignored;
// on the contrary, if it is accompanied by a K register,
// allow it.
if (Z)
Operands.push_back(std::move(Z));
}
}
}
return false;
}
/// ParseMemOperand: 'seg : disp(basereg, indexreg, scale)'. The '%ds:' prefix
/// has already been parsed if present. disp may be provided as well.
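/// For example: "%gs:8(%ebx,%esi,4)", "4(%rax)" or "(%rdx,%rdi,2)".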
std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg,
const MCExpr *&Disp,
const SMLoc &StartLoc,
SMLoc &EndLoc) {
MCAsmParser &Parser = getParser();
SMLoc Loc;
// Based on the values passed in, we are in one of these cases
// (the current position is marked with (*)):
// 1. seg : * disp (base-index-scale-expr)
// 2. seg : *(disp) (base-index-scale-expr)
// 3. seg : *(base-index-scale-expr)
// 4. disp *(base-index-scale-expr)
// 5. *(disp) (base-index-scale-expr)
// 6. *(base-index-scale-expr)
// 7. disp *
// 8. *(disp)
// If we do not have a displacement yet, check if we're in cases 4 or 6 by
// checking if the first object after the parenthesis is a register (or an
// identifier referring to a register) and parse the displacement or default
// to 0 as appropriate.
auto isAtMemOperand = [this]() {
if (this->getLexer().isNot(AsmToken::LParen))
return false;
AsmToken Buf[2];
StringRef Id;
auto TokCount = this->getLexer().peekTokens(Buf, true);
if (TokCount == 0)
return false;
switch (Buf[0].getKind()) {
case AsmToken::Percent:
case AsmToken::Comma:
return true;
// The cases below are effectively doing a peekIdentifier.
case AsmToken::At:
case AsmToken::Dollar:
if ((TokCount > 1) &&
(Buf[1].is(AsmToken::Identifier) || Buf[1].is(AsmToken::String)) &&
(Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
Id = StringRef(Buf[0].getLoc().getPointer(),
Buf[1].getIdentifier().size() + 1);
break;
case AsmToken::Identifier:
case AsmToken::String:
Id = Buf[0].getIdentifier();
break;
default:
return false;
}
// We have an ID. Check if it is bound to a register.
if (!Id.empty()) {
MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
if (Sym->isVariable()) {
auto V = Sym->getVariableValue(/*SetUsed*/ false);
return isa<X86MCExpr>(V);
}
}
return false;
};
if (!Disp) {
// Parse immediate if we're not at a mem operand yet.
if (!isAtMemOperand()) {
if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(Disp, EndLoc))
return nullptr;
assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
} else {
// Disp is implicitly zero if we haven't parsed it yet.
Disp = MCConstantExpr::create(0, Parser.getContext());
}
}
// We are now either at the end of the operand or at the '(' at the start of a
// base-index-scale-expr.
if (!parseOptionalToken(AsmToken::LParen)) {
if (SegReg == 0)
return X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc);
return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
StartLoc, EndLoc);
}
// If we reached here, the '(' has already been consumed; process
// the rest of the memory operand.
unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
SMLoc BaseLoc = getLexer().getLoc();
const MCExpr *E;
StringRef ErrMsg;
// Parse BaseReg if one is provided.
if (getLexer().isNot(AsmToken::Comma) && getLexer().isNot(AsmToken::RParen)) {
if (Parser.parseExpression(E, EndLoc) ||
check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
return nullptr;
// Sanity check register.
BaseReg = cast<X86MCExpr>(E)->getRegNo();
if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
return ErrorOperand(BaseLoc,
"eiz and riz can only be used as index registers",
SMRange(BaseLoc, EndLoc));
}
if (parseOptionalToken(AsmToken::Comma)) {
// Following the comma we should have either an index register, or a scale
// value. We don't support the latter form, but we want to parse it
// correctly.
//
// Even though it would be completely consistent to support syntax like
// "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
if (getLexer().isNot(AsmToken::RParen)) {
if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(E, EndLoc))
return nullptr;
if (!isa<X86MCExpr>(E)) {
// We've parsed an unexpected scale value instead of an index
// register. Interpret it as an absolute expression.
int64_t ScaleVal;
if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
return ErrorOperand(Loc, "expected absolute expression");
if (ScaleVal != 1)
Warning(Loc, "scale factor without index register is ignored");
Scale = 1;
} else { // IndexReg Found.
IndexReg = cast<X86MCExpr>(E)->getRegNo();
if (BaseReg == X86::RIP)
return ErrorOperand(
Loc, "%rip as base register can not have an index register");
if (IndexReg == X86::RIP)
return ErrorOperand(Loc, "%rip is not allowed as an index register");
if (parseOptionalToken(AsmToken::Comma)) {
// Parse the scale amount:
// ::= ',' [scale-expression]
// A scale amount without an index is ignored.
if (getLexer().isNot(AsmToken::RParen)) {
int64_t ScaleVal;
if (Parser.parseTokenLoc(Loc) ||
Parser.parseAbsoluteExpression(ScaleVal))
return ErrorOperand(Loc, "expected scale expression");
Scale = (unsigned)ScaleVal;
// Validate the scale amount.
if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
Scale != 1)
return ErrorOperand(Loc,
"scale factor in 16-bit address must be 1");
if (checkScale(Scale, ErrMsg))
return ErrorOperand(Loc, ErrMsg);
}
}
}
}
}
// Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
if (parseToken(AsmToken::RParen, "unexpected token in memory operand"))
return nullptr;
// This is to support the otherwise illegal operand (%dx) found in examples
// in various unofficial manuals (e.g. "out[s]?[bwl]? %al, (%dx)"), which must
// be supported. Mark such DX variants separately so the fix applies only in
// these special cases.
if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
isa<MCConstantExpr>(Disp) && cast<MCConstantExpr>(Disp)->getValue() == 0)
return X86Operand::CreateDXReg(BaseLoc, BaseLoc);
if (CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
ErrMsg))
return ErrorOperand(BaseLoc, ErrMsg);
if (SegReg || BaseReg || IndexReg)
return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
IndexReg, Scale, StartLoc, EndLoc);
return X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc);
}
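// Illustrative operand forms accepted by ParseMemOperand (AT&T syntax):
//   %gs:8(%rbx,%rcx,4)   # seg : disp(base, index, scale)
//   (%rax)               # base only, displacement defaults to 0
//   8(,%rcx,8)           # index and scale without a base register
//   (%dx)                # tolerated solely for the in/out special case above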
// Parse either a standard primary expression or a register.
bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
MCAsmParser &Parser = getParser();
// See if this is a register first.
if (getTok().is(AsmToken::Percent) ||
(isParsingIntelSyntax() && getTok().is(AsmToken::Identifier) &&
MatchRegisterName(Parser.getTok().getString()))) {
SMLoc StartLoc = Parser.getTok().getLoc();
unsigned RegNo;
if (ParseRegister(RegNo, StartLoc, EndLoc))
return true;
Res = X86MCExpr::create(RegNo, Parser.getContext());
return false;
}
return Parser.parsePrimaryExpr(Res, EndLoc);
}
bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) {
MCAsmParser &Parser = getParser();
InstInfo = &Info;
StringRef PatchedName = Name;
if ((Name.equals("jmp") || Name.equals("jc") || Name.equals("jz")) &&
isParsingIntelSyntax() && isParsingInlineAsm()) {
StringRef NextTok = Parser.getTok().getString();
if (NextTok == "short") {
SMLoc NameEndLoc =
NameLoc.getFromPointer(NameLoc.getPointer() + Name.size());
// Eat the short keyword
Parser.Lex();
// MS ignores the short keyword; it determines the jmp type based
// on the distance to the label.
InstInfo->AsmRewrites->emplace_back(AOK_Skip, NameEndLoc,
NextTok.size() + 1);
}
}
// FIXME: Hack to recognize setneb as setne.
if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
PatchedName != "setb" && PatchedName != "setnb")
PatchedName = PatchedName.substr(0, Name.size()-1);
// FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
(PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
bool IsVCMP = PatchedName[0] == 'v';
unsigned CCIdx = IsVCMP ? 4 : 3;
unsigned ComparisonCode = StringSwitch<unsigned>(
PatchedName.slice(CCIdx, PatchedName.size() - 2))
.Case("eq", 0x00)
.Case("eq_oq", 0x00)
.Case("lt", 0x01)
.Case("lt_os", 0x01)
.Case("le", 0x02)
.Case("le_os", 0x02)
.Case("unord", 0x03)
.Case("unord_q", 0x03)
.Case("neq", 0x04)
.Case("neq_uq", 0x04)
.Case("nlt", 0x05)
.Case("nlt_us", 0x05)
.Case("nle", 0x06)
.Case("nle_us", 0x06)
.Case("ord", 0x07)
.Case("ord_q", 0x07)
/* AVX only from here */
.Case("eq_uq", 0x08)
.Case("nge", 0x09)
.Case("nge_us", 0x09)
.Case("ngt", 0x0A)
.Case("ngt_us", 0x0A)
.Case("false", 0x0B)
.Case("false_oq", 0x0B)
.Case("neq_oq", 0x0C)
.Case("ge", 0x0D)
.Case("ge_os", 0x0D)
.Case("gt", 0x0E)
.Case("gt_os", 0x0E)
.Case("true", 0x0F)
.Case("true_uq", 0x0F)
.Case("eq_os", 0x10)
.Case("lt_oq", 0x11)
.Case("le_oq", 0x12)
.Case("unord_s", 0x13)
.Case("neq_us", 0x14)
.Case("nlt_uq", 0x15)
.Case("nle_uq", 0x16)
.Case("ord_s", 0x17)
.Case("eq_us", 0x18)
.Case("nge_uq", 0x19)
.Case("ngt_uq", 0x1A)
.Case("false_os", 0x1B)
.Case("neq_os", 0x1C)
.Case("ge_oq", 0x1D)
.Case("gt_oq", 0x1E)
.Case("true_us", 0x1F)
.Default(~0U);
if (ComparisonCode != ~0U && (IsVCMP || ComparisonCode < 8)) {
Operands.push_back(X86Operand::CreateToken(PatchedName.slice(0, CCIdx),
NameLoc));
const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
getParser().getContext());
Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
PatchedName = PatchedName.substr(PatchedName.size() - 2);
}
}
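// Illustrative effect of the rewrite above: "vcmpltps %zmm1, %zmm2, %k1" is
// handled as the equivalent of "vcmpps $0x01, %zmm1, %zmm2, %k1", and plain
// "cmpeqss" as "cmpss $0x00, ...". Comparison codes 0x08 and above are
// accepted only for the vcmp* forms.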
// FIXME: Hack to recognize vpcmp<comparison code>{ub,uw,ud,uq,b,w,d,q}.
if (PatchedName.startswith("vpcmp") &&
(PatchedName.endswith("b") || PatchedName.endswith("w") ||
PatchedName.endswith("d") || PatchedName.endswith("q"))) {
unsigned CCIdx = PatchedName.drop_back().back() == 'u' ? 2 : 1;
unsigned ComparisonCode = StringSwitch<unsigned>(
PatchedName.slice(5, PatchedName.size() - CCIdx))
.Case("eq", 0x0) // Only allowed on unsigned. Checked below.
.Case("lt", 0x1)
.Case("le", 0x2)
//.Case("false", 0x3) // Not a documented alias.
.Case("neq", 0x4)
.Case("nlt", 0x5)
.Case("nle", 0x6)
//.Case("true", 0x7) // Not a documented alias.
.Default(~0U);
if (ComparisonCode != ~0U && (ComparisonCode != 0 || CCIdx == 2)) {
Operands.push_back(X86Operand::CreateToken("vpcmp", NameLoc));
const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
getParser().getContext());
Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
PatchedName = PatchedName.substr(PatchedName.size() - CCIdx);
}
}
// FIXME: Hack to recognize vpcom<comparison code>{ub,uw,ud,uq,b,w,d,q}.
if (PatchedName.startswith("vpcom") &&
(PatchedName.endswith("b") || PatchedName.endswith("w") ||
PatchedName.endswith("d") || PatchedName.endswith("q"))) {
unsigned CCIdx = PatchedName.drop_back().back() == 'u' ? 2 : 1;
unsigned ComparisonCode = StringSwitch<unsigned>(
PatchedName.slice(5, PatchedName.size() - CCIdx))
.Case("lt", 0x0)
.Case("le", 0x1)
.Case("gt", 0x2)
.Case("ge", 0x3)
.Case("eq", 0x4)
.Case("neq", 0x5)
.Case("false", 0x6)
.Case("true", 0x7)
.Default(~0U);
if (ComparisonCode != ~0U) {
Operands.push_back(X86Operand::CreateToken("vpcom", NameLoc));
const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
getParser().getContext());
Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
PatchedName = PatchedName.substr(PatchedName.size() - CCIdx);
}
}
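// Similarly, "vpcmpleud %zmm1, %zmm2, %k1" is handled as
// "vpcmpud $2, %zmm1, %zmm2, %k1", and "vpcomgtb %xmm1, %xmm2, %xmm3" as
// "vpcomb $2, %xmm1, %xmm2, %xmm3" (illustrative examples only).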
// Determine whether this is an instruction prefix.
// FIXME:
// Enhance the robustness of prefix integrity checks. For example, the
// following forms are currently tolerated:
// repz repnz <insn> ; GAS errors for the use of two similar prefixes
// lock addq %rax, %rbx ; Destination operand must be of memory type
// xacquire <insn> ; xacquire must be accompanied by 'lock'
bool isPrefix = StringSwitch<bool>(Name)
.Cases("rex64", "data32", "data16", true)
.Cases("xacquire", "xrelease", true)
.Cases("acquire", "release", isParsingIntelSyntax())
.Default(false);
auto isLockRepeatNtPrefix = [](StringRef N) {
return StringSwitch<bool>(N)
.Cases("lock", "rep", "repe", "repz", "repne", "repnz", "notrack", true)
.Default(false);
};
bool CurlyAsEndOfStatement = false;
unsigned Flags = X86::IP_NO_PREFIX;
while (isLockRepeatNtPrefix(Name.lower())) {
unsigned Prefix =
StringSwitch<unsigned>(Name)
.Cases("lock", "lock", X86::IP_HAS_LOCK)
.Cases("rep", "repe", "repz", X86::IP_HAS_REPEAT)
.Cases("repne", "repnz", X86::IP_HAS_REPEAT_NE)
.Cases("notrack", "notrack", X86::IP_HAS_NOTRACK)
.Default(X86::IP_NO_PREFIX); // Invalid prefix (impossible)
Flags |= Prefix;
if (getLexer().is(AsmToken::EndOfStatement)) {
// We don't have a real instruction with the given prefix;
// use the prefix as the instruction.
// TODO: there could be several prefixes one after another.
Flags = X86::IP_NO_PREFIX;
break;
}
Name = Parser.getTok().getString();
Parser.Lex(); // eat the prefix
// Hack: we could have something like "rep # some comment" or
// "lock; cmpxchg16b $1" or "lock\0A\09incl" or "lock/incl"
while (Name.startswith(";") || Name.startswith("\n") ||
Name.startswith("#") || Name.startswith("\t") ||
Name.startswith("/")) {
Name = Parser.getTok().getString();
Parser.Lex(); // go to next prefix or instr
}
}
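// For example, "lock incl 4(%rax)" is parsed as "incl" with the
// IP_HAS_LOCK flag attached, while a bare "lock" on a line of its own is
// kept as an instruction in its own right.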
if (Flags)
PatchedName = Name;
// Hacks to handle 'data16' and 'data32'
if (PatchedName == "data16" && is16BitMode()) {
return Error(NameLoc, "redundant data16 prefix");
}
if (PatchedName == "data32") {
if (is32BitMode())
return Error(NameLoc, "redundant data32 prefix");
if (is64BitMode())
return Error(NameLoc, "'data32' is not supported in 64-bit mode");
// Rewrite to 'data16' for the table lookup.
PatchedName = "data16";
}
Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
// This does the actual operand parsing. Don't parse any more if we have a
// prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
// just want to parse the "lock" as the first instruction and the "incl" as
// the next one.
if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
// Parse '*' modifier.
if (getLexer().is(AsmToken::Star))
Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
// Read the operands.
while(1) {
if (std::unique_ptr<X86Operand> Op = ParseOperand()) {
Operands.push_back(std::move(Op));
if (HandleAVX512Operand(Operands, *Operands.back()))
return true;
} else {
return true;
}
// check for comma and eat it
if (getLexer().is(AsmToken::Comma))
Parser.Lex();
else
break;
}
// In MS inline asm curly braces mark the beginning/end of a block,
// therefore they should be interpreted as the end of a statement.
CurlyAsEndOfStatement =
isParsingIntelSyntax() && isParsingInlineAsm() &&
(getLexer().is(AsmToken::LCurly) || getLexer().is(AsmToken::RCurly));
if (getLexer().isNot(AsmToken::EndOfStatement) && !CurlyAsEndOfStatement)
return TokError("unexpected token in argument list");
}
// Consume the EndOfStatement or the prefix separator Slash
if (getLexer().is(AsmToken::EndOfStatement) ||
(isPrefix && getLexer().is(AsmToken::Slash)))
Parser.Lex();
else if (CurlyAsEndOfStatement)
// Add an actual EndOfStatement before the curly brace
Info.AsmRewrites->emplace_back(AOK_EndOfStatement,
getLexer().getTok().getLoc(), 0);
// This is for gas compatibility and cannot be done in td.
// Adding "p" for some floating point with no argument.
// For example: fsub --> fsubp
bool IsFp =
Name == "fsub" || Name == "fdiv" || Name == "fsubr" || Name == "fdivr";
if (IsFp && Operands.size() == 1) {
const char *Repl = StringSwitch<const char *>(Name)
.Case("fsub", "fsubp")
.Case("fdiv", "fdivp")
.Case("fsubr", "fsubrp")
.Case("fdivr", "fdivrp");
static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
}
// Moving a 32-bit or 16-bit value into a segment register has the same
// behavior. Modify such instructions to always take the shorter form.
if ((Name == "mov" || Name == "movw" || Name == "movl") &&
(Operands.size() == 3)) {
X86Operand &Op1 = (X86Operand &)*Operands[1];
X86Operand &Op2 = (X86Operand &)*Operands[2];
SMLoc Loc = Op1.getEndLoc();
if (Op1.isReg() && Op2.isReg() &&
X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
Op2.getReg()) &&
(X86MCRegisterClasses[X86::GR16RegClassID].contains(Op1.getReg()) ||
X86MCRegisterClasses[X86::GR32RegClassID].contains(Op1.getReg()))) {
// Change instruction name to match new instruction.
if (Name != "mov" && Name[3] == (is16BitMode() ? 'l' : 'w')) {
Name = is16BitMode() ? "movw" : "movl";
Operands[0] = X86Operand::CreateToken(Name, NameLoc);
}
// Select the correct equivalent 16-/32-bit source register.
unsigned Reg =
getX86SubSuperRegisterOrZero(Op1.getReg(), is16BitMode() ? 16 : 32);
Operands[1] = X86Operand::CreateReg(Reg, Loc, Loc);
}
}
// This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" ->
// "outb %al, %dx". Out doesn't take a memory form, but this is a widely
// documented form in various unofficial manuals, so a lot of code uses it.
if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||
Name == "outl" || Name == "outsl" || Name == "out" || Name == "outs") &&
Operands.size() == 3) {
X86Operand &Op = (X86Operand &)*Operands.back();
if (Op.isDXReg())
Operands.back() = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
Op.getEndLoc());
}
// Same hack for "in[s]?[bwl]? (%dx), %al" -> "inb %dx, %al".
if ((Name == "inb" || Name == "insb" || Name == "inw" || Name == "insw" ||
Name == "inl" || Name == "insl" || Name == "in" || Name == "ins") &&
Operands.size() == 3) {
X86Operand &Op = (X86Operand &)*Operands[1];
if (Op.isDXReg())
Operands[1] = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
Op.getEndLoc());
}
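// Illustrative rewrites performed above: "outb %al, (%dx)" becomes
// "outb %al, %dx" and "inb (%dx), %al" becomes "inb %dx, %al".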
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 2> TmpOperands;
bool HadVerifyError = false;
// Append default arguments to "ins[bwld]"
if (Name.startswith("ins") &&
(Operands.size() == 1 || Operands.size() == 3) &&
(Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd" ||
Name == "ins")) {
AddDefaultSrcDestOperands(TmpOperands,
X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
DefaultMemDIOperand(NameLoc));
HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Append default arguments to "outs[bwld]"
if (Name.startswith("outs") &&
(Operands.size() == 1 || Operands.size() == 3) &&
(Name == "outsb" || Name == "outsw" || Name == "outsl" ||
Name == "outsd" || Name == "outs")) {
AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
// values of $SIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
if (Name.startswith("lods") &&
(Operands.size() == 1 || Operands.size() == 2) &&
(Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
// values of $DIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
if (Name.startswith("stos") &&
(Operands.size() == 1 || Operands.size() == 2) &&
(Name == "stos" || Name == "stosb" || Name == "stosw" ||
Name == "stosl" || Name == "stosd" || Name == "stosq")) {
TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
// values of $DIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
if (Name.startswith("scas") &&
(Operands.size() == 1 || Operands.size() == 2) &&
(Name == "scas" || Name == "scasb" || Name == "scasw" ||
Name == "scasl" || Name == "scasd" || Name == "scasq")) {
TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Add default SI and DI operands to "cmps[bwlq]".
if (Name.startswith("cmps") &&
(Operands.size() == 1 || Operands.size() == 3) &&
(Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
DefaultMemSIOperand(NameLoc));
HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Add default SI and DI operands to "movs[bwlq]".
if (((Name.startswith("movs") &&
(Name == "movs" || Name == "movsb" || Name == "movsw" ||
Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
(Name.startswith("smov") &&
(Name == "smov" || Name == "smovb" || Name == "smovw" ||
Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
(Operands.size() == 1 || Operands.size() == 3)) {
if (Name == "movsd" && Operands.size() == 1 && !isParsingIntelSyntax())
Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
DefaultMemDIOperand(NameLoc));
HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Check if we encountered an error for one of the string instructions.
if (HadVerifyError) {
return HadVerifyError;
}
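// For example (64-bit mode), a bare "lodsb" gains a default (%rsi) source,
// "stosb" a default (%rdi) destination, and "movsb" both; in AT&T syntax a
// bare "movsd" is treated as the string move "movsl" rather than the SSE2
// scalar move.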
// FIXME: Hack to recognize s{hr,ar,hl} $1, <op>. Canonicalize to
// "shift <op>".
if ((Name.startswith("shr") || Name.startswith("sar") ||
Name.startswith("shl") || Name.startswith("sal") ||
Name.startswith("rcl") || Name.startswith("rcr") ||
Name.startswith("rol") || Name.startswith("ror")) &&
Operands.size() == 3) {
if (isParsingIntelSyntax()) {
// Intel syntax
X86Operand &Op1 = static_cast<X86Operand &>(*Operands[2]);
if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
cast<MCConstantExpr>(Op1.getImm())->getValue() == 1)
Operands.pop_back();
} else {
X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
cast<MCConstantExpr>(Op1.getImm())->getValue() == 1)
Operands.erase(Operands.begin() + 1);
}
}
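// For instance, "shrl $1, %eax" is canonicalized to "shrl %eax" so that the
// dedicated shift-by-one encoding is selected; the same applies to the other
// shift and rotate mnemonics listed above.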
// Transforms "int $3" into "int3" as a size optimization. We can't write an
// instalias with an immediate operand yet.
if (Name == "int" && Operands.size() == 2) {
X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
if (Op1.isImm())
if (auto *CE = dyn_cast<MCConstantExpr>(Op1.getImm()))
if (CE->getValue() == 3) {
Operands.erase(Operands.begin() + 1);
static_cast<X86Operand &>(*Operands[0]).setTokenValue("int3");
}
}
// Transforms "xlat mem8" into "xlatb"
if ((Name == "xlat" || Name == "xlatb") && Operands.size() == 2) {
X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
if (Op1.isMem8()) {
Warning(Op1.getStartLoc(), "memory operand is only for determining the "
"size, (R|E)BX will be used for the location");
Operands.pop_back();
static_cast<X86Operand &>(*Operands[0]).setTokenValue("xlatb");
}
}
if (Flags)
Operands.push_back(X86Operand::CreatePrefix(Flags, NameLoc, NameLoc));
return false;
}
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
return false;
}
bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
const MCRegisterInfo *MRI = getContext().getRegisterInfo();
switch (Inst.getOpcode()) {
case X86::VGATHERDPDYrm:
case X86::VGATHERDPDrm:
case X86::VGATHERDPSYrm:
case X86::VGATHERDPSrm:
case X86::VGATHERQPDYrm:
case X86::VGATHERQPDrm:
case X86::VGATHERQPSYrm:
case X86::VGATHERQPSrm:
case X86::VPGATHERDDYrm:
case X86::VPGATHERDDrm:
case X86::VPGATHERDQYrm:
case X86::VPGATHERDQrm:
case X86::VPGATHERQDYrm:
case X86::VPGATHERQDrm:
case X86::VPGATHERQQYrm:
case X86::VPGATHERQQrm: {
unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
unsigned Mask = MRI->getEncodingValue(Inst.getOperand(1).getReg());
unsigned Index =
MRI->getEncodingValue(Inst.getOperand(3 + X86::AddrIndexReg).getReg());
if (Dest == Mask || Dest == Index || Mask == Index)
return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
"registers should be distinct");
break;
}
case X86::VGATHERDPDZ128rm:
case X86::VGATHERDPDZ256rm:
case X86::VGATHERDPDZrm:
case X86::VGATHERDPSZ128rm:
case X86::VGATHERDPSZ256rm:
case X86::VGATHERDPSZrm:
case X86::VGATHERQPDZ128rm:
case X86::VGATHERQPDZ256rm:
case X86::VGATHERQPDZrm:
case X86::VGATHERQPSZ128rm:
case X86::VGATHERQPSZ256rm:
case X86::VGATHERQPSZrm:
case X86::VPGATHERDDZ128rm:
case X86::VPGATHERDDZ256rm:
case X86::VPGATHERDDZrm:
case X86::VPGATHERDQZ128rm:
case X86::VPGATHERDQZ256rm:
case X86::VPGATHERDQZrm:
case X86::VPGATHERQDZ128rm:
case X86::VPGATHERQDZ256rm:
case X86::VPGATHERQDZrm:
case X86::VPGATHERQQZ128rm:
case X86::VPGATHERQQZ256rm:
case X86::VPGATHERQQZrm: {
unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
unsigned Index =
MRI->getEncodingValue(Inst.getOperand(4 + X86::AddrIndexReg).getReg());
if (Dest == Index)
return Warning(Ops[0]->getStartLoc(), "index and destination registers "
"should be distinct");
break;
}
case X86::V4FMADDPSrm:
case X86::V4FMADDPSrmk:
case X86::V4FMADDPSrmkz:
case X86::V4FMADDSSrm:
case X86::V4FMADDSSrmk:
case X86::V4FMADDSSrmkz:
case X86::V4FNMADDPSrm:
case X86::V4FNMADDPSrmk:
case X86::V4FNMADDPSrmkz:
case X86::V4FNMADDSSrm:
case X86::V4FNMADDSSrmk:
case X86::V4FNMADDSSrmkz:
case X86::VP4DPWSSDSrm:
case X86::VP4DPWSSDSrmk:
case X86::VP4DPWSSDSrmkz:
case X86::VP4DPWSSDrm:
case X86::VP4DPWSSDrmk:
case X86::VP4DPWSSDrmkz: {
unsigned Src2 = Inst.getOperand(Inst.getNumOperands() -
X86::AddrNumOperands - 1).getReg();
unsigned Src2Enc = MRI->getEncodingValue(Src2);
if (Src2Enc % 4 != 0) {
StringRef RegName = X86IntelInstPrinter::getRegisterName(Src2);
unsigned GroupStart = (Src2Enc / 4) * 4;
unsigned GroupEnd = GroupStart + 3;
return Warning(Ops[0]->getStartLoc(),
"source register '" + RegName + "' implicitly denotes '" +
RegName.take_front(3) + Twine(GroupStart) + "' to '" +
RegName.take_front(3) + Twine(GroupEnd) +
"' source group");
}
break;
}
}
return false;
}
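// For instance, an AVX2 gather that names the same register as both the
// destination and the index operand trips the "mask, index, and destination
// registers should be distinct" warning emitted above.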
static const char *getSubtargetFeatureName(uint64_t Val);
void X86AsmParser::EmitInstruction(MCInst &Inst, OperandVector &Operands,
MCStreamer &Out) {
Instrumentation->InstrumentAndEmitInstruction(
Inst, Operands, getContext(), MII, Out,
getParser().shouldPrintSchedInfo());
}
bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out, uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
if (isParsingIntelSyntax())
return MatchAndEmitIntelInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
MatchingInlineAsm);
return MatchAndEmitATTInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
MatchingInlineAsm);
}
void X86AsmParser::MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op,
OperandVector &Operands, MCStreamer &Out,
bool MatchingInlineAsm) {
// FIXME: This should be replaced with a real .td file alias mechanism.
// Also, MatchInstructionImpl should actually *do* the EmitInstruction
// call.
const char *Repl = StringSwitch<const char *>(Op.getToken())
.Case("finit", "fninit")
.Case("fsave", "fnsave")
.Case("fstcw", "fnstcw")
.Case("fstcww", "fnstcw")
.Case("fstenv", "fnstenv")
.Case("fstsw", "fnstsw")
.Case("fstsww", "fnstsw")
.Case("fclex", "fnclex")
.Default(nullptr);
if (Repl) {
MCInst Inst;
Inst.setOpcode(X86::WAIT);
Inst.setLoc(IDLoc);
if (!MatchingInlineAsm)
EmitInstruction(Inst, Operands, Out);
Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
}
}
bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc, uint64_t ErrorInfo,
bool MatchingInlineAsm) {
assert(ErrorInfo && "Unknown missing feature!");
SmallString<126> Msg;
raw_svector_ostream OS(Msg);
OS << "instruction requires:";
uint64_t Mask = 1;
for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
if (ErrorInfo & Mask)
OS << ' ' << getSubtargetFeatureName(ErrorInfo & Mask);
Mask <<= 1;
}
return Error(IDLoc, OS.str(), SMRange(), MatchingInlineAsm);
}
static unsigned getPrefixes(OperandVector &Operands) {
unsigned Result = 0;
X86Operand &Prefix = static_cast<X86Operand &>(*Operands.back());
if (Prefix.isPrefix()) {
Result = Prefix.getPrefix();
Operands.pop_back();
}
return Result;
}
bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
assert(!Operands.empty() && "Unexpected empty operand list!");
X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
assert(Op.isToken() && "Leading operand should always be a mnemonic!");
SMRange EmptyRange = None;
// First, handle aliases that expand to multiple instructions.
MatchFPUWaitAlias(IDLoc, Op, Operands, Out, MatchingInlineAsm);
bool WasOriginallyInvalidOperand = false;
unsigned Prefixes = getPrefixes(Operands);
MCInst Inst;
if (Prefixes)
Inst.setFlags(Prefixes);
// First, try a direct match.
switch (MatchInstruction(Operands, Inst, ErrorInfo, MatchingInlineAsm,
isParsingIntelSyntax())) {
default: llvm_unreachable("Unexpected match result!");
case Match_Success:
if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
return true;
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the
// individual transformations can chain off each other.
if (!MatchingInlineAsm)
while (processInstruction(Inst, Operands))
;
Inst.setLoc(IDLoc);
if (!MatchingInlineAsm)
EmitInstruction(Inst, Operands, Out);
Opcode = Inst.getOpcode();
return false;
case Match_MissingFeature:
return ErrorMissingFeature(IDLoc, ErrorInfo, MatchingInlineAsm);
case Match_InvalidOperand:
WasOriginallyInvalidOperand = true;
break;
case Match_MnemonicFail:
break;
}
// FIXME: Ideally, we would only attempt suffix matches for things which are
// valid prefixes, and we could just infer the right unambiguous
// type. However, that requires substantially more matcher support than the
// following hack.
// Change the operand to point to a temporary token.
StringRef Base = Op.getToken();
SmallString<16> Tmp;
Tmp += Base;
Tmp += ' ';
Op.setTokenValue(Tmp);
// If this instruction starts with an 'f', then it is a floating point stack
// instruction. These come in up to three forms for 32-bit, 64-bit, and
// 80-bit floating point, which use the suffixes s,l,t respectively.
//
// Otherwise, we assume that this may be an integer instruction, which comes
// in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
// Check for the various suffix matches.
uint64_t ErrorInfoIgnore;
uint64_t ErrorInfoMissingFeature = 0; // Init suppresses compiler warnings.
unsigned Match[4];
for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I) {
Tmp.back() = Suffixes[I];
Match[I] = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match[I] == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
}
// Restore the old token.
Op.setTokenValue(Base);
// If exactly one matched, then we treat that as a successful match (and the
// instruction will already have been filled in correctly, since the failing
// matches won't have modified it).
unsigned NumSuccessfulMatches =
std::count(std::begin(Match), std::end(Match), Match_Success);
if (NumSuccessfulMatches == 1) {
Inst.setLoc(IDLoc);
if (!MatchingInlineAsm)
EmitInstruction(Inst, Operands, Out);
Opcode = Inst.getOpcode();
return false;
}
// Otherwise, the match failed, try to produce a decent error message.
// If we had multiple suffix matches, then identify this as an ambiguous
// match.
if (NumSuccessfulMatches > 1) {
char MatchChars[4];
unsigned NumMatches = 0;
for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I)
if (Match[I] == Match_Success)
MatchChars[NumMatches++] = Suffixes[I];
SmallString<126> Msg;
raw_svector_ostream OS(Msg);
OS << "ambiguous instructions require an explicit suffix (could be ";
for (unsigned i = 0; i != NumMatches; ++i) {
if (i != 0)
OS << ", ";
if (i + 1 == NumMatches)
OS << "or ";
OS << "'" << Base << MatchChars[i] << "'";
}
OS << ")";
Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);
return true;
}
// Okay, we know that none of the variants matched successfully.
// If all of the instructions reported an invalid mnemonic, then the original
// mnemonic was invalid.
if (std::count(std::begin(Match), std::end(Match), Match_MnemonicFail) == 4) {
if (!WasOriginallyInvalidOperand) {
return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
Op.getLocRange(), MatchingInlineAsm);
}
// Recover location info for the operand if we know which was the problem.
if (ErrorInfo != ~0ULL) {
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction", EmptyRange,
MatchingInlineAsm);
X86Operand &Operand = (X86Operand &)*Operands[ErrorInfo];
if (Operand.getStartLoc().isValid()) {
SMRange OperandRange = Operand.getLocRange();
return Error(Operand.getStartLoc(), "invalid operand for instruction",
OperandRange, MatchingInlineAsm);
}
}
return Error(IDLoc, "invalid operand for instruction", EmptyRange,
MatchingInlineAsm);
}
// If one instruction matched with a missing feature, report this as a
// missing feature.
if (std::count(std::begin(Match), std::end(Match),
Match_MissingFeature) == 1) {
ErrorInfo = ErrorInfoMissingFeature;
return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeature,
MatchingInlineAsm);
}
// If one instruction matched with an invalid operand, report this as an
// operand failure.
if (std::count(std::begin(Match), std::end(Match),
Match_InvalidOperand) == 1) {
return Error(IDLoc, "invalid operand for instruction", EmptyRange,
MatchingInlineAsm);
}
// If all of these were an outright failure, report it in a useless way.
Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
EmptyRange, MatchingInlineAsm);
return true;
}
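// Example of the suffix recovery above: "incl (%rax)" matches directly,
// whereas "inc (%rax)" tries incb/incw/incl/incq, all of which match, and is
// rejected with "ambiguous instructions require an explicit suffix
// (could be 'incb', 'incw', 'incl', or 'incq')".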
bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
assert(!Operands.empty() && "Unexpected empty operand list!");
X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
assert(Op.isToken() && "Leading operand should always be a mnemonic!");
StringRef Mnemonic = Op.getToken();
SMRange EmptyRange = None;
StringRef Base = Op.getToken();
unsigned Prefixes = getPrefixes(Operands);
// First, handle aliases that expand to multiple instructions.
MatchFPUWaitAlias(IDLoc, Op, Operands, Out, MatchingInlineAsm);
MCInst Inst;
if (Prefixes)
Inst.setFlags(Prefixes);
// Find one unsized memory operand, if present.
X86Operand *UnsizedMemOp = nullptr;
for (const auto &Op : Operands) {
X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
if (X86Op->isMemUnsized()) {
UnsizedMemOp = X86Op;
// We have found an unsized memory operand; stop looking.
// Intel assembly allows only one memory operand.
break;
}
}
// Allow some instructions to have implicitly pointer-sized operands. This is
// compatible with gas.
if (UnsizedMemOp) {
static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
for (const char *Instr : PtrSizedInstrs) {
if (Mnemonic == Instr) {
UnsizedMemOp->Mem.Size = getPointerWidth();
break;
}
}
}
SmallVector<unsigned, 8> Match;
uint64_t ErrorInfoMissingFeature = 0;
// If an unsized push has an immediate operand, default its size to the
// pointer size.
if (Mnemonic == "push" && Operands.size() == 2) {
auto *X86Op = static_cast<X86Operand *>(Operands[1].get());
if (X86Op->isImm()) {
// If it's not a constant, fall through and let the remainder take care of it.
const auto *CE = dyn_cast<MCConstantExpr>(X86Op->getImm());
unsigned Size = getPointerWidth();
if (CE &&
(isIntN(Size, CE->getValue()) || isUIntN(Size, CE->getValue()))) {
SmallString<16> Tmp;
Tmp += Base;
Tmp += (is64BitMode())
? "q"
: (is32BitMode()) ? "l" : (is16BitMode()) ? "w" : " ";
Op.setTokenValue(Tmp);
// Do match in ATT mode to allow explicit suffix usage.
Match.push_back(MatchInstruction(Operands, Inst, ErrorInfo,
MatchingInlineAsm,
false /*isParsingIntelSyntax()*/));
Op.setTokenValue(Base);
}
}
}
// If an unsized memory operand is present, try to match with each memory
// operand size. In Intel assembly, the size is not part of the instruction
// mnemonic.
if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
for (unsigned Size : MopSizes) {
UnsizedMemOp->Mem.Size = Size;
uint64_t ErrorInfoIgnore;
unsigned LastOpcode = Inst.getOpcode();
unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
MatchingInlineAsm, isParsingIntelSyntax());
if (Match.empty() || LastOpcode != Inst.getOpcode())
Match.push_back(M);
// If this returned as a missing feature failure, remember that.
if (Match.back() == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
}
// Restore the size of the unsized memory operand if we modified it.
UnsizedMemOp->Mem.Size = 0;
}
// If we haven't matched anything yet, this is not a basic integer or FPU
// operation. There shouldn't be any ambiguity in our mnemonic table, so try
// matching with the unsized operand.
if (Match.empty()) {
Match.push_back(MatchInstruction(
Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax()));
// If this returned as a missing feature failure, remember that.
if (Match.back() == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfo;
}
// Restore the size of the unsized memory operand if we modified it.
if (UnsizedMemOp)
UnsizedMemOp->Mem.Size = 0;
// If it's a bad mnemonic, all results will be the same.
if (Match.back() == Match_MnemonicFail) {
return Error(IDLoc, "invalid instruction mnemonic '" + Mnemonic + "'",
Op.getLocRange(), MatchingInlineAsm);
}
unsigned NumSuccessfulMatches =
std::count(std::begin(Match), std::end(Match), Match_Success);
// If matching was ambiguous and we had size information from the frontend,
// try again with that. This handles cases like "movzx eax, m8/m16".
if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
UnsizedMemOp->getMemFrontendSize()) {
UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
unsigned M = MatchInstruction(
Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax());
if (M == Match_Success)
NumSuccessfulMatches = 1;
// Add a rewrite that encodes the size information we used from the
// frontend.
InstInfo->AsmRewrites->emplace_back(
AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
/*Len=*/0, UnsizedMemOp->getMemFrontendSize());
}
// If exactly one matched, then we treat that as a successful match (and the
// instruction will already have been filled in correctly, since the failing
// matches won't have modified it).
if (NumSuccessfulMatches == 1) {
if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
return true;
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the individual
// transformations can chain off each other.
if (!MatchingInlineAsm)
while (processInstruction(Inst, Operands))
;
Inst.setLoc(IDLoc);
if (!MatchingInlineAsm)
EmitInstruction(Inst, Operands, Out);
Opcode = Inst.getOpcode();
return false;
} else if (NumSuccessfulMatches > 1) {
assert(UnsizedMemOp &&
"multiple matches only possible with unsized memory operands");
return Error(UnsizedMemOp->getStartLoc(),
"ambiguous operand size for instruction '" + Mnemonic + "\'",
UnsizedMemOp->getLocRange());
}
// If one instruction matched with a missing feature, report this as a
// missing feature.
if (std::count(std::begin(Match), std::end(Match),
Match_MissingFeature) == 1) {
ErrorInfo = ErrorInfoMissingFeature;
return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeature,
MatchingInlineAsm);
}
// If one instruction matched with an invalid operand, report this as an
// operand failure.
if (std::count(std::begin(Match), std::end(Match),
Match_InvalidOperand) == 1) {
return Error(IDLoc, "invalid operand for instruction", EmptyRange,
MatchingInlineAsm);
}
// If all of these were an outright failure, report it in a useless way.
return Error(IDLoc, "unknown instruction mnemonic", EmptyRange,
MatchingInlineAsm);
}
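// Example of the unsized-operand handling above: "inc dword ptr [rax]"
// carries an explicit size, while "inc [rax]" matches several operand sizes
// and is rejected with "ambiguous operand size for instruction 'inc'".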
bool X86AsmParser::OmitRegisterFromClobberLists(unsigned RegNo) {
return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
}
bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
MCAsmParser &Parser = getParser();
StringRef IDVal = DirectiveID.getIdentifier();
if (IDVal.startswith(".code"))
return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
else if (IDVal.startswith(".att_syntax")) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
if (Parser.getTok().getString() == "prefix")
Parser.Lex();
else if (Parser.getTok().getString() == "noprefix")
return Error(DirectiveID.getLoc(), "'.att_syntax noprefix' is not "
"supported: registers must have a "
"'%' prefix in .att_syntax");
}
getParser().setAssemblerDialect(0);
return false;
} else if (IDVal.startswith(".intel_syntax")) {
getParser().setAssemblerDialect(1);
if (getLexer().isNot(AsmToken::EndOfStatement)) {
if (Parser.getTok().getString() == "noprefix")
Parser.Lex();
else if (Parser.getTok().getString() == "prefix")
return Error(DirectiveID.getLoc(), "'.intel_syntax prefix' is not "
"supported: registers must not have "
"a '%' prefix in .intel_syntax");
}
return false;
} else if (IDVal == ".even")
return parseDirectiveEven(DirectiveID.getLoc());
else if (IDVal == ".cv_fpo_proc")
return parseDirectiveFPOProc(DirectiveID.getLoc());
else if (IDVal == ".cv_fpo_setframe")
return parseDirectiveFPOSetFrame(DirectiveID.getLoc());
else if (IDVal == ".cv_fpo_pushreg")
return parseDirectiveFPOPushReg(DirectiveID.getLoc());
else if (IDVal == ".cv_fpo_stackalloc")
return parseDirectiveFPOStackAlloc(DirectiveID.getLoc());
else if (IDVal == ".cv_fpo_stackalign")
return parseDirectiveFPOStackAlign(DirectiveID.getLoc());
else if (IDVal == ".cv_fpo_endprologue")
return parseDirectiveFPOEndPrologue(DirectiveID.getLoc());
else if (IDVal == ".cv_fpo_endproc")
return parseDirectiveFPOEndProc(DirectiveID.getLoc());
return true;
}
/// parseDirectiveEven
/// ::= .even
bool X86AsmParser::parseDirectiveEven(SMLoc L) {
if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
return false;
const MCSection *Section = getStreamer().getCurrentSectionOnly();
if (!Section) {
getStreamer().InitSections(false);
Section = getStreamer().getCurrentSectionOnly();
}
if (Section->UseCodeAlign())
getStreamer().EmitCodeAlignment(2, 0);
else
getStreamer().EmitValueToAlignment(2, 0, 1, 0);
return false;
}
/// ParseDirectiveCode
/// ::= .code16 | .code32 | .code64
bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
MCAsmParser &Parser = getParser();
Code16GCC = false;
if (IDVal == ".code16") {
Parser.Lex();
if (!is16BitMode()) {
SwitchMode(X86::Mode16Bit);
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
}
} else if (IDVal == ".code16gcc") {
// .code16gcc parses as if in 32-bit mode, but emits code in 16-bit mode.
Parser.Lex();
Code16GCC = true;
if (!is16BitMode()) {
SwitchMode(X86::Mode16Bit);
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
}
} else if (IDVal == ".code32") {
Parser.Lex();
if (!is32BitMode()) {
SwitchMode(X86::Mode32Bit);
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
}
} else if (IDVal == ".code64") {
Parser.Lex();
if (!is64BitMode()) {
SwitchMode(X86::Mode64Bit);
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64);
}
} else {
Error(L, "unknown directive " + IDVal);
return false;
}
return false;
}
// .cv_fpo_proc foo
bool X86AsmParser::parseDirectiveFPOProc(SMLoc L) {
MCAsmParser &Parser = getParser();
StringRef ProcName;
int64_t ParamsSize;
if (Parser.parseIdentifier(ProcName))
return Parser.TokError("expected symbol name");
if (Parser.parseIntToken(ParamsSize, "expected parameter byte count"))
return true;
if (!isUIntN(32, ParamsSize))
return Parser.TokError("parameters size out of range");
if (Parser.parseEOL("unexpected tokens"))
return addErrorSuffix(" in '.cv_fpo_proc' directive");
MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
}
// .cv_fpo_setframe ebp
bool X86AsmParser::parseDirectiveFPOSetFrame(SMLoc L) {
MCAsmParser &Parser = getParser();
unsigned Reg;
SMLoc DummyLoc;
if (ParseRegister(Reg, DummyLoc, DummyLoc) ||
Parser.parseEOL("unexpected tokens"))
return addErrorSuffix(" in '.cv_fpo_setframe' directive");
return getTargetStreamer().emitFPOSetFrame(Reg, L);
}
// .cv_fpo_pushreg ebx
bool X86AsmParser::parseDirectiveFPOPushReg(SMLoc L) {
MCAsmParser &Parser = getParser();
unsigned Reg;
SMLoc DummyLoc;
if (ParseRegister(Reg, DummyLoc, DummyLoc) ||
Parser.parseEOL("unexpected tokens"))
return addErrorSuffix(" in '.cv_fpo_pushreg' directive");
return getTargetStreamer().emitFPOPushReg(Reg, L);
}
// .cv_fpo_stackalloc 20
bool X86AsmParser::parseDirectiveFPOStackAlloc(SMLoc L) {
MCAsmParser &Parser = getParser();
int64_t Offset;
if (Parser.parseIntToken(Offset, "expected offset") ||
Parser.parseEOL("unexpected tokens"))
return addErrorSuffix(" in '.cv_fpo_stackalloc' directive");
return getTargetStreamer().emitFPOStackAlloc(Offset, L);
}
// .cv_fpo_stackalign 8
bool X86AsmParser::parseDirectiveFPOStackAlign(SMLoc L) {
MCAsmParser &Parser = getParser();
int64_t Offset;
if (Parser.parseIntToken(Offset, "expected offset") ||
Parser.parseEOL("unexpected tokens"))
return addErrorSuffix(" in '.cv_fpo_stackalign' directive");
return getTargetStreamer().emitFPOStackAlign(Offset, L);
}
// .cv_fpo_endprologue
bool X86AsmParser::parseDirectiveFPOEndPrologue(SMLoc L) {
MCAsmParser &Parser = getParser();
if (Parser.parseEOL("unexpected tokens"))
return addErrorSuffix(" in '.cv_fpo_endprologue' directive");
return getTargetStreamer().emitFPOEndPrologue(L);
}
// .cv_fpo_endproc
bool X86AsmParser::parseDirectiveFPOEndProc(SMLoc L) {
MCAsmParser &Parser = getParser();
if (Parser.parseEOL("unexpected tokens"))
return addErrorSuffix(" in '.cv_fpo_endproc' directive");
return getTargetStreamer().emitFPOEndProc(L);
}
// Force static initialization.
extern "C" void LLVMInitializeX86AsmParser() {
RegisterMCAsmParser<X86AsmParser> X(getTheX86_32Target());
RegisterMCAsmParser<X86AsmParser> Y(getTheX86_64Target());
}
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#define GET_SUBTARGET_FEATURE_NAME
#include "X86GenAsmMatcher.inc"
Index: vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp (revision 344166)
@@ -1,202 +1,213 @@
//===-- X86ATTInstPrinter.cpp - AT&T assembly instruction printing --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file includes code for rendering MCInst instances as AT&T-style
// assembly.
//
//===----------------------------------------------------------------------===//
#include "X86ATTInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstComments.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cinttypes>
#include <cstdint>
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
// Include the auto-generated portion of the assembly writer.
#define PRINT_ALIAS_INSTR
#include "X86GenAsmWriter.inc"
void X86ATTInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
OS << markup("<reg:") << '%' << getRegisterName(RegNo) << markup(">");
}
void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
StringRef Annot, const MCSubtargetInfo &STI) {
// If verbose assembly is enabled, we can print some informative comments.
if (CommentStream)
HasCustomInstComment = EmitAnyX86InstComments(MI, *CommentStream, MII);
printInstFlags(MI, OS);
// Output CALLpcrel32 as "callq" in 64-bit mode.
// In Intel annotation it's always emitted as "call".
//
// TODO: Probably this hack should be redesigned via InstAlias in
// InstrInfo.td as soon as Requires clause is supported properly
// for InstAlias.
if (MI->getOpcode() == X86::CALLpcrel32 &&
(STI.getFeatureBits()[X86::Mode64Bit])) {
OS << "\tcallq\t";
printPCRelImm(MI, 0, OS);
}
// data16 and data32 both have the same encoding of 0x66. While data32 is
// valid only in 16-bit mode, data16 is valid in the other modes.
// There seems to be a lack of support for the Requires clause that causes
// 0x66 to be interpreted as "data16" by the asm printer.
// Thus we add an adjustment here in order to print the "right" instruction.
else if (MI->getOpcode() == X86::DATA16_PREFIX &&
STI.getFeatureBits()[X86::Mode16Bit]) {
OS << "\tdata32";
}
// Try to print any aliases first.
else if (!printAliasInstr(MI, OS))
printInstruction(MI, OS);
// Next always print the annotation.
printAnnotation(OS, Annot);
}
void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
printRegName(O, Op.getReg());
} else if (Op.isImm()) {
// Print immediates as signed values.
int64_t Imm = Op.getImm();
O << markup("<imm:") << '$' << formatImm(Imm) << markup(">");
// TODO: This should be in a helper function in the base class, so it can
// be used by other printers.
// If there are no instruction-specific comments, add a comment clarifying
// the hex value of the immediate operand when it isn't in the range
// [-256,255].
if (CommentStream && !HasCustomInstComment && (Imm > 255 || Imm < -256)) {
// Don't print unnecessary hex sign bits.
if (Imm == (int16_t)(Imm))
*CommentStream << format("imm = 0x%" PRIX16 "\n", (uint16_t)Imm);
else if (Imm == (int32_t)(Imm))
*CommentStream << format("imm = 0x%" PRIX32 "\n", (uint32_t)Imm);
else
*CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Imm);
}
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << markup("<imm:") << '$';
Op.getExpr()->print(O, &MAI);
O << markup(">");
}
}
void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &BaseReg = MI->getOperand(Op + X86::AddrBaseReg);
const MCOperand &IndexReg = MI->getOperand(Op + X86::AddrIndexReg);
const MCOperand &DispSpec = MI->getOperand(Op + X86::AddrDisp);
O << markup("<mem:");
// If this has a segment register, print it.
printOptionalSegReg(MI, Op + X86::AddrSegmentReg, O);
if (DispSpec.isImm()) {
int64_t DispVal = DispSpec.getImm();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
O << formatImm(DispVal);
} else {
assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");
DispSpec.getExpr()->print(O, &MAI);
}
if (IndexReg.getReg() || BaseReg.getReg()) {
O << '(';
if (BaseReg.getReg())
printOperand(MI, Op + X86::AddrBaseReg, O);
if (IndexReg.getReg()) {
O << ',';
printOperand(MI, Op + X86::AddrIndexReg, O);
unsigned ScaleVal = MI->getOperand(Op + X86::AddrScaleAmt).getImm();
if (ScaleVal != 1) {
O << ',' << markup("<imm:") << ScaleVal // never printed in hex.
<< markup(">");
}
}
O << ')';
}
O << markup(">");
}
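// Illustrative rendered forms (with markup disabled): "-8(%rbp,%rax,4)",
// "(%rcx)" and "%gs:16".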
void X86ATTInstPrinter::printSrcIdx(const MCInst *MI, unsigned Op,
raw_ostream &O) {
O << markup("<mem:");
// If this has a segment register, print it.
printOptionalSegReg(MI, Op + 1, O);
O << "(";
printOperand(MI, Op, O);
O << ")";
O << markup(">");
}
void X86ATTInstPrinter::printDstIdx(const MCInst *MI, unsigned Op,
raw_ostream &O) {
O << markup("<mem:");
O << "%es:(";
printOperand(MI, Op, O);
O << ")";
O << markup(">");
}
void X86ATTInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &DispSpec = MI->getOperand(Op);
O << markup("<mem:");
// If this has a segment register, print it.
printOptionalSegReg(MI, Op + 1, O);
if (DispSpec.isImm()) {
O << formatImm(DispSpec.getImm());
} else {
assert(DispSpec.isExpr() && "non-immediate displacement?");
DispSpec.getExpr()->print(O, &MAI);
}
O << markup(">");
}
void X86ATTInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
raw_ostream &O) {
if (MI->getOperand(Op).isExpr())
return printOperand(MI, Op, O);
O << markup("<imm:") << '$' << formatImm(MI->getOperand(Op).getImm() & 0xff)
<< markup(">");
}
+
+void X86ATTInstPrinter::printSTiRegOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &OS) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ unsigned Reg = Op.getReg();
+ // Override the default printing to print st(0) instead of st.
+ if (Reg == X86::ST0)
+ OS << markup("<reg:") << "%st(0)" << markup(">");
+ else
+ printRegName(OS, Reg);
+}
Index: vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h (revision 344166)
@@ -1,138 +1,139 @@
//=- X86ATTInstPrinter.h - Convert X86 MCInst to assembly syntax --*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class prints an X86 MCInst to AT&T style .s file syntax.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_X86_INSTPRINTER_X86ATTINSTPRINTER_H
#define LLVM_LIB_TARGET_X86_INSTPRINTER_X86ATTINSTPRINTER_H
#include "X86InstPrinterCommon.h"
namespace llvm {
class X86ATTInstPrinter final : public X86InstPrinterCommon {
public:
X86ATTInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
const MCRegisterInfo &MRI)
: X86InstPrinterCommon(MAI, MII, MRI) {}
void printRegName(raw_ostream &OS, unsigned RegNo) const override;
void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot,
const MCSubtargetInfo &STI) override;
// Autogenerated by tblgen, returns true if we successfully printed an
// alias.
bool printAliasInstr(const MCInst *MI, raw_ostream &OS);
void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
unsigned PrintMethodIdx, raw_ostream &O);
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &OS);
static const char *getRegisterName(unsigned RegNo);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS) override;
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printSrcIdx(const MCInst *MI, unsigned Op, raw_ostream &O);
void printDstIdx(const MCInst *MI, unsigned Op, raw_ostream &O);
void printU8Imm(const MCInst *MI, unsigned Op, raw_ostream &OS);
+ void printSTiRegOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printanymem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printi8mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printi16mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printi32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printi64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printi128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printi512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printf64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printf80mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printf128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printf512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printSrcIdx8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printSrcIdx(MI, OpNo, O);
}
void printSrcIdx16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printSrcIdx(MI, OpNo, O);
}
void printSrcIdx32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printSrcIdx(MI, OpNo, O);
}
void printSrcIdx64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printSrcIdx(MI, OpNo, O);
}
void printDstIdx8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printDstIdx(MI, OpNo, O);
}
void printDstIdx16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printDstIdx(MI, OpNo, O);
}
void printDstIdx32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printDstIdx(MI, OpNo, O);
}
void printDstIdx64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printDstIdx(MI, OpNo, O);
}
void printMemOffs8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemOffset(MI, OpNo, O);
}
void printMemOffs16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemOffset(MI, OpNo, O);
}
void printMemOffs32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemOffset(MI, OpNo, O);
}
void printMemOffs64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemOffset(MI, OpNo, O);
}
private:
bool HasCustomInstComment;
};
} // end namespace llvm
#endif // LLVM_LIB_TARGET_X86_INSTPRINTER_X86ATTINSTPRINTER_H
Index: vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp (revision 344166)
@@ -1,162 +1,173 @@
//===-- X86IntelInstPrinter.cpp - Intel assembly instruction printing -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file includes code for rendering MCInst instances as Intel-style
// assembly.
//
//===----------------------------------------------------------------------===//
#include "X86IntelInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstComments.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
#include "X86GenAsmWriter1.inc"
void X86IntelInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
OS << getRegisterName(RegNo);
}
void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
StringRef Annot,
const MCSubtargetInfo &STI) {
printInstFlags(MI, OS);
// In 16-bit mode, print data16 as data32.
if (MI->getOpcode() == X86::DATA16_PREFIX &&
STI.getFeatureBits()[X86::Mode16Bit]) {
OS << "\tdata32";
} else
printInstruction(MI, OS);
// Next always print the annotation.
printAnnotation(OS, Annot);
// If verbose assembly is enabled, we can print some informative comments.
if (CommentStream)
EmitAnyX86InstComments(MI, *CommentStream, MII);
}
void X86IntelInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
printRegName(O, Op.getReg());
} else if (Op.isImm()) {
O << formatImm((int64_t)Op.getImm());
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << "offset ";
Op.getExpr()->print(O, &MAI);
}
}
void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &BaseReg = MI->getOperand(Op+X86::AddrBaseReg);
unsigned ScaleVal = MI->getOperand(Op+X86::AddrScaleAmt).getImm();
const MCOperand &IndexReg = MI->getOperand(Op+X86::AddrIndexReg);
const MCOperand &DispSpec = MI->getOperand(Op+X86::AddrDisp);
// If this has a segment register, print it.
printOptionalSegReg(MI, Op + X86::AddrSegmentReg, O);
O << '[';
bool NeedPlus = false;
if (BaseReg.getReg()) {
printOperand(MI, Op+X86::AddrBaseReg, O);
NeedPlus = true;
}
if (IndexReg.getReg()) {
if (NeedPlus) O << " + ";
if (ScaleVal != 1)
O << ScaleVal << '*';
printOperand(MI, Op+X86::AddrIndexReg, O);
NeedPlus = true;
}
if (!DispSpec.isImm()) {
if (NeedPlus) O << " + ";
assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");
DispSpec.getExpr()->print(O, &MAI);
} else {
int64_t DispVal = DispSpec.getImm();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg())) {
if (NeedPlus) {
if (DispVal > 0)
O << " + ";
else {
O << " - ";
DispVal = -DispVal;
}
}
O << formatImm(DispVal);
}
}
O << ']';
}
void X86IntelInstPrinter::printSrcIdx(const MCInst *MI, unsigned Op,
raw_ostream &O) {
// If this has a segment register, print it.
printOptionalSegReg(MI, Op + 1, O);
O << '[';
printOperand(MI, Op, O);
O << ']';
}
void X86IntelInstPrinter::printDstIdx(const MCInst *MI, unsigned Op,
raw_ostream &O) {
// DI accesses are always ES-based.
O << "es:[";
printOperand(MI, Op, O);
O << ']';
}
void X86IntelInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &DispSpec = MI->getOperand(Op);
// If this has a segment register, print it.
printOptionalSegReg(MI, Op + 1, O);
O << '[';
if (DispSpec.isImm()) {
O << formatImm(DispSpec.getImm());
} else {
assert(DispSpec.isExpr() && "non-immediate displacement?");
DispSpec.getExpr()->print(O, &MAI);
}
O << ']';
}
void X86IntelInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
raw_ostream &O) {
if (MI->getOperand(Op).isExpr())
return MI->getOperand(Op).getExpr()->print(O, &MAI);
O << formatImm(MI->getOperand(Op).getImm() & 0xff);
}
+
+void X86IntelInstPrinter::printSTiRegOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &OS) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ unsigned Reg = Op.getReg();
+ // Override the default printing to print st(0) instead of st.
+ if (Reg == X86::ST0)
+ OS << "st(0)";
+ else
+ printRegName(OS, Reg);
+}
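The new printSTiRegOperand override only special-cases ST0, because the generated register-name table spells it as the bare "st" while every other stack register already prints with its index. A minimal standalone sketch of the behavior, with a stand-in enum and helper instead of the LLVM MC types:

// Illustrative sketch only; regName stands in for getRegisterName().
#include <iostream>
#include <string>

namespace sketch {
enum Reg { ST0, ST1, ST7 };

// The register-name table spells ST0 as plain "st".
std::string regName(Reg R) {
  switch (R) {
  case ST0: return "st";
  case ST1: return "st(1)";
  case ST7: return "st(7)";
  }
  return "?";
}

std::string printSTiRegOperand(Reg R) {
  if (R == ST0)        // special case: always print the explicit index
    return "st(0)";
  return regName(R);   // other ST registers already carry their index
}
} // namespace sketch

int main() {
  std::cout << sketch::printSTiRegOperand(sketch::ST0) << "\n"; // st(0)
  std::cout << sketch::printSTiRegOperand(sketch::ST7) << "\n"; // st(7)
}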
Index: vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h (revision 344166)
@@ -1,157 +1,158 @@
//= X86IntelInstPrinter.h - Convert X86 MCInst to assembly syntax -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class prints an X86 MCInst to Intel style .s file syntax.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_X86_INSTPRINTER_X86INTELINSTPRINTER_H
#define LLVM_LIB_TARGET_X86_INSTPRINTER_X86INTELINSTPRINTER_H
#include "X86InstPrinterCommon.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
class X86IntelInstPrinter final : public X86InstPrinterCommon {
public:
X86IntelInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
const MCRegisterInfo &MRI)
: X86InstPrinterCommon(MAI, MII, MRI) {}
void printRegName(raw_ostream &OS, unsigned RegNo) const override;
void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot,
const MCSubtargetInfo &STI) override;
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) override;
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printSrcIdx(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printDstIdx(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU8Imm(const MCInst *MI, unsigned Op, raw_ostream &O);
+ void printSTiRegOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printanymem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printi8mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "byte ptr ";
printMemReference(MI, OpNo, O);
}
void printi16mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "word ptr ";
printMemReference(MI, OpNo, O);
}
void printi32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "dword ptr ";
printMemReference(MI, OpNo, O);
}
void printi64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "qword ptr ";
printMemReference(MI, OpNo, O);
}
void printi128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "xmmword ptr ";
printMemReference(MI, OpNo, O);
}
void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "ymmword ptr ";
printMemReference(MI, OpNo, O);
}
void printi512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "zmmword ptr ";
printMemReference(MI, OpNo, O);
}
void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "dword ptr ";
printMemReference(MI, OpNo, O);
}
void printf64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "qword ptr ";
printMemReference(MI, OpNo, O);
}
void printf80mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "tbyte ptr ";
printMemReference(MI, OpNo, O);
}
void printf128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "xmmword ptr ";
printMemReference(MI, OpNo, O);
}
void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "ymmword ptr ";
printMemReference(MI, OpNo, O);
}
void printf512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "zmmword ptr ";
printMemReference(MI, OpNo, O);
}
void printSrcIdx8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "byte ptr ";
printSrcIdx(MI, OpNo, O);
}
void printSrcIdx16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "word ptr ";
printSrcIdx(MI, OpNo, O);
}
void printSrcIdx32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "dword ptr ";
printSrcIdx(MI, OpNo, O);
}
void printSrcIdx64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "qword ptr ";
printSrcIdx(MI, OpNo, O);
}
void printDstIdx8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "byte ptr ";
printDstIdx(MI, OpNo, O);
}
void printDstIdx16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "word ptr ";
printDstIdx(MI, OpNo, O);
}
void printDstIdx32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "dword ptr ";
printDstIdx(MI, OpNo, O);
}
void printDstIdx64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "qword ptr ";
printDstIdx(MI, OpNo, O);
}
void printMemOffs8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "byte ptr ";
printMemOffset(MI, OpNo, O);
}
void printMemOffs16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "word ptr ";
printMemOffset(MI, OpNo, O);
}
void printMemOffs32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "dword ptr ";
printMemOffset(MI, OpNo, O);
}
void printMemOffs64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "qword ptr ";
printMemOffset(MI, OpNo, O);
}
};
} // end namespace llvm
#endif // LLVM_LIB_TARGET_X86_INSTPRINTER_X86INTELINSTPRINTER_H
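In contrast to the AT&T printer, the Intel-syntax wrappers above prepend an explicit size directive to every memory operand, and printMemReference renders the address as [base + scale*index ± disp]. A minimal standalone sketch of that combination; the helper names are illustrative, not LLVM APIs:

// Illustrative sketch only; mapping matches the wrappers in the header above.
#include <cstdint>
#include <iostream>
#include <string>

namespace sketch {
std::string ptrDirective(unsigned Bits) {
  switch (Bits) {
  case 8:   return "byte ptr ";
  case 16:  return "word ptr ";
  case 32:  return "dword ptr ";
  case 64:  return "qword ptr ";
  case 80:  return "tbyte ptr ";
  case 128: return "xmmword ptr ";
  case 256: return "ymmword ptr ";
  case 512: return "zmmword ptr ";
  default:  return "";
  }
}

std::string intelMemRef(unsigned Bits, const std::string &Base,
                        const std::string &Index, unsigned Scale,
                        int64_t Disp) {
  std::string S = ptrDirective(Bits) + "[" + Base;
  if (!Index.empty()) {
    S += " + ";
    if (Scale != 1)
      S += std::to_string(Scale) + "*";
    S += Index;
  }
  if (Disp > 0)
    S += " + " + std::to_string(Disp);
  else if (Disp < 0)
    S += " - " + std::to_string(-Disp);
  return S + "]";
}
} // namespace sketch

int main() {
  // prints: qword ptr [rbp + 8*rcx - 16]
  std::cout << sketch::intelMemRef(64, "rbp", "rcx", 8, -16) << "\n";
}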
Index: vendor/llvm/dist-release_80/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/X86ISelLowering.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/X86ISelLowering.cpp (revision 344166)
@@ -1,42730 +1,42738 @@
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>
using namespace llvm;
#define DEBUG_TYPE "x86-isel"
STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool> ExperimentalVectorWideningLegalization(
"x86-experimental-vector-widening-legalization", cl::init(false),
cl::desc("Enable an experimental vector type legalization through widening "
"rather than promotion."),
cl::Hidden);
static cl::opt<int> ExperimentalPrefLoopAlignment(
"x86-experimental-pref-loop-alignment", cl::init(4),
cl::desc("Sets the preferable loop alignment for experiments "
"(the last x86-experimental-pref-loop-alignment bits"
" of the loop header PC will be 0)."),
cl::Hidden);
static cl::opt<bool> MulConstantOptimization(
"mul-constant-optimization", cl::init(true),
cl::desc("Replace 'mul x, Const' with more effective instructions like "
"SHIFT, LEA, etc."),
cl::Hidden);
/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
const char *Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
const X86Subtarget &STI)
: TargetLowering(TM), Subtarget(STI) {
bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
X86ScalarSSEf64 = Subtarget.hasSSE2();
X86ScalarSSEf32 = Subtarget.hasSSE1();
MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
// Set up the TargetLowering object.
// X86 is weird. It always uses i8 for shift amounts and setcc results.
setBooleanContents(ZeroOrOneBooleanContent);
// X86-SSE is even stranger. It uses -1 or 0 for vector masks.
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
// For 64-bit, since we have so many registers, use the ILP scheduler.
// For 32-bit, use the register pressure specific scheduling.
// For Atom, always use ILP scheduling.
if (Subtarget.isAtom())
setSchedulingPreference(Sched::ILP);
else if (Subtarget.is64Bit())
setSchedulingPreference(Sched::ILP);
else
setSchedulingPreference(Sched::RegPressure);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
// Bypass expensive divides and use cheaper ones.
if (TM.getOptLevel() >= CodeGenOpt::Default) {
if (Subtarget.hasSlowDivide32())
addBypassSlowDiv(32, 8);
if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
addBypassSlowDiv(64, 32);
}
if (Subtarget.isTargetKnownWindowsMSVC() ||
Subtarget.isTargetWindowsItanium()) {
// Setup Windows compiler runtime calls.
setLibcallName(RTLIB::SDIV_I64, "_alldiv");
setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
setLibcallName(RTLIB::SREM_I64, "_allrem");
setLibcallName(RTLIB::UREM_I64, "_aullrem");
setLibcallName(RTLIB::MUL_I64, "_allmul");
setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
}
if (Subtarget.isTargetDarwin()) {
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(false);
setUseUnderscoreLongJmp(false);
} else if (Subtarget.isTargetWindowsGNU()) {
// MS runtime is weird: it exports _setjmp, but longjmp!
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(false);
} else {
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(true);
}
// Set up the register classes.
addRegisterClass(MVT::i8, &X86::GR8RegClass);
addRegisterClass(MVT::i16, &X86::GR16RegClass);
addRegisterClass(MVT::i32, &X86::GR32RegClass);
if (Subtarget.is64Bit())
addRegisterClass(MVT::i64, &X86::GR64RegClass);
for (MVT VT : MVT::integer_valuetypes())
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
// We don't accept any truncstore of integer registers.
setTruncStoreAction(MVT::i64, MVT::i32, Expand);
setTruncStoreAction(MVT::i64, MVT::i16, Expand);
setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
setTruncStoreAction(MVT::i32, MVT::i16, Expand);
setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
setTruncStoreAction(MVT::i16, MVT::i8, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// SETOEQ and SETUNE require checking two conditions.
setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
// Integer absolute.
if (Subtarget.hasCMov()) {
setOperationAction(ISD::ABS , MVT::i16 , Custom);
setOperationAction(ISD::ABS , MVT::i32 , Custom);
if (Subtarget.is64Bit())
setOperationAction(ISD::ABS , MVT::i64 , Custom);
}
// Funnel shifts.
for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
setOperationAction(ShiftOp , MVT::i16 , Custom);
setOperationAction(ShiftOp , MVT::i32 , Custom);
if (Subtarget.is64Bit())
setOperationAction(ShiftOp , MVT::i64 , Custom);
}
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
if (Subtarget.is64Bit()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
// f32/f64 are legal, f80 is custom.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
else
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
} else if (!Subtarget.useSoftFloat()) {
// We have an algorithm for SSE2->double, and we turn this into a
// 64-bit FILD followed by conditional FADD for other targets.
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
// We have an algorithm for SSE2, and we turn this into a 64-bit
// FILD or VCVTUSI2SS/SD for other targets.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
} else {
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
}
// Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
// this operation.
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
if (!Subtarget.useSoftFloat()) {
// SSE has no i16 to fp conversion, only i32.
if (X86ScalarSSEf32) {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
// f32 and f64 cases are Legal, f80 case is not
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
} else {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
}
} else {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Expand);
}
// Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
// this operation.
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
if (!Subtarget.useSoftFloat()) {
// In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
// are Legal, f80 is custom lowered.
setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
if (X86ScalarSSEf32) {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
// f32 and f64 cases are Legal, f80 case is not
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
} else {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
}
} else {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Expand);
setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Expand);
}
// Handle FP_TO_UINT by promoting the destination to a larger signed
// conversion.
setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
if (Subtarget.is64Bit()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
// FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
} else {
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
}
} else if (!Subtarget.useSoftFloat()) {
// Since AVX is a superset of SSE3, only check for SSE here.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
// Expand FP_TO_UINT into a select.
// FIXME: We would like to use a Custom expander here eventually to do
// the optimal thing for SSE vs. the default expansion in the legalizer.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
else
// With AVX512 we can use vcvts[ds]2usi for f32/f64->i32, f80 is custom.
// With SSE3 we can use fisttpll to convert to a signed i64; without
// SSE, we're stuck with a fistpll.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
}
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
if (!X86ScalarSSEf64) {
setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
// Without SSE, i64->f64 goes through memory.
setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
}
} else if (!Subtarget.is64Bit())
setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
// Scalar integer divide and remainder are lowered to use operations that
// produce two results, to match the available instructions. This exposes
// the two-result form to trivial CSE, which is able to combine x/y and x%y
// into a single instruction.
//
// Scalar integer multiply-high is also lowered to use two-result
// operations, to match the available instructions. However, plain multiply
// (low) operations are left as Legal, as there are single-result
// instructions for this in x86. Using the two-result multiply instructions
// when both high and low results are needed must be arranged by dagcombine.
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
}
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BRCOND , MVT::Other, Custom);
for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
setOperationAction(ISD::BR_CC, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
}
if (Subtarget.is64Bit())
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::FREM , MVT::f80 , Expand);
setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
// Promote the i8 variants and force them on up to i32 which has a shorter
// encoding.
setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
if (!Subtarget.hasBMI()) {
setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
}
}
if (Subtarget.hasLZCNT()) {
// When promoting the i8 variants, force them to i32 for a shorter
// encoding.
setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
} else {
setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
}
}
// Special handling for half-precision floating point conversions.
// If we don't have F16C support, then lower half float conversions
// into library calls.
if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
}
// There's never any support for operations beyond MVT::f32.
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f80, MVT::f16, Expand);
if (Subtarget.hasPOPCNT()) {
setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
} else {
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
if (Subtarget.is64Bit())
setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
}
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
if (!Subtarget.hasMOVBE())
setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
// X86 wants to expand cmov itself.
for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
}
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
}
// Custom action for SELECT MMX and expand action for SELECT_CC MMX
setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
// NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
// LLVM/Clang supports zero-cost DWARF and SEH exception handling.
setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
// Darwin ABI issue.
for (auto VT : { MVT::i32, MVT::i64 }) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
setOperationAction(ISD::ConstantPool , VT, Custom);
setOperationAction(ISD::JumpTable , VT, Custom);
setOperationAction(ISD::GlobalAddress , VT, Custom);
setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
setOperationAction(ISD::ExternalSymbol , VT, Custom);
setOperationAction(ISD::BlockAddress , VT, Custom);
}
// 64-bit shl, sra, srl (iff 32-bit x86)
for (auto VT : { MVT::i32, MVT::i64 }) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
setOperationAction(ISD::SHL_PARTS, VT, Custom);
setOperationAction(ISD::SRA_PARTS, VT, Custom);
setOperationAction(ISD::SRL_PARTS, VT, Custom);
}
if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
// Expand certain atomics
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
}
if (Subtarget.hasCmpxchg16b()) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
}
// FIXME - use subtarget debug flags
if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
!Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
}
setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
setOperationAction(ISD::TRAP, MVT::Other, Legal);
setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
bool Is64Bit = Subtarget.is64Bit();
setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
// GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
// f32 and f64 use SSE.
// Set up the FP register classes.
addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
: &X86::FR32RegClass);
addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
: &X86::FR64RegClass);
for (auto VT : { MVT::f32, MVT::f64 }) {
// Use ANDPD to simulate FABS.
setOperationAction(ISD::FABS, VT, Custom);
// Use XORP to simulate FNEG.
setOperationAction(ISD::FNEG, VT, Custom);
// Use ANDPD and ORPD to simulate FCOPYSIGN.
setOperationAction(ISD::FCOPYSIGN, VT, Custom);
// These might be better off as horizontal vector ops.
setOperationAction(ISD::FADD, VT, Custom);
setOperationAction(ISD::FSUB, VT, Custom);
// We don't support sin/cos/fmod
setOperationAction(ISD::FSIN , VT, Expand);
setOperationAction(ISD::FCOS , VT, Expand);
setOperationAction(ISD::FSINCOS, VT, Expand);
}
// Lower this to MOVMSK plus an AND.
setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
} else if (!useSoftFloat() && X86ScalarSSEf32 && (UseX87 || Is64Bit)) {
// Use SSE for f32, x87 for f64.
// Set up the FP register classes.
addRegisterClass(MVT::f32, &X86::FR32RegClass);
if (UseX87)
addRegisterClass(MVT::f64, &X86::RFP64RegClass);
// Use ANDPS to simulate FABS.
setOperationAction(ISD::FABS , MVT::f32, Custom);
// Use XORP to simulate FNEG.
setOperationAction(ISD::FNEG , MVT::f32, Custom);
if (UseX87)
setOperationAction(ISD::UNDEF, MVT::f64, Expand);
// Use ANDPS and ORPS to simulate FCOPYSIGN.
if (UseX87)
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
// We don't support sin/cos/fmod
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
if (UseX87) {
// Always expand sin/cos functions even though x87 has an instruction.
setOperationAction(ISD::FSIN, MVT::f64, Expand);
setOperationAction(ISD::FCOS, MVT::f64, Expand);
setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
}
} else if (UseX87) {
// f32 and f64 in x87.
// Set up the FP register classes.
addRegisterClass(MVT::f64, &X86::RFP64RegClass);
addRegisterClass(MVT::f32, &X86::RFP32RegClass);
for (auto VT : { MVT::f32, MVT::f64 }) {
setOperationAction(ISD::UNDEF, VT, Expand);
setOperationAction(ISD::FCOPYSIGN, VT, Expand);
// Always expand sin/cos functions even though x87 has an instruction.
setOperationAction(ISD::FSIN , VT, Expand);
setOperationAction(ISD::FCOS , VT, Expand);
setOperationAction(ISD::FSINCOS, VT, Expand);
}
}
// Expand FP32 immediates into loads from the stack, save special cases.
if (isTypeLegal(MVT::f32)) {
if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
addLegalFPImmediate(APFloat(+0.0f)); // FLD0
addLegalFPImmediate(APFloat(+1.0f)); // FLD1
addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
} else // SSE immediates.
addLegalFPImmediate(APFloat(+0.0f)); // xorps
}
// Expand FP64 immediates into loads from the stack, save special cases.
if (isTypeLegal(MVT::f64)) {
if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
addLegalFPImmediate(APFloat(+0.0)); // FLD0
addLegalFPImmediate(APFloat(+1.0)); // FLD1
addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
} else // SSE immediates.
addLegalFPImmediate(APFloat(+0.0)); // xorpd
}
// We don't support FMA.
setOperationAction(ISD::FMA, MVT::f64, Expand);
setOperationAction(ISD::FMA, MVT::f32, Expand);
// Long double always uses X87, except f128 in MMX.
if (UseX87) {
if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
setOperationAction(ISD::FABS , MVT::f128, Custom);
setOperationAction(ISD::FNEG , MVT::f128, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
}
addRegisterClass(MVT::f80, &X86::RFP80RegClass);
setOperationAction(ISD::UNDEF, MVT::f80, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
{
APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
addLegalFPImmediate(TmpFlt); // FLD0
TmpFlt.changeSign();
addLegalFPImmediate(TmpFlt); // FLD0/FCHS
bool ignored;
APFloat TmpFlt2(+1.0);
TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
&ignored);
addLegalFPImmediate(TmpFlt2); // FLD1
TmpFlt2.changeSign();
addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
}
// Always expand sin/cos functions even though x87 has an instruction.
setOperationAction(ISD::FSIN , MVT::f80, Expand);
setOperationAction(ISD::FCOS , MVT::f80, Expand);
setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
setOperationAction(ISD::FCEIL, MVT::f80, Expand);
setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
setOperationAction(ISD::FRINT, MVT::f80, Expand);
setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
setOperationAction(ISD::FMA, MVT::f80, Expand);
}
// Always use a library call for pow.
setOperationAction(ISD::FPOW , MVT::f32 , Expand);
setOperationAction(ISD::FPOW , MVT::f64 , Expand);
setOperationAction(ISD::FPOW , MVT::f80 , Expand);
setOperationAction(ISD::FLOG, MVT::f80, Expand);
setOperationAction(ISD::FLOG2, MVT::f80, Expand);
setOperationAction(ISD::FLOG10, MVT::f80, Expand);
setOperationAction(ISD::FEXP, MVT::f80, Expand);
setOperationAction(ISD::FEXP2, MVT::f80, Expand);
setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
// Some FP actions are always expanded for vector types.
for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FSINCOS, VT, Expand);
setOperationAction(ISD::FCOS, VT, Expand);
setOperationAction(ISD::FREM, VT, Expand);
setOperationAction(ISD::FCOPYSIGN, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::FLOG, VT, Expand);
setOperationAction(ISD::FLOG2, VT, Expand);
setOperationAction(ISD::FLOG10, VT, Expand);
setOperationAction(ISD::FEXP, VT, Expand);
setOperationAction(ISD::FEXP2, VT, Expand);
}
// First set operation action for all vector types to either promote
// (for widening) or expand (for scalarization). Then we will selectively
// turn on ones that can be effectively codegen'd.
for (MVT VT : MVT::vector_valuetypes()) {
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
setOperationAction(ISD::FMA, VT, Expand);
setOperationAction(ISD::FFLOOR, VT, Expand);
setOperationAction(ISD::FCEIL, VT, Expand);
setOperationAction(ISD::FTRUNC, VT, Expand);
setOperationAction(ISD::FRINT, VT, Expand);
setOperationAction(ISD::FNEARBYINT, VT, Expand);
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Expand);
setOperationAction(ISD::UDIVREM, VT, Expand);
setOperationAction(ISD::CTPOP, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::SETCC, VT, Expand);
setOperationAction(ISD::FP_TO_UINT, VT, Expand);
setOperationAction(ISD::FP_TO_SINT, VT, Expand);
setOperationAction(ISD::UINT_TO_FP, VT, Expand);
setOperationAction(ISD::SINT_TO_FP, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
setOperationAction(ISD::TRUNCATE, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
setOperationAction(ISD::ANY_EXTEND, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
for (MVT InnerVT : MVT::vector_valuetypes()) {
setTruncStoreAction(InnerVT, VT, Expand);
setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
// N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
// types, we have to deal with them whether we ask for Expansion or not.
// Setting Expand causes its own optimisation problems though, so leave
// them legal.
if (VT.getVectorElementType() == MVT::i1)
setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
// EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
// split/scalarized right now.
if (VT.getVectorElementType() == MVT::f16)
setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
}
}
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
// No operations on x86mmx supported, everything uses intrinsics.
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
setOperationAction(ISD::FABS, MVT::v4f32, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
// FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
// registers cannot be used even for integer operations.
addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
setOperationAction(ISD::SDIV, VT, Custom);
setOperationAction(ISD::SREM, VT, Custom);
setOperationAction(ISD::UDIV, VT, Custom);
setOperationAction(ISD::UREM, VT, Custom);
}
setOperationAction(ISD::MUL, MVT::v2i8, Custom);
setOperationAction(ISD::MUL, MVT::v2i16, Custom);
setOperationAction(ISD::MUL, MVT::v2i32, Custom);
setOperationAction(ISD::MUL, MVT::v4i8, Custom);
setOperationAction(ISD::MUL, MVT::v4i16, Custom);
setOperationAction(ISD::MUL, MVT::v8i8, Custom);
setOperationAction(ISD::MUL, MVT::v16i8, Custom);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v2i64, Custom);
setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
setOperationAction(ISD::MUL, MVT::v8i16, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
setOperationAction(ISD::FABS, MVT::v2f64, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
}
setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
if (!ExperimentalVectorWideningLegalization) {
// Use widening instead of promotion.
for (auto VT : { MVT::v8i8, MVT::v4i8, MVT::v2i8,
MVT::v4i16, MVT::v2i16 }) {
setOperationAction(ISD::UADDSAT, VT, Custom);
setOperationAction(ISD::SADDSAT, VT, Custom);
setOperationAction(ISD::USUBSAT, VT, Custom);
setOperationAction(ISD::SSUBSAT, VT, Custom);
}
}
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
// Provide custom widening for v2f32 setcc. This is really for VLX when
// setcc result type returns v2i1/v4i1 vector for v2f32/v4f32 leading to
// type legalization changing the result type to v4i1 during widening.
// It works fine for SSE2 and is probably faster so no need to qualify with
// VLX support.
setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::ABS, VT, Custom);
// The condition codes aren't legal in SSE/AVX and under AVX512 we use
// setcc all the way to isel and prefer SETGT in some isel patterns.
setCondCodeAction(ISD::SETLT, VT, Custom);
setCondCodeAction(ISD::SETLE, VT, Custom);
}
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
// We support custom legalizing of sext and anyext loads for specific
// memory vector types which we can load as a scalar (or sequence of
// scalars) and extend in-register to a legal 128-bit vector type. For sext
// loads these must work with a single scalar load.
for (MVT VT : MVT::integer_vector_valuetypes()) {
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
}
for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
if (VT == MVT::v2i64 && !Subtarget.is64Bit())
continue;
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
// Custom lower v2i64 and v2f64 selects.
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
// Custom legalize these to avoid over promotion or custom promotion.
setOperationAction(ISD::FP_TO_SINT, MVT::v2i8, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i8, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i8, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i8, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v4i8, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v8i8, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i16, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
// By marking FP_TO_SINT v8i16 as Custom, we trick type legalization into
// promoting v8i8 FP_TO_UINT into FP_TO_SINT. When the v8i16 FP_TO_SINT is
// split again based on the input type, this will cause an AssertSExt i16 to
// be emitted instead of an AssertZExt. This will allow packssdw followed by
// packuswb to be used to truncate to v8i8. This is necessary since packusdw
// isn't available until sse4.1.
setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
// Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
// We want to legalize this to an f64 load rather than an i64 load on
// 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
// store.
setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
setOperationAction(ISD::STORE, MVT::v2f32, Custom);
setOperationAction(ISD::STORE, MVT::v2i32, Custom);
setOperationAction(ISD::STORE, MVT::v4i16, Custom);
setOperationAction(ISD::STORE, MVT::v8i8, Custom);
setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
if (!Subtarget.hasAVX512())
setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
if (ExperimentalVectorWideningLegalization) {
setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
} else {
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
}
// In the customized shift lowering, the legal v4i32/v2i64 cases
// in AVX2 will be recognized.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
}
setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
// With AVX512, expanding (and promoting the shifts) is better.
if (!Subtarget.hasAVX512())
setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
setOperationAction(ISD::ABS, MVT::v16i8, Legal);
setOperationAction(ISD::ABS, MVT::v8i16, Legal);
setOperationAction(ISD::ABS, MVT::v4i32, Legal);
setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
// These might be better off as horizontal vector ops.
setOperationAction(ISD::ADD, MVT::i16, Custom);
setOperationAction(ISD::ADD, MVT::i32, Custom);
setOperationAction(ISD::SUB, MVT::i16, Custom);
setOperationAction(ISD::SUB, MVT::i32, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
setOperationAction(ISD::FCEIL, RoundedTy, Legal);
setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
setOperationAction(ISD::FRINT, RoundedTy, Legal);
setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
}
setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
// We directly match byte blends in the backend as they match the VSELECT
// condition form.
setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
// SSE41 brings specific instructions for doing vector sign extend even in
// cases where we don't have SRA.
for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
}
if (!ExperimentalVectorWideningLegalization) {
// Avoid narrow result types when widening. The legal types are listed
// in the next loop.
for (MVT VT : MVT::integer_vector_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
}
}
// SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
if (!ExperimentalVectorWideningLegalization)
setLoadExtAction(LoadExtOp, MVT::v2i32, MVT::v2i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
}
// i8 vectors are custom because the source register and source
// memory operand types are not the same width.
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
setOperationAction(ISD::ROTL, VT, Custom);
// XOP can efficiently perform BITREVERSE with VPPERM.
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
setOperationAction(ISD::BITREVERSE, VT, Custom);
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
setOperationAction(ISD::BITREVERSE, VT, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
bool HasInt256 = Subtarget.hasInt256();
addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
setOperationAction(ISD::FFLOOR, VT, Legal);
setOperationAction(ISD::FCEIL, VT, Legal);
setOperationAction(ISD::FTRUNC, VT, Legal);
setOperationAction(ISD::FRINT, VT, Legal);
setOperationAction(ISD::FNEARBYINT, VT, Legal);
setOperationAction(ISD::FNEG, VT, Custom);
setOperationAction(ISD::FABS, VT, Custom);
setOperationAction(ISD::FCOPYSIGN, VT, Custom);
}
// (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
// even though v8i16 is a legal type.
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
if (!Subtarget.hasAVX512())
setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
// In the customized shift lowering, the legal v8i32/v4i64 cases
// in AVX2 will be recognized.
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
}
if (ExperimentalVectorWideningLegalization) {
// These types need custom splitting if their input is a 128-bit vector.
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
}
setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
// With BWI, expanding (and promoting the shifts) is better.
if (!Subtarget.hasBWI())
setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
setOperationAction(ISD::ANY_EXTEND, VT, Custom);
}
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::CTLZ, VT, Custom);
// TODO - remove this once 256-bit X86ISD::ANDNP is correctly split.
setOperationAction(ISD::CTTZ, VT, HasInt256 ? Expand : Custom);
// The condition codes aren't legal in SSE/AVX and under AVX512 we use
// setcc all the way to isel and prefer SETGT in some isel patterns.
setCondCodeAction(ISD::SETLT, VT, Custom);
setCondCodeAction(ISD::SETLE, VT, Custom);
}
if (Subtarget.hasAnyFMA()) {
for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
MVT::v2f64, MVT::v4f64 })
setOperationAction(ISD::FMA, VT, Legal);
}
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
}
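// Integer multiplies and high-multiplies: some become Legal with AVX2
// (HasInt256); the remaining cases are custom lowered.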
setOperationAction(ISD::MUL, MVT::v4i64, Custom);
setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MUL, MVT::v32i8, Custom);
setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
setOperationAction(ISD::ABS, MVT::v4i64, Custom);
setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
}
for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
}
if (HasInt256) {
// The custom lowering for UINT_TO_FP for v8i32 becomes interesting
// when we have a 256bit-wide blend with immediate.
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
// AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
}
}
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
setOperationAction(ISD::MLOAD, VT, Legal);
setOperationAction(ISD::MSTORE, VT, Legal);
}
// Extract subvector is special because the value type
// (result) is 128-bit but the source is 256-bit wide.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
MVT::v4f32, MVT::v2f64 }) {
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
}
// Custom lower several nodes for 256-bit types.
for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
MVT::v8f32, MVT::v4f64 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
}
if (HasInt256)
setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
if (HasInt256) {
// Custom legalize 2x32 to get a little better code.
setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
setOperationAction(ISD::MGATHER, VT, Custom);
}
}
// This block controls legalization of the mask vector sizes that are
// available with AVX512. 512-bit vectors are in a separate block controlled
// by useAVX512Regs.
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
// There is no byte sized k-register load or store without AVX512DQ.
if (!Subtarget.hasDQI()) {
setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
setOperationAction(ISD::STORE, MVT::v1i1, Custom);
setOperationAction(ISD::STORE, MVT::v2i1, Custom);
setOperationAction(ISD::STORE, MVT::v4i1, Custom);
setOperationAction(ISD::STORE, MVT::v8i1, Custom);
}
// Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
setOperationAction(ISD::ANY_EXTEND, VT, Custom);
}
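// Operations on the small mask vector types themselves are custom lowered;
// VSELECT on mask vectors is expanded.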
for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
setOperationAction(ISD::ADD, VT, Custom);
setOperationAction(ISD::SUB, VT, Custom);
setOperationAction(ISD::MUL, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::TRUNCATE, VT, Custom);
setOperationAction(ISD::UADDSAT, VT, Custom);
setOperationAction(ISD::SADDSAT, VT, Custom);
setOperationAction(ISD::USUBSAT, VT, Custom);
setOperationAction(ISD::SSUBSAT, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Expand);
}
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v2i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
}
// This block controls legalization for 512-bit operations with 32/64 bit
// elements. 512-bit vectors can be disabled based on prefer-vector-width and
// required-vector-width function attributes.
if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
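// Sign/zero-extending loads into the 512-bit integer types are legal.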
for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
}
for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
setOperationAction(ISD::FNEG, VT, Custom);
setOperationAction(ISD::FABS, VT, Custom);
setOperationAction(ISD::FMA, VT, Legal);
setOperationAction(ISD::FCOPYSIGN, VT, Custom);
}
setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i16, MVT::v16i32);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i8, MVT::v16i32);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i1, MVT::v16i32);
setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i1, MVT::v16i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i8, MVT::v16i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i16, MVT::v16i32);
setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
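// Truncating stores from the 512-bit integer types are legal.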
setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
if (!Subtarget.hasVLX()) {
// With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
// to 512-bit rather than use the AVX2 instructions so that we can use
// k-masks.
for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
setOperationAction(ISD::MLOAD, VT, Custom);
setOperationAction(ISD::MSTORE, VT, Custom);
}
}
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
if (ExperimentalVectorWideningLegalization) {
// Need to custom widen this if we don't have AVX512BW.
setOperationAction(ISD::ANY_EXTEND, MVT::v8i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i8, Custom);
}
for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
setOperationAction(ISD::FFLOOR, VT, Legal);
setOperationAction(ISD::FCEIL, VT, Legal);
setOperationAction(ISD::FTRUNC, VT, Legal);
setOperationAction(ISD::FRINT, VT, Legal);
setOperationAction(ISD::FNEARBYINT, VT, Legal);
}
// Without BWI we need to use custom lowering to handle MVT::v64i8 input.
for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
}
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
setOperationAction(ISD::MUL, MVT::v8i64, Custom);
setOperationAction(ISD::MUL, MVT::v16i32, Legal);
setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
setOperationAction(ISD::SELECT, MVT::v16i32, Custom);
setOperationAction(ISD::SELECT, MVT::v32i16, Custom);
setOperationAction(ISD::SELECT, MVT::v64i8, Custom);
setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
setOperationAction(ISD::ABS, VT, Legal);
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::ROTL, VT, Custom);
setOperationAction(ISD::ROTR, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
// The condition codes aren't legal in SSE/AVX and under AVX512 we use
// setcc all the way to isel and prefer SETGT in some isel patterns.
setCondCodeAction(ISD::SETLT, VT, Custom);
setCondCodeAction(ISD::SETLE, VT, Custom);
}
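// With AVX512DQ, v8i64 <-> FP conversions and the v8i64 multiply are legal.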
if (Subtarget.hasDQI()) {
setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
setOperationAction(ISD::MUL, MVT::v8i64, Legal);
}
if (Subtarget.hasCDI()) {
// Non-VLX subtargets extend 128/256-bit vectors to use the 512-bit version.
for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
setOperationAction(ISD::CTLZ, VT, Legal);
}
} // Subtarget.hasCDI()
if (Subtarget.hasVPOPCNTDQ()) {
for (auto VT : { MVT::v16i32, MVT::v8i64 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
// Extract subvector is special because the value type
// (result) is 256-bit but the source is 512-bit wide.
// 128-bit was made Legal under AVX1.
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
MVT::v8f32, MVT::v4f64 })
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
setOperationAction(ISD::MLOAD, VT, Legal);
setOperationAction(ISD::MSTORE, VT, Legal);
setOperationAction(ISD::MGATHER, VT, Custom);
setOperationAction(ISD::MSCATTER, VT, Custom);
}
// Need to custom split v32i16/v64i8 bitcasts.
if (!Subtarget.hasBWI()) {
setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
setOperationAction(ISD::BITCAST, MVT::v64i8, Custom);
}
if (Subtarget.hasVBMI2()) {
for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
setOperationAction(ISD::FSHL, VT, Custom);
setOperationAction(ISD::FSHR, VT, Custom);
}
}
}// has AVX-512
// This block controls legalization for operations that don't have
// pre-AVX512 equivalents. Without VLX we use 512-bit operations for
// narrower widths.
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
// These operations are handled on non-VLX by artificially widening in
// isel patterns.
// TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
setOperationAction(ISD::ABS, VT, Legal);
}
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
setOperationAction(ISD::ROTL, VT, Custom);
setOperationAction(ISD::ROTR, VT, Custom);
}
// Custom legalize 2x32 to get a little better code.
setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
setOperationAction(ISD::MSCATTER, VT, Custom);
if (Subtarget.hasDQI()) {
for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
setOperationAction(ISD::SINT_TO_FP, VT, Legal);
setOperationAction(ISD::UINT_TO_FP, VT, Legal);
setOperationAction(ISD::FP_TO_SINT, VT, Legal);
setOperationAction(ISD::FP_TO_UINT, VT, Legal);
setOperationAction(ISD::MUL, VT, Legal);
}
}
if (Subtarget.hasCDI()) {
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
setOperationAction(ISD::CTLZ, VT, Legal);
}
} // Subtarget.hasCDI()
if (Subtarget.hasVPOPCNTDQ()) {
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
}
// This block controls legalization of v32i1/v64i1, which are available with
// AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
// useBWIRegs.
if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
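// As with the narrower mask types above, operations on v32i1/v64i1 are
// custom lowered and VSELECT is expanded.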
for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
setOperationAction(ISD::ADD, VT, Custom);
setOperationAction(ISD::SUB, VT, Custom);
setOperationAction(ISD::MUL, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::UADDSAT, VT, Custom);
setOperationAction(ISD::SADDSAT, VT, Custom);
setOperationAction(ISD::USUBSAT, VT, Custom);
setOperationAction(ISD::SSUBSAT, VT, Custom);
setOperationAction(ISD::TRUNCATE, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
}
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
for (auto VT : { MVT::v16i1, MVT::v32i1 })
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
// Extends from v32i1 masks to 256-bit vectors.
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
}
// This block controls legalization for v32i16 and v64i8. 512-bit vectors can be
// disabled based on prefer-vector-width and required-vector-width function
// attributes.
if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
// Extends from v64i1 masks to 512-bit vectors.
setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::MUL, MVT::v32i16, Legal);
setOperationAction(ISD::MUL, MVT::v64i8, Custom);
setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::ABS, VT, Legal);
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
setOperationAction(ISD::MLOAD, VT, Legal);
setOperationAction(ISD::MSTORE, VT, Legal);
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::CTLZ, VT, Custom);
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::UADDSAT, VT, Legal);
setOperationAction(ISD::SADDSAT, VT, Legal);
setOperationAction(ISD::USUBSAT, VT, Legal);
setOperationAction(ISD::SSUBSAT, VT, Legal);
// The condition codes aren't legal in SSE/AVX and under AVX512 we use
// setcc all the way to isel and prefer SETGT in some isel patterns.
setCondCodeAction(ISD::SETLT, VT, Custom);
setCondCodeAction(ISD::SETLE, VT, Custom);
}
for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
}
if (Subtarget.hasBITALG()) {
for (auto VT : { MVT::v64i8, MVT::v32i16 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
if (Subtarget.hasVBMI2()) {
setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
}
}
if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
}
// These operations are handled on non-VLX by artificially widening in
// isel patterns.
// TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
if (Subtarget.hasBITALG()) {
for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
}
if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
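// With VLX, truncating stores from the 128/256-bit integer types are legal.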
setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
if (Subtarget.hasDQI()) {
// Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
// v2f32 UINT_TO_FP is already custom under SSE2.
setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
"Unexpected operation action!");
// v2i64 FP_TO_S/UINT(v2f32) custom conversion.
setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
}
if (Subtarget.hasBWI()) {
setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
}
if (Subtarget.hasVBMI2()) {
// TODO: Make these legal even without VLX?
for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::FSHL, VT, Custom);
setOperationAction(ISD::FSHR, VT, Custom);
}
}
}
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
if (!Subtarget.is64Bit()) {
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
}
// Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
// handle type legalization for these operations here.
//
// FIXME: We really should do custom legalization for addition and
// subtraction on x86-32 once PR3203 is fixed. We really can't do much better
// than generic legalization for 64-bit multiplication-with-overflow, though.
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
// Add/Sub/Mul with overflow operations are custom lowered.
setOperationAction(ISD::SADDO, VT, Custom);
setOperationAction(ISD::UADDO, VT, Custom);
setOperationAction(ISD::SSUBO, VT, Custom);
setOperationAction(ISD::USUBO, VT, Custom);
setOperationAction(ISD::SMULO, VT, Custom);
setOperationAction(ISD::UMULO, VT, Custom);
// Support carry in as value rather than glue.
setOperationAction(ISD::ADDCARRY, VT, Custom);
setOperationAction(ISD::SUBCARRY, VT, Custom);
setOperationAction(ISD::SETCCCARRY, VT, Custom);
}
if (!Subtarget.is64Bit()) {
// These libcalls are not available in 32-bit mode.
setLibcallName(RTLIB::SHL_I128, nullptr);
setLibcallName(RTLIB::SRL_I128, nullptr);
setLibcallName(RTLIB::SRA_I128, nullptr);
setLibcallName(RTLIB::MUL_I128, nullptr);
}
// Combine sin / cos into _sincos_stret if it is available.
if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
}
if (Subtarget.isTargetWin64()) {
setOperationAction(ISD::SDIV, MVT::i128, Custom);
setOperationAction(ISD::UDIV, MVT::i128, Custom);
setOperationAction(ISD::SREM, MVT::i128, Custom);
setOperationAction(ISD::UREM, MVT::i128, Custom);
setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
}
// On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
// is. We should promote the value to 64-bits to solve this.
// This is what the CRT headers do - `fmodf` is an inline header
// function casting to f64 and calling `fmod`.
if (Subtarget.is32Bit() && (Subtarget.isTargetKnownWindowsMSVC() ||
Subtarget.isTargetWindowsItanium()))
for (ISD::NodeType Op :
{ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
ISD::FLOG10, ISD::FPOW, ISD::FSIN})
if (isOperationExpand(Op, MVT::f32))
setOperationAction(Op, MVT::f32, Promote);
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
setTargetDAGCombine(ISD::BITCAST);
setTargetDAGCombine(ISD::VSELECT);
setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::SHL);
setTargetDAGCombine(ISD::SRA);
setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::FADD);
setTargetDAGCombine(ISD::FSUB);
setTargetDAGCombine(ISD::FNEG);
setTargetDAGCombine(ISD::FMA);
setTargetDAGCombine(ISD::FMINNUM);
setTargetDAGCombine(ISD::FMAXNUM);
setTargetDAGCombine(ISD::SUB);
setTargetDAGCombine(ISD::LOAD);
setTargetDAGCombine(ISD::MLOAD);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::MSTORE);
setTargetDAGCombine(ISD::TRUNCATE);
setTargetDAGCombine(ISD::ZERO_EXTEND);
setTargetDAGCombine(ISD::ANY_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::SETCC);
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::XOR);
setTargetDAGCombine(ISD::MSCATTER);
setTargetDAGCombine(ISD::MGATHER);
computeRegisterProperties(Subtarget.getRegisterInfo());
MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
MaxStoresPerMemsetOptSize = 8;
MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
MaxStoresPerMemcpyOptSize = 4;
MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
MaxStoresPerMemmoveOptSize = 4;
// TODO: These control memcmp expansion in CGP and could be raised higher, but
// that needs to be benchmarked and balanced against the potential use of vector
// load/store types (PR33329, PR33914).
MaxLoadsPerMemcmp = 2;
MaxLoadsPerMemcmpOptSize = 2;
// Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
setPrefLoopAlignment(ExperimentalPrefLoopAlignment);
// An out-of-order CPU can speculatively execute past a predictable branch,
// but a conditional move could be stalled by an expensive earlier operation.
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
EnableExtLdPromotion = true;
setPrefFunctionAlignment(4); // 2^4 bytes.
verifyIntrinsicTables();
}
// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
return Subtarget.isTargetMachO() && Subtarget.is64Bit();
}
bool X86TargetLowering::useStackGuardXorFP() const {
// Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
return Subtarget.getTargetTriple().isOSMSVCRT();
}
SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
const SDLoc &DL) const {
EVT PtrTy = getPointerTy(DAG.getDataLayout());
unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
return SDValue(Node, 0);
}
TargetLoweringBase::LegalizeTypeAction
X86TargetLowering::getPreferredVectorAction(MVT VT) const {
if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
return TypeSplitVector;
if (ExperimentalVectorWideningLegalization &&
VT.getVectorNumElements() != 1 &&
VT.getVectorElementType() != MVT::i1)
return TypeWidenVector;
return TargetLoweringBase::getPreferredVectorAction(VT);
}
MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
return MVT::v32i8;
return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}
unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
return 1;
return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}
EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
LLVMContext& Context,
EVT VT) const {
if (!VT.isVector())
return MVT::i8;
if (Subtarget.hasAVX512()) {
const unsigned NumElts = VT.getVectorNumElements();
// Figure out what this type will be legalized to.
EVT LegalVT = VT;
while (getTypeAction(Context, LegalVT) != TypeLegal)
LegalVT = getTypeToTransformTo(Context, LegalVT);
// If we got a 512-bit vector then we'll definitely have a vXi1 compare.
if (LegalVT.getSimpleVT().is512BitVector())
return EVT::getVectorVT(Context, MVT::i1, NumElts);
if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
// If we legalized to less than a 512-bit vector, then we will use a vXi1
// compare for vXi32/vXi64 for sure. If we have BWI we will also support
// vXi16/vXi8.
MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
return EVT::getVectorVT(Context, MVT::i1, NumElts);
}
}
return VT.changeVectorElementTypeToInteger();
}
/// Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
if (MaxAlign == 16)
return;
if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getBitWidth() == 128)
MaxAlign = 16;
} else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
unsigned EltAlign = 0;
getMaxByValAlign(ATy->getElementType(), EltAlign);
if (EltAlign > MaxAlign)
MaxAlign = EltAlign;
} else if (StructType *STy = dyn_cast<StructType>(Ty)) {
for (auto *EltTy : STy->elements()) {
unsigned EltAlign = 0;
getMaxByValAlign(EltTy, EltAlign);
if (EltAlign > MaxAlign)
MaxAlign = EltAlign;
if (MaxAlign == 16)
break;
}
}
}
/// Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
const DataLayout &DL) const {
if (Subtarget.is64Bit()) {
// Max of 8 and alignment of type.
unsigned TyAlign = DL.getABITypeAlignment(Ty);
if (TyAlign > 8)
return TyAlign;
return 8;
}
unsigned Align = 4;
if (Subtarget.hasSSE1())
getMaxByValAlign(Ty, Align);
return Align;
}
/// Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, any destination alignment can be
/// assumed to satisfy the constraint. Similarly, if SrcAlign is zero there
/// is no need to check it against an alignment requirement,
/// probably because the source does not need to be loaded. If 'IsMemset' is
/// true, this is expanding a memset. If 'ZeroMemset' is true, it is
/// a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
/// source is constant so it does not need to be loaded.
/// Returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
const Function &F = MF.getFunction();
if (!F.hasFnAttribute(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
(!Subtarget.isUnalignedMem16Slow() ||
((DstAlign == 0 || DstAlign >= 16) &&
(SrcAlign == 0 || SrcAlign >= 16)))) {
// FIXME: Check if unaligned 32-byte accesses are slow.
if (Size >= 32 && Subtarget.hasAVX()) {
// Although this isn't a well-supported type for AVX1, we'll let
// legalization and shuffle lowering produce the optimal codegen. If we
// choose an optimal type with a vector element larger than a byte,
// getMemsetStores() may create an intermediate splat (using an integer
// multiply) before we splat as a vector.
return MVT::v32i8;
}
if (Subtarget.hasSSE2())
return MVT::v16i8;
// TODO: Can SSE1 handle a byte vector?
// If we have SSE1 registers we should be able to use them.
if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()))
return MVT::v4f32;
} else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
!Subtarget.is64Bit() && Subtarget.hasSSE2()) {
// Do not use f64 to lower memcpy if source is string constant. It's
// better to use i32 to avoid the loads.
// Also, do not use f64 to lower memset unless this is a memset of zeros.
// The gymnastics of splatting a byte value into an XMM register and then
// only using 8-byte stores (because this is a CPU with slow unaligned
// 16-byte accesses) makes that a loser.
return MVT::f64;
}
}
// This is a compromise. If we reach here, unaligned accesses may be slow on
// this target. However, creating smaller, aligned accesses could be even
// slower and would certainly be a lot more code.
if (Subtarget.is64Bit() && Size >= 8)
return MVT::i64;
return MVT::i32;
}
bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
if (VT == MVT::f32)
return X86ScalarSSEf32;
else if (VT == MVT::f64)
return X86ScalarSSEf64;
return true;
}
bool
X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
unsigned,
unsigned,
bool *Fast) const {
if (Fast) {
switch (VT.getSizeInBits()) {
default:
// 8-byte and under are always assumed to be fast.
*Fast = true;
break;
case 128:
*Fast = !Subtarget.isUnalignedMem16Slow();
break;
case 256:
*Fast = !Subtarget.isUnalignedMem32Slow();
break;
// TODO: What about AVX-512 (512-bit) accesses?
}
}
// Misaligned accesses of any size are always allowed.
return true;
}
/// Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
// In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
// symbol.
if (isPositionIndependent() && Subtarget.isPICStyleGOT())
return MachineJumpTableInfo::EK_Custom32;
// Otherwise, use the normal jump table encoding heuristics.
return TargetLowering::getJumpTableEncoding();
}
bool X86TargetLowering::useSoftFloat() const {
return Subtarget.useSoftFloat();
}
void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
ArgListTy &Args) const {
// Only relabel X86-32 for C / Stdcall CCs.
if (Subtarget.is64Bit())
return;
if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
return;
unsigned ParamRegs = 0;
if (auto *M = MF->getFunction().getParent())
ParamRegs = M->getNumberRegisterParameters();
// Mark the first N integer arguments as being passed in registers.
for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
Type *T = Args[Idx].Ty;
if (T->isIntOrPtrTy())
if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
unsigned numRegs = 1;
if (MF->getDataLayout().getTypeAllocSize(T) > 4)
numRegs = 2;
if (ParamRegs < numRegs)
return;
ParamRegs -= numRegs;
Args[Idx].IsInReg = true;
}
}
}
const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid, MCContext &Ctx) const {
assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
// In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
// entries.
return MCSymbolRefExpr::create(MBB->getSymbol(),
MCSymbolRefExpr::VK_GOTOFF, Ctx);
}
/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
if (!Subtarget.is64Bit())
// This doesn't have SDLoc associated with it, but is not really the
// same as a Register.
return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
getPointerTy(DAG.getDataLayout()));
return Table;
}
/// This returns the relocation base for the given PIC jumptable,
/// the same as getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
MCContext &Ctx) const {
// X86-64 uses RIP relative addressing based on the jump table label.
if (Subtarget.isPICStyleRIPRel())
return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
// Otherwise, the reference is relative to the PIC base.
return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
}
std::pair<const TargetRegisterClass *, uint8_t>
X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
MVT VT) const {
const TargetRegisterClass *RRC = nullptr;
uint8_t Cost = 1;
switch (VT.SimpleTy) {
default:
return TargetLowering::findRepresentativeClass(TRI, VT);
case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
break;
case MVT::x86mmx:
RRC = &X86::VR64RegClass;
break;
case MVT::f32: case MVT::f64:
case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
case MVT::v4f32: case MVT::v2f64:
case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
case MVT::v8f32: case MVT::v4f64:
case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
case MVT::v16f32: case MVT::v8f64:
RRC = &X86::VR128XRegClass;
break;
}
return std::make_pair(RRC, Cost);
}
unsigned X86TargetLowering::getAddressSpace() const {
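// Address space 256 selects %gs and 257 selects %fs. The stack protector and
// SafeStack TLS slots live in %gs on 32-bit targets and under the 64-bit
// kernel code model, and in %fs otherwise.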
if (Subtarget.is64Bit())
return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
return 256;
}
static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
(TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
}
static Constant* SegmentOffset(IRBuilder<> &IRB,
unsigned Offset, unsigned AddressSpace) {
return ConstantExpr::getIntToPtr(
ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
}
Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
// glibc, bionic, and Fuchsia have a special slot for the stack guard in
// tcbhead_t; use it instead of the usual global variable (see
// sysdeps/{i386,x86_64}/nptl/tls.h)
if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
if (Subtarget.isTargetFuchsia()) {
// <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
return SegmentOffset(IRB, 0x10, getAddressSpace());
} else {
// %fs:0x28, unless we're using a Kernel code model, in which case
// it's %gs:0x28. gs:0x14 on i386.
unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
return SegmentOffset(IRB, Offset, getAddressSpace());
}
}
return TargetLowering::getIRStackGuard(IRB);
}
void X86TargetLowering::insertSSPDeclarations(Module &M) const {
// The MSVC CRT provides functionality for stack protection.
if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
// MSVC CRT has a global variable holding security cookie.
M.getOrInsertGlobal("__security_cookie",
Type::getInt8PtrTy(M.getContext()));
// MSVC CRT has a function to validate security cookie.
auto *SecurityCheckCookie = cast<Function>(
M.getOrInsertFunction("__security_check_cookie",
Type::getVoidTy(M.getContext()),
Type::getInt8PtrTy(M.getContext())));
SecurityCheckCookie->setCallingConv(CallingConv::X86_FastCall);
SecurityCheckCookie->addAttribute(1, Attribute::AttrKind::InReg);
return;
}
// glibc, bionic, and Fuchsia have a special slot for the stack guard.
if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
return;
TargetLowering::insertSSPDeclarations(M);
}
Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
// MSVC CRT has a global variable holding security cookie.
if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
return M.getGlobalVariable("__security_cookie");
}
return TargetLowering::getSDagStackGuard(M);
}
Value *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
// MSVC CRT has a function to validate security cookie.
if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
return M.getFunction("__security_check_cookie");
}
return TargetLowering::getSSPStackGuardCheck(M);
}
Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
if (Subtarget.getTargetTriple().isOSContiki())
return getDefaultSafeStackPointerLocation(IRB, false);
// Android provides a fixed TLS slot for the SafeStack pointer. See the
// definition of TLS_SLOT_SAFESTACK in
// https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
if (Subtarget.isTargetAndroid()) {
// %fs:0x48, unless we're using a Kernel code model, in which case it's
// %gs:0x48; %gs:0x24 on i386.
unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
return SegmentOffset(IRB, Offset, getAddressSpace());
}
// Fuchsia is similar.
if (Subtarget.isTargetFuchsia()) {
// <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
return SegmentOffset(IRB, 0x18, getAddressSpace());
}
return TargetLowering::getSafeStackPointerLocation(IRB);
}
bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
unsigned DestAS) const {
assert(SrcAS != DestAS && "Expected different address spaces!");
return SrcAS < 256 && DestAS < 256;
}
//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
#include "X86GenCallingConv.inc"
bool X86TargetLowering::CanLowerReturn(
CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
return CCInfo.CheckReturn(Outs, RetCC_X86);
}
const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
return ScratchRegs;
}
/// Lowers mask values (v*i1) to the local register values.
/// \returns DAG node after lowering to the register type.
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
const SDLoc &Dl, SelectionDAG &DAG) {
EVT ValVT = ValArg.getValueType();
if (ValVT == MVT::v1i1)
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
DAG.getIntPtrConstant(0, Dl));
if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
(ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
// Two stage lowering might be required
// bitcast: v8i1 -> i8 / v16i1 -> i16
// anyextend: i8 -> i32 / i16 -> i32
EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
if (ValLoc == MVT::i32)
ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
return ValToCopy;
}
if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
(ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
// One stage lowering is required
// bitcast: v32i1 -> i32 / v64i1 -> i64
return DAG.getBitcast(ValLoc, ValArg);
}
return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
}
/// Breaks a v64i1 value into two registers and adds the new node to the DAG.
static void Passv64i1ArgInRegs(
const SDLoc &Dl, SelectionDAG &DAG, SDValue Chain, SDValue &Arg,
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, CCValAssign &VA,
CCValAssign &NextVA, const X86Subtarget &Subtarget) {
assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
assert(Subtarget.is32Bit() && "Expecting 32 bit target");
assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
assert(VA.isRegLoc() && NextVA.isRegLoc() &&
"The value should reside in two registers");
// Before splitting the value we cast it to i64
Arg = DAG.getBitcast(MVT::i64, Arg);
// Split the value into two i32 halves.
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
DAG.getConstant(0, Dl, MVT::i32));
Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
DAG.getConstant(1, Dl, MVT::i32));
// Attach the two i32 halves to the corresponding registers.
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
}
SDValue
X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
// In some cases we need to disable registers from the default CSR list.
// For example, when they are used for argument passing.
bool ShouldDisableCalleeSavedRegister =
CallConv == CallingConv::X86_RegCall ||
MF.getFunction().hasFnAttribute("no_caller_saved_registers");
if (CallConv == CallingConv::X86_INTR && !Outs.empty())
report_fatal_error("X86 interrupts may not return any value");
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_X86);
SDValue Flag;
SmallVector<SDValue, 6> RetOps;
RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
// Operand #1 = Bytes To Pop
RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
MVT::i32));
// Copy the result values into the output registers.
for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
++I, ++OutsIndex) {
CCValAssign &VA = RVLocs[I];
assert(VA.isRegLoc() && "Can only return in registers!");
// Add the register to the CalleeSaveDisableRegs list.
if (ShouldDisableCalleeSavedRegister)
MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
SDValue ValToCopy = OutVals[OutsIndex];
EVT ValVT = ValToCopy.getValueType();
// Promote values to the appropriate types.
if (VA.getLocInfo() == CCValAssign::SExt)
ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
else if (VA.getLocInfo() == CCValAssign::ZExt)
ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
else if (VA.getLocInfo() == CCValAssign::AExt) {
if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
else
ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
}
else if (VA.getLocInfo() == CCValAssign::BCvt)
ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
assert(VA.getLocInfo() != CCValAssign::FPExt &&
"Unexpected FP-extend for return value.");
// If this is x86-64, and we disabled SSE, we can't return FP values,
// or SSE or MMX vectors.
if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
(Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
} else if (ValVT == MVT::f64 &&
(Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
// Likewise we can't return F64 values with SSE1 only. gcc does so, but
// llvm-gcc has never done it right and no one has noticed, so this
// should be OK for now.
errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
}
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
// the RET instruction and handled by the FP Stackifier.
if (VA.getLocReg() == X86::FP0 ||
VA.getLocReg() == X86::FP1) {
// If this is a copy from an xmm register to ST(0), use an FPExtend to
// change the value to the FP stack register class.
if (isScalarFPTypeInSSEReg(VA.getValVT()))
ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
RetOps.push_back(ValToCopy);
// Don't emit a copytoreg.
continue;
}
// 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
// which is returned in RAX / RDX.
if (Subtarget.is64Bit()) {
if (ValVT == MVT::x86mmx) {
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
ValToCopy);
// If we don't have SSE2 available, convert to v4f32 so the generated
// register is legal.
if (!Subtarget.hasSSE2())
ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
}
}
}
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
if (VA.needsCustom()) {
assert(VA.getValVT() == MVT::v64i1 &&
"Currently the only custom case is when we split v64i1 to 2 regs");
Passv64i1ArgInRegs(dl, DAG, Chain, ValToCopy, RegsToPass, VA, RVLocs[++I],
Subtarget);
assert(2 == RegsToPass.size() &&
"Expecting two registers after Pass64BitArgInRegs");
// Add the second register to the CalleeSaveDisableRegs list.
if (ShouldDisableCalleeSavedRegister)
MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
} else {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
}
// Add nodes to the DAG and add the values into the RetOps list
for (auto &Reg : RegsToPass) {
Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
}
}
// Swift calling convention does not require we copy the sret argument
// into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
// All x86 ABIs require that for returning structs by value we copy
// the sret argument into %rax/%eax (depending on ABI) for the return.
// We saved the argument into a virtual register in the entry block,
// so now we copy the value out and into %rax/%eax.
//
// Checking Function.hasStructRetAttr() here is insufficient because the IR
// may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
// false, then an sret argument may be implicitly inserted in the SelDAG. In
// either case FuncInfo->setSRetReturnReg() will have been called.
if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
// When we have both sret and another return value, we should use the
// original Chain stored in RetOps[0], instead of the current Chain updated
// in the above loop. If we only have sret, RetOps[0] equals to Chain.
// For the case of sret and another return value, we have
// Chain_0 at the function entry
// Chain_1 = getCopyToReg(Chain_0) in the above loop
// If we use Chain_1 in getCopyFromReg, we will have
// Val = getCopyFromReg(Chain_1)
// Chain_2 = getCopyToReg(Chain_1, Val) from below
// getCopyToReg(Chain_0) will be glued together with
// getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
// in Unit B, and we will have cyclic dependency between Unit A and Unit B:
// Data dependency from Unit B to Unit A due to usage of Val in
// getCopyToReg(Chain_1, Val)
// Chain dependency from Unit A to Unit B
// So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
getPointerTy(MF.getDataLayout()));
unsigned RetValReg
= (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
X86::RAX : X86::EAX;
Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
Flag = Chain.getValue(1);
// RAX/EAX now acts like a return value.
RetOps.push_back(
DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
// Add the returned register to the CalleeSaveDisableRegs list.
if (ShouldDisableCalleeSavedRegister)
MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
}
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const MCPhysReg *I =
TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
if (I) {
for (; *I; ++I) {
if (X86::GR64RegClass.contains(*I))
RetOps.push_back(DAG.getRegister(*I, MVT::i64));
else
llvm_unreachable("Unexpected register class in CSRsViaCopy!");
}
}
RetOps[0] = Chain; // Update chain.
// Add the flag if we have it.
if (Flag.getNode())
RetOps.push_back(Flag);
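// Interrupt handlers return with IRET rather than the normal RET.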
X86ISD::NodeType opcode = X86ISD::RET_FLAG;
if (CallConv == CallingConv::X86_INTR)
opcode = X86ISD::IRET;
return DAG.getNode(opcode, dl, MVT::Other, RetOps);
}
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
return false;
SDValue TCChain = Chain;
SDNode *Copy = *N->use_begin();
if (Copy->getOpcode() == ISD::CopyToReg) {
// If the copy has a glue operand, we conservatively assume it isn't safe to
// perform a tail call.
if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
return false;
TCChain = Copy->getOperand(0);
} else if (Copy->getOpcode() != ISD::FP_EXTEND)
return false;
bool HasRet = false;
for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
UI != UE; ++UI) {
if (UI->getOpcode() != X86ISD::RET_FLAG)
return false;
// If we are returning more than one value, we can definitely
// not make a tail call; see PR19530.
if (UI->getNumOperands() > 4)
return false;
if (UI->getNumOperands() == 4 &&
UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
return false;
HasRet = true;
}
if (!HasRet)
return false;
Chain = TCChain;
return true;
}
EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
ISD::NodeType ExtendKind) const {
MVT ReturnMVT = MVT::i32;
bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
// The ABI does not require i1, i8 or i16 to be extended.
//
// On Darwin, there is code in the wild relying on Clang's old behaviour of
// always extending i8/i16 return values, so keep doing that for now.
// (PR26665).
ReturnMVT = MVT::i8;
}
EVT MinVT = getRegisterType(Context, ReturnMVT);
return VT.bitsLT(MinVT) ? MinVT : VT;
}
/// Reads two 32 bit registers and creates a 64 bit mask value.
/// \param VA The current 32 bit value that needs to be assigned.
/// \param NextVA The next 32 bit value that needs to be assigned.
/// \param Root The parent DAG node.
/// \param [in,out] InFlag Represents the SDValue in the parent DAG node for
/// glue purposes. In case the DAG is already using a
/// physical register instead of a virtual one, we should
/// glue our new SDValue to the InFlag SDValue.
/// \return a new SDValue of size 64 bits.
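///
/// For example, under the 32 bit regcall convention a v64i1 argument arrives
/// split across two GR32 locations (VA and NextVA). Each i32 half is read
/// with getCopyFromReg, bitcast to v32i1, and the two halves are then joined
/// with CONCAT_VECTORS into a single v64i1 value.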
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
SDValue &Root, SelectionDAG &DAG,
const SDLoc &Dl, const X86Subtarget &Subtarget,
SDValue *InFlag = nullptr) {
assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
assert(Subtarget.is32Bit() && "Expecting 32 bit target");
assert(VA.getValVT() == MVT::v64i1 &&
"Expecting first location of 64 bit width type");
assert(NextVA.getValVT() == VA.getValVT() &&
"The locations should have the same type");
assert(VA.isRegLoc() && NextVA.isRegLoc() &&
"The values should reside in two registers");
SDValue Lo, Hi;
unsigned Reg;
SDValue ArgValueLo, ArgValueHi;
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterClass *RC = &X86::GR32RegClass;
// Read a 32 bit value from the registers.
if (nullptr == InFlag) {
// When no physical register is present,
// create an intermediate virtual register.
Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
} else {
// When a physical register is available read the value from it and glue
// the reads together.
ArgValueLo =
DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
*InFlag = ArgValueLo.getValue(2);
ArgValueHi =
DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
*InFlag = ArgValueHi.getValue(2);
}
// Convert the i32 type into v32i1 type.
Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
// Convert the i32 type into v32i1 type.
Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
// Concatenate the two values together.
return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
}
/// Lower a register of various sizes (8/16/32/64) to a mask value of the
/// expected size (v8i1/v16i1/v32i1/v64i1).
/// \returns a DAG node containing the operand after lowering to a mask type.
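///
/// For example, a v16i1 value arriving in an i32 location is truncated to
/// i16 and then bitcast to v16i1, while a v64i1 value arriving in an i64
/// location (the 64 bit case) is bitcast directly, since the widths already
/// match.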
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
const EVT &ValLoc, const SDLoc &Dl,
SelectionDAG &DAG) {
SDValue ValReturned = ValArg;
if (ValVT == MVT::v1i1)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
if (ValVT == MVT::v64i1) {
// On a 32 bit machine this case is handled by getv64i1Argument.
assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
// On a 64 bit machine there is no need to truncate the value, only bitcast it.
} else {
MVT maskLen;
switch (ValVT.getSimpleVT().SimpleTy) {
case MVT::v8i1:
maskLen = MVT::i8;
break;
case MVT::v16i1:
maskLen = MVT::i16;
break;
case MVT::v32i1:
maskLen = MVT::i32;
break;
default:
llvm_unreachable("Expecting a vector of i1 types");
}
ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
}
return DAG.getBitcast(ValVT, ValReturned);
}
/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue X86TargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
uint32_t *RegMask) const {
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
bool Is64Bit = Subtarget.is64Bit();
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
// Copy all of the result registers out of their specified physreg.
for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
++I, ++InsIndex) {
CCValAssign &VA = RVLocs[I];
EVT CopyVT = VA.getLocVT();
// In some calling conventions we need to remove the used registers
// from the register mask.
if (RegMask) {
for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
}
// If this is x86-64, and we disabled SSE, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
}
// If we prefer to use the value in xmm registers, copy it out as f80 and
// use a truncate to move it from fp stack reg to xmm reg.
bool RoundAfterCopy = false;
if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
isScalarFPTypeInSSEReg(VA.getValVT())) {
if (!Subtarget.hasX87())
report_fatal_error("X87 register return with X87 disabled");
CopyVT = MVT::f80;
RoundAfterCopy = (CopyVT != VA.getLocVT());
}
SDValue Val;
if (VA.needsCustom()) {
assert(VA.getValVT() == MVT::v64i1 &&
"Currently the only custom case is when we split v64i1 to 2 regs");
Val =
getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
} else {
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
.getValue(1);
Val = Chain.getValue(0);
InFlag = Chain.getValue(2);
}
if (RoundAfterCopy)
Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
// This truncation won't change the value.
DAG.getIntPtrConstant(1, dl));
if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
if (VA.getValVT().isVector() &&
((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
(VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
// promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
} else
Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
}
InVals.push_back(Val);
}
return Chain;
}
//===----------------------------------------------------------------------===//
// C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
// The StdCall calling convention is standard for many Windows API routines.
// It differs from the C calling convention only slightly: the callee cleans
// up the stack instead of the caller, and symbols are decorated differently.
// It doesn't support any vector arguments.
// For info on fast calling convention see Fast Calling Convention (tail call)
// implementation LowerX86_32FastCCCallTo.
/// CallIsStructReturn - Determines whether a call uses struct return
/// semantics.
enum StructReturnType {
NotStructReturn,
RegStructReturn,
StackStructReturn
};
static StructReturnType
callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
if (Outs.empty())
return NotStructReturn;
const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
if (!Flags.isSRet())
return NotStructReturn;
if (Flags.isInReg() || IsMCU)
return RegStructReturn;
return StackStructReturn;
}
/// Determines whether a function uses struct return semantics.
static StructReturnType
argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
if (Ins.empty())
return NotStructReturn;
const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
if (!Flags.isSRet())
return NotStructReturn;
if (Flags.isInReg() || IsMCU)
return RegStructReturn;
return StackStructReturn;
}
/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
SDValue Chain, ISD::ArgFlagsTy Flags,
SelectionDAG &DAG, const SDLoc &dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile*/false, /*AlwaysInline=*/true,
/*isTailCall*/false,
MachinePointerInfo(), MachinePointerInfo());
}
/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
CC == CallingConv::HHVM);
}
/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
switch (CC) {
// C calling conventions:
case CallingConv::C:
case CallingConv::Win64:
case CallingConv::X86_64_SysV:
// Callee pop conventions:
case CallingConv::X86_ThisCall:
case CallingConv::X86_StdCall:
case CallingConv::X86_VectorCall:
case CallingConv::X86_FastCall:
return true;
default:
return canGuaranteeTCO(CC);
}
}
/// Return true if the function is being made into a tailcall target by
/// changing its ABI.
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
}
bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
auto Attr =
CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
if (!CI->isTailCall() || Attr.getValueAsString() == "true")
return false;
ImmutableCallSite CS(CI);
CallingConv::ID CalleeCC = CS.getCallingConv();
if (!mayTailCallThisCC(CalleeCC))
return false;
return true;
}
SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
MachineFrameInfo &MFI, unsigned i) const {
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool AlwaysUseMutable = shouldGuaranteeTCO(
CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
EVT ValVT;
MVT PtrVT = getPointerTy(DAG.getDataLayout());
// If the value is passed by pointer, we have the address passed instead of
// the value itself. No need to extend if the mask value and location share
// the same absolute size.
bool ExtendedInMem =
VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
ValVT = VA.getLocVT();
else
ValVT = VA.getValVT();
// Calculate the SP offset of an interrupt parameter, re-arranging the slot
// normally taken by a return address.
int Offset = 0;
if (CallConv == CallingConv::X86_INTR) {
// X86 interrupts may take one or two arguments.
// On the stack there will be no return address as in a regular call.
// The offset of the last argument needs to be set to -4/-8 bytes, while the
// offset of the first argument (when there are two) should be set to 0 bytes.
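// For example, a 32 bit handler with a single argument gets Offset = -4
// (or -8 on a 64 bit target); with two arguments the first gets 0 and the
// second gets -4/-8, before the additional 8 byte adjustment applied just
// below for 64 bit handlers with an error code.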
Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1);
if (Subtarget.is64Bit() && Ins.size() == 2) {
// The stack pointer needs to be realigned for 64 bit handlers with error
// code, so the argument offset changes by 8 bytes.
Offset += 8;
}
}
// FIXME: For now, all byval parameter objects are marked mutable. This can be
// changed with more analysis.
// In case of tail call optimization, mark all arguments mutable, since they
// could be overwritten by the lowering of arguments in case of a tail call.
if (Flags.isByVal()) {
unsigned Bytes = Flags.getByValSize();
if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
// FIXME: For now, all byval parameter objects are marked as aliasing. This
// can be improved with deeper analysis.
int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
/*isAliased=*/true);
// Adjust SP offset of interrupt parameter.
if (CallConv == CallingConv::X86_INTR) {
MFI.setObjectOffset(FI, Offset);
}
return DAG.getFrameIndex(FI, PtrVT);
}
// This is an argument in memory. We might be able to perform copy elision.
if (Flags.isCopyElisionCandidate()) {
EVT ArgVT = Ins[i].ArgVT;
SDValue PartAddr;
if (Ins[i].PartOffset == 0) {
// If this is a one-part value or the first part of a multi-part value,
// create a stack object for the entire argument value type and return a
// load from our portion of it. This assumes that if the first part of an
// argument is in memory, the rest will also be in memory.
int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
/*Immutable=*/false);
PartAddr = DAG.getFrameIndex(FI, PtrVT);
return DAG.getLoad(
ValVT, dl, Chain, PartAddr,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
} else {
// This is not the first piece of an argument in memory. See if there is
// already a fixed stack object including this offset. If so, assume it
// was created by the PartOffset == 0 branch above and create a load from
// the appropriate offset into it.
int64_t PartBegin = VA.getLocMemOffset();
int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
int FI = MFI.getObjectIndexBegin();
for (; MFI.isFixedObjectIndex(FI); ++FI) {
int64_t ObjBegin = MFI.getObjectOffset(FI);
int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
break;
}
if (MFI.isFixedObjectIndex(FI)) {
SDValue Addr =
DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
return DAG.getLoad(
ValVT, dl, Chain, Addr,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
Ins[i].PartOffset));
}
}
}
int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
VA.getLocMemOffset(), isImmutable);
// Set SExt or ZExt flag.
if (VA.getLocInfo() == CCValAssign::ZExt) {
MFI.setObjectZExt(FI, true);
} else if (VA.getLocInfo() == CCValAssign::SExt) {
MFI.setObjectSExt(FI, true);
}
// Adjust SP offset of interrupt parameter.
if (CallConv == CallingConv::X86_INTR) {
MFI.setObjectOffset(FI, Offset);
}
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val = DAG.getLoad(
ValVT, dl, Chain, FIN,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
return ExtendedInMem
? (VA.getValVT().isVector()
? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
: DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
: Val;
}
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
const X86Subtarget &Subtarget) {
assert(Subtarget.is64Bit());
if (Subtarget.isCallingConvWin64(CallConv)) {
static const MCPhysReg GPR64ArgRegsWin64[] = {
X86::RCX, X86::RDX, X86::R8, X86::R9
};
return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
}
static const MCPhysReg GPR64ArgRegs64Bit[] = {
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
};
return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
}
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
CallingConv::ID CallConv,
const X86Subtarget &Subtarget) {
assert(Subtarget.is64Bit());
if (Subtarget.isCallingConvWin64(CallConv)) {
// The XMM registers which might contain var arg parameters are shadowed in
// their paired GPRs, so we only need to save the GPRs to their home slots.
// TODO: __vectorcall will change this.
return None;
}
const Function &F = MF.getFunction();
bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool isSoftFloat = Subtarget.useSoftFloat();
assert(!(isSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
// Kernel mode asks for SSE to be disabled, so there are no XMM argument
// registers.
return None;
static const MCPhysReg XMMArgRegs64Bit[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
}
#ifndef NDEBUG
static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
[](const CCValAssign &A, const CCValAssign &B) -> bool {
return A.getValNo() < B.getValNo();
});
}
#endif
SDValue X86TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
const Function &F = MF.getFunction();
if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
F.getName() == "main")
FuncInfo->setForceFramePointer(true);
MachineFrameInfo &MFI = MF.getFrameInfo();
bool Is64Bit = Subtarget.is64Bit();
bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
assert(
!(isVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
if (CallConv == CallingConv::X86_INTR) {
bool isLegal = Ins.size() == 1 ||
(Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) ||
(!Is64Bit && Ins[1].VT == MVT::i32)));
if (!isLegal)
report_fatal_error("X86 interrupts may take one or two arguments");
}
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64.
if (IsWin64)
CCInfo.AllocateStack(32, 8);
CCInfo.AnalyzeArguments(Ins, CC_X86);
// In vectorcall calling convention a second pass is required for the HVA
// types.
if (CallingConv::X86_VectorCall == CallConv) {
CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
}
// The next loop assumes that the locations are in the same order as the
// input arguments.
assert(isSortedByValueNo(ArgLocs) &&
"Argument Location list must be sorted before lowering");
SDValue ArgValue;
for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
++I, ++InsIndex) {
assert(InsIndex < Ins.size() && "Invalid Ins index");
CCValAssign &VA = ArgLocs[I];
if (VA.isRegLoc()) {
EVT RegVT = VA.getLocVT();
if (VA.needsCustom()) {
assert(
VA.getValVT() == MVT::v64i1 &&
"Currently the only custom case is when we split v64i1 to 2 regs");
// In the regcall calling convention, v64i1 values compiled for a 32 bit
// arch are split up into two registers.
ArgValue =
getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
} else {
const TargetRegisterClass *RC;
if (RegVT == MVT::i8)
RC = &X86::GR8RegClass;
else if (RegVT == MVT::i16)
RC = &X86::GR16RegClass;
else if (RegVT == MVT::i32)
RC = &X86::GR32RegClass;
else if (Is64Bit && RegVT == MVT::i64)
RC = &X86::GR64RegClass;
else if (RegVT == MVT::f32)
RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
else if (RegVT == MVT::f64)
RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
else if (RegVT == MVT::f80)
RC = &X86::RFP80RegClass;
else if (RegVT == MVT::f128)
RC = &X86::VR128RegClass;
else if (RegVT.is512BitVector())
RC = &X86::VR512RegClass;
else if (RegVT.is256BitVector())
RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
else if (RegVT.is128BitVector())
RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
else if (RegVT == MVT::x86mmx)
RC = &X86::VR64RegClass;
else if (RegVT == MVT::v1i1)
RC = &X86::VK1RegClass;
else if (RegVT == MVT::v8i1)
RC = &X86::VK8RegClass;
else if (RegVT == MVT::v16i1)
RC = &X86::VK16RegClass;
else if (RegVT == MVT::v32i1)
RC = &X86::VK32RegClass;
else if (RegVT == MVT::v64i1)
RC = &X86::VK64RegClass;
else
llvm_unreachable("Unknown argument type!");
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
}
// If this is an 8 or 16-bit value, it is really passed promoted to 32
// bits. Insert an assert[sz]ext to capture this, then truncate to the
// right size.
if (VA.getLocInfo() == CCValAssign::SExt)
ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::ZExt)
ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::BCvt)
ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
if (VA.isExtInLoc()) {
// Handle MMX values passed in XMM regs.
if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
else if (VA.getValVT().isVector() &&
VA.getValVT().getScalarType() == MVT::i1 &&
((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
(VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
// Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
} else
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
}
} else {
assert(VA.isMemLoc());
ArgValue =
LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
}
// If the value is passed via a pointer, do a load.
if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
ArgValue =
DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
InVals.push_back(ArgValue);
}
for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
// The Swift calling convention does not require that we copy the sret argument
// into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
if (CallConv == CallingConv::Swift)
continue;
// All x86 ABIs require that for returning structs by value we copy the
// sret argument into %rax/%eax (depending on ABI) for the return. Save
// the argument into a virtual register so that we can access it from the
// return points.
if (Ins[I].Flags.isSRet()) {
unsigned Reg = FuncInfo->getSRetReturnReg();
if (!Reg) {
MVT PtrTy = getPointerTy(DAG.getDataLayout());
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
FuncInfo->setSRetReturnReg(Reg);
}
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
break;
}
}
unsigned StackSize = CCInfo.getNextStackOffset();
// Align stack specially for tail calls.
if (shouldGuaranteeTCO(CallConv,
MF.getTarget().Options.GuaranteedTailCallOpt))
StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
// If the function takes a variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start. We
// can skip this if there are no va_start calls.
if (MFI.hasVAStart() &&
(Is64Bit || (CallConv != CallingConv::X86_FastCall &&
CallConv != CallingConv::X86_ThisCall))) {
FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
}
// Figure out if XMM registers are in use.
assert(!(Subtarget.useSoftFloat() &&
F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
"SSE register cannot be used when SSE is disabled!");
// 64-bit calling conventions support varargs and register parameters, so we
// have to do extra work to spill them in the prologue.
if (Is64Bit && isVarArg && MFI.hasVAStart()) {
// Find the first unallocated argument registers.
ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
// Gather all the live in physical registers.
SmallVector<SDValue, 6> LiveGPRs;
SmallVector<SDValue, 8> LiveXMMRegs;
SDValue ALVal;
for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
LiveGPRs.push_back(
DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
}
if (!ArgXMMs.empty()) {
unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
LiveXMMRegs.push_back(
DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
}
}
if (IsWin64) {
// Get to the caller-allocated home save location. Add 8 to account
// for the return address.
int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
FuncInfo->setRegSaveFrameIndex(
MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
// Fixup to set vararg frame on shadow area (4 x i64).
if (NumIntRegs < 4)
FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
} else {
// For X86-64, if there are vararg parameters that are passed via
// registers, then we must store them to their spots on the stack so
// they may be loaded by dereferencing the result of va_next.
FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
}
// Store the integer parameter registers.
SmallVector<SDValue, 8> MemOps;
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
getPointerTy(DAG.getDataLayout()));
unsigned Offset = FuncInfo->getVarArgsGPOffset();
for (SDValue Val : LiveGPRs) {
SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
RSFIN, DAG.getIntPtrConstant(Offset, dl));
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(),
FuncInfo->getRegSaveFrameIndex(), Offset));
MemOps.push_back(Store);
Offset += 8;
}
if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
// Now store the XMM (fp + vector) parameter registers.
SmallVector<SDValue, 12> SaveXMMOps;
SaveXMMOps.push_back(Chain);
SaveXMMOps.push_back(ALVal);
SaveXMMOps.push_back(DAG.getIntPtrConstant(
FuncInfo->getRegSaveFrameIndex(), dl));
SaveXMMOps.push_back(DAG.getIntPtrConstant(
FuncInfo->getVarArgsFPOffset(), dl));
SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
LiveXMMRegs.end());
MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
MVT::Other, SaveXMMOps));
}
if (!MemOps.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
}
if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
// Find the largest legal vector type.
MVT VecVT = MVT::Other;
// FIXME: Only some x86_32 calling conventions support AVX512.
if (Subtarget.hasAVX512() &&
(Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
CallConv == CallingConv::Intel_OCL_BI)))
VecVT = MVT::v16f32;
else if (Subtarget.hasAVX())
VecVT = MVT::v8f32;
else if (Subtarget.hasSSE2())
VecVT = MVT::v4f32;
// We forward some GPRs and some vector types.
SmallVector<MVT, 2> RegParmTypes;
MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
RegParmTypes.push_back(IntVT);
if (VecVT != MVT::Other)
RegParmTypes.push_back(VecVT);
// Compute the set of forwarded registers. The rest are scratch.
SmallVectorImpl<ForwardedRegister> &Forwards =
FuncInfo->getForwardedMustTailRegParms();
CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
// Conservatively forward AL on x86_64, since it might be used for varargs.
if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
}
// Copy all forwards from physical to virtual registers.
for (ForwardedRegister &F : Forwards) {
// FIXME: Can we use a less constrained schedule?
SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
}
}
// Some CCs need callee pop.
if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
MF.getTarget().Options.GuaranteedTailCallOpt)) {
FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
} else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
// X86 interrupts must pop the error code (and the alignment padding) if
// present.
FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
} else {
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
!Subtarget.getTargetTriple().isOSMSVCRT() &&
argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
FuncInfo->setBytesToPopOnReturn(4);
}
if (!Is64Bit) {
// RegSaveFrameIndex is X86-64 only.
FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
if (CallConv == CallingConv::X86_FastCall ||
CallConv == CallingConv::X86_ThisCall)
// fastcc functions can't have varargs.
FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
}
FuncInfo->setArgumentStackSize(StackSize);
if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
assert(Is64Bit);
// TODO: Add a mechanism to frame lowering that will allow us to indicate
// that we'd prefer this slot be allocated towards the bottom of the frame
// (i.e. near the stack pointer after allocating the frame). Every
// funclet needs a copy of this slot in its (mostly empty) frame, and the
// offset from the bottom of this and each funclet's frame must be the
// same, so the size of funclets' (mostly empty) frames is dictated by
// how far this slot is from the bottom (since they allocate just enough
// space to accommodate holding this slot at the correct offset).
int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
EHInfo->PSPSymFrameIdx = PSPSymFI;
}
}
if (CallConv == CallingConv::X86_RegCall ||
F.hasFnAttribute("no_caller_saved_registers")) {
MachineRegisterInfo &MRI = MF.getRegInfo();
for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
MRI.disableCalleeSavedRegister(Pair.first);
}
return Chain;
}
SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
SDValue Arg, const SDLoc &dl,
SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
StackPtr, PtrOff);
if (Flags.isByVal())
return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
return DAG.getStore(
Chain, dl, Arg, PtrOff,
MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}
/// Emit a load of the return address if tail call
/// optimization is performed and it is required.
SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
bool Is64Bit, int FPDiff, const SDLoc &dl) const {
// Adjust the Return address stack slot.
EVT VT = getPointerTy(DAG.getDataLayout());
OutRetAddr = getReturnAddressFrameIndex(DAG);
// Load the "old" Return address.
OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
return SDValue(OutRetAddr.getNode(), 1);
}
/// Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff!=0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
SDValue Chain, SDValue RetAddrFrIdx,
EVT PtrVT, unsigned SlotSize,
int FPDiff, const SDLoc &dl) {
// Store the return address to the appropriate stack slot.
if (!FPDiff) return Chain;
// Calculate the new stack slot for the return address.
int NewReturnAddrFI =
MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
false);
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(), NewReturnAddrFI));
return Chain;
}
/// Returns a vector_shuffle mask for a movs{s|d} or movd
/// operation of the specified width.
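///
/// For example, for VT = v2i64 the mask built below is <2, 1>: element 0 of
/// the result comes from V2 and element 1 comes from V1, i.e. the low
/// element of V1 is replaced by the low element of V2.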
static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
Mask.push_back(NumElems);
for (unsigned i = 1; i != NumElems; ++i)
Mask.push_back(i);
return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
CallingConv::ID CallConv = CLI.CallConv;
bool &isTailCall = CLI.IsTailCall;
bool isVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget.is64Bit();
bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
(Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
bool HasNoCfCheck =
(CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
const Module *M = MF.getMMI().getModule();
Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
if (CallConv == CallingConv::X86_INTR)
report_fatal_error("X86 interrupts may not be called directly");
if (Attr.getValueAsString() == "true")
isTailCall = false;
if (Subtarget.isPICStyleGOT() &&
!MF.getTarget().Options.GuaranteedTailCallOpt) {
// If we are using a GOT, disable tail calls to external symbols with
// default visibility. Tail calling such a symbol requires using a GOT
// relocation, which forces early binding of the symbol. This breaks code
// that requires lazy function symbol resolution. Using musttail or
// GuaranteedTailCallOpt will override this.
GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
if (!G || (!G->getGlobal()->hasLocalLinkage() &&
G->getGlobal()->hasDefaultVisibility()))
isTailCall = false;
}
bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
if (IsMustTail) {
// Force this to be a tail call. The verifier rules are enough to ensure
// that we can lower this successfully without moving the return address
// around.
isTailCall = true;
} else if (isTailCall) {
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, SR != NotStructReturn,
MF.getFunction().hasStructRetAttr(), CLI.RetTy,
Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
IsSibcall = true;
if (isTailCall)
++NumTailCalls;
}
assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling convention fastcc, ghc or hipe");
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64.
if (IsWin64)
CCInfo.AllocateStack(32, 8);
CCInfo.AnalyzeArguments(Outs, CC_X86);
// In vectorcall calling convention a second pass is required for the HVA
// types.
if (CallingConv::X86_VectorCall == CallConv) {
CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
}
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
if (IsSibcall)
// This is a sibcall. The memory operands are available in the caller's
// own caller's stack.
NumBytes = 0;
else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
canGuaranteeTCO(CallConv))
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0;
if (isTailCall && !IsSibcall && !IsMustTail) {
// Lower arguments at fp - stackoffset + fpdiff.
unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
FPDiff = NumBytesCallerPushed - NumBytes;
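// For example, if the caller pops 16 bytes of incoming arguments on return
// but the callee needs 32 bytes of outgoing arguments, FPDiff is
// 16 - 32 = -16, and EmitTailCallStoreRetAddr later re-stores the return
// address relative to that delta.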
// Set the delta of the movement of the return address stack slot, but only
// if the new delta represents a larger movement (a more negative value) than
// the previous delta.
if (FPDiff < X86Info->getTCReturnAddrDelta())
X86Info->setTCReturnAddrDelta(FPDiff);
}
unsigned NumBytesToPush = NumBytes;
unsigned NumBytesToPop = NumBytes;
// If we have an inalloca argument, all stack space has already been allocated
// for us and will be right at the top of the stack. We don't support multiple
// arguments passed in memory when using inalloca.
if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
NumBytesToPush = 0;
if (!ArgLocs.back().isMemLoc())
report_fatal_error("cannot use inalloca attribute on a register "
"parameter");
if (ArgLocs.back().getLocMemOffset() != 0)
report_fatal_error("any parameter with the inalloca attribute must be "
"the only memory argument");
}
if (!IsSibcall)
Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
NumBytes - NumBytesToPush, dl);
SDValue RetAddrFrIdx;
// Load return address for tail calls.
if (isTailCall && FPDiff)
Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
Is64Bit, FPDiff, dl);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
SDValue StackPtr;
// The next loop assumes that the locations are in the same order as the
// input arguments.
assert(isSortedByValueNo(ArgLocs) &&
"Argument Location list must be sorted before lowering");
// Walk the register/memloc assignments, inserting copies/loads. In the case
// of tail call optimization, arguments are handled later.
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
++I, ++OutIndex) {
assert(OutIndex < Outs.size() && "Invalid Out index");
// Skip inalloca arguments, they have already been written.
ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
if (Flags.isInAlloca())
continue;
CCValAssign &VA = ArgLocs[I];
EVT RegVT = VA.getLocVT();
SDValue Arg = OutVals[OutIndex];
bool isByVal = Flags.isByVal();
// Promote the value if needed.
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::AExt:
if (Arg.getValueType().isVector() &&
Arg.getValueType().getVectorElementType() == MVT::i1)
Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
else if (RegVT.is128BitVector()) {
// Special case: passing MMX values in XMM registers.
Arg = DAG.getBitcast(MVT::i64, Arg);
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
} else
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::BCvt:
Arg = DAG.getBitcast(RegVT, Arg);
break;
case CCValAssign::Indirect: {
if (isByVal) {
// Memcpy the argument to a temporary stack slot to prevent
// the caller from seeing any modifications the callee may make
// as guaranteed by the `byval` attribute.
int FrameIdx = MF.getFrameInfo().CreateStackObject(
Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
false);
SDValue StackSlot =
DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
Chain =
CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
// From now on treat this as a regular pointer
Arg = StackSlot;
isByVal = false;
} else {
// Store the argument.
SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
Chain = DAG.getStore(
Chain, dl, Arg, SpillSlot,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
Arg = SpillSlot;
}
break;
}
}
if (VA.needsCustom()) {
assert(VA.getValVT() == MVT::v64i1 &&
"Currently the only custom case is when we split v64i1 to 2 regs");
// Split v64i1 value into two registers
Passv64i1ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++I],
Subtarget);
} else if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
if (isVarArg && IsWin64) {
// The Win64 ABI requires an argument XMM reg to be copied to the
// corresponding shadow reg if the callee is a varargs function.
unsigned ShadowReg = 0;
switch (VA.getLocReg()) {
case X86::XMM0: ShadowReg = X86::RCX; break;
case X86::XMM1: ShadowReg = X86::RDX; break;
case X86::XMM2: ShadowReg = X86::R8; break;
case X86::XMM3: ShadowReg = X86::R9; break;
}
if (ShadowReg)
RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
}
} else if (!IsSibcall && (!isTailCall || isByVal)) {
assert(VA.isMemLoc());
if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
getPointerTy(DAG.getDataLayout()));
MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
dl, DAG, VA, Flags));
}
}
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
if (Subtarget.isPICStyleGOT()) {
// ELF / PIC requires the GOT pointer to be in the EBX register before
// function calls via the PLT.
if (!isTailCall) {
RegsToPass.push_back(std::make_pair(
unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
getPointerTy(DAG.getDataLayout()))));
} else {
// If we are tail calling and generating PIC/GOT style code, load the
// address of the callee into ECX. The value in ECX is used as the target of
// the tail jump. This is done to circumvent the ebx/callee-saved problem
// for tail calls on PIC/GOT architectures. Normally we would just put the
// address of GOT into ebx and then call target@PLT. But for tail calls
// ebx would be restored (since ebx is callee saved) before jumping to the
// target@PLT.
// Note: The actual moving to ECX is done further down.
GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
if (G && !G->getGlobal()->hasLocalLinkage() &&
G->getGlobal()->hasDefaultVisibility())
Callee = LowerGlobalAddress(Callee, DAG);
else if (isa<ExternalSymbolSDNode>(Callee))
Callee = LowerExternalSymbol(Callee, DAG);
}
}
if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
// From AMD64 ABI document:
// For calls that may call functions that use varargs or stdargs
// (prototype-less calls or calls to functions containing ellipsis (...) in
// the declaration), %al is used as a hidden argument to specify the number
// of SSE registers used. The contents of %al do not need to match the number
// of registers exactly, but must be an upper bound on the number of SSE
// registers used, and must be in the range 0 - 8 inclusive.
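// For example, a varargs call that passes a single double in %xmm0 has one
// XMM register allocated, so NumXMMRegs below is 1 and %al is loaded with 1;
// a call that passes no vector or floating point arguments loads %al with 0.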
// Count the number of XMM registers allocated.
static const MCPhysReg XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
assert((Subtarget.hasSSE1() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
DAG.getConstant(NumXMMRegs, dl,
MVT::i8)));
}
if (isVarArg && IsMustTail) {
const auto &Forwards = X86Info->getForwardedMustTailRegParms();
for (const auto &F : Forwards) {
SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
}
}
// For tail calls lower the arguments to the 'real' stack slots. Sibcalls
// don't need this because the eligibility check rejects calls that require
// shuffling arguments passed in memory.
if (!IsSibcall && isTailCall) {
// Force all the incoming stack arguments to be loaded from the stack
// before any new outgoing arguments are stored to the stack, because the
// outgoing stack slots may alias the incoming argument stack slots, and
// the alias isn't otherwise explicit. This is slightly more conservative
// than necessary, because it means that each store effectively depends
// on every argument instead of just those arguments it would clobber.
SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
SmallVector<SDValue, 8> MemOpChains2;
SDValue FIN;
int FI = 0;
for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
++I, ++OutsIndex) {
CCValAssign &VA = ArgLocs[I];
if (VA.isRegLoc()) {
if (VA.needsCustom()) {
assert((CallConv == CallingConv::X86_RegCall) &&
"Expecting custom case only in regcall calling convention");
// This means that we are in the special case where one argument was
// passed through two register locations; skip the next location.
++I;
}
continue;
}
assert(VA.isMemLoc());
SDValue Arg = OutVals[OutsIndex];
ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
// Skip inalloca arguments. They don't require any work.
if (Flags.isInAlloca())
continue;
// Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff;
uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
if (Flags.isByVal()) {
// Copy relative to framepointer.
SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
getPointerTy(DAG.getDataLayout()));
Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
StackPtr, Source);
MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
ArgChain,
Flags, DAG, dl));
} else {
// Store relative to framepointer.
MemOpChains2.push_back(DAG.getStore(
ArgChain, dl, Arg, FIN,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
}
}
if (!MemOpChains2.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
// Store the return address to the appropriate stack slot.
Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
getPointerTy(DAG.getDataLayout()),
RegInfo->getSlotSize(), FPDiff, dl);
}
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into registers.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
// In the 64-bit large code model, we have to make all calls
// through a register, since the call instruction's 32-bit
// pc-relative offset may not be large enough to hold the whole
// address.
} else if (Callee->getOpcode() == ISD::GlobalAddress) {
// If the callee is a GlobalAddress node (quite common, every direct call
// is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
// it.
GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
// We should use extra load for direct calls to dllimported functions in
// non-JIT mode.
const GlobalValue *GV = G->getGlobal();
if (!GV->hasDLLImportStorageClass()) {
unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
Callee = DAG.getTargetGlobalAddress(
GV, dl, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
if (OpFlags == X86II::MO_GOTPCREL) {
// Add a wrapper.
Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
getPointerTy(DAG.getDataLayout()), Callee);
// Add extra indirection
Callee = DAG.getLoad(
getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlags =
Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
Callee = DAG.getTargetExternalSymbol(
S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
if (OpFlags == X86II::MO_GOTPCREL) {
Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
getPointerTy(DAG.getDataLayout()), Callee);
Callee = DAG.getLoad(
getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
} else if (Subtarget.isTarget64BitILP32() &&
Callee->getValueType(0) == MVT::i32) {
// Zero-extend the 32-bit Callee address into a 64-bit one, according to the x32 ABI
Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
}
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
if (!IsSibcall && isTailCall) {
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getIntPtrConstant(NumBytesToPop, dl, true),
DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
Ops.push_back(Chain);
Ops.push_back(Callee);
if (isTailCall)
Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
// Add argument registers to the end of the list so that they are known live
// into the call.
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
// If HasNCSR is asserted (the attribute NoCallerSavedRegisters exists), then
// we use the X86_INTR calling convention because it has the same CSR mask
// (same preserved registers).
const uint32_t *Mask = RegInfo->getCallPreservedMask(
MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
// If this is an invoke in a 32-bit function using a funclet-based
// personality, assume the function clobbers all registers. If an exception
// is thrown, the runtime will not restore CSRs.
// FIXME: Model this more precisely so that we can register allocate across
// the normal edge and spill and fill across the exceptional edge.
if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
const Function &CallerFn = MF.getFunction();
EHPersonality Pers =
CallerFn.hasPersonalityFn()
? classifyEHPersonality(CallerFn.getPersonalityFn())
: EHPersonality::Unknown;
if (isFuncletEHPersonality(Pers))
Mask = RegInfo->getNoPreservedMask();
}
// Define a new register mask from the existing mask.
uint32_t *RegMask = nullptr;
// In some calling conventions we need to remove the used physical registers
// from the reg mask.
if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
// Allocate a new Reg Mask and copy Mask.
RegMask = MF.allocateRegMask();
unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
// Make sure all sub registers of the argument registers are reset
// in the RegMask.
for (auto const &RegPair : RegsToPass)
for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
// Create the RegMask Operand according to our updated mask.
Ops.push_back(DAG.getRegisterMask(RegMask));
} else {
// Create the RegMask Operand according to the static mask.
Ops.push_back(DAG.getRegisterMask(Mask));
}
if (InFlag.getNode())
Ops.push_back(InFlag);
if (isTailCall) {
// We used to do:
//// If this is the first return lowered for this function, add the regs
//// to the liveout set for the function.
// This isn't right, although it's probably harmless on x86; liveouts
// should be computed from returns not tail calls. Consider a void
// function making a tail call to a function returning int.
MF.getFrameInfo().setHasTailCall();
return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
}
if (HasNoCfCheck && IsCFProtectionSupported) {
Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
} else {
Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
}
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPop;
if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
DAG.getTarget().Options.GuaranteedTailCallOpt))
NumBytesForCalleeToPop = NumBytes; // Callee pops everything
else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
!Subtarget.getTargetTriple().isOSMSVCRT() &&
SR == StackStructReturn)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets.
// For MSVC Win32 targets, the caller pops the hidden struct pointer.
NumBytesForCalleeToPop = 4;
else
NumBytesForCalleeToPop = 0; // Callee pops nothing.
if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
// No need to reset the stack after the call if the call doesn't return. To
// make the MI verifier happy, we'll pretend the callee does it for us.
NumBytesForCalleeToPop = NumBytes;
}
// Returns a flag for retval copy to use.
if (!IsSibcall) {
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getIntPtrConstant(NumBytesToPop, dl, true),
DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
true),
InFlag, dl);
InFlag = Chain.getValue(1);
}
// Handle result values, copying them out of physregs into vregs that we
// return.
return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
InVals, RegMask);
}
//===----------------------------------------------------------------------===//
// Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//
// Like stdcall, the callee cleans up the arguments, except that ECX is
// reserved for storing the address of the tail called function. Only 2
// registers are free for argument passing (inreg). Tail call optimization is
// performed
// provided:
// * tailcallopt is enabled
// * caller/callee are fastcc
// On the X86_64 architecture, with GOT-style position independent code, only
// local (within module) calls are supported at the moment.
// To keep the stack aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
// for example.)
// If the tail called callee has more arguments than the caller, the caller
// needs to make sure that there is room to move the RETADDR to. This is
// achieved by reserving an area the size of the argument delta right after the
// original RETADDR, but before the saved frame pointer or the spilled
// registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
// stack layout:
// arg1
// arg2
// RETADDR
// [ new RETADDR
// move area ]
// (possible EBP)
// ESI
// EDI
// local1 ..
/// Align the stack size, e.g. to 16n + 12 for a 16-byte alignment
/// requirement.
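///
/// For example, with a 16 byte stack alignment and a 4 byte slot size:
///   StackSize = 20: (20 & 15) = 4 <= 12, so the result is 20 + (12 - 4) = 28
///   StackSize = 30: (30 & 15) = 14 > 12, so the result is (30 & ~15) + 16 + 12 = 44
/// In both cases result + SlotSize is a multiple of the stack alignment
/// (32 and 48 respectively).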
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG& DAG) const {
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
int64_t Offset = StackSize;
unsigned SlotSize = RegInfo->getSlotSize();
if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
// The misaligned part is no larger than (StackAlignment - SlotSize), e.g. 12,
// so just add the difference.
Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
} else {
// Mask out the lower bits, then add the stack alignment once plus the
// remaining (StackAlignment - SlotSize) bytes (e.g. 12 for a 16 byte
// alignment and a 4 byte slot).
Offset = ((~AlignMask) & Offset) + StackAlignment +
(StackAlignment-SlotSize);
}
return Offset;
}
/// Return true if the given stack call argument is already available in the
/// same relative position of the caller's incoming argument stack.
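///
/// For example, an i32 argument that the caller received in a fixed,
/// immutable stack object at offset 8 and forwards unchanged to the callee at
/// outgoing offset 8, with matching size and extension flags, does not need
/// to be copied, and this function returns true for it.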
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
const X86InstrInfo *TII, const CCValAssign &VA) {
unsigned Bytes = Arg.getValueSizeInBits() / 8;
for (;;) {
// Look through nodes that don't alter the bits of the incoming value.
unsigned Op = Arg.getOpcode();
if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
Arg = Arg.getOperand(0);
continue;
}
if (Op == ISD::TRUNCATE) {
const SDValue &TruncInput = Arg.getOperand(0);
if (TruncInput.getOpcode() == ISD::AssertZext &&
cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
Arg.getValueType()) {
Arg = TruncInput.getOperand(0);
continue;
}
}
break;
}
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
if (!TargetRegisterInfo::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
if (!Def)
return false;
if (!Flags.isByVal()) {
if (!TII->isLoadFromStackSlot(*Def, FI))
return false;
} else {
unsigned Opcode = Def->getOpcode();
if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
Opcode == X86::LEA64_32r) &&
Def->getOperand(1).isFI()) {
FI = Def->getOperand(1).getIndex();
Bytes = Flags.getByValSize();
} else
return false;
}
} else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
if (Flags.isByVal())
// ByVal argument is passed in as a pointer but it's now being
// dereferenced. e.g.
// define @foo(%struct.X* %A) {
// tail call @bar(%struct.X* byval %A)
// }
return false;
SDValue Ptr = Ld->getBasePtr();
FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
if (!FINode)
return false;
FI = FINode->getIndex();
} else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
FI = FINode->getIndex();
Bytes = Flags.getByValSize();
} else
return false;
assert(FI != INT_MAX);
if (!MFI.isFixedObjectIndex(FI))
return false;
if (Offset != MFI.getObjectOffset(FI))
return false;
// If this is not byval, check that the argument stack object is immutable.
// inalloca and argument copy elision can create mutable argument stack
// objects. Byval objects can be mutated, but a byval call intends to pass the
// mutated memory.
if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
return false;
if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
// If the argument location is wider than the argument type, check that any
// extension flags match.
if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
Flags.isSExt() != MFI.isObjectSExt(FI)) {
return false;
}
}
return Bytes == MFI.getObjectSize(FI);
}
/// Check whether the call is eligible for tail call optimization. Targets
/// that want to do tail call optimization should implement this function.
bool X86TargetLowering::IsEligibleForTailCallOptimization(
SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
if (!mayTailCallThisCC(CalleeCC))
return false;
// If -tailcallopt is specified, make fastcc functions tail-callable.
MachineFunction &MF = DAG.getMachineFunction();
const Function &CallerF = MF.getFunction();
// If the function return type is x86_fp80 and the callee return type is not,
// then the FP_EXTEND of the call result is not a nop. It's not safe to
// perform a tailcall optimization here.
if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
return false;
CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
// Win64 functions have extra shadow space for argument homing. Don't do the
// sibcall if the caller and callee have mismatched expectations for this
// space.
if (IsCalleeWin64 != IsCallerWin64)
return false;
if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
if (canGuaranteeTCO(CalleeCC) && CCMatch)
return true;
return false;
}
// Look for obvious safe cases to perform tail call optimization that do not
// require ABI changes. This is what gcc calls sibcall.
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
if (RegInfo->needsStackRealignment(MF))
return false;
// Also avoid sibcall optimization if either caller or callee uses struct
// return semantics.
if (isCalleeStructRet || isCallerStructRet)
return false;
// Do not sibcall optimize vararg calls unless all arguments are passed via
// registers.
LLVMContext &C = *DAG.getContext();
if (isVarArg && !Outs.empty()) {
// Optimizing for varargs on Win64 is unlikely to be safe without
// additional testing.
if (IsCalleeWin64 || IsCallerWin64)
return false;
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
if (!ArgLocs[i].isRegLoc())
return false;
}
// If the call result is in ST0 / ST1, it needs to be popped off the x87
// stack. Therefore, if it's not used by the call it is not safe to optimize
// this into a sibcall.
bool Unused = false;
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
if (!Ins[i].Used) {
Unused = true;
break;
}
}
if (Unused) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
return false;
}
}
// Check that the call results are passed in the same way.
if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
RetCC_X86, RetCC_X86))
return false;
// The callee has to preserve all registers the caller needs to preserve.
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (!CCMatch) {
const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
}
unsigned StackArgsSize = 0;
// If the callee takes no arguments then go on to check the results of the
// call.
if (!Outs.empty()) {
// Check if stack adjustment is needed. For now, do not do this if any
// argument is passed on the stack.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
// Allocate shadow area for Win64
if (IsCalleeWin64)
CCInfo.AllocateStack(32, 8);
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
StackArgsSize = CCInfo.getNextStackOffset();
if (CCInfo.getNextStackOffset()) {
// Check if the arguments are already laid out in the right way as
// the caller's fixed stack objects.
MachineFrameInfo &MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (VA.getLocInfo() == CCValAssign::Indirect)
return false;
if (!VA.isRegLoc()) {
if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
MFI, MRI, TII, VA))
return false;
}
}
}
bool PositionIndependent = isPositionIndependent();
// If the tailcall address may be in a register, then make sure it's
// possible to register allocate for it. In 32-bit, the call address can
// only target EAX, EDX, or ECX since the tail call must be scheduled after
// callee-saved registers are restored. These happen to be the same
// registers used to pass 'inreg' arguments so watch out for those.
if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
!isa<ExternalSymbolSDNode>(Callee)) ||
PositionIndependent)) {
unsigned NumInRegs = 0;
// In PIC we need an extra register to formulate the address computation
// for the callee.
unsigned MaxInRegs = PositionIndependent ? 2 : 3;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
if (!VA.isRegLoc())
continue;
unsigned Reg = VA.getLocReg();
switch (Reg) {
default: break;
case X86::EAX: case X86::EDX: case X86::ECX:
if (++NumInRegs == MaxInRegs)
return false;
break;
}
}
}
const MachineRegisterInfo &MRI = MF.getRegInfo();
if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
return false;
}
bool CalleeWillPop =
X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
MF.getTarget().Options.GuaranteedTailCallOpt);
if (unsigned BytesToPop =
MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
// If we have bytes to pop, the callee must pop them.
bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
if (!CalleePopMatches)
return false;
} else if (CalleeWillPop && StackArgsSize > 0) {
// If we don't have bytes to pop, make sure the callee doesn't pop any.
return false;
}
return true;
}
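// Illustrative example (hypothetical IR, assuming default target options and a
// callee declared with a matching signature and calling convention): a call
// already in tail position such as
//   define i32 @caller(i32 %x) {
//     %r = tail call i32 @callee(i32 %x)
//     ret i32 %r
//   }
// typically passes the checks above and is lowered as a sibcall, whereas a
// struct-return callee/caller or a caller that needs dynamic stack realignment
// causes IsEligibleForTailCallOptimization to return false.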
FastISel *
X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const {
return X86::createFastISel(funcInfo, libInfo);
}
//===----------------------------------------------------------------------===//
// Other Lowering Hooks
//===----------------------------------------------------------------------===//
static bool MayFoldLoad(SDValue Op) {
return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
}
static bool MayFoldIntoStore(SDValue Op) {
return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
}
static bool MayFoldIntoZeroExtend(SDValue Op) {
if (Op.hasOneUse()) {
unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
return (ISD::ZERO_EXTEND == Opcode);
}
return false;
}
static bool isTargetShuffle(unsigned Opcode) {
switch(Opcode) {
default: return false;
case X86ISD::BLENDI:
case X86ISD::PSHUFB:
case X86ISD::PSHUFD:
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
case X86ISD::SHUFP:
case X86ISD::INSERTPS:
case X86ISD::EXTRQI:
case X86ISD::INSERTQI:
case X86ISD::PALIGNR:
case X86ISD::VSHLDQ:
case X86ISD::VSRLDQ:
case X86ISD::MOVLHPS:
case X86ISD::MOVHLPS:
case X86ISD::MOVSHDUP:
case X86ISD::MOVSLDUP:
case X86ISD::MOVDDUP:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
case X86ISD::UNPCKL:
case X86ISD::UNPCKH:
case X86ISD::VBROADCAST:
case X86ISD::VPERMILPI:
case X86ISD::VPERMILPV:
case X86ISD::VPERM2X128:
case X86ISD::SHUF128:
case X86ISD::VPERMIL2:
case X86ISD::VPERMI:
case X86ISD::VPPERM:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
case X86ISD::VZEXT_MOVL:
return true;
}
}
static bool isTargetShuffleVariableMask(unsigned Opcode) {
switch (Opcode) {
default: return false;
// Target Shuffles.
case X86ISD::PSHUFB:
case X86ISD::VPERMILPV:
case X86ISD::VPERMIL2:
case X86ISD::VPPERM:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
return true;
// 'Faux' Target Shuffles.
case ISD::OR:
case ISD::AND:
case X86ISD::ANDNP:
return true;
}
}
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
int ReturnAddrIndex = FuncInfo->getRAIndex();
if (ReturnAddrIndex == 0) {
// Set up a frame object for the return address.
unsigned SlotSize = RegInfo->getSlotSize();
ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
-(int64_t)SlotSize,
false);
FuncInfo->setRAIndex(ReturnAddrIndex);
}
return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
}
bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
bool hasSymbolicDisplacement) {
// Offset should fit into 32 bit immediate field.
if (!isInt<32>(Offset))
return false;
// If we don't have a symbolic displacement - we don't have any extra
// restrictions.
if (!hasSymbolicDisplacement)
return true;
// FIXME: Some tweaks might be needed for medium code model.
if (M != CodeModel::Small && M != CodeModel::Kernel)
return false;
// For the small code model we assume that the last object is at most 16MB
// below the 31-bit boundary. We may also accept fairly large negative
// constants, knowing that all objects are in the positive half of the address
// space.
if (M == CodeModel::Small && Offset < 16*1024*1024)
return true;
// For the kernel code model we know that all objects reside in the negative
// half of the 32-bit address space. We must not accept negative offsets, since
// they may fall just out of range, but we may accept fairly large positive
// ones.
if (M == CodeModel::Kernel && Offset >= 0)
return true;
return false;
}
/// Determines whether the callee is required to pop its own arguments.
/// Callee pop is necessary to support tail calls.
bool X86::isCalleePop(CallingConv::ID CallingConv,
bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
// If GuaranteeTCO is true, we force some calls to be callee pop so that we
// can guarantee TCO.
if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
return true;
switch (CallingConv) {
default:
return false;
case CallingConv::X86_StdCall:
case CallingConv::X86_FastCall:
case CallingConv::X86_ThisCall:
case CallingConv::X86_VectorCall:
return !is64Bit;
}
}
/// Return true if the condition is an unsigned comparison operation.
static bool isX86CCUnsigned(unsigned X86CC) {
switch (X86CC) {
default:
llvm_unreachable("Invalid integer condition!");
case X86::COND_E:
case X86::COND_NE:
case X86::COND_B:
case X86::COND_A:
case X86::COND_BE:
case X86::COND_AE:
return true;
case X86::COND_G:
case X86::COND_GE:
case X86::COND_L:
case X86::COND_LE:
return false;
}
}
static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
switch (SetCCOpcode) {
default: llvm_unreachable("Invalid integer condition!");
case ISD::SETEQ: return X86::COND_E;
case ISD::SETGT: return X86::COND_G;
case ISD::SETGE: return X86::COND_GE;
case ISD::SETLT: return X86::COND_L;
case ISD::SETLE: return X86::COND_LE;
case ISD::SETNE: return X86::COND_NE;
case ISD::SETULT: return X86::COND_B;
case ISD::SETUGT: return X86::COND_A;
case ISD::SETULE: return X86::COND_BE;
case ISD::SETUGE: return X86::COND_AE;
}
}
/// Do a one-to-one translation of a ISD::CondCode to the X86-specific
/// condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
bool isFP, SDValue &LHS, SDValue &RHS,
SelectionDAG &DAG) {
if (!isFP) {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
// X > -1 -> X == 0, jump !sign.
RHS = DAG.getConstant(0, DL, RHS.getValueType());
return X86::COND_NS;
}
if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
// X < 0 -> X == 0, jump on sign.
return X86::COND_S;
}
if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
// X < 1 -> X <= 0
RHS = DAG.getConstant(0, DL, RHS.getValueType());
return X86::COND_LE;
}
}
return TranslateIntegerX86CC(SetCCOpcode);
}
// First determine if it is required or is profitable to flip the operands.
// If LHS is a foldable load, but RHS is not, flip the condition.
if (ISD::isNON_EXTLoad(LHS.getNode()) &&
!ISD::isNON_EXTLoad(RHS.getNode())) {
SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
std::swap(LHS, RHS);
}
switch (SetCCOpcode) {
default: break;
case ISD::SETOLT:
case ISD::SETOLE:
case ISD::SETUGT:
case ISD::SETUGE:
std::swap(LHS, RHS);
break;
}
// On a floating point condition, the flags are set as follows:
// ZF PF CF op
// 0 | 0 | 0 | X > Y
// 0 | 0 | 1 | X < Y
// 1 | 0 | 0 | X == Y
// 1 | 1 | 1 | unordered
switch (SetCCOpcode) {
default: llvm_unreachable("Condcode should be pre-legalized away");
case ISD::SETUEQ:
case ISD::SETEQ: return X86::COND_E;
case ISD::SETOLT: // flipped
case ISD::SETOGT:
case ISD::SETGT: return X86::COND_A;
case ISD::SETOLE: // flipped
case ISD::SETOGE:
case ISD::SETGE: return X86::COND_AE;
case ISD::SETUGT: // flipped
case ISD::SETULT:
case ISD::SETLT: return X86::COND_B;
case ISD::SETUGE: // flipped
case ISD::SETULE:
case ISD::SETLE: return X86::COND_BE;
case ISD::SETONE:
case ISD::SETNE: return X86::COND_NE;
case ISD::SETUO: return X86::COND_P;
case ISD::SETO: return X86::COND_NP;
case ISD::SETOEQ:
case ISD::SETUNE: return X86::COND_INVALID;
}
}
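// Illustrative example of the operand flipping above (not from the source):
// lowering X olt Y swaps the operands, so a UCOMISS-style compare of (Y, X) is
// emitted and COND_A (CF==0 and ZF==0) is used, which holds exactly when the
// compare is ordered and Y > X, i.e. X < Y.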
/// Is there a floating point cmov for the specific X86 condition code?
/// The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
switch (X86CC) {
default:
return false;
case X86::COND_B:
case X86::COND_BE:
case X86::COND_E:
case X86::COND_P:
case X86::COND_A:
case X86::COND_AE:
case X86::COND_NE:
case X86::COND_NP:
return true;
}
}
bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &I,
MachineFunction &MF,
unsigned Intrinsic) const {
const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
if (!IntrData)
return false;
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.flags = MachineMemOperand::MONone;
Info.offset = 0;
switch (IntrData->Type) {
case TRUNCATE_TO_MEM_VI8:
case TRUNCATE_TO_MEM_VI16:
case TRUNCATE_TO_MEM_VI32: {
Info.ptrVal = I.getArgOperand(0);
MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
ScalarVT = MVT::i8;
else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
ScalarVT = MVT::i16;
else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
ScalarVT = MVT::i32;
Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
Info.align = 1;
Info.flags |= MachineMemOperand::MOStore;
break;
}
default:
return false;
}
return true;
}
/// Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
return true;
}
return false;
}
bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
ISD::LoadExtType ExtTy,
EVT NewVT) const {
// "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
// relocation target a movq or addq instruction: don't let the load shrink.
SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
return true;
}
/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
if (BitSize == 0 || BitSize > 64)
return false;
return true;
}
bool X86TargetLowering::reduceSelectOfFPConstantLoads(bool IsFPSetCC) const {
// If we are using XMM registers in the ABI and the condition of the select is
// a floating-point compare and we have blendv or conditional move, then it is
// cheaper to select instead of doing a cross-register move and creating a
// load that depends on the compare result.
return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
}
bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
// TODO: It might be a win to ease or lift this restriction, but the generic
// folds in DAGCombiner conflict with vector folds for an AVX512 target.
if (VT.isVector() && Subtarget.hasAVX512())
return false;
return true;
}
bool X86TargetLowering::decomposeMulByConstant(EVT VT, SDValue C) const {
// TODO: We handle scalars using custom code, but generic combining could make
// that unnecessary.
APInt MulC;
if (!ISD::isConstantSplatVector(C.getNode(), MulC))
return false;
// If vector multiply is legal, assume that's faster than shl + add/sub.
// TODO: Multiply is a complex op with higher latency and lower throughput in
// most implementations, so this check could be loosened based on type
// and/or a CPU attribute.
if (isOperationLegal(ISD::MUL, VT))
return false;
// shl+add, shl+sub, shl+add+neg
return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
(1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
}
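// Illustrative splat constants accepted by the check above (hypothetical
// values, one per matching condition):
//   MulC ==  7: MulC + 1 == 8,  so x * 7  -> (x << 3) - x       (shl+sub)
//   MulC ==  5: MulC - 1 == 4,  so x * 5  -> (x << 2) + x       (shl+add)
//   MulC == -3: 1 - MulC == 4,  so x * -3 -> x - (x << 2)       (shl+sub)
//   MulC == -9: -(MulC + 1) == 8, so x * -9 -> -((x << 3) + x)  (shl+add+neg)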
bool X86TargetLowering::shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
bool IsSigned) const {
// f80 UINT_TO_FP is more efficient using Strict code if FCMOV is available.
return !IsSigned && FpVT == MVT::f80 && Subtarget.hasCMov();
}
bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const {
if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
return false;
// Mask vectors support all subregister combinations and operations that
// extract half of vector.
if (ResVT.getVectorElementType() == MVT::i1)
return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
(Index == ResVT.getVectorNumElements()));
return (Index % ResVT.getVectorNumElements()) == 0;
}
bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
// If the vector op is not supported, try to convert to scalar.
EVT VecVT = VecOp.getValueType();
if (!isOperationLegalOrCustomOrPromote(VecOp.getOpcode(), VecVT))
return true;
// If the vector op is supported, but the scalar op is not, the transform may
// not be worthwhile.
EVT ScalarVT = VecVT.getScalarType();
return isOperationLegalOrCustomOrPromote(VecOp.getOpcode(), ScalarVT);
}
bool X86TargetLowering::isCheapToSpeculateCttz() const {
// Speculate cttz only if we can directly use TZCNT.
return Subtarget.hasBMI();
}
bool X86TargetLowering::isCheapToSpeculateCtlz() const {
// Speculate ctlz only if we can directly use LZCNT.
return Subtarget.hasLZCNT();
}
bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT,
EVT BitcastVT) const {
if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
BitcastVT.getVectorElementType() == MVT::i1)
return false;
if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
return false;
return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT);
}
bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
const SelectionDAG &DAG) const {
// Do not merge up to a float value size (128 bits) if the NoImplicitFloat
// attribute is set.
bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
if (NoFloat) {
unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
return (MemVT.getSizeInBits() <= MaxIntSize);
}
return true;
}
bool X86TargetLowering::isCtlzFast() const {
return Subtarget.hasFastLZCNT();
}
bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
const Instruction &AndI) const {
return true;
}
bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
EVT VT = Y.getValueType();
if (VT.isVector())
return false;
if (!Subtarget.hasBMI())
return false;
// There are only 32-bit and 64-bit forms for 'andn'.
if (VT != MVT::i32 && VT != MVT::i64)
return false;
return !isa<ConstantSDNode>(Y);
}
bool X86TargetLowering::hasAndNot(SDValue Y) const {
EVT VT = Y.getValueType();
if (!VT.isVector())
return hasAndNotCompare(Y);
// Vector.
if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
return false;
if (VT == MVT::v4i32)
return true;
return Subtarget.hasSSE2();
}
bool X86TargetLowering::preferShiftsToClearExtremeBits(SDValue Y) const {
EVT VT = Y.getValueType();
// For vectors, we don't have a preference, but we probably want a mask.
if (VT.isVector())
return false;
// 64-bit shifts on 32-bit targets produce really bad bloated code.
if (VT == MVT::i64 && !Subtarget.is64Bit())
return false;
return true;
}
bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
// Any legal vector type can be splatted more efficiently than
// loading/spilling from memory.
return isTypeLegal(VT);
}
MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
MVT VT = MVT::getIntegerVT(NumBits);
if (isTypeLegal(VT))
return VT;
// PMOVMSKB can handle this.
if (NumBits == 128 && isTypeLegal(MVT::v16i8))
return MVT::v16i8;
// VPMOVMSKB can handle this.
if (NumBits == 256 && isTypeLegal(MVT::v32i8))
return MVT::v32i8;
// TODO: Allow 64-bit type for 32-bit target.
// TODO: 512-bit types should be allowed, but make sure that those
// cases are handled in combineVectorSizedSetCCEquality().
return MVT::INVALID_SIMPLE_VALUE_TYPE;
}
/// Val is the undef sentinel value or equal to the specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
return ((Val == SM_SentinelUndef) || (Val == CmpVal));
}
/// Val is either the undef or zero sentinel value.
static bool isUndefOrZero(int Val) {
return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
}
/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size is the undef sentinel value.
static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
if (Mask[i] != SM_SentinelUndef)
return false;
return true;
}
/// Return true if Val falls within the specified range [Low, Hi).
static bool isInRange(int Val, int Low, int Hi) {
return (Val >= Low && Val < Hi);
}
/// Return true if the value of any element in Mask falls within the specified
/// range [Low, Hi).
static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
for (int M : Mask)
if (isInRange(M, Low, Hi))
return true;
return false;
}
/// Return true if Val is undef or if its value falls within the
/// specified range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {
return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
}
/// Return true if every element in Mask is undef or if its value
/// falls within the specified range [Low, Hi).
static bool isUndefOrInRange(ArrayRef<int> Mask,
int Low, int Hi) {
for (int M : Mask)
if (!isUndefOrInRange(M, Low, Hi))
return false;
return true;
}
/// Return true if Val is undef, zero or if its value falls within the
/// specified range [Low, Hi).
static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
}
/// Return true if every element in Mask is undef, zero or if its value
/// falls within the specified range [Low, Hi).
static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
for (int M : Mask)
if (!isUndefOrZeroOrInRange(M, Low, Hi))
return false;
return true;
}
/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos + Size, falls within the specified
/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
unsigned Size, int Low, int Step = 1) {
for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
if (!isUndefOrEqual(Mask[i], Low))
return false;
return true;
}
/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, falls within the specified
/// sequential range [Low, Low + Size), or is undef or zero.
static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
unsigned Size, int Low) {
for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
return false;
return true;
}
/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size is undef or is zero.
static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
unsigned Size) {
for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
if (!isUndefOrZero(Mask[i]))
return false;
return true;
}
/// Helper function to test whether a shuffle mask could be
/// simplified by widening the elements being shuffled.
///
/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
/// leaves it in an unspecified state.
///
/// NOTE: This must handle normal vector shuffle masks and *target* vector
/// shuffle masks. The latter have the special property of a '-2' representing
/// a zero-ed lane of a vector.
static bool canWidenShuffleElements(ArrayRef<int> Mask,
SmallVectorImpl<int> &WidenedMask) {
WidenedMask.assign(Mask.size() / 2, 0);
for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
int M0 = Mask[i];
int M1 = Mask[i + 1];
// If both elements are undef, it's trivial.
if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
WidenedMask[i / 2] = SM_SentinelUndef;
continue;
}
// Check for an undef mask and a mask value properly aligned to fit with
// a pair of values. If we find such a case, use the non-undef mask's value.
if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
WidenedMask[i / 2] = M1 / 2;
continue;
}
if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
WidenedMask[i / 2] = M0 / 2;
continue;
}
// When zeroing, we need to spread the zeroing across both lanes to widen.
if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
(M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
WidenedMask[i / 2] = SM_SentinelZero;
continue;
}
return false;
}
// Finally check if the two mask values are adjacent and aligned with
// a pair.
if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
WidenedMask[i / 2] = M0 / 2;
continue;
}
// Otherwise we can't safely widen the elements used in this shuffle.
return false;
}
assert(WidenedMask.size() == Mask.size() / 2 &&
"Incorrect size of mask after widening the elements!");
return true;
}
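// Illustrative masks for the helper above (hypothetical values; -1 denotes
// SM_SentinelUndef):
//   {  0, 1, 6, 7 }  -> widens to { 0, 3 }
//   { -1, 1, 2, -1 } -> widens to { 0, 1 }
//   { SM_SentinelZero, SM_SentinelZero, 4, 5 } -> { SM_SentinelZero, 2 }
//   {  0, 2, 5, 4 }  -> fails: neither pair forms adjacent, aligned elements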
static bool canWidenShuffleElements(ArrayRef<int> Mask,
const APInt &Zeroable,
SmallVectorImpl<int> &WidenedMask) {
SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end());
for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
if (TargetMask[i] == SM_SentinelUndef)
continue;
if (Zeroable[i])
TargetMask[i] = SM_SentinelZero;
}
return canWidenShuffleElements(TargetMask, WidenedMask);
}
static bool canWidenShuffleElements(ArrayRef<int> Mask) {
SmallVector<int, 32> WidenedMask;
return canWidenShuffleElements(Mask, WidenedMask);
}
/// Returns true if Elt is a constant zero or a floating point constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
return isNullConstant(Elt) || isNullFPConstant(Elt);
}
// Build a vector of constants.
// Use an UNDEF node if MaskElt == -1.
// Split 64-bit constants in 32-bit mode.
static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
const SDLoc &dl, bool IsMask = false) {
SmallVector<SDValue, 32> Ops;
bool Split = false;
MVT ConstVecVT = VT;
unsigned NumElts = VT.getVectorNumElements();
bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
Split = true;
}
MVT EltVT = ConstVecVT.getVectorElementType();
for (unsigned i = 0; i < NumElts; ++i) {
bool IsUndef = Values[i] < 0 && IsMask;
SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
DAG.getConstant(Values[i], dl, EltVT);
Ops.push_back(OpNode);
if (Split)
Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
DAG.getConstant(0, dl, EltVT));
}
SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
if (Split)
ConstsNode = DAG.getBitcast(VT, ConstsNode);
return ConstsNode;
}
static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
assert(Bits.size() == Undefs.getBitWidth() &&
"Unequal constant and undef arrays");
SmallVector<SDValue, 32> Ops;
bool Split = false;
MVT ConstVecVT = VT;
unsigned NumElts = VT.getVectorNumElements();
bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
Split = true;
}
MVT EltVT = ConstVecVT.getVectorElementType();
for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
if (Undefs[i]) {
Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
continue;
}
const APInt &V = Bits[i];
assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
if (Split) {
Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
} else if (EltVT == MVT::f32) {
APFloat FV(APFloat::IEEEsingle(), V);
Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
} else if (EltVT == MVT::f64) {
APFloat FV(APFloat::IEEEdouble(), V);
Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
} else {
Ops.push_back(DAG.getConstant(V, dl, EltVT));
}
}
SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
return DAG.getBitcast(VT, ConstsNode);
}
/// Returns a vector of specified type with all zero elements.
static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
SelectionDAG &DAG, const SDLoc &dl) {
assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
VT.getVectorElementType() == MVT::i1) &&
"Unexpected vector type");
// Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
// type. This ensures they get CSE'd. But if the integer type is not
// available, use a floating-point +0.0 instead.
SDValue Vec;
if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
} else if (VT.getVectorElementType() == MVT::i1) {
assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
"Unexpected vector type");
Vec = DAG.getConstant(0, dl, VT);
} else {
unsigned Num32BitElts = VT.getSizeInBits() / 32;
Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
}
return DAG.getBitcast(VT, Vec);
}
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
const SDLoc &dl, unsigned vectorWidth) {
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
unsigned Factor = VT.getSizeInBits()/vectorWidth;
EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
VT.getVectorNumElements()/Factor);
// Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
// This is the index of the first element of the vectorWidth-bit chunk
// we want. Since ElemsPerChunk is a power of 2, we just need to clear the
// low bits.
IdxVal &= ~(ElemsPerChunk - 1);
// If the input is a buildvector just emit a smaller one.
if (Vec.getOpcode() == ISD::BUILD_VECTOR)
return DAG.getBuildVector(ResultVT, dl,
Vec->ops().slice(IdxVal, ElemsPerChunk));
SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, const SDLoc &dl) {
assert((Vec.getValueType().is256BitVector() ||
Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
return extractSubVector(Vec, IdxVal, DAG, dl, 128);
}
/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
return extractSubVector(Vec, IdxVal, DAG, dl, 256);
}
static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, const SDLoc &dl,
unsigned vectorWidth) {
assert((vectorWidth == 128 || vectorWidth == 256) &&
"Unsupported vector width");
// Inserting UNDEF just returns Result unchanged.
if (Vec.isUndef())
return Result;
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
EVT ResultVT = Result.getValueType();
// Insert the relevant vectorWidth bits.
unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
// This is the index of the first element of the vectorWidth-bit chunk
// we want. Since ElemsPerChunk is a power of 2, we just need to clear the
// low bits.
IdxVal &= ~(ElemsPerChunk - 1);
SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}
/// Widen a vector to a larger size with the same scalar type, with the new
/// elements either zero or undef.
static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
const X86Subtarget &Subtarget, SelectionDAG &DAG,
const SDLoc &dl) {
assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
Vec.getValueType().getScalarType() == VT.getScalarType() &&
"Unsupported vector widening type");
SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
: DAG.getUNDEF(VT);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
DAG.getIntPtrConstant(0, dl));
}
// Helper for splitting the operands of an operation into legal target sizes
// and applying a function to each part.
// Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
// 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
// deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
// The argument Builder is a function that will be applied on each split part:
// SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
template <typename F>
SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
F Builder, bool CheckBWI = true) {
assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
unsigned NumSubs = 1;
if ((CheckBWI && Subtarget.useBWIRegs()) ||
(!CheckBWI && Subtarget.useAVX512Regs())) {
if (VT.getSizeInBits() > 512) {
NumSubs = VT.getSizeInBits() / 512;
assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
}
} else if (Subtarget.hasAVX2()) {
if (VT.getSizeInBits() > 256) {
NumSubs = VT.getSizeInBits() / 256;
assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
}
} else {
if (VT.getSizeInBits() > 128) {
NumSubs = VT.getSizeInBits() / 128;
assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
}
}
if (NumSubs == 1)
return Builder(DAG, DL, Ops);
SmallVector<SDValue, 4> Subs;
for (unsigned i = 0; i != NumSubs; ++i) {
SmallVector<SDValue, 2> SubOps;
for (SDValue Op : Ops) {
EVT OpVT = Op.getValueType();
unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
}
Subs.push_back(Builder(DAG, DL, SubOps));
}
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
}
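// Minimal usage sketch for SplitOpsAndApply (operand and lambda names are
// placeholders, not part of this file):
//   auto AddBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
//                        ArrayRef<SDValue> Ops) {
//     return DAG.getNode(ISD::ADD, DL, Ops[0].getValueType(), Ops);
//   };
//   SDValue R = SplitOpsAndApply(DAG, Subtarget, DL, VT, {Op0, Op1},
//                                AddBuilder);
// The builder is invoked once per legal-sized chunk and the partial results
// are concatenated back into VT.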
// Return true if the instruction zeroes the unused upper part of the
// destination and accepts a mask.
static bool isMaskedZeroUpperBitsvXi1(unsigned int Opcode) {
switch (Opcode) {
default:
return false;
case X86ISD::CMPM:
case X86ISD::CMPM_RND:
case ISD::SETCC:
return true;
}
}
/// Insert i1-subvector to i1-vector.
static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
SDValue SubVec = Op.getOperand(1);
SDValue Idx = Op.getOperand(2);
if (!isa<ConstantSDNode>(Idx))
return SDValue();
// Inserting undef is a nop. We can just return the original vector.
if (SubVec.isUndef())
return Vec;
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
return Op;
MVT OpVT = Op.getSimpleValueType();
unsigned NumElems = OpVT.getVectorNumElements();
SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
// Extend to natively supported kshift.
MVT WideOpVT = OpVT;
if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
// Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
// if necessary.
if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
// May need to promote to a legal type.
Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
getZeroVector(WideOpVT, Subtarget, DAG, dl),
SubVec, Idx);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
MVT SubVecVT = SubVec.getSimpleValueType();
unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
assert(IdxVal + SubVecNumElems <= NumElems &&
IdxVal % SubVecVT.getSizeInBits() == 0 &&
"Unexpected index value in INSERT_SUBVECTOR");
SDValue Undef = DAG.getUNDEF(WideOpVT);
if (IdxVal == 0) {
// Zero lower bits of the Vec
SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
ZeroIdx);
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
// Merge them together, SubVec should be zero extended.
SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
getZeroVector(WideOpVT, Subtarget, DAG, dl),
SubVec, ZeroIdx);
Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
Undef, SubVec, ZeroIdx);
if (Vec.isUndef()) {
assert(IdxVal != 0 && "Unexpected index");
SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
DAG.getConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
}
if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
assert(IdxVal != 0 && "Unexpected index");
NumElems = WideOpVT.getVectorNumElements();
unsigned ShiftLeft = NumElems - SubVecNumElems;
unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
DAG.getConstant(ShiftLeft, dl, MVT::i8));
if (ShiftRight != 0)
SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
DAG.getConstant(ShiftRight, dl, MVT::i8));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
}
// Simple case: put the subvector in the upper part.
if (IdxVal + SubVecNumElems == NumElems) {
SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
DAG.getConstant(IdxVal, dl, MVT::i8));
if (SubVecNumElems * 2 == NumElems) {
// Special case: use a legal zero-extending insert_subvector. This allows
// isel to optimize when bits are known zero.
Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
getZeroVector(WideOpVT, Subtarget, DAG, dl),
Vec, ZeroIdx);
} else {
// Otherwise use explicit shifts to zero the bits.
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
Undef, Vec, ZeroIdx);
NumElems = WideOpVT.getVectorNumElements();
SDValue ShiftBits = DAG.getConstant(NumElems - IdxVal, dl, MVT::i8);
Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
}
Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
// Inserting into the middle is more complicated.
NumElems = WideOpVT.getVectorNumElements();
// Widen the vector if needed.
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
// Move the current value of the bits being replaced down to the lsbs.
Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
DAG.getConstant(IdxVal, dl, MVT::i8));
// Xor with the new bit.
Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Op, SubVec);
// Shift to MSB, filling bottom bits with 0.
unsigned ShiftLeft = NumElems - SubVecNumElems;
Op = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Op,
DAG.getConstant(ShiftLeft, dl, MVT::i8));
// Shift to the final position, filling upper bits with 0.
unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Op,
DAG.getConstant(ShiftRight, dl, MVT::i8));
// Xor with original vector leaving the new value.
Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Vec, Op);
// Reduce to original width if needed.
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
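// Bit-level sketch of the shift/xor sequence above (illustrative, assuming a
// hypothetical 8-element wide type with SubVecNumElems == 2 and IdxVal == 2;
// Vec bits are v7..v0 and SubVec bits are s1 s0):
//   KSHIFTR Vec by 2:       0 0 v7 v6 v5 v4 v3 v2
//   XOR with SubVec:        low two bits become v3^s1, v2^s0
//   KSHIFTL by 6:           v3^s1 v2^s0 0 0 0 0 0 0
//   KSHIFTR by 4:           0 0 0 0 v3^s1 v2^s0 0 0
//   XOR with original Vec:  v7 v6 v5 v4 s1 s0 v1 v0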
static SDValue concatSubVectors(SDValue V1, SDValue V2, EVT VT,
unsigned NumElems, SelectionDAG &DAG,
const SDLoc &dl, unsigned VectorWidth) {
SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, VectorWidth);
return insertSubVector(V, V2, NumElems / 2, DAG, dl, VectorWidth);
}
/// Returns a vector of specified type with all bits set.
/// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
/// Then bitcast to their original type, ensuring they get CSE'd.
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
"Expected a 128/256/512-bit vector type");
APInt Ones = APInt::getAllOnesValue(32);
unsigned NumElts = VT.getSizeInBits() / 32;
SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
return DAG.getBitcast(VT, Vec);
}
static SDValue getExtendInVec(bool Signed, const SDLoc &DL, EVT VT, SDValue In,
SelectionDAG &DAG) {
EVT InVT = In.getValueType();
assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
// For 256-bit vectors, we only need the lower (128-bit) input half.
// For 512-bit vectors, we only need the lower input half or quarter.
if (InVT.getSizeInBits() > 128) {
assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
"Expected VTs to be the same size!");
unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
In = extractSubVector(In, 0, DAG, DL,
std::max(128U, VT.getSizeInBits() / Scale));
InVT = In.getValueType();
}
if (VT.getVectorNumElements() == InVT.getVectorNumElements())
return DAG.getNode(Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
DL, VT, In);
return DAG.getNode(Signed ? ISD::SIGN_EXTEND_VECTOR_INREG
: ISD::ZERO_EXTEND_VECTOR_INREG,
DL, VT, In);
}
/// Returns a vector_shuffle node for an unpackl operation.
static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
SDValue V1, SDValue V2) {
SmallVector<int, 8> Mask;
createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
/// Returns a vector_shuffle node for an unpackh operation.
static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
SDValue V1, SDValue V2) {
SmallVector<int, 8> Mask;
createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
/// Return a vector_shuffle of the specified vector of zero or undef vector.
/// This produces a shuffle where the low element of V2 is swizzled into the
/// zero/undef vector, landing at element Idx.
/// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
bool IsZero,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = V2.getSimpleValueType();
SDValue V1 = IsZero
? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
int NumElems = VT.getVectorNumElements();
SmallVector<int, 16> MaskVec(NumElems);
for (int i = 0; i != NumElems; ++i)
// If this is the insertion idx, put the low elt of V2 here.
MaskVec[i] = (i == Idx) ? NumElems : i;
return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
}
// Peek through EXTRACT_SUBVECTORs - typically used for AVX1 256-bit intops.
static SDValue peekThroughEXTRACT_SUBVECTORs(SDValue V) {
while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
V = V.getOperand(0);
return V;
}
static const Constant *getTargetConstantFromNode(SDValue Op) {
Op = peekThroughBitcasts(Op);
auto *Load = dyn_cast<LoadSDNode>(Op);
if (!Load)
return nullptr;
SDValue Ptr = Load->getBasePtr();
if (Ptr->getOpcode() == X86ISD::Wrapper ||
Ptr->getOpcode() == X86ISD::WrapperRIP)
Ptr = Ptr->getOperand(0);
auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
return nullptr;
return CNode->getConstVal();
}
// Extract raw constant bits from constant pools.
static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
APInt &UndefElts,
SmallVectorImpl<APInt> &EltBits,
bool AllowWholeUndefs = true,
bool AllowPartialUndefs = true) {
assert(EltBits.empty() && "Expected an empty EltBits vector");
Op = peekThroughBitcasts(Op);
EVT VT = Op.getValueType();
unsigned SizeInBits = VT.getSizeInBits();
assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
unsigned NumElts = SizeInBits / EltSizeInBits;
// Bitcast a source array of element bits to the target size.
auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
unsigned NumSrcElts = UndefSrcElts.getBitWidth();
unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
"Constant bit sizes don't match");
// Don't split if we don't allow undef bits.
bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
if (UndefSrcElts.getBoolValue() && !AllowUndefs)
return false;
// If we're already the right size, don't bother bitcasting.
if (NumSrcElts == NumElts) {
UndefElts = UndefSrcElts;
EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
return true;
}
// Extract all the undef/constant element data and pack into single bitsets.
APInt UndefBits(SizeInBits, 0);
APInt MaskBits(SizeInBits, 0);
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned BitOffset = i * SrcEltSizeInBits;
if (UndefSrcElts[i])
UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
MaskBits.insertBits(SrcEltBits[i], BitOffset);
}
// Split the undef/constant single bitset data into the target elements.
UndefElts = APInt(NumElts, 0);
EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
for (unsigned i = 0; i != NumElts; ++i) {
unsigned BitOffset = i * EltSizeInBits;
APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
// Only treat an element as UNDEF if all bits are UNDEF.
if (UndefEltBits.isAllOnesValue()) {
if (!AllowWholeUndefs)
return false;
UndefElts.setBit(i);
continue;
}
// If only some bits are UNDEF then treat them as zero (or bail if not
// supported).
if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
return false;
APInt Bits = MaskBits.extractBits(EltSizeInBits, BitOffset);
EltBits[i] = Bits.getZExtValue();
}
return true;
};
// Collect constant bits and insert into mask/undef bit masks.
auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
unsigned UndefBitIndex) {
if (!Cst)
return false;
if (isa<UndefValue>(Cst)) {
Undefs.setBit(UndefBitIndex);
return true;
}
if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
Mask = CInt->getValue();
return true;
}
if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
Mask = CFP->getValueAPF().bitcastToAPInt();
return true;
}
return false;
};
// Handle UNDEFs.
if (Op.isUndef()) {
APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract scalar constant bits.
if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
APInt UndefSrcElts = APInt::getNullValue(1);
SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
return CastBitData(UndefSrcElts, SrcEltBits);
}
if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
APInt UndefSrcElts = APInt::getNullValue(1);
APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
SmallVector<APInt, 64> SrcEltBits(1, RawBits);
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract constant bits from build vector.
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
const SDValue &Src = Op.getOperand(i);
if (Src.isUndef()) {
UndefSrcElts.setBit(i);
continue;
}
auto *Cst = cast<ConstantSDNode>(Src);
SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
}
return CastBitData(UndefSrcElts, SrcEltBits);
}
if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
const SDValue &Src = Op.getOperand(i);
if (Src.isUndef()) {
UndefSrcElts.setBit(i);
continue;
}
auto *Cst = cast<ConstantFPSDNode>(Src);
APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
}
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract constant bits from constant pool vector.
if (auto *Cst = getTargetConstantFromNode(Op)) {
Type *CstTy = Cst->getType();
unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
return false;
unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
for (unsigned i = 0; i != NumSrcElts; ++i)
if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
UndefSrcElts, i))
return false;
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract constant bits from a broadcasted constant pool scalar.
if (Op.getOpcode() == X86ISD::VBROADCAST &&
EltSizeInBits <= VT.getScalarSizeInBits()) {
if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
if (UndefSrcElts[0])
UndefSrcElts.setBits(0, NumSrcElts);
SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
return CastBitData(UndefSrcElts, SrcEltBits);
}
}
}
// Extract a rematerialized scalar constant insertion.
if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits;
auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract constant bits from a subvector's source.
if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
isa<ConstantSDNode>(Op.getOperand(1))) {
// TODO - support extract_subvector through bitcasts.
if (EltSizeInBits != VT.getScalarSizeInBits())
return false;
if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
UndefElts, EltBits, AllowWholeUndefs,
AllowPartialUndefs)) {
EVT SrcVT = Op.getOperand(0).getValueType();
unsigned NumSrcElts = SrcVT.getVectorNumElements();
unsigned NumSubElts = VT.getVectorNumElements();
unsigned BaseIdx = Op.getConstantOperandVal(1);
UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
if ((BaseIdx + NumSubElts) != NumSrcElts)
EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
if (BaseIdx != 0)
EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
return true;
}
}
// Extract constant bits from shuffle node sources.
if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
// TODO - support shuffle through bitcasts.
if (EltSizeInBits != VT.getScalarSizeInBits())
return false;
ArrayRef<int> Mask = SVN->getMask();
if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
llvm::any_of(Mask, [](int M) { return M < 0; }))
return false;
APInt UndefElts0, UndefElts1;
SmallVector<APInt, 32> EltBits0, EltBits1;
if (isAnyInRange(Mask, 0, NumElts) &&
!getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
UndefElts0, EltBits0, AllowWholeUndefs,
AllowPartialUndefs))
return false;
if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
!getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
UndefElts1, EltBits1, AllowWholeUndefs,
AllowPartialUndefs))
return false;
UndefElts = APInt::getNullValue(NumElts);
for (int i = 0; i != (int)NumElts; ++i) {
int M = Mask[i];
if (M < 0) {
UndefElts.setBit(i);
EltBits.push_back(APInt::getNullValue(EltSizeInBits));
} else if (M < (int)NumElts) {
if (UndefElts0[M])
UndefElts.setBit(i);
EltBits.push_back(EltBits0[M]);
} else {
if (UndefElts1[M - NumElts])
UndefElts.setBit(i);
EltBits.push_back(EltBits1[M - NumElts]);
}
}
return true;
}
return false;
}
static bool isConstantSplat(SDValue Op, APInt &SplatVal) {
APInt UndefElts;
SmallVector<APInt, 16> EltBits;
if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
UndefElts, EltBits, true, false)) {
int SplatIndex = -1;
for (int i = 0, e = EltBits.size(); i != e; ++i) {
if (UndefElts[i])
continue;
if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
SplatIndex = -1;
break;
}
SplatIndex = i;
}
if (0 <= SplatIndex) {
SplatVal = EltBits[SplatIndex];
return true;
}
}
return false;
}
static bool getTargetShuffleMaskIndices(SDValue MaskNode,
unsigned MaskEltSizeInBits,
SmallVectorImpl<uint64_t> &RawMask,
APInt &UndefElts) {
// Extract the raw target constant bits.
SmallVector<APInt, 64> EltBits;
if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
EltBits, /* AllowWholeUndefs */ true,
/* AllowPartialUndefs */ false))
return false;
// Insert the extracted elements into the mask.
for (APInt Elt : EltBits)
RawMask.push_back(Elt.getZExtValue());
return true;
}
/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
/// Note: This ignores saturation, so inputs must be checked first.
static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
bool Unary) {
assert(Mask.empty() && "Expected an empty shuffle mask vector");
unsigned NumElts = VT.getVectorNumElements();
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
unsigned Offset = Unary ? 0 : NumElts;
for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
Mask.push_back(Elt + (Lane * NumEltsPerLane));
for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
}
}
// Split the demanded elts of a PACKSS/PACKUS node between its operands.
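// Within each 128-bit lane, the low half of the result is packed from the LHS
// and the high half from the RHS.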
static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
APInt &DemandedLHS, APInt &DemandedRHS) {
int NumLanes = VT.getSizeInBits() / 128;
int NumElts = DemandedElts.getBitWidth();
int NumInnerElts = NumElts / 2;
int NumEltsPerLane = NumElts / NumLanes;
int NumInnerEltsPerLane = NumInnerElts / NumLanes;
DemandedLHS = APInt::getNullValue(NumInnerElts);
DemandedRHS = APInt::getNullValue(NumInnerElts);
// Map DemandedElts to the packed operands.
for (int Lane = 0; Lane != NumLanes; ++Lane) {
for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
int OuterIdx = (Lane * NumEltsPerLane) + Elt;
int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
if (DemandedElts[OuterIdx])
DemandedLHS.setBit(InnerIdx);
if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
DemandedRHS.setBit(InnerIdx);
}
}
}
/// Calculates the shuffle mask corresponding to the target-specific opcode.
/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
/// operands in \p Ops, and returns true.
/// Sets \p IsUnary to true if only one source is used. Note that this will set
/// IsUnary for shuffles which use a single input multiple times, and in those
/// cases it will adjust the mask to only have indices within that single input.
/// It is an error to call this with non-empty Mask/Ops vectors.
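/// e.g. For X86ISD::UNPCKL with VT v4i32 this returns Mask = <0,4,1,5> and both
/// operands in Ops; if both inputs are the same node the mask is remapped to
/// <0,0,1,1>.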
static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
SmallVectorImpl<SDValue> &Ops,
SmallVectorImpl<int> &Mask, bool &IsUnary) {
unsigned NumElems = VT.getVectorNumElements();
unsigned MaskEltSize = VT.getScalarSizeInBits();
SmallVector<uint64_t, 32> RawMask;
APInt RawUndefs;
SDValue ImmN;
assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
IsUnary = false;
bool IsFakeUnary = false;
switch (N->getOpcode()) {
case X86ISD::BLENDI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::SHUFP:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodeSHUFPMask(NumElems, MaskEltSize,
cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::INSERTPS:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::EXTRQI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
if (isa<ConstantSDNode>(N->getOperand(1)) &&
isa<ConstantSDNode>(N->getOperand(2))) {
int BitLen = N->getConstantOperandVal(1);
int BitIdx = N->getConstantOperandVal(2);
DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
IsUnary = true;
}
break;
case X86ISD::INSERTQI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
if (isa<ConstantSDNode>(N->getOperand(2)) &&
isa<ConstantSDNode>(N->getOperand(3))) {
int BitLen = N->getConstantOperandVal(2);
int BitIdx = N->getConstantOperandVal(3);
DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
}
break;
case X86ISD::UNPCKH:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::UNPCKL:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::MOVHLPS:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeMOVHLPSMask(NumElems, Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::MOVLHPS:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeMOVLHPSMask(NumElems, Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::PALIGNR:
assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
Ops.push_back(N->getOperand(1));
Ops.push_back(N->getOperand(0));
break;
case X86ISD::VSHLDQ:
assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = true;
break;
case X86ISD::VSRLDQ:
assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = true;
break;
case X86ISD::PSHUFD:
case X86ISD::VPERMILPI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodePSHUFMask(NumElems, MaskEltSize,
cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
case X86ISD::PSHUFHW:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = true;
break;
case X86ISD::PSHUFLW:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = true;
break;
case X86ISD::VZEXT_MOVL:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
DecodeZeroMoveLowMask(NumElems, Mask);
IsUnary = true;
break;
case X86ISD::VBROADCAST: {
SDValue N0 = N->getOperand(0);
// See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
// add the pre-extracted value to the Ops vector.
if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N0.getOperand(0).getValueType() == VT &&
N0.getConstantOperandVal(1) == 0)
Ops.push_back(N0.getOperand(0));
// We only decode broadcasts of same-sized vectors, unless the broadcast
// came from an extract from the original width. If we found one, we
// pushed it onto the Ops vector above.
if (N0.getValueType() == VT || !Ops.empty()) {
DecodeVectorBroadcast(NumElems, Mask);
IsUnary = true;
break;
}
return false;
}
case X86ISD::VPERMILPV: {
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
IsUnary = true;
SDValue MaskNode = N->getOperand(1);
if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
RawUndefs)) {
DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
break;
}
return false;
}
case X86ISD::PSHUFB: {
assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
IsUnary = true;
SDValue MaskNode = N->getOperand(1);
if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
DecodePSHUFBMask(RawMask, RawUndefs, Mask);
break;
}
return false;
}
case X86ISD::VPERMI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
case X86ISD::MOVSS:
case X86ISD::MOVSD:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
break;
case X86ISD::VPERM2X128:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::SHUF128:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::MOVSLDUP:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
DecodeMOVSLDUPMask(NumElems, Mask);
IsUnary = true;
break;
case X86ISD::MOVSHDUP:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
DecodeMOVSHDUPMask(NumElems, Mask);
IsUnary = true;
break;
case X86ISD::MOVDDUP:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
DecodeMOVDDUPMask(NumElems, Mask);
IsUnary = true;
break;
case X86ISD::VPERMIL2: {
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
SDValue MaskNode = N->getOperand(2);
SDValue CtrlNode = N->getOperand(3);
if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
unsigned CtrlImm = CtrlOp->getZExtValue();
if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
RawUndefs)) {
DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
Mask);
break;
}
}
return false;
}
case X86ISD::VPPERM: {
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
SDValue MaskNode = N->getOperand(2);
if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
DecodeVPPERMMask(RawMask, RawUndefs, Mask);
break;
}
return false;
}
case X86ISD::VPERMV: {
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
IsUnary = true;
// Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
Ops.push_back(N->getOperand(1));
SDValue MaskNode = N->getOperand(0);
if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
RawUndefs)) {
DecodeVPERMVMask(RawMask, RawUndefs, Mask);
break;
}
return false;
}
case X86ISD::VPERMV3: {
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
// Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
Ops.push_back(N->getOperand(0));
Ops.push_back(N->getOperand(2));
SDValue MaskNode = N->getOperand(1);
if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
RawUndefs)) {
DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
break;
}
return false;
}
default: llvm_unreachable("unknown target shuffle node");
}
// Empty mask indicates the decode failed.
if (Mask.empty())
return false;
// Check if we're getting a shuffle mask with zero'd elements.
if (!AllowSentinelZero)
if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
return false;
// If we have a fake unary shuffle, the shuffle mask is spread across two
// inputs that are actually the same node. Re-map the mask to always point
// into the first input.
if (IsFakeUnary)
for (int &M : Mask)
if (M >= (int)Mask.size())
M -= Mask.size();
// If we didn't already add operands in the opcode-specific code, default to
// adding 1 or 2 operands starting at 0.
if (Ops.empty()) {
Ops.push_back(N->getOperand(0));
if (!IsUnary || IsFakeUnary)
Ops.push_back(N->getOperand(1));
}
return true;
}
/// Check a target shuffle mask's inputs to see if we can set any values to
/// SM_SentinelZero - this is for elements that are known to be zero
/// (not just zeroable) from their inputs.
/// Returns true if the target shuffle mask was decoded.
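/// e.g. A mask index that references a constant-zero shuffle input is replaced
/// with SM_SentinelZero, and one that references an undef input with
/// SM_SentinelUndef.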
static bool setTargetShuffleZeroElements(SDValue N,
SmallVectorImpl<int> &Mask,
SmallVectorImpl<SDValue> &Ops) {
bool IsUnary;
if (!isTargetShuffle(N.getOpcode()))
return false;
MVT VT = N.getSimpleValueType();
if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
return false;
SDValue V1 = Ops[0];
SDValue V2 = IsUnary ? V1 : Ops[1];
V1 = peekThroughBitcasts(V1);
V2 = peekThroughBitcasts(V2);
assert((VT.getSizeInBits() % Mask.size()) == 0 &&
"Illegal split of shuffle value type");
unsigned EltSizeInBits = VT.getSizeInBits() / Mask.size();
// Extract known constant input data.
APInt UndefSrcElts[2];
SmallVector<APInt, 32> SrcEltBits[2];
bool IsSrcConstant[2] = {
getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
SrcEltBits[0], true, false),
getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
SrcEltBits[1], true, false)};
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
int M = Mask[i];
// Already decoded as SM_SentinelZero / SM_SentinelUndef.
if (M < 0)
continue;
// Determine shuffle input and normalize the mask.
unsigned SrcIdx = M / Size;
SDValue V = M < Size ? V1 : V2;
M %= Size;
// We are referencing an UNDEF input.
if (V.isUndef()) {
Mask[i] = SM_SentinelUndef;
continue;
}
// SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
// TODO: We currently only set UNDEF for integer types - floats use the same
// registers as vectors and many of the scalar folded loads rely on the
// SCALAR_TO_VECTOR pattern.
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
(Size % V.getValueType().getVectorNumElements()) == 0) {
int Scale = Size / V.getValueType().getVectorNumElements();
int Idx = M / Scale;
if (Idx != 0 && !VT.isFloatingPoint())
Mask[i] = SM_SentinelUndef;
else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
Mask[i] = SM_SentinelZero;
continue;
}
// Attempt to extract from the source's constant bits.
if (IsSrcConstant[SrcIdx]) {
if (UndefSrcElts[SrcIdx][M])
Mask[i] = SM_SentinelUndef;
else if (SrcEltBits[SrcIdx][M] == 0)
Mask[i] = SM_SentinelZero;
}
}
assert(VT.getVectorNumElements() == Mask.size() &&
"Different mask size from vector size!");
return true;
}
// Forward declaration (for getFauxShuffleMask recursive check).
static bool resolveTargetShuffleInputs(SDValue Op,
SmallVectorImpl<SDValue> &Inputs,
SmallVectorImpl<int> &Mask,
const SelectionDAG &DAG);
// Attempt to decode ops that could be represented as a shuffle mask.
// The decoded shuffle mask may contain a different number of elements than the
// destination value type.
static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
SmallVectorImpl<SDValue> &Ops,
const SelectionDAG &DAG) {
Mask.clear();
Ops.clear();
MVT VT = N.getSimpleValueType();
unsigned NumElts = VT.getVectorNumElements();
unsigned NumSizeInBits = VT.getSizeInBits();
unsigned NumBitsPerElt = VT.getScalarSizeInBits();
assert((NumBitsPerElt % 8) == 0 && (NumSizeInBits % 8) == 0 &&
"Expected byte aligned value types");
unsigned Opcode = N.getOpcode();
switch (Opcode) {
case ISD::VECTOR_SHUFFLE: {
// Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
Mask.append(ShuffleMask.begin(), ShuffleMask.end());
Ops.push_back(N.getOperand(0));
Ops.push_back(N.getOperand(1));
return true;
}
return false;
}
case ISD::AND:
case X86ISD::ANDNP: {
// Attempt to decode as a per-byte mask.
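// e.g. (and X, <0x00,0xFF,0xFF,0x00,...>) keeps the bytes where the constant
// is 0xFF and zeroes the rest, giving the per-byte mask <Z,1,2,Z,...> over X.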
APInt UndefElts;
SmallVector<APInt, 32> EltBits;
SDValue N0 = N.getOperand(0);
SDValue N1 = N.getOperand(1);
bool IsAndN = (X86ISD::ANDNP == Opcode);
uint64_t ZeroMask = IsAndN ? 255 : 0;
if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
return false;
for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
if (UndefElts[i]) {
Mask.push_back(SM_SentinelUndef);
continue;
}
uint64_t ByteBits = EltBits[i].getZExtValue();
if (ByteBits != 0 && ByteBits != 255)
return false;
Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
}
Ops.push_back(IsAndN ? N1 : N0);
return true;
}
case ISD::OR: {
// Handle the OR(SHUFFLE,SHUFFLE) case where, for each element, one source is
// zero and the other provides a valid shuffle index.
SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
return false;
SmallVector<int, 64> SrcMask0, SrcMask1;
SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
if (!resolveTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG) ||
!resolveTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG))
return false;
int MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
SmallVector<int, 64> Mask0, Mask1;
scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
for (int i = 0; i != MaskSize; ++i) {
if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
Mask.push_back(SM_SentinelUndef);
else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
Mask.push_back(SM_SentinelZero);
else if (Mask1[i] == SM_SentinelZero)
Mask.push_back(Mask0[i]);
else if (Mask0[i] == SM_SentinelZero)
Mask.push_back(Mask1[i] + (MaskSize * SrcInputs0.size()));
else
return false;
}
for (SDValue &Op : SrcInputs0)
Ops.push_back(Op);
for (SDValue &Op : SrcInputs1)
Ops.push_back(Op);
return true;
}
case ISD::INSERT_SUBVECTOR: {
// Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(EXTRACT_SUBVECTOR(SRC1))) where
// SRC0/SRC1 are both of the same value type VT.
// TODO - add peekThroughOneUseBitcasts support.
SDValue Src = N.getOperand(0);
SDValue Sub = N.getOperand(1);
EVT SubVT = Sub.getValueType();
unsigned NumSubElts = SubVT.getVectorNumElements();
if (!isa<ConstantSDNode>(N.getOperand(2)) ||
!N->isOnlyUserOf(Sub.getNode()))
return false;
SmallVector<int, 64> SubMask;
SmallVector<SDValue, 2> SubInputs;
if (!resolveTargetShuffleInputs(Sub, SubInputs, SubMask, DAG) ||
SubMask.size() != NumSubElts)
return false;
Ops.push_back(Src);
for (SDValue &SubInput : SubInputs) {
if (SubInput.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
SubInput.getOperand(0).getValueType() != VT ||
!isa<ConstantSDNode>(SubInput.getOperand(1)))
return false;
Ops.push_back(SubInput.getOperand(0));
}
int InsertIdx = N.getConstantOperandVal(2);
for (int i = 0; i != (int)NumElts; ++i)
Mask.push_back(i);
for (int i = 0; i != (int)NumSubElts; ++i) {
int M = SubMask[i];
if (0 <= M) {
int InputIdx = M / NumSubElts;
int ExtractIdx = SubInputs[InputIdx].getConstantOperandVal(1);
M = (NumElts * (1 + InputIdx)) + ExtractIdx + (M % NumSubElts);
}
Mask[i + InsertIdx] = M;
}
return true;
}
case ISD::SCALAR_TO_VECTOR: {
// Match against a scalar_to_vector of an extract from a vector;
// for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
SDValue N0 = N.getOperand(0);
SDValue SrcExtract;
if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
N0.getOperand(0).getValueType() == VT) ||
(N0.getOpcode() == X86ISD::PEXTRW &&
N0.getOperand(0).getValueType() == MVT::v8i16) ||
(N0.getOpcode() == X86ISD::PEXTRB &&
N0.getOperand(0).getValueType() == MVT::v16i8)) {
SrcExtract = N0;
}
if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
return false;
SDValue SrcVec = SrcExtract.getOperand(0);
EVT SrcVT = SrcVec.getValueType();
unsigned NumSrcElts = SrcVT.getVectorNumElements();
unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
if (NumSrcElts <= SrcIdx)
return false;
Ops.push_back(SrcVec);
Mask.push_back(SrcIdx);
Mask.append(NumZeros, SM_SentinelZero);
Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
return true;
}
case X86ISD::PINSRB:
case X86ISD::PINSRW: {
SDValue InVec = N.getOperand(0);
SDValue InScl = N.getOperand(1);
SDValue InIndex = N.getOperand(2);
if (!isa<ConstantSDNode>(InIndex) ||
cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
return false;
uint64_t InIdx = N.getConstantOperandVal(2);
// Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
if (X86::isZeroNode(InScl)) {
Ops.push_back(InVec);
for (unsigned i = 0; i != NumElts; ++i)
Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
return true;
}
// Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
// TODO: Expand this to support INSERT_VECTOR_ELT/etc.
unsigned ExOp =
(X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
if (InScl.getOpcode() != ExOp)
return false;
SDValue ExVec = InScl.getOperand(0);
SDValue ExIndex = InScl.getOperand(1);
if (!isa<ConstantSDNode>(ExIndex) ||
cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
return false;
uint64_t ExIdx = InScl.getConstantOperandVal(1);
Ops.push_back(InVec);
Ops.push_back(ExVec);
for (unsigned i = 0; i != NumElts; ++i)
Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
return true;
}
case X86ISD::PACKSS:
case X86ISD::PACKUS: {
SDValue N0 = N.getOperand(0);
SDValue N1 = N.getOperand(1);
assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
"Unexpected input value type");
// If we know input saturation won't happen we can treat this
// as a truncation shuffle.
if (Opcode == X86ISD::PACKSS) {
if ((!N0.isUndef() && DAG.ComputeNumSignBits(N0) <= NumBitsPerElt) ||
(!N1.isUndef() && DAG.ComputeNumSignBits(N1) <= NumBitsPerElt))
return false;
} else {
APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
if ((!N0.isUndef() && !DAG.MaskedValueIsZero(N0, ZeroMask)) ||
(!N1.isUndef() && !DAG.MaskedValueIsZero(N1, ZeroMask)))
return false;
}
bool IsUnary = (N0 == N1);
Ops.push_back(N0);
if (!IsUnary)
Ops.push_back(N1);
createPackShuffleMask(VT, Mask, IsUnary);
return true;
}
case X86ISD::VSHLI:
case X86ISD::VSRLI: {
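// Decode a whole-byte shift as a byte shuffle, e.g. (X86ISD::VSRLI v2i64 X, 16)
// becomes the v16i8 mask <2,3,4,5,6,7,Z,Z,10,11,12,13,14,15,Z,Z> (Z = zero).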
uint64_t ShiftVal = N.getConstantOperandVal(1);
// Out of range bit shifts are guaranteed to be zero.
if (NumBitsPerElt <= ShiftVal) {
Mask.append(NumElts, SM_SentinelZero);
return true;
}
// We can only decode 'whole byte' bit shifts as shuffles.
if ((ShiftVal % 8) != 0)
break;
uint64_t ByteShift = ShiftVal / 8;
unsigned NumBytes = NumSizeInBits / 8;
unsigned NumBytesPerElt = NumBitsPerElt / 8;
Ops.push_back(N.getOperand(0));
// Clear mask to all zeros and insert the shifted byte indices.
Mask.append(NumBytes, SM_SentinelZero);
if (X86ISD::VSHLI == Opcode) {
for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
Mask[i + j] = i + j - ByteShift;
} else {
for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
Mask[i + j - ByteShift] = i + j;
}
return true;
}
case ISD::ZERO_EXTEND_VECTOR_INREG:
case ISD::ZERO_EXTEND: {
// TODO - add support for VPMOVZX with smaller input vector types.
SDValue Src = N.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
if (NumSizeInBits != SrcVT.getSizeInBits())
break;
DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
Mask);
Ops.push_back(Src);
return true;
}
}
return false;
}
/// Removes unused shuffle source inputs and adjusts the shuffle mask accordingly.
static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
SmallVectorImpl<int> &Mask) {
int MaskWidth = Mask.size();
SmallVector<SDValue, 16> UsedInputs;
for (int i = 0, e = Inputs.size(); i < e; ++i) {
int lo = UsedInputs.size() * MaskWidth;
int hi = lo + MaskWidth;
// Strip UNDEF input usage.
if (Inputs[i].isUndef())
for (int &M : Mask)
if ((lo <= M) && (M < hi))
M = SM_SentinelUndef;
// Check for unused inputs.
if (any_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
UsedInputs.push_back(Inputs[i]);
continue;
}
for (int &M : Mask)
if (lo <= M)
M -= MaskWidth;
}
Inputs = UsedInputs;
}
/// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's inputs
/// and set the SM_SentinelUndef and SM_SentinelZero values. Then check the
/// remaining input indices in case we now have a unary shuffle and adjust the
/// inputs accordingly.
/// Returns true if the target shuffle mask was decoded.
static bool resolveTargetShuffleInputs(SDValue Op,
SmallVectorImpl<SDValue> &Inputs,
SmallVectorImpl<int> &Mask,
const SelectionDAG &DAG) {
if (!setTargetShuffleZeroElements(Op, Mask, Inputs))
if (!getFauxShuffleMask(Op, Mask, Inputs, DAG))
return false;
resolveTargetShuffleInputsAndMask(Inputs, Mask);
return true;
}
/// Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
unsigned Depth) {
if (Depth == 6)
return SDValue(); // Limit search depth.
SDValue V = SDValue(N, 0);
EVT VT = V.getValueType();
unsigned Opcode = V.getOpcode();
// Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
int Elt = SV->getMaskElt(Index);
if (Elt < 0)
return DAG.getUNDEF(VT.getVectorElementType());
unsigned NumElems = VT.getVectorNumElements();
SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
: SV->getOperand(1);
return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
}
// Recurse into target specific vector shuffles to find scalars.
if (isTargetShuffle(Opcode)) {
MVT ShufVT = V.getSimpleValueType();
MVT ShufSVT = ShufVT.getVectorElementType();
int NumElems = (int)ShufVT.getVectorNumElements();
SmallVector<int, 16> ShuffleMask;
SmallVector<SDValue, 16> ShuffleOps;
bool IsUnary;
if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
return SDValue();
int Elt = ShuffleMask[Index];
if (Elt == SM_SentinelZero)
return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
: DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
if (Elt == SM_SentinelUndef)
return DAG.getUNDEF(ShufSVT);
assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
Depth+1);
}
// Actual nodes that may contain scalar elements
if (Opcode == ISD::BITCAST) {
V = V.getOperand(0);
EVT SrcVT = V.getValueType();
unsigned NumElems = VT.getVectorNumElements();
if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
return SDValue();
}
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
return (Index == 0) ? V.getOperand(0)
: DAG.getUNDEF(VT.getVectorElementType());
if (V.getOpcode() == ISD::BUILD_VECTOR)
return V.getOperand(Index);
return SDValue();
}
// Use PINSRB/PINSRW/PINSRD to create a build vector.
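// e.g. For v8i16 <x,0,y,0,0,0,0,0> we start from a zero vector and insert x at
// index 0 and y at index 2 with PINSRW, getting the zero elements for free.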
static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
unsigned NumElts = VT.getVectorNumElements();
assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
"Illegal vector insertion");
SDLoc dl(Op);
SDValue V;
bool First = true;
for (unsigned i = 0; i < NumElts; ++i) {
bool IsNonZero = (NonZeros & (1 << i)) != 0;
if (!IsNonZero)
continue;
// If the build vector contains zeros or our first insertion is not the
// first index, then insert into a zero vector to break any register
// dependency; else use SCALAR_TO_VECTOR/VZEXT_MOVL.
if (First) {
First = false;
if (NumZero || 0 != i)
V = getZeroVector(VT, Subtarget, DAG, dl);
else {
assert(0 == i && "Expected insertion into zero-index");
V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
V = DAG.getBitcast(VT, V);
continue;
}
}
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
DAG.getIntPtrConstant(i, dl));
}
return V;
}
/// Custom lower build_vector of v16i8.
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (NumNonZero > 8 && !Subtarget.hasSSE41())
return SDValue();
// SSE4.1 - use PINSRB to insert each byte directly.
if (Subtarget.hasSSE41())
return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
Subtarget);
SDLoc dl(Op);
SDValue V;
bool First = true;
// Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
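// Each pair of bytes at indices 2*i and 2*i+1 is combined as
// (zext(byte 2*i+1) << 8) | zext(byte 2*i) and inserted as 16-bit element i.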
for (unsigned i = 0; i < 16; ++i) {
bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
if (ThisIsNonZero && First) {
if (NumZero)
V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
else
V = DAG.getUNDEF(MVT::v8i16);
First = false;
}
if ((i & 1) != 0) {
// FIXME: Investigate extending to i32 instead of just i16.
// FIXME: Investigate combining the first 4 bytes as a i32 instead.
SDValue ThisElt, LastElt;
bool LastIsNonZero = (NonZeros & (1 << (i - 1))) != 0;
if (LastIsNonZero) {
LastElt =
DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i - 1));
}
if (ThisIsNonZero) {
ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, ThisElt,
DAG.getConstant(8, dl, MVT::i8));
if (LastIsNonZero)
ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
} else
ThisElt = LastElt;
if (ThisElt) {
if (1 == i) {
V = NumZero ? DAG.getZExtOrTrunc(ThisElt, dl, MVT::i32)
: DAG.getAnyExtOrTrunc(ThisElt, dl, MVT::i32);
V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
V = DAG.getBitcast(MVT::v8i16, V);
} else {
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
DAG.getIntPtrConstant(i / 2, dl));
}
}
}
}
return DAG.getBitcast(MVT::v16i8, V);
}
/// Custom lower build_vector of v8i16.
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (NumNonZero > 4 && !Subtarget.hasSSE41())
return SDValue();
// Use PINSRW to insert each element directly.
return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
Subtarget);
}
/// Custom lower build_vector of v4i32 or v4f32.
static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// If this is a splat of a pair of elements, use MOVDDUP (unless the target
// has XOP; in that case defer lowering to potentially use VPERMIL2PS).
// Because we're creating a less complicated build vector here, we may enable
// further folding of the MOVDDUP via shuffle transforms.
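// e.g. (a, b, a, b) is built as (a, b, undef, undef), bitcast to v2f64,
// duplicated with MOVDDUP and bitcast back to the original type.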
if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
Op.getOperand(0) == Op.getOperand(2) &&
Op.getOperand(1) == Op.getOperand(3) &&
Op.getOperand(0) != Op.getOperand(1)) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
// Create a new build vector with the first 2 elements followed by undef
// padding, bitcast to v2f64, duplicate, and bitcast back.
SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
return DAG.getBitcast(VT, Dup);
}
// Find all zeroable elements.
std::bitset<4> Zeroable;
for (int i=0; i < 4; ++i) {
SDValue Elt = Op->getOperand(i);
Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
}
assert(Zeroable.size() - Zeroable.count() > 1 &&
"We expect at least two non-zero elements!");
// We only know how to deal with build_vector nodes where elements are either
// zeroable or extract_vector_elt with constant index.
SDValue FirstNonZero;
unsigned FirstNonZeroIdx;
for (unsigned i=0; i < 4; ++i) {
if (Zeroable[i])
continue;
SDValue Elt = Op->getOperand(i);
if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(Elt.getOperand(1)))
return SDValue();
// Make sure that this node is extracting from a 128-bit vector.
MVT VT = Elt.getOperand(0).getSimpleValueType();
if (!VT.is128BitVector())
return SDValue();
if (!FirstNonZero.getNode()) {
FirstNonZero = Elt;
FirstNonZeroIdx = i;
}
}
assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
SDValue V1 = FirstNonZero.getOperand(0);
MVT VT = V1.getSimpleValueType();
// See if this build_vector can be lowered as a blend with zero.
SDValue Elt;
unsigned EltMaskIdx, EltIdx;
int Mask[4];
for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
if (Zeroable[EltIdx]) {
// The zero vector will be on the right hand side.
Mask[EltIdx] = EltIdx+4;
continue;
}
Elt = Op->getOperand(EltIdx);
// By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
EltMaskIdx = Elt.getConstantOperandVal(1);
if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
break;
Mask[EltIdx] = EltIdx;
}
if (EltIdx == 4) {
// Let the shuffle legalizer deal with blend operations.
SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
if (V1.getSimpleValueType() != VT)
V1 = DAG.getBitcast(VT, V1);
return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, Mask);
}
// See if we can lower this build_vector to a INSERTPS.
if (!Subtarget.hasSSE41())
return SDValue();
SDValue V2 = Elt.getOperand(0);
if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
V1 = SDValue();
bool CanFold = true;
for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
if (Zeroable[i])
continue;
SDValue Current = Op->getOperand(i);
SDValue SrcVector = Current->getOperand(0);
if (!V1.getNode())
V1 = SrcVector;
CanFold = (SrcVector == V1) && (Current.getConstantOperandVal(1) == i);
}
if (!CanFold)
return SDValue();
assert(V1.getNode() && "Expected at least two non-zero elements!");
if (V1.getSimpleValueType() != MVT::v4f32)
V1 = DAG.getBitcast(MVT::v4f32, V1);
if (V2.getSimpleValueType() != MVT::v4f32)
V2 = DAG.getBitcast(MVT::v4f32, V2);
// Ok, we can emit an INSERTPS instruction.
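// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4] and the zero mask in bits [3:0].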
unsigned ZMask = Zeroable.to_ulong();
unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
SDLoc DL(Op);
SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
DAG.getIntPtrConstant(InsertPSMask, DL));
return DAG.getBitcast(VT, Result);
}
/// Return a vector logical shift node.
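/// e.g. A 64-bit left shift is lowered as VSHLDQ (PSLLDQ) of the v16i8 bitcast
/// by 8 bytes, then bitcast back to VT.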
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
SelectionDAG &DAG, const TargetLowering &TLI,
const SDLoc &dl) {
assert(VT.is128BitVector() && "Unknown type for VShift");
MVT ShVT = MVT::v16i8;
unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
SrcOp = DAG.getBitcast(ShVT, SrcOp);
assert(NumBits % 8 == 0 && "Only support byte sized shifts");
SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, MVT::i8);
return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
}
static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
SelectionDAG &DAG) {
// Check if the scalar load can be widened into a vector load, and if the
// address is "base + cst", see if the cst can be "absorbed" into the
// shuffle mask.
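// e.g. A 4-byte load from (FrameIndex + 8) can become a 16-byte load of the
// whole slot and a splat of element 2 (8 >> 2), assuming the slot can be
// aligned to 16 bytes.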
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
SDValue Ptr = LD->getBasePtr();
if (!ISD::isNormalLoad(LD) || LD->isVolatile())
return SDValue();
EVT PVT = LD->getValueType(0);
if (PVT != MVT::i32 && PVT != MVT::f32)
return SDValue();
int FI = -1;
int64_t Offset = 0;
if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
FI = FINode->getIndex();
Offset = 0;
} else if (DAG.isBaseWithConstantOffset(Ptr) &&
isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
Offset = Ptr.getConstantOperandVal(1);
Ptr = Ptr.getOperand(0);
} else {
return SDValue();
}
// FIXME: 256-bit vector instructions don't require a strict alignment,
// improve this code to support it better.
unsigned RequiredAlign = VT.getSizeInBits()/8;
SDValue Chain = LD->getChain();
// Make sure the stack object alignment is at least 16 or 32.
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
if (MFI.isFixedObjectIndex(FI)) {
// Can't change the alignment. FIXME: It's possible to compute the exact
// stack offset and reference FI + adjusted offset instead, if someone
// *really* cares about this; that's the way to implement it.
return SDValue();
} else {
MFI.setObjectAlignment(FI, RequiredAlign);
}
}
// (Offset % 16 or 32) must be a multiple of 4. The address is then
// Ptr + (Offset & ~15).
if (Offset < 0)
return SDValue();
if ((Offset % RequiredAlign) & 3)
return SDValue();
int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
if (StartOffset) {
SDLoc DL(Ptr);
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
}
int EltNo = (Offset - StartOffset) >> 2;
unsigned NumElems = VT.getVectorNumElements();
EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
LD->getPointerInfo().getWithOffset(StartOffset));
SmallVector<int, 8> Mask(NumElems, EltNo);
return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
}
return SDValue();
}
/// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
/// elements can be replaced by a single large load which has the same value as
/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
///
/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
const SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
bool isAfterLegalize) {
unsigned NumElems = Elts.size();
int LastLoadedElt = -1;
SmallBitVector LoadMask(NumElems, false);
SmallBitVector ZeroMask(NumElems, false);
SmallBitVector UndefMask(NumElems, false);
// For each element in the initializer, see if we've found a load, zero or an
// undef.
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = peekThroughBitcasts(Elts[i]);
if (!Elt.getNode())
return SDValue();
if (Elt.isUndef())
UndefMask[i] = true;
else if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode()))
ZeroMask[i] = true;
else if (ISD::isNON_EXTLoad(Elt.getNode())) {
LoadMask[i] = true;
LastLoadedElt = i;
// Each loaded element must be the correct fractional portion of the
// requested vector load.
if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits())
return SDValue();
} else
return SDValue();
}
assert((ZeroMask | UndefMask | LoadMask).count() == NumElems &&
"Incomplete element masks");
// Handle Special Cases - all undef or undef/zero.
if (UndefMask.count() == NumElems)
return DAG.getUNDEF(VT);
// FIXME: Should we return this as a BUILD_VECTOR instead?
if ((ZeroMask | UndefMask).count() == NumElems)
return VT.isInteger() ? DAG.getConstant(0, DL, VT)
: DAG.getConstantFP(0.0, DL, VT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
int FirstLoadedElt = LoadMask.find_first();
SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
LoadSDNode *LDBase = cast<LoadSDNode>(EltBase);
EVT LDBaseVT = EltBase.getValueType();
// Consecutive loads can contain UNDEFS but not ZERO elements.
// Consecutive loads with UNDEF and ZERO elements require an
// additional shuffle stage to clear the ZERO elements.
bool IsConsecutiveLoad = true;
bool IsConsecutiveLoadWithZeros = true;
for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
if (LoadMask[i]) {
SDValue Elt = peekThroughBitcasts(Elts[i]);
LoadSDNode *LD = cast<LoadSDNode>(Elt);
if (!DAG.areNonVolatileConsecutiveLoads(
LD, LDBase, Elt.getValueType().getStoreSizeInBits() / 8,
i - FirstLoadedElt)) {
IsConsecutiveLoad = false;
IsConsecutiveLoadWithZeros = false;
break;
}
} else if (ZeroMask[i]) {
IsConsecutiveLoad = false;
}
}
SmallVector<LoadSDNode *, 8> Loads;
for (int i = FirstLoadedElt; i <= LastLoadedElt; ++i)
if (LoadMask[i])
Loads.push_back(cast<LoadSDNode>(peekThroughBitcasts(Elts[i])));
auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
auto MMOFlags = LDBase->getMemOperand()->getFlags();
assert(!(MMOFlags & MachineMemOperand::MOVolatile) &&
"Cannot merge volatile loads.");
SDValue NewLd =
DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
for (auto *LD : Loads)
DAG.makeEquivalentMemoryOrdering(LD, NewLd);
return NewLd;
};
// LOAD - all consecutive load/undefs (must start/end with a load).
// If we have found an entire vector of loads and undefs, then return a large
// load of the entire vector width starting at the base pointer.
// If the vector contains zeros, then attempt to shuffle those elements.
if (FirstLoadedElt == 0 && LastLoadedElt == (int)(NumElems - 1) &&
(IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
assert(LDBase && "Did not find base load for merging consecutive loads");
EVT EltVT = LDBase->getValueType(0);
// Ensure that the input vector size for the merged loads matches the
// cumulative size of the input elements.
if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
return SDValue();
if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
return SDValue();
// Don't create 256-bit non-temporal aligned loads without AVX2 as these
// will lower to regular temporal loads and use the cache.
if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
VT.is256BitVector() && !Subtarget.hasInt256())
return SDValue();
if (IsConsecutiveLoad)
return CreateLoad(VT, LDBase);
// IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
// vector and a zero vector to clear out the zero elements.
if (!isAfterLegalize && NumElems == VT.getVectorNumElements()) {
SmallVector<int, 4> ClearMask(NumElems, -1);
for (unsigned i = 0; i < NumElems; ++i) {
if (ZeroMask[i])
ClearMask[i] = i + NumElems;
else if (LoadMask[i])
ClearMask[i] = i;
}
SDValue V = CreateLoad(VT, LDBase);
SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
: DAG.getConstantFP(0.0, DL, VT);
return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
}
}
int LoadSize =
(1 + LastLoadedElt - FirstLoadedElt) * LDBaseVT.getStoreSizeInBits();
// VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
(LoadSize == 32 || LoadSize == 64) &&
((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSize)
: MVT::getIntegerVT(LoadSize);
MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSize);
if (TLI.isTypeLegal(VecVT)) {
SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
SDValue ResNode =
DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
LDBase->getPointerInfo(),
LDBase->getAlignment(),
MachineMemOperand::MOLoad);
for (auto *LD : Loads)
DAG.makeEquivalentMemoryOrdering(LD, ResNode);
return DAG.getBitcast(VT, ResNode);
}
}
return SDValue();
}
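// Split SplatValue (SplatBitSize bits wide) into VT-scalar-sized pieces and
// return them as a ConstantVector, used to materialize wide repeated constants
// from the constant pool.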
static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
unsigned SplatBitSize, LLVMContext &C) {
unsigned ScalarSize = VT.getScalarSizeInBits();
unsigned NumElm = SplatBitSize / ScalarSize;
SmallVector<Constant *, 32> ConstantVec;
for (unsigned i = 0; i < NumElm; i++) {
APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
Constant *Const;
if (VT.isFloatingPoint()) {
if (ScalarSize == 32) {
Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
} else {
assert(ScalarSize == 64 && "Unsupported floating point scalar size");
Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
}
} else
Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
ConstantVec.push_back(Const);
}
return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
}
static bool isUseOfShuffle(SDNode *N) {
for (auto *U : N->uses()) {
if (isTargetShuffle(U->getOpcode()))
return true;
if (U->getOpcode() == ISD::BITCAST) // Ignore bitcasts
return isUseOfShuffle(U);
}
return false;
}
// Check if the current node of the build vector is a zero-extended vector.
// If so, return the value that is being zero extended.
// For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
// NumElt - returns the number of zero-extended identical values.
// EltType - returns the type of the value including the zero extension.
static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
unsigned &NumElt, MVT &EltType) {
SDValue ExtValue = Op->getOperand(0);
unsigned NumElts = Op->getNumOperands();
unsigned Delta = NumElts;
for (unsigned i = 1; i < NumElts; i++) {
if (Op->getOperand(i) == ExtValue) {
Delta = i;
break;
}
if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
return SDValue();
}
if (!isPowerOf2_32(Delta) || Delta == 1)
return SDValue();
for (unsigned i = Delta; i < NumElts; i++) {
if (i % Delta == 0) {
if (Op->getOperand(i) != ExtValue)
return SDValue();
} else if (!(isNullConstant(Op->getOperand(i)) ||
Op->getOperand(i).isUndef()))
return SDValue();
}
unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
unsigned ExtVTSize = EltSize * Delta;
EltType = MVT::getIntegerVT(ExtVTSize);
NumElt = NumElts / Delta;
return ExtValue;
}
/// Attempt to use the vbroadcast instruction to generate a splat value
/// from a splat BUILD_VECTOR which uses:
/// a. A single scalar load, or a constant.
/// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
///
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// VBROADCAST requires AVX.
// TODO: Splats could be generated for non-AVX CPUs using SSE
// instructions, but there's less potential gain for only 128-bit vectors.
if (!Subtarget.hasAVX())
return SDValue();
MVT VT = BVOp->getSimpleValueType(0);
SDLoc dl(BVOp);
assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
"Unsupported vector type for broadcast.");
BitVector UndefElements;
SDValue Ld = BVOp->getSplatValue(&UndefElements);
// Attempt to use VBROADCASTM
// From this pattern:
// a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
// b. t1 = (build_vector t0 t0)
//
// Create (VBROADCASTM v2i1 X)
if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
MVT EltType = VT.getScalarType();
unsigned NumElts = VT.getVectorNumElements();
SDValue BOperand;
SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
(Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
if (ZeroExtended)
BOperand = ZeroExtended.getOperand(0);
else
BOperand = Ld.getOperand(0).getOperand(0);
MVT MaskVT = BOperand.getSimpleValueType();
if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
(EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
SDValue Brdcst =
DAG.getNode(X86ISD::VBROADCASTM, dl,
MVT::getVectorVT(EltType, NumElts), BOperand);
return DAG.getBitcast(VT, Brdcst);
}
}
}
unsigned NumElts = VT.getVectorNumElements();
unsigned NumUndefElts = UndefElements.count();
if (!Ld || (NumElts - NumUndefElts) <= 1) {
APInt SplatValue, Undef;
unsigned SplatBitSize;
bool HasUndef;
// Check if this is a repeated constant pattern suitable for broadcasting.
if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
SplatBitSize > VT.getScalarSizeInBits() &&
SplatBitSize < VT.getSizeInBits()) {
// Avoid replacing with broadcast when it's a use of a shuffle
// instruction to preserve the present custom lowering of shuffles.
if (isUseOfShuffle(BVOp) || BVOp->hasOneUse())
return SDValue();
// replace BUILD_VECTOR with broadcast of the repeated constants.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
LLVMContext *Ctx = DAG.getContext();
MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
if (Subtarget.hasAVX()) {
if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
!(SplatBitSize == 64 && Subtarget.is32Bit())) {
// Splatted value can fit in one INTEGER constant in constant pool.
// Load the constant and broadcast it.
MVT CVT = MVT::getIntegerVT(SplatBitSize);
Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
SDValue CP = DAG.getConstantPool(C, PVT);
unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(
CVT, dl, DAG.getEntryNode(), CP,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
Alignment);
SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
MVT::getVectorVT(CVT, Repeat), Ld);
return DAG.getBitcast(VT, Brdcst);
} else if (SplatBitSize == 32 || SplatBitSize == 64) {
// Splatted value can fit in one FLOAT constant in constant pool.
// Load the constant and broadcast it.
// AVX has support for 32- and 64-bit broadcasts for floats only.
// There is no 64-bit integer broadcast on a 32-bit subtarget.
MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
// Lower the splat via APFloat directly, to avoid any conversion.
Constant *C =
SplatBitSize == 32
? ConstantFP::get(*Ctx,
APFloat(APFloat::IEEEsingle(), SplatValue))
: ConstantFP::get(*Ctx,
APFloat(APFloat::IEEEdouble(), SplatValue));
SDValue CP = DAG.getConstantPool(C, PVT);
unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(
CVT, dl, DAG.getEntryNode(), CP,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
Alignment);
SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
MVT::getVectorVT(CVT, Repeat), Ld);
return DAG.getBitcast(VT, Brdcst);
} else if (SplatBitSize > 64) {
// Load the vector of constants and broadcast it.
MVT CVT = VT.getScalarType();
Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
*Ctx);
SDValue VCP = DAG.getConstantPool(VecC, PVT);
unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
Ld = DAG.getLoad(
MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
Alignment);
SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
return DAG.getBitcast(VT, Brdcst);
}
}
}
// If we are moving a scalar into a vector (Ld must be set and all elements
// but 1 are undef) and that operation is not obviously supported by
// vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
// That's better than general shuffling and may eliminate a load to GPR and
// move from scalar to vector register.
if (!Ld || NumElts - NumUndefElts != 1)
return SDValue();
unsigned ScalarSize = Ld.getValueSizeInBits();
if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
return SDValue();
}
bool ConstSplatVal =
(Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
// Make sure that all of the users of a non-constant load are from the
// BUILD_VECTOR node.
if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
return SDValue();
unsigned ScalarSize = Ld.getValueSizeInBits();
bool IsGE256 = (VT.getSizeInBits() >= 256);
// When optimizing for size, generate up to 5 extra bytes for a broadcast
// instruction to save 8 or more bytes of constant pool data.
// TODO: If multiple splats are generated to load the same constant,
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// Handle broadcasting a single constant scalar from the constant pool
// into a vector.
// On Sandybridge (no AVX2), it is still better to load a constant vector
// from the constant pool and not to broadcast it from a scalar.
// But override that restriction when optimizing for size.
// TODO: Check if splatting is recommended for other AVX-capable CPUs.
if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
EVT CVT = Ld.getValueType();
assert(!CVT.isVector() && "Must not broadcast a vector type");
// Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
// For size optimization, also splat v2f64 and v2i64, and for size opt
// with AVX2, also splat i8 and i16.
// With pattern matching, the VBROADCAST node may become a VMOVDDUP.
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
(OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
const Constant *C = nullptr;
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
C = CI->getConstantIntValue();
else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
C = CF->getConstantFPValue();
assert(C && "Invalid constant type");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue CP =
DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(
CVT, dl, DAG.getEntryNode(), CP,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
Alignment);
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
}
bool IsLoad = ISD::isNormalLoad(Ld.getNode());
// Handle AVX2 in-register broadcasts.
if (!IsLoad && Subtarget.hasInt256() &&
(ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The scalar source must be a normal load.
if (!IsLoad)
return SDValue();
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
(Subtarget.hasVLX() && ScalarSize == 64))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The integer check is needed for the 64-bit into 128-bit case, so that it
// doesn't match double, since there is no vbroadcastsd xmm.
if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
// Unsupported broadcast.
return SDValue();
}
/// For an EXTRACT_VECTOR_ELT with a constant index return the real
/// underlying vector and index.
///
/// Modifies \p ExtractedFromVec to the real vector and returns the real
/// index.
static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
SDValue ExtIdx) {
int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
return Idx;
// For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
// lowered this:
// (extract_vector_elt (v8f32 %1), Constant<6>)
// to:
// (extract_vector_elt (vector_shuffle<2,u,u,u>
// (extract_subvector (v8f32 %0), Constant<4>),
// undef)
// Constant<0>)
// In this case the vector is the extract_subvector expression and the index
// is 2, as specified by the shuffle.
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
SDValue ShuffleVec = SVOp->getOperand(0);
MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
assert(ShuffleVecVT.getVectorElementType() ==
ExtractedFromVec.getSimpleValueType().getVectorElementType());
int ShuffleIdx = SVOp->getMaskElt(Idx);
if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
ExtractedFromVec = ShuffleVec;
return ShuffleIdx;
}
return Idx;
}
static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
// Skip if insert_vec_elt is not supported.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
return SDValue();
SDLoc DL(Op);
unsigned NumElems = Op.getNumOperands();
SDValue VecIn1;
SDValue VecIn2;
SmallVector<unsigned, 4> InsertIndices;
SmallVector<int, 8> Mask(NumElems, -1);
for (unsigned i = 0; i != NumElems; ++i) {
unsigned Opc = Op.getOperand(i).getOpcode();
if (Opc == ISD::UNDEF)
continue;
if (Opc != ISD::EXTRACT_VECTOR_ELT) {
// Quit if more than one element needs inserting.
if (InsertIndices.size() > 1)
return SDValue();
InsertIndices.push_back(i);
continue;
}
SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
SDValue ExtIdx = Op.getOperand(i).getOperand(1);
// Quit if non-constant index.
if (!isa<ConstantSDNode>(ExtIdx))
return SDValue();
int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
// Quit if extracted from a vector of a different type.
if (ExtractedFromVec.getValueType() != VT)
return SDValue();
if (!VecIn1.getNode())
VecIn1 = ExtractedFromVec;
else if (VecIn1 != ExtractedFromVec) {
if (!VecIn2.getNode())
VecIn2 = ExtractedFromVec;
else if (VecIn2 != ExtractedFromVec)
// Quit if more than 2 vectors to shuffle
return SDValue();
}
if (ExtractedFromVec == VecIn1)
Mask[i] = Idx;
else if (ExtractedFromVec == VecIn2)
Mask[i] = Idx + NumElems;
}
if (!VecIn1.getNode())
return SDValue();
VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
for (unsigned Idx : InsertIndices)
NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
DAG.getIntPtrConstant(Idx, DL));
return NV;
}
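// Illustrative example (not from the original source): the constant
//   (v8i1 build_vector 1, 0, 1, 1, 0, 0, 0, 1)
// is packed LSB-first into the i8 constant 0b10001101 (141).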
static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
Op.getScalarValueSizeInBits() == 1 &&
"Can not convert non-constant vector");
uint64_t Immediate = 0;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
if (!In.isUndef())
Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
}
SDLoc dl(Op);
MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
return DAG.getConstant(Immediate, dl, VT);
}
// Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
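// For example (illustrative, not from the original source): for
//   (v8i1 build_vector 1, 0, %x, 1, 0, 0, %y, 0)
// the constant lanes are collected into the immediate 0b00001001, which is
// bitcast to v8i1, and %x / %y are then inserted at indices 2 and 6.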
static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
assert((VT.getVectorElementType() == MVT::i1) &&
"Unexpected type in LowerBUILD_VECTORvXi1!");
SDLoc dl(Op);
if (ISD::isBuildVectorAllZeros(Op.getNode()))
return Op;
if (ISD::isBuildVectorAllOnes(Op.getNode()))
return Op;
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
// Split the pieces.
SDValue Lower =
DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(0, 32));
SDValue Upper =
DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(32, 32));
// We have to manually lower both halves so getNode doesn't try to
// reassemble the build_vector.
Lower = LowerBUILD_VECTORvXi1(Lower, DAG, Subtarget);
Upper = LowerBUILD_VECTORvXi1(Upper, DAG, Subtarget);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lower, Upper);
}
SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
if (Imm.getValueSizeInBits() == VT.getSizeInBits())
return DAG.getBitcast(VT, Imm);
SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
DAG.getIntPtrConstant(0, dl));
}
// Vector has one or more non-const elements
uint64_t Immediate = 0;
SmallVector<unsigned, 16> NonConstIdx;
bool IsSplat = true;
bool HasConstElts = false;
int SplatIdx = -1;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
if (In.isUndef())
continue;
if (!isa<ConstantSDNode>(In))
NonConstIdx.push_back(idx);
else {
Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
HasConstElts = true;
}
if (SplatIdx < 0)
SplatIdx = idx;
else if (In != Op.getOperand(SplatIdx))
IsSplat = false;
}
// For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
if (IsSplat)
return DAG.getSelect(dl, VT, Op.getOperand(SplatIdx),
DAG.getConstant(1, dl, VT),
DAG.getConstant(0, dl, VT));
// Insert the non-constant elements one by one.
SDValue DstVec;
SDValue Imm;
if (Immediate) {
MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8));
Imm = DAG.getConstant(Immediate, dl, ImmVT);
}
else if (HasConstElts)
Imm = DAG.getConstant(0, dl, VT);
else
Imm = DAG.getUNDEF(VT);
if (Imm.getValueSizeInBits() == VT.getSizeInBits())
DstVec = DAG.getBitcast(VT, Imm);
else {
SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
DAG.getIntPtrConstant(0, dl));
}
for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
unsigned InsertIdx = NonConstIdx[i];
DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
Op.getOperand(InsertIdx),
DAG.getIntPtrConstant(InsertIdx, dl));
}
return DstVec;
}
/// This is a helper function of LowerToHorizontalOp().
/// This function checks whether the input build_vector \p N implements a
/// 128-bit partial horizontal operation on a 256-bit vector; that operation
/// may not match the layout of an x86 256-bit horizontal instruction.
/// In other words, if this returns true, then some extraction/insertion will
/// be required to produce a valid horizontal instruction.
///
/// Parameter \p Opcode defines the kind of horizontal operation to match.
/// For example, if \p Opcode is equal to ISD::ADD, then this function
/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
/// is equal to ISD::SUB, then this function checks if this is a horizontal
/// arithmetic sub.
///
/// This function only analyzes elements of \p N whose indices are
/// in range [BaseIdx, LastIdx).
///
/// TODO: This function was originally used to match both real and fake partial
/// horizontal operations, but the index-matching logic is incorrect for that.
/// See the corrected implementation in isHopBuildVector(). Can we reduce this
/// code because it is only used for partial h-op matching now?
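/// For example (illustrative, not from the original source): with \p Opcode ==
/// ISD::ADD, \p BaseIdx == 0 and \p LastIdx == 4 on a v8i32 build_vector, the
/// analyzed elements are expected to look like
///   (add (extract_elt %A, 0), (extract_elt %A, 1)),
///   (add (extract_elt %A, 2), (extract_elt %A, 3)),
///   (add (extract_elt %B, 0), (extract_elt %B, 1)),
///   (add (extract_elt %B, 2), (extract_elt %B, 3))
/// where the second source vector may differ from the first.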
static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
SelectionDAG &DAG,
unsigned BaseIdx, unsigned LastIdx,
SDValue &V0, SDValue &V1) {
EVT VT = N->getValueType(0);
assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
"Invalid Vector in input!");
bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
bool CanFold = true;
unsigned ExpectedVExtractIdx = BaseIdx;
unsigned NumElts = LastIdx - BaseIdx;
V0 = DAG.getUNDEF(VT);
V1 = DAG.getUNDEF(VT);
// Check if N implements a horizontal binop.
for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
SDValue Op = N->getOperand(i + BaseIdx);
// Skip UNDEFs.
if (Op->isUndef()) {
// Update the expected vector extract index.
if (i * 2 == NumElts)
ExpectedVExtractIdx = BaseIdx;
ExpectedVExtractIdx += 2;
continue;
}
CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
if (!CanFold)
break;
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
// Try to match the following pattern:
// (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op0.getOperand(0) == Op1.getOperand(0) &&
isa<ConstantSDNode>(Op0.getOperand(1)) &&
isa<ConstantSDNode>(Op1.getOperand(1)));
if (!CanFold)
break;
unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
if (i * 2 < NumElts) {
if (V0.isUndef()) {
V0 = Op0.getOperand(0);
if (V0.getValueType() != VT)
return false;
}
} else {
if (V1.isUndef()) {
V1 = Op0.getOperand(0);
if (V1.getValueType() != VT)
return false;
}
if (i * 2 == NumElts)
ExpectedVExtractIdx = BaseIdx;
}
SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
if (I0 == ExpectedVExtractIdx)
CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
else if (IsCommutable && I1 == ExpectedVExtractIdx) {
// Try to match the following dag sequence:
// (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
} else
CanFold = false;
ExpectedVExtractIdx += 2;
}
return CanFold;
}
/// Emit a sequence of two 128-bit horizontal add/sub followed by
/// a concat_vector.
///
/// This is a helper function of LowerToHorizontalOp().
/// This function expects two 256-bit vectors called V0 and V1.
/// At first, each vector is split into two separate 128-bit vectors.
/// Then, the resulting 128-bit vectors are used to implement two
/// horizontal binary operations.
///
/// The kind of horizontal binary operation is defined by \p X86Opcode.
///
/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
/// the two new horizontal binop.
/// When Mode is set, the first horizontal binop dag node would take as input
/// the lower 128-bit of V0 and the upper 128-bit of V0. The second
/// horizontal binop dag node would take as input the lower 128-bit of V1
/// and the upper 128-bit of V1.
/// Example:
/// HADD V0_LO, V0_HI
/// HADD V1_LO, V1_HI
///
/// Otherwise, the first horizontal binop dag node takes as input the lower
/// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
/// Example:
/// HADD V0_LO, V1_LO
/// HADD V0_HI, V1_HI
///
/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
/// the upper 128-bits of the result.
static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
const SDLoc &DL, SelectionDAG &DAG,
unsigned X86Opcode, bool Mode,
bool isUndefLO, bool isUndefHI) {
MVT VT = V0.getSimpleValueType();
assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
"Invalid nodes in input!");
unsigned NumElts = VT.getVectorNumElements();
SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
MVT NewVT = V0_LO.getSimpleValueType();
SDValue LO = DAG.getUNDEF(NewVT);
SDValue HI = DAG.getUNDEF(NewVT);
if (Mode) {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
if (!isUndefLO && !V0->isUndef())
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
if (!isUndefHI && !V1->isUndef())
HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
} else {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
}
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
}
/// Returns true iff \p BV builds a vector whose result is equivalent to the
/// result of an ADDSUB/SUBADD operation.
/// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
/// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
/// \p Opnd0 and \p Opnd1.
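/// For example (illustrative, not from the original source), the v4f32
/// build_vector
///   (fsub (extract_elt %A, 0), (extract_elt %B, 0)),
///   (fadd (extract_elt %A, 1), (extract_elt %B, 1)),
///   (fsub (extract_elt %A, 2), (extract_elt %B, 2)),
///   (fadd (extract_elt %A, 3), (extract_elt %B, 3))
/// matches ADDSUB(%A, %B), i.e. \p IsSubAdd == false.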
static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
const X86Subtarget &Subtarget, SelectionDAG &DAG,
SDValue &Opnd0, SDValue &Opnd1,
unsigned &NumExtracts,
bool &IsSubAdd) {
MVT VT = BV->getSimpleValueType(0);
if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
return false;
unsigned NumElts = VT.getVectorNumElements();
SDValue InVec0 = DAG.getUNDEF(VT);
SDValue InVec1 = DAG.getUNDEF(VT);
NumExtracts = 0;
// Odd-numbered elements in the input build vector are obtained from
// adding/subtracting two integer/float elements.
// Even-numbered elements in the input build vector are obtained from
// subtracting/adding two integer/float elements.
unsigned Opc[2] = {0, 0};
for (unsigned i = 0, e = NumElts; i != e; ++i) {
SDValue Op = BV->getOperand(i);
// Skip 'undef' values.
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::UNDEF)
continue;
// Early exit if we found an unexpected opcode.
if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
return false;
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
// Try to match the following pattern:
// (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
// Early exit if we cannot match that sequence.
if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(Op0.getOperand(1)) ||
!isa<ConstantSDNode>(Op1.getOperand(1)) ||
Op0.getOperand(1) != Op1.getOperand(1))
return false;
unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
if (I0 != i)
return false;
// We found a valid add/sub node; make sure it's the same opcode as the
// previous elements for this parity.
if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
return false;
Opc[i % 2] = Opcode;
// Update InVec0 and InVec1.
if (InVec0.isUndef()) {
InVec0 = Op0.getOperand(0);
if (InVec0.getSimpleValueType() != VT)
return false;
}
if (InVec1.isUndef()) {
InVec1 = Op1.getOperand(0);
if (InVec1.getSimpleValueType() != VT)
return false;
}
// Make sure that the operands of each add/sub node always come from the
// same pair of vectors.
if (InVec0 != Op0.getOperand(0)) {
if (Opcode == ISD::FSUB)
return false;
// FADD is commutable. Try to commute the operands
// and then test again.
std::swap(Op0, Op1);
if (InVec0 != Op0.getOperand(0))
return false;
}
if (InVec1 != Op1.getOperand(0))
return false;
// Increment the number of extractions done.
++NumExtracts;
}
// Ensure we have found an opcode for both parities and that they are
// different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
// inputs are undef.
if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
InVec0.isUndef() || InVec1.isUndef())
return false;
IsSubAdd = Opc[0] == ISD::FADD;
Opnd0 = InVec0;
Opnd1 = InVec1;
return true;
}
/// Returns true if it is possible to fold MUL and an idiom that has already
/// been recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
/// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
/// operands of FMADDSUB/FMSUBADD are written to the parameters \p Opnd0,
/// \p Opnd1 and \p Opnd2.
///
/// Prior to calling this function it should be known that there is some
/// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
/// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
/// before replacement of such SDNode with ADDSUB operation. Thus the number
/// of \p Opnd0 uses is expected to be equal to 2.
/// For example, this function may be called for the following IR:
/// %AB = fmul fast <2 x double> %A, %B
/// %Sub = fsub fast <2 x double> %AB, %C
/// %Add = fadd fast <2 x double> %AB, %C
/// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
/// <2 x i32> <i32 0, i32 3>
/// There is a def for %Addsub here, which potentially can be replaced by
/// X86ISD::ADDSUB operation:
/// %Addsub = X86ISD::ADDSUB %AB, %C
/// and such ADDSUB can further be replaced with FMADDSUB:
/// %Addsub = FMADDSUB %A, %B, %C.
///
/// The main reason why this method is called before the replacement of the
/// recognized ADDSUB idiom with ADDSUB operation is that such replacement
/// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
/// FMADDSUB is.
static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
SelectionDAG &DAG,
SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
unsigned ExpectedUses) {
if (Opnd0.getOpcode() != ISD::FMUL ||
!Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
return false;
// FIXME: These checks must match the similar ones in
// DAGCombiner::visitFADDForFMACombine. It would be good to have one
// function that would answer whether it is OK to fuse MUL + ADD to FMADD
// or MUL + ADDSUB to FMADDSUB.
const TargetOptions &Options = DAG.getTarget().Options;
bool AllowFusion =
(Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
if (!AllowFusion)
return false;
Opnd2 = Opnd1;
Opnd1 = Opnd0.getOperand(1);
Opnd0 = Opnd0.getOperand(0);
return true;
}
/// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
/// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
/// X86ISD::FMSUBADD node, respectively.
static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Opnd0, Opnd1;
unsigned NumExtracts;
bool IsSubAdd;
if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
IsSubAdd))
return SDValue();
MVT VT = BV->getSimpleValueType(0);
SDLoc DL(BV);
// Try to generate X86ISD::FMADDSUB node here.
SDValue Opnd2;
if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
}
// We only support ADDSUB.
if (IsSubAdd)
return SDValue();
// Do not generate X86ISD::ADDSUB node for 512-bit types even though
// the ADDSUB idiom has been successfully recognized. There are no known
// X86 targets with 512-bit ADDSUB instructions!
// 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
// recognition.
if (VT.is512BitVector())
return SDValue();
return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}
static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
unsigned &HOpcode, SDValue &V0, SDValue &V1) {
// Initialize outputs to known values.
MVT VT = BV->getSimpleValueType(0);
HOpcode = ISD::DELETED_NODE;
V0 = DAG.getUNDEF(VT);
V1 = DAG.getUNDEF(VT);
// x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
// half of the result is calculated independently from the 128-bit halves of
// the inputs, so that makes the index-checking logic below more complicated.
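// For example (illustrative), a 256-bit v8i32 HADD of A and B produces
//   { A0+A1, A2+A3, B0+B1, B2+B3, A4+A5, A6+A7, B4+B5, B6+B7 }
// so the expected extract indices restart within each 128-bit chunk.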
unsigned NumElts = VT.getVectorNumElements();
unsigned GenericOpcode = ISD::DELETED_NODE;
unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
for (unsigned i = 0; i != Num128BitChunks; ++i) {
for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
// Ignore undef elements.
SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
if (Op.isUndef())
continue;
// If there's an opcode mismatch, we're done.
if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
return false;
// Initialize horizontal opcode.
if (HOpcode == ISD::DELETED_NODE) {
GenericOpcode = Op.getOpcode();
switch (GenericOpcode) {
case ISD::ADD: HOpcode = X86ISD::HADD; break;
case ISD::SUB: HOpcode = X86ISD::HSUB; break;
case ISD::FADD: HOpcode = X86ISD::FHADD; break;
case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
default: return false;
}
}
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op0.getOperand(0) != Op1.getOperand(0) ||
!isa<ConstantSDNode>(Op0.getOperand(1)) ||
!isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
return false;
// The source vector is chosen based on which 64-bit half of the
// destination vector is being calculated.
if (j < NumEltsIn64Bits) {
if (V0.isUndef())
V0 = Op0.getOperand(0);
} else {
if (V1.isUndef())
V1 = Op0.getOperand(0);
}
SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
if (SourceVec != Op0.getOperand(0))
return false;
// op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
unsigned ExpectedIndex = i * NumEltsIn128Bits +
(j % NumEltsIn64Bits) * 2;
if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
continue;
// If this is not a commutative op, this does not match.
if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
return false;
// Addition is commutative, so try swapping the extract indexes.
// op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
continue;
// Extract indexes do not match horizontal requirement.
return false;
}
}
// We matched. Opcode and operands are returned by reference as arguments.
return true;
}
static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
SelectionDAG &DAG, unsigned HOpcode,
SDValue V0, SDValue V1) {
// If either input vector is not the same size as the build vector,
// extract/insert the low bits to the correct size.
// This is free (examples: zmm --> xmm, xmm --> ymm).
MVT VT = BV->getSimpleValueType(0);
unsigned Width = VT.getSizeInBits();
if (V0.getValueSizeInBits() > Width)
V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
else if (V0.getValueSizeInBits() < Width)
V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
if (V1.getValueSizeInBits() > Width)
V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
else if (V1.getValueSizeInBits() < Width)
V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
}
/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// We need at least 2 non-undef elements to make this worthwhile by default.
unsigned NumNonUndefs = 0;
for (const SDValue &V : BV->op_values())
if (!V.isUndef())
++NumNonUndefs;
if (NumNonUndefs < 2)
return SDValue();
// There are 4 sets of horizontal math operations distinguished by type:
// int/FP at 128-bit/256-bit. Each type was introduced with a different
// subtarget feature. Try to match those "native" patterns first.
MVT VT = BV->getSimpleValueType(0);
unsigned HOpcode;
SDValue V0, V1;
if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3())
if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
if ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3())
if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
if ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX())
if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
if ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())
if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
// Try harder to match 256-bit ops by using extract/concat.
if (!Subtarget.hasAVX() || !VT.is256BitVector())
return SDValue();
// Count the number of UNDEF operands in the input build_vector.
unsigned NumElts = VT.getVectorNumElements();
unsigned Half = NumElts / 2;
unsigned NumUndefsLO = 0;
unsigned NumUndefsHI = 0;
for (unsigned i = 0, e = Half; i != e; ++i)
if (BV->getOperand(i)->isUndef())
NumUndefsLO++;
for (unsigned i = Half, e = NumElts; i != e; ++i)
if (BV->getOperand(i)->isUndef())
NumUndefsHI++;
SDLoc DL(BV);
SDValue InVec0, InVec1;
if (VT == MVT::v8i32 || VT == MVT::v16i16) {
SDValue InVec2, InVec3;
unsigned X86Opcode;
bool CanFold = true;
if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
InVec3) &&
((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
X86Opcode = X86ISD::HADD;
else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
InVec1) &&
isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
InVec3) &&
((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
X86Opcode = X86ISD::HSUB;
else
CanFold = false;
if (CanFold) {
// Do not try to expand this build_vector into a pair of horizontal
// add/sub if we can emit a pair of scalar add/sub.
if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
return SDValue();
// Convert this build_vector into a pair of horizontal binops followed by
// a concat vector. We must adjust the outputs from the partial horizontal
// matching calls above to account for undefined vector halves.
SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
bool isUndefLO = NumUndefsLO == Half;
bool isUndefHI = NumUndefsHI == Half;
return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
isUndefHI);
}
}
if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
VT == MVT::v16i16) {
unsigned X86Opcode;
if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
X86Opcode = X86ISD::HADD;
else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
InVec1))
X86Opcode = X86ISD::HSUB;
else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
InVec1))
X86Opcode = X86ISD::FHADD;
else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
InVec1))
X86Opcode = X86ISD::FHSUB;
else
return SDValue();
// Don't try to expand this build_vector into a pair of horizontal add/sub
// if we can simply emit a pair of scalar add/sub.
if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
return SDValue();
// Convert this build_vector into two horizontal add/sub followed by
// a concat vector.
bool isUndefLO = NumUndefsLO == Half;
bool isUndefHI = NumUndefsHI == Half;
return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
isUndefLO, isUndefHI);
}
return SDValue();
}
/// If a BUILD_VECTOR's source elements all apply the same bit operation and
/// one of their operands is constant, lower to a pair of BUILD_VECTOR and
/// just apply the bit to the vectors.
/// NOTE: It's not in our interest to start making a general-purpose vectorizer
/// out of this, but enough scalar bit operations are created by the later
/// legalization + scalarization stages to need basic support.
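/// For example (illustrative, not from the original source):
///   (v4i32 build_vector (and %a, 1), (and %b, 2), (and %c, 4), (and %d, 8))
/// becomes
///   (and (v4i32 build_vector %a, %b, %c, %d),
///        (v4i32 build_vector 1, 2, 4, 8))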
static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
SelectionDAG &DAG) {
SDLoc DL(Op);
MVT VT = Op->getSimpleValueType(0);
unsigned NumElems = VT.getVectorNumElements();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Check that all elements have the same opcode.
// TODO: Should we allow UNDEFS and if so how many?
unsigned Opcode = Op->getOperand(0).getOpcode();
for (unsigned i = 1; i < NumElems; ++i)
if (Opcode != Op->getOperand(i).getOpcode())
return SDValue();
// TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
switch (Opcode) {
default:
return SDValue();
case ISD::AND:
case ISD::XOR:
case ISD::OR:
// Don't do this if the buildvector is a splat - we'd replace one
// constant with an entire vector.
if (Op->getSplatValue())
return SDValue();
if (!TLI.isOperationLegalOrPromote(Opcode, VT))
return SDValue();
break;
}
SmallVector<SDValue, 4> LHSElts, RHSElts;
for (SDValue Elt : Op->ops()) {
SDValue LHS = Elt.getOperand(0);
SDValue RHS = Elt.getOperand(1);
// We expect the canonicalized RHS operand to be the constant.
if (!isa<ConstantSDNode>(RHS))
return SDValue();
LHSElts.push_back(LHS);
RHSElts.push_back(RHS);
}
SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
return DAG.getNode(Opcode, DL, VT, LHS, RHS);
}
/// Create a vector constant without a load. SSE/AVX provide the bare minimum
/// functionality to do this, so it's all zeros, all ones, or some derivation
/// that is cheap to calculate.
static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
// Vectors containing all zeros can be matched by pxor and xorps.
if (ISD::isBuildVectorAllZeros(Op.getNode())) {
// Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
// and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
return Op;
return getZeroVector(VT, Subtarget, DAG, DL);
}
// Vectors containing all ones can be matched by pcmpeqd on 128-bit width
// vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
// vpcmpeqd on 256-bit vectors.
if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
if (VT == MVT::v4i32 || VT == MVT::v16i32 ||
(VT == MVT::v8i32 && Subtarget.hasInt256()))
return Op;
return getOnesVector(VT, DAG, DL);
}
return SDValue();
}
/// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
/// from a vector of source values and a vector of extraction indices.
/// The vectors might be manipulated to match the type of the permute op.
static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT ShuffleVT = VT;
EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
unsigned NumElts = VT.getVectorNumElements();
unsigned SizeInBits = VT.getSizeInBits();
// Adjust IndicesVec to match VT size.
assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
"Illegal variable permute mask size");
if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
NumElts * VT.getScalarSizeInBits());
IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
// Handle a SrcVec whose size does not match VT.
if (SrcVec.getValueSizeInBits() != SizeInBits) {
if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
// Handle larger SrcVec by treating it as a larger permute.
unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
Subtarget, DAG, SDLoc(IndicesVec));
return extractSubVector(
createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
DAG, DL, SizeInBits);
} else if (SrcVec.getValueSizeInBits() < SizeInBits) {
// Widen smaller SrcVec to match VT.
SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
} else
return SDValue();
}
auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
EVT SrcVT = Idx.getValueType();
unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
uint64_t IndexScale = 0;
uint64_t IndexOffset = 0;
// If we're scaling a smaller permute op, then we need to repeat the
// indices, scaling and offsetting them as well.
// e.g. v4i32 -> v16i8 (Scale = 4)
// IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
// IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
for (uint64_t i = 0; i != Scale; ++i) {
IndexScale |= Scale << (i * NumDstBits);
IndexOffset |= i << (i * NumDstBits);
}
Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
return Idx;
};
unsigned Opcode = 0;
switch (VT.SimpleTy) {
default:
break;
case MVT::v16i8:
if (Subtarget.hasSSSE3())
Opcode = X86ISD::PSHUFB;
break;
case MVT::v8i16:
if (Subtarget.hasVLX() && Subtarget.hasBWI())
Opcode = X86ISD::VPERMV;
else if (Subtarget.hasSSSE3()) {
Opcode = X86ISD::PSHUFB;
ShuffleVT = MVT::v16i8;
}
break;
case MVT::v4f32:
case MVT::v4i32:
if (Subtarget.hasAVX()) {
Opcode = X86ISD::VPERMILPV;
ShuffleVT = MVT::v4f32;
} else if (Subtarget.hasSSSE3()) {
Opcode = X86ISD::PSHUFB;
ShuffleVT = MVT::v16i8;
}
break;
case MVT::v2f64:
case MVT::v2i64:
if (Subtarget.hasAVX()) {
// VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
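// (Illustrative: doubling maps index 0 -> 0 and index 1 -> 2, so the
// selection ends up in bit#1 of each index element.)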
IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
Opcode = X86ISD::VPERMILPV;
ShuffleVT = MVT::v2f64;
} else if (Subtarget.hasSSE41()) {
// SSE41 can compare v2i64 - select between indices 0 and 1.
return DAG.getSelectCC(
DL, IndicesVec,
getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
ISD::CondCode::SETEQ);
}
break;
case MVT::v32i8:
if (Subtarget.hasVLX() && Subtarget.hasVBMI())
Opcode = X86ISD::VPERMV;
else if (Subtarget.hasXOP()) {
SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
return DAG.getNode(
ISD::CONCAT_VECTORS, DL, VT,
DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
} else if (Subtarget.hasAVX()) {
SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
// Permute Lo and Hi and then select based on index range.
// This works because PSHUFB uses bits[3:0] to permute elements and we
// don't care about bit[7], as it's just an index vector.
SDValue Idx = Ops[2];
EVT VT = Idx.getValueType();
return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
ISD::CondCode::SETGT);
};
SDValue Ops[] = {LoLo, HiHi, IndicesVec};
return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
PSHUFBBuilder);
}
break;
case MVT::v16i16:
if (Subtarget.hasVLX() && Subtarget.hasBWI())
Opcode = X86ISD::VPERMV;
else if (Subtarget.hasAVX()) {
// Scale to v32i8 and perform as v32i8.
IndicesVec = ScaleIndices(IndicesVec, 2);
return DAG.getBitcast(
VT, createVariablePermute(
MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
}
break;
case MVT::v8f32:
case MVT::v8i32:
if (Subtarget.hasAVX2())
Opcode = X86ISD::VPERMV;
else if (Subtarget.hasAVX()) {
SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
{0, 1, 2, 3, 0, 1, 2, 3});
SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
{4, 5, 6, 7, 4, 5, 6, 7});
if (Subtarget.hasXOP())
return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32,
LoLo, HiHi, IndicesVec,
DAG.getConstant(0, DL, MVT::i8)));
// Permute Lo and Hi and then select based on index range.
// This works as VPERMILPS only uses index bits[0:1] to permute elements.
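// (Illustrative: an index of 5 selects from HiHi because 5 > 3, and
// VPERMILPS sees 5 & 3 == 1, which is the correct lane within HiHi.)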
SDValue Res = DAG.getSelectCC(
DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
ISD::CondCode::SETGT);
return DAG.getBitcast(VT, Res);
}
break;
case MVT::v4i64:
case MVT::v4f64:
if (Subtarget.hasAVX512()) {
if (!Subtarget.hasVLX()) {
MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
SDLoc(SrcVec));
IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
DAG, SDLoc(IndicesVec));
SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
DAG, Subtarget);
return extract256BitVector(Res, 0, DAG, DL);
}
Opcode = X86ISD::VPERMV;
} else if (Subtarget.hasAVX()) {
SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
SDValue LoLo =
DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
SDValue HiHi =
DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
// VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
if (Subtarget.hasXOP())
return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64,
LoLo, HiHi, IndicesVec,
DAG.getConstant(0, DL, MVT::i8)));
// Permute Lo and Hi and then select based on index range.
// This works as VPERMILPD only uses index bit[1] to permute elements.
SDValue Res = DAG.getSelectCC(
DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
ISD::CondCode::SETGT);
return DAG.getBitcast(VT, Res);
}
break;
case MVT::v64i8:
if (Subtarget.hasVBMI())
Opcode = X86ISD::VPERMV;
break;
case MVT::v32i16:
if (Subtarget.hasBWI())
Opcode = X86ISD::VPERMV;
break;
case MVT::v16f32:
case MVT::v16i32:
case MVT::v8f64:
case MVT::v8i64:
if (Subtarget.hasAVX512())
Opcode = X86ISD::VPERMV;
break;
}
if (!Opcode)
return SDValue();
assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
(VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
"Illegal variable permute shuffle type");
uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
if (Scale > 1)
IndicesVec = ScaleIndices(IndicesVec, Scale);
EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
SDValue Res = Opcode == X86ISD::VPERMV
? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
: DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
return DAG.getBitcast(VT, Res);
}
// Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
// reasoned to be a permutation of a vector by indices in a non-constant vector.
// (build_vector (extract_elt V, (extract_elt I, 0)),
// (extract_elt V, (extract_elt I, 1)),
// ...
// ->
// (vpermv I, V)
//
// TODO: Handle undefs
// TODO: Utilize pshufb and zero mask blending to support more efficient
// construction of vectors with constant-0 elements.
static SDValue
LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue SrcVec, IndicesVec;
// Check for a match of the permute source vector and permute index elements.
// This is done by checking that the i-th build_vector operand is of the form:
// (extract_elt SrcVec, (extract_elt IndicesVec, i)).
for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
SDValue Op = V.getOperand(Idx);
if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
// If this is the first extract encountered in V, set the source vector,
// otherwise verify the extract is from the previously defined source
// vector.
if (!SrcVec)
SrcVec = Op.getOperand(0);
else if (SrcVec != Op.getOperand(0))
return SDValue();
SDValue ExtractedIndex = Op->getOperand(1);
// Peek through extends.
if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
ExtractedIndex = ExtractedIndex.getOperand(0);
if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
// If this is the first extract from the index vector candidate, set the
// indices vector, otherwise verify the extract is from the previously
// defined indices vector.
if (!IndicesVec)
IndicesVec = ExtractedIndex.getOperand(0);
else if (IndicesVec != ExtractedIndex.getOperand(0))
return SDValue();
auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
if (!PermIdx || PermIdx->getZExtValue() != Idx)
return SDValue();
}
SDLoc DL(V);
MVT VT = V.getSimpleValueType();
return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
}
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElems = Op.getNumOperands();
// Generate vectors for predicate vectors.
if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
return VectorConstant;
BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
return AddSub;
if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
return HorizontalOp;
if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
return Broadcast;
if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
return BitOp;
unsigned EVTBits = EltVT.getSizeInBits();
unsigned NumZero = 0;
unsigned NumNonZero = 0;
uint64_t NonZeros = 0;
bool IsAllConstants = true;
SmallSet<SDValue, 8> Values;
unsigned NumConstants = NumElems;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = Op.getOperand(i);
if (Elt.isUndef())
continue;
Values.insert(Elt);
if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
IsAllConstants = false;
NumConstants--;
}
if (X86::isZeroNode(Elt))
NumZero++;
else {
assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
NonZeros |= ((uint64_t)1 << i);
NumNonZero++;
}
}
// All undef vector. Return an UNDEF. All zero vectors were handled above.
if (NumNonZero == 0)
return DAG.getUNDEF(VT);
// If we are inserting one variable into a vector of non-zero constants, try
// to avoid loading each constant element as a scalar. Load the constants as a
// vector and then insert the variable scalar element. If insertion is not
// supported, fall back to a shuffle to get the scalar blended with the
// constants. Insertion into a zero vector is handled as a special-case
// somewhere below here.
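// For example (illustrative): <4 x i32> <1, 2, %x, 4> becomes a constant-pool
// load of <1, 2, undef, 4> followed by an insert of %x at index 2 (or a
// shuffle when insertion into the high half of a wide vector is needed).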
if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
(isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
// Create an all-constant vector. The variable element in the old
// build vector is replaced by undef in the constant vector. Save the
// variable scalar element and its index for use in the insertelement.
LLVMContext &Context = *DAG.getContext();
Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
SDValue VarElt;
SDValue InsIndex;
for (unsigned i = 0; i != NumElems; ++i) {
SDValue Elt = Op.getOperand(i);
if (auto *C = dyn_cast<ConstantSDNode>(Elt))
ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
else if (!Elt.isUndef()) {
assert(!VarElt.getNode() && !InsIndex.getNode() &&
"Expected one variable element in this vector");
VarElt = Elt;
InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
}
}
Constant *CV = ConstantVector::get(ConstVecOps);
SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
// The constants we just created may not be legal (e.g., floating point). We
// must lower the vector right here because we cannot guarantee that we'll
// legalize it before loading it. This is also why we could not just create
// a new build vector here. If the build vector contains illegal constants,
// it could get split back up into a series of insert elements.
// TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
MachineFunction &MF = DAG.getMachineFunction();
MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
if (InsertC < NumEltsInLow128Bits)
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
// There's no good way to insert into the high elements of a >128-bit
// vector, so use shuffles to avoid an extract/insert sequence.
assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
SmallVector<int, 8> ShuffleMask;
unsigned NumElts = VT.getVectorNumElements();
for (unsigned i = 0; i != NumElts; ++i)
ShuffleMask.push_back(i == InsertC ? NumElts : i);
SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
}
// Special case for single non-zero, non-undef, element.
if (NumNonZero == 1) {
unsigned Idx = countTrailingZeros(NonZeros);
SDValue Item = Op.getOperand(Idx);
// If we have a constant or non-constant insertion into the low element of
// a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
// the rest of the elements. This will be matched as movd/movq/movss/movsd
// depending on what the source datatype is.
if (Idx == 0) {
if (NumZero == 0)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
(EltVT == MVT::i64 && Subtarget.is64Bit())) {
assert((VT.is128BitVector() || VT.is256BitVector() ||
VT.is512BitVector()) &&
"Expected an SSE value type!");
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
// Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
}
// We can't directly insert an i8 or i16 into a vector, so zero extend
// it to i32 first.
if (EltVT == MVT::i16 || EltVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
if (VT.getSizeInBits() >= 256) {
MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
if (Subtarget.hasAVX()) {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
} else {
// Without AVX, we need to extend to a 128-bit vector and then
// insert into the 256-bit vector.
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl);
}
} else {
assert(VT.is128BitVector() && "Expected an SSE value type!");
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
}
return DAG.getBitcast(VT, Item);
}
}
// Is it a vector logical left shift?
if (NumElems == 2 && Idx == 1 &&
X86::isZeroNode(Op.getOperand(0)) &&
!X86::isZeroNode(Op.getOperand(1))) {
unsigned NumBits = VT.getSizeInBits();
return getVShift(true, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
VT, Op.getOperand(1)),
NumBits/2, DAG, *this, dl);
}
if (IsAllConstants) // Otherwise, it's better to do a constpool load.
return SDValue();
// Otherwise, if this is a vector with i32 or f32 elements, and the element
// is a non-constant being inserted into an element other than the low one,
// we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
// movd/movss) to move this into the low element, then shuffle it into
// place.
if (EVTBits == 32) {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
}
}
// Splat is obviously ok. Let legalizer expand it to a shuffle.
if (Values.size() == 1) {
if (EVTBits == 32) {
// Instead of a shuffle like this:
// shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
// Check if it's possible to issue this instead.
// shuffle (vload ptr), undef, <1, 1, 1, 1>
unsigned Idx = countTrailingZeros(NonZeros);
SDValue Item = Op.getOperand(Idx);
if (Op.getNode()->isOnlyUserOf(Item.getNode()))
return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
}
return SDValue();
}
// A vector full of immediates; various special cases are already
// handled, so this is best done with a single constant-pool load.
if (IsAllConstants)
return SDValue();
if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
return V;
// See if we can use a vector load to get all of the elements.
{
SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
if (SDValue LD =
EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
return LD;
}
// If this is a splat of pairs of 32-bit elements, we can use a narrower
// build_vector and broadcast it.
// TODO: We could probably generalize this more.
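// For example (illustrative): v8i32 {a, b, a, b, a, b, a, b} is built as a
// v4i32 {a, b, undef, undef} build_vector, bitcast to v2i64, broadcast to
// v4i64, and finally bitcast back to v8i32.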
if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
// Make sure all the even/odd operands match.
for (unsigned i = 2; i != NumElems; ++i)
if (Ops[i % 2] != Op.getOperand(i))
return false;
return true;
};
if (CanSplat(Op, NumElems, Ops)) {
MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
// Create a new build vector and cast to v2i64/v2f64.
SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
DAG.getBuildVector(NarrowVT, dl, Ops));
// Broadcast from v2i64/v2f64 and cast to final VT.
MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
NewBV));
}
}
// For AVX-length vectors, build the individual 128-bit pieces and use
// shuffles to put them in place.
if (VT.getSizeInBits() > 128) {
MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
// Build both the lower and upper subvector.
SDValue Lower =
DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
SDValue Upper = DAG.getBuildVector(
HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
// Recreate the wider vector with the lower and upper part.
return concatSubVectors(Lower, Upper, VT, NumElems, DAG, dl,
VT.getSizeInBits() / 2);
}
// Let legalizer expand 2-wide build_vectors.
if (EVTBits == 64) {
if (NumNonZero == 1) {
// One half is zero or undef.
unsigned Idx = countTrailingZeros(NonZeros);
SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
Op.getOperand(Idx));
return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
}
return SDValue();
}
// If element VT is < 32 bits, convert it to inserts into a zero vector.
if (EVTBits == 8 && NumElems == 16)
if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
DAG, Subtarget))
return V;
if (EVTBits == 16 && NumElems == 8)
if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
DAG, Subtarget))
return V;
// If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
if (EVTBits == 32 && NumElems == 4)
if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
return V;
// If element VT is == 32 bits, turn it into a number of shuffles.
if (NumElems == 4 && NumZero > 0) {
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < 4; ++i) {
bool isZero = !(NonZeros & (1ULL << i));
if (isZero)
Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
else
Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
}
for (unsigned i = 0; i < 2; ++i) {
switch ((NonZeros >> (i*2)) & 0x3) {
default: llvm_unreachable("Unexpected NonZero count");
case 0:
Ops[i] = Ops[i*2]; // Must be a zero vector.
break;
case 1:
Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
break;
case 2:
Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
break;
case 3:
Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
break;
}
}
bool Reverse1 = (NonZeros & 0x3) == 2;
bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
int MaskVec[] = {
Reverse1 ? 1 : 0,
Reverse1 ? 0 : 1,
static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
static_cast<int>(Reverse2 ? NumElems : NumElems+1)
};
return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
}
assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
// Check for a build vector from mostly shuffle plus few inserting.
if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
return Sh;
// For SSE 4.1, use insertps to put the high elements into the low element.
if (Subtarget.hasSSE41()) {
SDValue Result;
if (!Op.getOperand(0).isUndef())
Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
else
Result = DAG.getUNDEF(VT);
for (unsigned i = 1; i < NumElems; ++i) {
if (Op.getOperand(i).isUndef()) continue;
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
}
return Result;
}
// Otherwise, expand into a number of unpckl*, start by extending each of
// our (non-undef) elements to the full vector width with the element in the
// bottom slot of the vector (which generates no code for SSE).
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
if (!Op.getOperand(i).isUndef())
Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
else
Ops[i] = DAG.getUNDEF(VT);
}
// Next, we iteratively mix elements, e.g. for v4f32:
// Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
// : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
// Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
// Generate scaled UNPCKL shuffle mask.
SmallVector<int, 16> Mask;
for(unsigned i = 0; i != Scale; ++i)
Mask.push_back(i);
for (unsigned i = 0; i != Scale; ++i)
Mask.push_back(NumElems+i);
Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
}
return Ops[0];
}
// 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
// TODO: Detect subvector broadcast here instead of DAG combine?
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
MVT ResVT = Op.getSimpleValueType();
assert((ResVT.is256BitVector() ||
ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
unsigned NumOperands = Op.getNumOperands();
unsigned NumZero = 0;
unsigned NumNonZero = 0;
unsigned NonZeros = 0;
for (unsigned i = 0; i != NumOperands; ++i) {
SDValue SubVec = Op.getOperand(i);
if (SubVec.isUndef())
continue;
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
++NumZero;
else {
assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
NonZeros |= 1 << i;
++NumNonZero;
}
}
// If we have more than 2 non-zeros, build each half separately.
if (NumNonZero > 2) {
MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
ResVT.getVectorNumElements()/2);
ArrayRef<SDUse> Ops = Op->ops();
SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
Ops.slice(0, NumOperands/2));
SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
Ops.slice(NumOperands/2));
return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
}
// Otherwise, build it up through insert_subvectors.
SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
: DAG.getUNDEF(ResVT);
MVT SubVT = Op.getOperand(0).getSimpleValueType();
unsigned NumSubElems = SubVT.getVectorNumElements();
for (unsigned i = 0; i != NumOperands; ++i) {
if ((NonZeros & (1 << i)) == 0)
continue;
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
Op.getOperand(i),
DAG.getIntPtrConstant(i * NumSubElems, dl));
}
return Vec;
}
// Return true if all the operands of the given CONCAT_VECTORS node are zeros
// except for the first one. (CONCAT_VECTORS Op, 0, 0,...,0)
static bool isExpandWithZeros(const SDValue &Op) {
assert(Op.getOpcode() == ISD::CONCAT_VECTORS &&
"Expand with zeros only possible in CONCAT_VECTORS nodes!");
for (unsigned i = 1; i < Op.getNumOperands(); i++)
if (!ISD::isBuildVectorAllZeros(Op.getOperand(i).getNode()))
return false;
return true;
}
// Returns the underlying node if the given node is a type promotion (by
// concatenating i1 zeros) of the result of a node that already zeros all the
// upper bits of a k-register; otherwise returns SDValue().
static SDValue isTypePromotionOfi1ZeroUpBits(SDValue Op) {
unsigned Opc = Op.getOpcode();
assert(Opc == ISD::CONCAT_VECTORS &&
Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
"Unexpected node to check for type promotion!");
// As long as we are concatenating zeros to the upper part of a previous node's
// result, climb up the tree until a node with a different opcode is
// encountered.
while (Opc == ISD::INSERT_SUBVECTOR || Opc == ISD::CONCAT_VECTORS) {
if (Opc == ISD::INSERT_SUBVECTOR) {
if (ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()) &&
Op.getConstantOperandVal(2) == 0)
Op = Op.getOperand(1);
else
return SDValue();
} else { // Opc == ISD::CONCAT_VECTORS
if (isExpandWithZeros(Op))
Op = Op.getOperand(0);
else
return SDValue();
}
Opc = Op.getOpcode();
}
// Check if the first inserted node zeroes the upper bits, or an 'and' result
// of a node that zeros the upper bits (its masked version).
if (isMaskedZeroUpperBitsvXi1(Op.getOpcode()) ||
(Op.getOpcode() == ISD::AND &&
(isMaskedZeroUpperBitsvXi1(Op.getOperand(0).getOpcode()) ||
isMaskedZeroUpperBitsvXi1(Op.getOperand(1).getOpcode())))) {
return Op;
}
return SDValue();
}
// TODO: Merge this with LowerAVXCONCAT_VECTORS?
static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG & DAG) {
SDLoc dl(Op);
MVT ResVT = Op.getSimpleValueType();
unsigned NumOperands = Op.getNumOperands();
assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
"Unexpected number of operands in CONCAT_VECTORS");
// If this node promotes - by concatenating zeroes - the type of the result
// of a node whose instruction already zeroes all upper (irrelevant) bits of
// the output register, mark it as legal and catch the pattern in instruction
// selection to avoid emitting extra instructions (for zeroing upper bits).
if (SDValue Promoted = isTypePromotionOfi1ZeroUpBits(Op))
return widenSubVector(ResVT, Promoted, true, Subtarget, DAG, dl);
unsigned NumZero = 0;
unsigned NumNonZero = 0;
uint64_t NonZeros = 0;
for (unsigned i = 0; i != NumOperands; ++i) {
SDValue SubVec = Op.getOperand(i);
if (SubVec.isUndef())
continue;
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
++NumZero;
else {
assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
NonZeros |= (uint64_t)1 << i;
++NumNonZero;
}
}
// If there are zero or one non-zeros we can handle this very simply.
if (NumNonZero <= 1) {
SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
: DAG.getUNDEF(ResVT);
if (!NumNonZero)
return Vec;
unsigned Idx = countTrailingZeros(NonZeros);
SDValue SubVec = Op.getOperand(Idx);
unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
}
if (NumOperands > 2) {
MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
ResVT.getVectorNumElements()/2);
ArrayRef<SDUse> Ops = Op->ops();
SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
Ops.slice(0, NumOperands/2));
SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
Ops.slice(NumOperands/2));
return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
}
assert(NumNonZero == 2 && "Simple cases not handled?");
if (ResVT.getVectorNumElements() >= 16)
return Op; // The operation is legal with KUNPCK
SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
DAG.getUNDEF(ResVT), Op.getOperand(0),
DAG.getIntPtrConstant(0, dl));
unsigned NumElems = ResVT.getVectorNumElements();
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
DAG.getIntPtrConstant(NumElems/2, dl));
}
static SDValue LowerCONCAT_VECTORS(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (VT.getVectorElementType() == MVT::i1)
return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
(VT.is512BitVector() && (Op.getNumOperands() == 2 ||
Op.getNumOperands() == 4)));
// AVX can use the vinsertf128 instruction to create 256-bit vectors
// from two other 128-bit ones.
// 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
}
//===----------------------------------------------------------------------===//
// Vector shuffle lowering
//
// This is an experimental code path for lowering vector shuffles on x86. It is
// designed to handle arbitrary vector shuffles and blends, gracefully
// degrading performance as necessary. It works hard to recognize idiomatic
// shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// patterns.
//===----------------------------------------------------------------------===//
/// Tiny helper function to identify a no-op mask.
///
/// This is a somewhat boring predicate function. It checks whether the mask
/// array input, which is assumed to be a single-input shuffle mask of the kind
/// used by the X86 shuffle instructions (not a fully general
/// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
/// in-place shuffle are 'no-op's.
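///
/// For example (illustrative): the mask <0, 1, -1, 3> is a no-op (element 2 is
/// undef and the remaining elements stay in place), while <1, 0, 2, 3> is not.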
static bool isNoopShuffleMask(ArrayRef<int> Mask) {
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
assert(Mask[i] >= -1 && "Out of bound mask element!");
if (Mask[i] >= 0 && Mask[i] != i)
return false;
}
return true;
}
/// Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
///
/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
/// and we routinely test for these.
static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
int LaneSize = 128 / VT.getScalarSizeInBits();
int Size = Mask.size();
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
return true;
return false;
}
/// Test whether a shuffle mask is equivalent within each sub-lane.
///
/// This checks a shuffle mask to see if it is performing the same
/// lane-relative shuffle in each sub-lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
/// suitable for use with existing 128-bit shuffles as entries from the second
/// vector have been remapped to [LaneSize, 2*LaneSize).
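///
/// For example (illustrative): a two-input v8f32 shuffle with the mask
/// <0, 9, 2, 11, 4, 13, 6, 15> performs the same lane-relative shuffle in both
/// 128-bit lanes, so RepeatedMask becomes <0, 5, 2, 7>, with second-vector
/// entries remapped into [4, 8).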
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
RepeatedMask.assign(LaneSize, -1);
int Size = Mask.size();
for (int i = 0; i < Size; ++i) {
assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
if (Mask[i] < 0)
continue;
if ((Mask[i] % Size) / LaneSize != i / LaneSize)
// This entry crosses lanes, so there is no way to model this shuffle.
return false;
// Ok, handle the in-lane shuffles by detecting if and when they repeat.
// Adjust second vector indices to start at LaneSize instead of Size.
int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
: Mask[i] % LaneSize + LaneSize;
if (RepeatedMask[i % LaneSize] < 0)
// This is the first non-undef entry in this slot of a 128-bit lane.
RepeatedMask[i % LaneSize] = LocalM;
else if (RepeatedMask[i % LaneSize] != LocalM)
// Found a mismatch with the repeated mask.
return false;
}
return true;
}
/// Test whether a shuffle mask is equivalent within each 128-bit lane.
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
SmallVector<int, 32> RepeatedMask;
return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}
/// Test whether a shuffle mask is equivalent within each 256-bit lane.
static bool
is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
}
/// Test whether a target shuffle mask is equivalent within each sub-lane.
/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
RepeatedMask.assign(LaneSize, SM_SentinelUndef);
int Size = Mask.size();
for (int i = 0; i < Size; ++i) {
assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
if (Mask[i] == SM_SentinelUndef)
continue;
if (Mask[i] == SM_SentinelZero) {
if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
return false;
RepeatedMask[i % LaneSize] = SM_SentinelZero;
continue;
}
if ((Mask[i] % Size) / LaneSize != i / LaneSize)
// This entry crosses lanes, so there is no way to model this shuffle.
return false;
// Ok, handle the in-lane shuffles by detecting if and when they repeat.
// Adjust second vector indices to start at LaneSize instead of Size.
int LocalM =
Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
// This is the first non-undef entry in this slot of a 128-bit lane.
RepeatedMask[i % LaneSize] = LocalM;
else if (RepeatedMask[i % LaneSize] != LocalM)
// Found a mismatch with the repeated mask.
return false;
}
return true;
}
/// Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
/// if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
///
/// It returns true if the mask is exactly as wide as the argument list, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the argument.
static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
ArrayRef<int> ExpectedMask) {
if (Mask.size() != ExpectedMask.size())
return false;
int Size = Mask.size();
// If the values are build vectors, we can look through them to find
// equivalent inputs that make the shuffles equivalent.
auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
for (int i = 0; i < Size; ++i) {
assert(Mask[i] >= -1 && "Out of bound mask element!");
if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
if (!MaskBV || !ExpectedBV ||
MaskBV->getOperand(Mask[i] % Size) !=
ExpectedBV->getOperand(ExpectedMask[i] % Size))
return false;
}
}
return true;
}
/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
///
/// The masks must be exactly the same width.
///
/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
/// value in ExpectedMask is always accepted. Otherwise the indices must match.
///
/// SM_SentinelZero is accepted as a valid negative index but must match in both masks.
static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
ArrayRef<int> ExpectedMask) {
int Size = Mask.size();
if (Size != (int)ExpectedMask.size())
return false;
for (int i = 0; i < Size; ++i)
if (Mask[i] == SM_SentinelUndef)
continue;
else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
return false;
else if (Mask[i] != ExpectedMask[i])
return false;
return true;
}
// Merges a general DAG shuffle mask and zeroable bit mask into a target shuffle
// mask.
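//
// For example (illustrative): Mask = <0, 5, -1, 3> with only Zeroable bit 1 set
// produces the target mask <0, SM_SentinelZero, SM_SentinelUndef, 3>.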
static SmallVector<int, 64> createTargetShuffleMask(ArrayRef<int> Mask,
const APInt &Zeroable) {
int NumElts = Mask.size();
assert(NumElts == (int)Zeroable.getBitWidth() && "Mismatch mask sizes");
SmallVector<int, 64> TargetMask(NumElts, SM_SentinelUndef);
for (int i = 0; i != NumElts; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef)
continue;
assert(0 <= M && M < (2 * NumElts) && "Out of range shuffle index");
TargetMask[i] = (Zeroable[i] ? SM_SentinelZero : M);
}
return TargetMask;
}
// Attempt to create a shuffle mask from a VSELECT condition mask.
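//
// For example (illustrative): for a v4i32 VSELECT whose constant condition is
// <-1, 0, -1, 0>, the resulting shuffle mask is <0, 5, 2, 7> (true lanes take
// elements from the first operand, false/undef lanes from the second).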
static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
SDValue Cond) {
if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return false;
unsigned Size = Cond.getValueType().getVectorNumElements();
Mask.resize(Size, SM_SentinelUndef);
for (int i = 0; i != (int)Size; ++i) {
SDValue CondElt = Cond.getOperand(i);
Mask[i] = i;
// Arbitrarily choose from the 2nd operand if the select condition element
// is undef.
// TODO: Can we do better by matching patterns such as even/odd?
if (CondElt.isUndef() || isNullConstant(CondElt))
Mask[i] += Size;
}
return true;
}
// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
// instructions.
static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
if (VT != MVT::v8i32 && VT != MVT::v8f32)
return false;
SmallVector<int, 8> Unpcklwd;
createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
/* Unary = */ false);
SmallVector<int, 8> Unpckhwd;
createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
/* Unary = */ false);
bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
isTargetShuffleEquivalent(Mask, Unpckhwd));
return IsUnpackwdMask;
}
/// Get a 4-lane 8-bit shuffle immediate for a mask.
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
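///
/// For example (illustrative): the mask <3, 1, -1, 0> produces the immediate
/// 0b00100111 (0x27): destination element 0 takes source lane 3, element 1
/// takes lane 1, the undef element 2 defaults to its own lane (2), and
/// element 3 takes lane 0.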
static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
unsigned Imm = 0;
Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
return Imm;
}
static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
SelectionDAG &DAG) {
return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
}
/// Compute whether each element of a shuffle is zeroable.
///
/// A "zeroable" vector shuffle element is one which can be lowered to zero.
/// Either it is an undef element in the shuffle mask, the element of the input
/// referenced is undef, or the element of the input referenced is known to be
/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
/// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
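///
/// For example (illustrative): with a v4i32 mask <0, 1, 4, 5>, where V1 is an
/// arbitrary non-constant vector and V2 is an all-zeros build vector, elements
/// 2 and 3 are zeroable, so the returned APInt has bits 2 and 3 set.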
static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
SDValue V1, SDValue V2) {
APInt Zeroable(Mask.size(), 0);
V1 = peekThroughBitcasts(V1);
V2 = peekThroughBitcasts(V2);
bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
int VectorSizeInBits = V1.getValueSizeInBits();
int ScalarSizeInBits = VectorSizeInBits / Mask.size();
assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
int M = Mask[i];
// Handle the easy cases.
if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
Zeroable.setBit(i);
continue;
}
// Determine shuffle input and normalize the mask.
SDValue V = M < Size ? V1 : V2;
M %= Size;
// Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
if (V.getOpcode() != ISD::BUILD_VECTOR)
continue;
// If the BUILD_VECTOR has fewer elements than the mask, then the bitcasted
// portion of the (larger) source element must be UNDEF/ZERO.
if ((Size % V.getNumOperands()) == 0) {
int Scale = Size / V->getNumOperands();
SDValue Op = V.getOperand(M / Scale);
if (Op.isUndef() || X86::isZeroNode(Op))
Zeroable.setBit(i);
else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
APInt Val = Cst->getAPIntValue();
Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
Val = Val.getLoBits(ScalarSizeInBits);
if (Val == 0)
Zeroable.setBit(i);
} else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
APInt Val = Cst->getValueAPF().bitcastToAPInt();
Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
Val = Val.getLoBits(ScalarSizeInBits);
if (Val == 0)
Zeroable.setBit(i);
}
continue;
}
// If the BUILD_VECTOR has more elements than the mask, then all the (smaller)
// source elements must be UNDEF or ZERO.
if ((V.getNumOperands() % Size) == 0) {
int Scale = V->getNumOperands() / Size;
bool AllZeroable = true;
for (int j = 0; j < Scale; ++j) {
SDValue Op = V.getOperand((M * Scale) + j);
AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
}
if (AllZeroable)
Zeroable.setBit(i);
continue;
}
}
return Zeroable;
}
// The shuffle result is as follows:
// 0*a[0], 0*a[1], ..., 0*a[n], n >= 0, where the a[] elements appear in
// ascending order.
// Each element of Zeroable corresponds to a particular element of Mask, as
// described in computeZeroableShuffleElements.
//
// The function looks for a sub-mask whose nonzero elements are in increasing
// order. If such a sub-mask exists, the function returns true.
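//
// For example (illustrative): for a v4i32 shuffle with Mask = <0, 4, 1, 4>
// where elements 1 and 3 are zeroable, the nonzero mask elements <0, 1> are in
// increasing order, so the function returns true with IsZeroSideLeft = false.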
static bool isNonZeroElementsInOrder(const APInt &Zeroable,
ArrayRef<int> Mask, const EVT &VectorType,
bool &IsZeroSideLeft) {
int NextElement = -1;
// Check if the Mask's nonzero elements are in increasing order.
for (int i = 0, e = Mask.size(); i < e; i++) {
// Check that the mask's zero elements are built from only zeros.
assert(Mask[i] >= -1 && "Out of bound mask element!");
if (Mask[i] < 0)
return false;
if (Zeroable[i])
continue;
// Find the lowest nonzero element.
if (NextElement < 0) {
NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
IsZeroSideLeft = NextElement != 0;
}
// Exit if the mask's nonzero elements are not in increasing order.
if (NextElement != Mask[i])
return false;
NextElement++;
}
return true;
}
/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
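///
/// For example (illustrative): a unary v8i16 shuffle with the mask
/// <0, 0, 2, 2, 4, 4, 6, 6> becomes a PSHUFB with the byte mask
/// <0,1, 0,1, 4,5, 4,5, 8,9, 8,9, 12,13, 12,13>.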
static SDValue lowerVectorShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
int Size = Mask.size();
int LaneSize = 128 / VT.getScalarSizeInBits();
const int NumBytes = VT.getSizeInBits() / 8;
const int NumEltBytes = VT.getScalarSizeInBits() / 8;
assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
(Subtarget.hasAVX2() && VT.is256BitVector()) ||
(Subtarget.hasBWI() && VT.is512BitVector()));
SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
// Sign bit set in i8 mask means zero element.
SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
SDValue V;
for (int i = 0; i < NumBytes; ++i) {
int M = Mask[i / NumEltBytes];
if (M < 0) {
PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
continue;
}
if (Zeroable[i / NumEltBytes]) {
PSHUFBMask[i] = ZeroMask;
continue;
}
// We can only use a single input of V1 or V2.
SDValue SrcV = (M >= Size ? V2 : V1);
if (V && V != SrcV)
return SDValue();
V = SrcV;
M %= Size;
// PSHUFB can't cross lanes, ensure this doesn't happen.
if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
return SDValue();
M = M % LaneSize;
M = M * NumEltBytes + (i % NumEltBytes);
PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
}
assert(V && "Failed to find a source input");
MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
}
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
const X86Subtarget &Subtarget, SelectionDAG &DAG,
const SDLoc &dl);
// X86 has a dedicated shuffle that can be lowered to VEXPAND.
static SDValue lowerVectorShuffleToEXPAND(const SDLoc &DL, MVT VT,
const APInt &Zeroable,
ArrayRef<int> Mask, SDValue &V1,
SDValue &V2, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
bool IsLeftZeroSide = true;
if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
IsLeftZeroSide))
return SDValue();
unsigned VEXPANDMask = (~Zeroable).getZExtValue();
MVT IntegerType =
MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
unsigned NumElts = VT.getVectorNumElements();
assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
"Unexpected number of vector elements");
SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
Subtarget, DAG, DL);
SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
return DAG.getSelect(DL, VT, VMask,
DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector),
ZeroVector);
}
static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
unsigned &UnpackOpcode, bool IsUnary,
ArrayRef<int> TargetMask,
const SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
int NumElts = VT.getVectorNumElements();
bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
for (int i = 0; i != NumElts; i += 2) {
int M1 = TargetMask[i + 0];
int M2 = TargetMask[i + 1];
Undef1 &= (SM_SentinelUndef == M1);
Undef2 &= (SM_SentinelUndef == M2);
Zero1 &= isUndefOrZero(M1);
Zero2 &= isUndefOrZero(M2);
}
assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
"Zeroable shuffle detected");
// Attempt to match the target mask against the unpack lo/hi mask patterns.
SmallVector<int, 64> Unpckl, Unpckh;
createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
UnpackOpcode = X86ISD::UNPCKL;
V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
return true;
}
createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
UnpackOpcode = X86ISD::UNPCKH;
V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
return true;
}
// If a unary shuffle, attempt to match as an unpack lo/hi with zero.
if (IsUnary && (Zero1 || Zero2)) {
// Don't bother if we can blend instead.
if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
return false;
bool MatchLo = true, MatchHi = true;
for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
int M = TargetMask[i];
// Ignore if the input is known to be zero or the index is undef.
if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
(M == SM_SentinelUndef))
continue;
MatchLo &= (M == Unpckl[i]);
MatchHi &= (M == Unpckh[i]);
}
if (MatchLo || MatchHi) {
UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
return true;
}
}
// If a binary shuffle, commute and try again.
if (!IsUnary) {
ShuffleVectorSDNode::commuteMask(Unpckl);
if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
UnpackOpcode = X86ISD::UNPCKL;
std::swap(V1, V2);
return true;
}
ShuffleVectorSDNode::commuteMask(Unpckh);
if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
UnpackOpcode = X86ISD::UNPCKH;
std::swap(V1, V2);
return true;
}
}
return false;
}
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
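//
// For example (illustrative): the v4f32 mask <0, 4, 1, 5> matches
// UNPCKL(V1, V2) and <2, 6, 3, 7> matches UNPCKH(V1, V2).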
static SDValue lowerVectorShuffleWithUNPCK(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
SmallVector<int, 8> Unpckl;
createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
SmallVector<int, 8> Unpckh;
createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
// Commute and try again.
ShuffleVectorSDNode::commuteMask(Unpckl);
if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
ShuffleVectorSDNode::commuteMask(Unpckh);
if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
return SDValue();
}
static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
int Delta) {
int Size = (int)Mask.size();
int Split = Size / Delta;
int TruncatedVectorStart = SwappedOps ? Size : 0;
// Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
return false;
// The rest of the mask should not refer to the truncated vector's elements.
if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
TruncatedVectorStart + Size))
return false;
return true;
}
// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
//
// An example is the following:
//
// t0: ch = EntryToken
// t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
// t25: v4i32 = truncate t2
// t41: v8i16 = bitcast t25
// t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
// Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
// t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
// t18: v2i64 = bitcast t51
//
// Without avx512vl, this is lowered to:
//
// vpmovqd %zmm0, %ymm0
// vpshufb {{.*#+}} xmm0 =
// xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
//
// But when avx512vl is available, one can just use a single vpmovdw
// instruction.
static SDValue lowerVectorShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (VT != MVT::v16i8 && VT != MVT::v8i16)
return SDValue();
if (Mask.size() != VT.getVectorNumElements())
return SDValue();
bool SwappedOps = false;
if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
if (!ISD::isBuildVectorAllZeros(V1.getNode()))
return SDValue();
std::swap(V1, V2);
SwappedOps = true;
}
// Look for:
//
// bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
// bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
//
// and similar ones.
if (V1.getOpcode() != ISD::BITCAST)
return SDValue();
if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
return SDValue();
SDValue Src = V1.getOperand(0).getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
// The vptrunc** instructions truncating 128-bit and 256-bit vectors
// are only available with avx512vl.
if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
return SDValue();
// Down Convert Word to Byte is only available with avx512bw. The case with
// 256-bit output doesn't contain a shuffle and is therefore not handled here.
if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
!Subtarget.hasBWI())
return SDValue();
// The first half/quarter of the mask should refer to every second/fourth
// element of the vector truncated and bitcasted.
if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
return SDValue();
return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
}
// X86 has dedicated pack instructions that can handle specific truncation
// operations: PACKSS and PACKUS.
static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
SDValue &V2, unsigned &PackOpcode,
ArrayRef<int> TargetMask,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
unsigned NumElts = VT.getVectorNumElements();
unsigned BitSize = VT.getScalarSizeInBits();
MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
auto MatchPACK = [&](SDValue N1, SDValue N2) {
SDValue VV1 = DAG.getBitcast(PackVT, N1);
SDValue VV2 = DAG.getBitcast(PackVT, N2);
if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
(N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
V1 = VV1;
V2 = VV2;
SrcVT = PackVT;
PackOpcode = X86ISD::PACKUS;
return true;
}
}
if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
(N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
V1 = VV1;
V2 = VV2;
SrcVT = PackVT;
PackOpcode = X86ISD::PACKSS;
return true;
}
return false;
};
// Try binary shuffle.
SmallVector<int, 32> BinaryMask;
createPackShuffleMask(VT, BinaryMask, false);
if (isTargetShuffleEquivalent(TargetMask, BinaryMask))
if (MatchPACK(V1, V2))
return true;
// Try unary shuffle.
SmallVector<int, 32> UnaryMask;
createPackShuffleMask(VT, UnaryMask, true);
if (isTargetShuffleEquivalent(TargetMask, UnaryMask))
if (MatchPACK(V1, V1))
return true;
return false;
}
static SDValue lowerVectorShuffleWithPACK(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT PackVT;
unsigned PackOpcode;
if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
Subtarget))
return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
DAG.getBitcast(PackVT, V2));
return SDValue();
}
/// Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
SelectionDAG &DAG) {
assert(!VT.isFloatingPoint() && "Floating point types are not supported");
MVT EltVT = VT.getVectorElementType();
SDValue Zero = DAG.getConstant(0, DL, EltVT);
SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
SDValue V;
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Zeroable[i])
continue;
if (Mask[i] % Size != i)
return SDValue(); // Not a blend.
if (!V)
V = Mask[i] < Size ? V1 : V2;
else if (V != (Mask[i] < Size ? V1 : V2))
return SDValue(); // Can only let one input through the mask.
VMaskOps[i] = AllOnes;
}
if (!V)
return SDValue(); // No non-zeroable elements!
SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps);
return DAG.getNode(ISD::AND, DL, VT, V, VMask);
}
/// Try to emit a blend instruction for a shuffle using bit math.
///
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
static SDValue lowerVectorShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(VT.isInteger() && "Only supports integer vector types!");
MVT EltVT = VT.getVectorElementType();
SDValue Zero = DAG.getConstant(0, DL, EltVT);
SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
SmallVector<SDValue, 16> MaskOps;
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
return SDValue(); // Shuffled input!
MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
}
SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
return DAG.getNode(ISD::OR, DL, VT, V1, V2);
}
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const X86Subtarget &Subtarget,
SelectionDAG &DAG);
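// Match a shuffle mask as a blend of V1 and V2, returning the blend control
// bits in BlendMask (bit i set selects element i from V2). Zeroable sentinel
// entries can be satisfied by forcing V1 or V2 to zero, provided that input is
// already zero or undef.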
static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
MutableArrayRef<int> TargetMask,
bool &ForceV1Zero, bool &ForceV2Zero,
uint64_t &BlendMask) {
bool V1IsZeroOrUndef =
V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZeroOrUndef =
V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
BlendMask = 0;
ForceV1Zero = false, ForceV2Zero = false;
assert(TargetMask.size() <= 64 && "Shuffle mask too big for blend mask");
// Attempt to generate the binary blend mask. If an input is zero then
// we can use any lane.
// TODO: generalize the zero matching to any scalar like isShuffleEquivalent.
for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
int M = TargetMask[i];
if (M == SM_SentinelUndef)
continue;
if (M == i)
continue;
if (M == i + Size) {
BlendMask |= 1ull << i;
continue;
}
if (M == SM_SentinelZero) {
if (V1IsZeroOrUndef) {
ForceV1Zero = true;
TargetMask[i] = i;
continue;
}
if (V2IsZeroOrUndef) {
ForceV2Zero = true;
BlendMask |= 1ull << i;
TargetMask[i] = i + Size;
continue;
}
}
return false;
}
return true;
}
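// Widen a blend mask by repeating each mask bit Scale times, e.g. (for
// illustration) BlendMask = 0b0101 with Size = 4 and Scale = 2 becomes
// 0b00110011.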
static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
int Scale) {
uint64_t ScaledMask = 0;
for (int i = 0; i != Size; ++i)
if (BlendMask & (1ull << i))
ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
return ScaledMask;
}
/// Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Original,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SmallVector<int, 64> Mask = createTargetShuffleMask(Original, Zeroable);
uint64_t BlendMask = 0;
bool ForceV1Zero = false, ForceV2Zero = false;
if (!matchVectorShuffleAsBlend(V1, V2, Mask, ForceV1Zero, ForceV2Zero,
BlendMask))
return SDValue();
// Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
if (ForceV1Zero)
V1 = getZeroVector(VT, Subtarget, DAG, DL);
if (ForceV2Zero)
V2 = getZeroVector(VT, Subtarget, DAG, DL);
switch (VT.SimpleTy) {
case MVT::v2f64:
case MVT::v4f32:
case MVT::v4f64:
case MVT::v8f32:
return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8));
case MVT::v4i64:
case MVT::v8i32:
assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
LLVM_FALLTHROUGH;
case MVT::v2i64:
case MVT::v4i32:
// If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
// that instruction.
if (Subtarget.hasAVX2()) {
// Scale the blend by the number of 32-bit dwords per element.
int Scale = VT.getScalarSizeInBits() / 32;
BlendMask = scaleVectorShuffleBlendMask(BlendMask, Mask.size(), Scale);
MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
V1 = DAG.getBitcast(BlendVT, V1);
V2 = DAG.getBitcast(BlendVT, V2);
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8)));
}
LLVM_FALLTHROUGH;
case MVT::v8i16: {
// For integer shuffles we need to expand the mask and cast the inputs to
// v8i16s prior to blending.
int Scale = 8 / VT.getVectorNumElements();
BlendMask = scaleVectorShuffleBlendMask(BlendMask, Mask.size(), Scale);
V1 = DAG.getBitcast(MVT::v8i16, V1);
V2 = DAG.getBitcast(MVT::v8i16, V2);
return DAG.getBitcast(VT,
DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8)));
}
case MVT::v16i16: {
assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
SmallVector<int, 8> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
// We can lower these with PBLENDW which is mirrored across 128-bit lanes.
assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
BlendMask = 0;
for (int i = 0; i < 8; ++i)
if (RepeatedMask[i] >= 8)
BlendMask |= 1ull << i;
return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8));
}
// Use PBLENDW for lower/upper lanes and then blend lanes.
// TODO - we should allow 2 PBLENDW here and leave shuffle combine to
// merge to VSELECT where useful.
uint64_t LoMask = BlendMask & 0xFF;
uint64_t HiMask = (BlendMask >> 8) & 0xFF;
if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
DAG.getConstant(LoMask, DL, MVT::i8));
SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
DAG.getConstant(HiMask, DL, MVT::i8));
return DAG.getVectorShuffle(
MVT::v16i16, DL, Lo, Hi,
{0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
}
LLVM_FALLTHROUGH;
}
case MVT::v16i8:
case MVT::v32i8: {
assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
"256-bit byte-blends require AVX2 support!");
// Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
if (SDValue Masked =
lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, DAG))
return Masked;
if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
MVT IntegerType =
MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
}
// Scale the blend by the number of bytes per element.
int Scale = VT.getScalarSizeInBits() / 8;
// This form of blend is always done on bytes. Compute the byte vector
// type.
MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
// x86 allows load folding with blendvb from the 2nd source operand. But
// we are still using LLVM select here (see comment below), so that's V1.
// If V2 can be load-folded and V1 cannot be load-folded, then commute to
// allow that load-folding possibility.
if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
ShuffleVectorSDNode::commuteMask(Mask);
std::swap(V1, V2);
}
// Compute the VSELECT mask. Note that VSELECT is really confusing in the
// mix of LLVM's code generator and the x86 backend. We tell the code
// generator that boolean values in the elements of an x86 vector register
// are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
// mapping a select to operand #1, and 'false' mapping to operand #2. The
// reality in x86 is that vector masks (pre-AVX-512) use only the high bit
// of the element (the remaining are ignored) and 0 in that high bit would
// mean operand #1 while 1 in the high bit would mean operand #2. So while
// the LLVM model for boolean values in vector elements gets the relevant
// bit set, it is set backwards and over constrained relative to x86's
// actual model.
SmallVector<SDValue, 32> VSELECTMask;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
for (int j = 0; j < Scale; ++j)
VSELECTMask.push_back(
Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
: DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
MVT::i8));
V1 = DAG.getBitcast(BlendVT, V1);
V2 = DAG.getBitcast(BlendVT, V2);
return DAG.getBitcast(
VT,
DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
V1, V2));
}
case MVT::v16f32:
case MVT::v8f64:
case MVT::v8i64:
case MVT::v16i32:
case MVT::v32i16:
case MVT::v64i8: {
MVT IntegerType =
MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
}
default:
llvm_unreachable("Not a supported integer vector type!");
}
}
/// Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG,
bool ImmBlends = false) {
// We build up the blend mask while checking whether a blend is a viable way
// to reduce the shuffle.
SmallVector<int, 32> BlendMask(Mask.size(), -1);
SmallVector<int, 32> PermuteMask(Mask.size(), -1);
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Mask[i] < 0)
continue;
assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
if (BlendMask[Mask[i] % Size] < 0)
BlendMask[Mask[i] % Size] = Mask[i];
else if (BlendMask[Mask[i] % Size] != Mask[i])
return SDValue(); // Can't blend in the needed input!
PermuteMask[i] = Mask[i] % Size;
}
// If only immediate blends, then bail if the blend mask can't be widened to
// i16.
unsigned EltSize = VT.getScalarSizeInBits();
if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
return SDValue();
SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
}
/// Try to lower as an unpack of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can unpack elements from two inputs and
/// then reduce the shuffle to a single-input (wider) permutation.
static SDValue lowerVectorShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
int NumElts = Mask.size();
int NumLanes = VT.getSizeInBits() / 128;
int NumLaneElts = NumElts / NumLanes;
int NumHalfLaneElts = NumLaneElts / 2;
bool MatchLo = true, MatchHi = true;
SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
// Determine UNPCKL/UNPCKH type and operand order.
for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
int M = Mask[Lane + Elt];
if (M < 0)
continue;
SDValue &Op = Ops[Elt & 1];
if (M < NumElts && (Op.isUndef() || Op == V1))
Op = V1;
else if (NumElts <= M && (Op.isUndef() || Op == V2))
Op = V2;
else
return SDValue();
int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
if (!MatchLo && !MatchHi)
return SDValue();
}
}
assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
// Now check that each pair of elts come from the same unpack pair
// and set the permute mask based on each pair.
// TODO - Investigate cases where we permute individual elements.
SmallVector<int, 32> PermuteMask(NumElts, -1);
for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
int M0 = Mask[Lane + Elt + 0];
int M1 = Mask[Lane + Elt + 1];
if (0 <= M0 && 0 <= M1 &&
(M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
return SDValue();
if (0 <= M0)
PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
if (0 <= M1)
PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
}
}
unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
}
/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
/// permuting the elements of the result in place.
static SDValue lowerVectorShuffleAsByteRotateAndPermute(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
(VT.is256BitVector() && !Subtarget.hasAVX2()) ||
(VT.is512BitVector() && !Subtarget.hasBWI()))
return SDValue();
// We don't currently support lane crossing permutes.
if (is128BitLaneCrossingShuffleMask(VT, Mask))
return SDValue();
int Scale = VT.getScalarSizeInBits() / 8;
int NumLanes = VT.getSizeInBits() / 128;
int NumElts = VT.getVectorNumElements();
int NumEltsPerLane = NumElts / NumLanes;
// Determine range of mask elts.
bool Blend1 = true;
bool Blend2 = true;
std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
int M = Mask[Lane + Elt];
if (M < 0)
continue;
if (M < NumElts) {
Blend1 &= (M == (Lane + Elt));
assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
M = M % NumEltsPerLane;
Range1.first = std::min(Range1.first, M);
Range1.second = std::max(Range1.second, M);
} else {
M -= NumElts;
Blend2 &= (M == (Lane + Elt));
assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
M = M % NumEltsPerLane;
Range2.first = std::min(Range2.first, M);
Range2.second = std::max(Range2.second, M);
}
}
}
// Bail if we don't need both elements.
// TODO - it might be worth doing this for unary shuffles if the permute
// can be widened.
if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
!(0 <= Range2.first && Range2.second < NumEltsPerLane))
return SDValue();
if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
return SDValue();
// Rotate the 2 ops so we can access both ranges, then permute the result.
auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
SDValue Rotate = DAG.getBitcast(
VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
DAG.getBitcast(ByteVT, Lo),
DAG.getConstant(Scale * RotAmt, DL, MVT::i8)));
SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
int M = Mask[Lane + Elt];
if (M < 0)
continue;
if (M < NumElts)
PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
else
PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
}
}
return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
};
// Check if the ranges are small enough to rotate from either direction.
if (Range2.second < Range1.first)
return RotateAndPermute(V1, V2, Range1.first, 0);
if (Range1.second < Range2.first)
return RotateAndPermute(V2, V1, Range2.first, NumElts);
return SDValue();
}
/// Generic routine to decompose a shuffle and blend into independent
/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends.
static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
// Shuffle the input elements into the desired positions in V1 and V2 and
// blend them together.
SmallVector<int, 32> V1Mask(Mask.size(), -1);
SmallVector<int, 32> V2Mask(Mask.size(), -1);
SmallVector<int, 32> BlendMask(Mask.size(), -1);
for (int i = 0, Size = Mask.size(); i < Size; ++i)
if (Mask[i] >= 0 && Mask[i] < Size) {
V1Mask[i] = Mask[i];
BlendMask[i] = i;
} else if (Mask[i] >= Size) {
V2Mask[i] = Mask[i] - Size;
BlendMask[i] = i + Size;
}
// Try to lower with the simpler initial blend/unpack/rotate strategies unless
// one of the input shuffles would be a no-op. We prefer to shuffle inputs as
// the shuffle may be able to fold with a load or other benefit. However, when
// we'll have to do 2x as many shuffles in order to achieve this, a 2-input
// pre-shuffle first is a better strategy.
if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
// Only prefer immediate blends to unpack/rotate.
if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
DL, VT, V1, V2, Mask, DAG, true))
return BlendPerm;
if (SDValue UnpackPerm =
lowerVectorShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask, DAG))
return UnpackPerm;
if (SDValue RotatePerm = lowerVectorShuffleAsByteRotateAndPermute(
DL, VT, V1, V2, Mask, Subtarget, DAG))
return RotatePerm;
// Unpack/rotate failed - try again with variable blends.
if (SDValue BlendPerm =
lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
return BlendPerm;
}
V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
}
/// Try to lower a vector shuffle as a rotation.
///
/// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
static int matchVectorShuffleAsRotate(SDValue &V1, SDValue &V2,
ArrayRef<int> Mask) {
int NumElts = Mask.size();
// We need to detect various ways of spelling a rotation:
// [11, 12, 13, 14, 15, 0, 1, 2]
// [-1, 12, 13, 14, -1, -1, 1, -1]
// [-1, -1, -1, -1, -1, -1, 1, 2]
// [ 3, 4, 5, 6, 7, 8, 9, 10]
// [-1, 4, 5, 6, -1, -1, 9, -1]
// [-1, 4, 5, 6, -1, -1, -1, -1]
int Rotation = 0;
SDValue Lo, Hi;
for (int i = 0; i < NumElts; ++i) {
int M = Mask[i];
assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
"Unexpected mask index.");
if (M < 0)
continue;
// Determine where a rotated vector would have started.
int StartIdx = i - (M % NumElts);
if (StartIdx == 0)
// The identity rotation isn't interesting, stop.
return -1;
// If we found the tail of a vector, the rotation is the number of missing
// front elements. If we found the head of a vector, the rotation is the
// number of its head elements that appear.
int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
if (Rotation == 0)
Rotation = CandidateRotation;
else if (Rotation != CandidateRotation)
// The rotations don't match, so we can't match this mask.
return -1;
// Compute which value this mask is pointing at.
SDValue MaskV = M < NumElts ? V1 : V2;
// Compute which of the two target values this index should be assigned
// to. This reflects whether the high elements are remaining or the low
// elements are remaining.
SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
// Either set up this value if we've not encountered it before, or check
// that it remains consistent.
if (!TargetV)
TargetV = MaskV;
else if (TargetV != MaskV)
// This may be a rotation, but it pulls from the inputs in some
// unsupported interleaving.
return -1;
}
// Check that we successfully analyzed the mask, and normalize the results.
assert(Rotation != 0 && "Failed to locate a viable rotation!");
assert((Lo || Hi) && "Failed to find a rotated input vector!");
if (!Lo)
Lo = Hi;
else if (!Hi)
Hi = Lo;
V1 = Lo;
V2 = Hi;
return Rotation;
}
/// Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
/// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
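///
/// For example (illustrative): the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2]
/// above is a rotation by 3 elements; with 2 bytes per element this routine
/// returns a byte rotation of 6, i.e. PALIGNR with immediate 6.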
static int matchVectorShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
ArrayRef<int> Mask) {
// Don't accept any shuffles with zero elements.
if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
return -1;
// PALIGNR works on 128-bit lanes.
SmallVector<int, 16> RepeatedMask;
if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
return -1;
int Rotation = matchVectorShuffleAsRotate(V1, V2, RepeatedMask);
if (Rotation <= 0)
return -1;
// PALIGNR rotates bytes, so we need to scale the
// rotation based on how many bytes are in the vector lane.
int NumElts = RepeatedMask.size();
int Scale = 16 / NumElts;
return Rotation * Scale;
}
static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
SDValue Lo = V1, Hi = V2;
int ByteRotation = matchVectorShuffleAsByteRotate(VT, Lo, Hi, Mask);
if (ByteRotation <= 0)
return SDValue();
// Cast the inputs to i8 vector of correct length to match PALIGNR or
// PSLLDQ/PSRLDQ.
MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
Lo = DAG.getBitcast(ByteVT, Lo);
Hi = DAG.getBitcast(ByteVT, Hi);
// SSSE3 targets can use the palignr instruction.
if (Subtarget.hasSSSE3()) {
assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
"512-bit PALIGNR requires BWI instructions");
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
DAG.getConstant(ByteRotation, DL, MVT::i8)));
}
assert(VT.is128BitVector() &&
"Rotate-based lowering only supports 128-bit lowering!");
assert(Mask.size() <= 16 &&
"Can shuffle at most 16 bytes in a 128-bit vector!");
assert(ByteVT == MVT::v16i8 &&
"SSE2 rotate lowering only needed for v16i8!");
// Default SSE2 implementation
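// For example (illustrative): a byte rotation of 5 becomes
// (Lo << 11 bytes) | (Hi >> 5 bytes).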
int LoByteShift = 16 - ByteRotation;
int HiByteShift = ByteRotation;
SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
DAG.getConstant(LoByteShift, DL, MVT::i8));
SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
DAG.getConstant(HiByteShift, DL, MVT::i8));
return DAG.getBitcast(VT,
DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
}
/// Try to lower a vector shuffle as a dword/qword rotation.
///
/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
/// rotation of the concatenation of two vectors; this routine will
/// try to generically lower a vector shuffle through such a pattern.
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static SDValue lowerVectorShuffleAsRotate(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
"Only 32-bit and 64-bit elements are supported!");
// 128/256-bit vectors are only supported with VLX.
assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
&& "VLX required for 128/256-bit vectors");
SDValue Lo = V1, Hi = V2;
int Rotation = matchVectorShuffleAsRotate(Lo, Hi, Mask);
if (Rotation <= 0)
return SDValue();
return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
DAG.getConstant(Rotation, DL, MVT::i8));
}
/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word lane.
///
/// PSLL : (little-endian) left bit shift.
/// [ zz, 0, zz, 2 ]
/// [ -1, 4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [ 1, zz, 3, zz]
/// [ -1, -1, 7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz, 0, 1, 2, 3, 4, 5, 6]
/// [ zz, zz, -1, -1, 2, 3, 4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1, 1]
/// PSRLDQ : (little-endian) right byte shift
/// [ 5, 6, 7, zz, zz, zz, zz, zz]
/// [ -1, 5, 6, 7, zz, zz, zz, zz]
/// [ 1, 2, -1, -1, -1, -1, zz, zz]
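///
/// For example (illustrative): a v4i32 mask [zz, 0, zz, 2] matches a 32-bit
/// left shift of each v2i64 element (VSHLI by 32), since the shifted-in low
/// dwords are zeroable.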
static int matchVectorShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
unsigned ScalarSizeInBits,
ArrayRef<int> Mask, int MaskOffset,
const APInt &Zeroable,
const X86Subtarget &Subtarget) {
int Size = Mask.size();
unsigned SizeInBits = Size * ScalarSizeInBits;
auto CheckZeros = [&](int Shift, int Scale, bool Left) {
for (int i = 0; i < Size; i += Scale)
for (int j = 0; j < Shift; ++j)
if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
return false;
return true;
};
auto MatchShift = [&](int Shift, int Scale, bool Left) {
for (int i = 0; i != Size; i += Scale) {
unsigned Pos = Left ? i + Shift : i;
unsigned Low = Left ? i : i + Shift;
unsigned Len = Scale - Shift;
if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
return -1;
}
int ShiftEltBits = ScalarSizeInBits * Scale;
bool ByteShift = ShiftEltBits > 64;
Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
: (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
// Normalize the scale for byte shifts to still produce an i64 element
// type.
Scale = ByteShift ? Scale / 2 : Scale;
// We need to round trip through the appropriate type for the shift.
MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
: MVT::getVectorVT(ShiftSVT, Size / Scale);
return (int)ShiftAmt;
};
// SSE/AVX supports logical shifts up to 64-bit integers - so we can just
// keep doubling the size of the integer elements up to that. We can
// then shift the elements of the integer vector by whole multiples of
// their width within the elements of the larger integer vector. Test each
// multiple to see if we can find a match with the moved element indices
// and that the shifted in elements are all zeroable.
unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
for (int Shift = 1; Shift != Scale; ++Shift)
for (bool Left : {true, false})
if (CheckZeros(Shift, Scale, Left)) {
int ShiftAmt = MatchShift(Shift, Scale, Left);
if (0 < ShiftAmt)
return ShiftAmt;
}
// no match
return -1;
}
static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
int Size = Mask.size();
assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
MVT ShiftVT;
SDValue V = V1;
unsigned Opcode;
// Try to match shuffle against V1 shift.
int ShiftAmt = matchVectorShuffleAsShift(
ShiftVT, Opcode, VT.getScalarSizeInBits(), Mask, 0, Zeroable, Subtarget);
// If V1 failed, try to match shuffle against V2 shift.
if (ShiftAmt < 0) {
ShiftAmt =
matchVectorShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
Mask, Size, Zeroable, Subtarget);
V = V2;
}
if (ShiftAmt < 0)
return SDValue();
assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
"Illegal integer vector type");
V = DAG.getBitcast(ShiftVT, V);
V = DAG.getNode(Opcode, DL, ShiftVT, V,
DAG.getConstant(ShiftAmt, DL, MVT::i8));
return DAG.getBitcast(VT, V);
}
// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
// Remainder of lower half result is zero and upper half is all undef.
static bool matchVectorShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
ArrayRef<int> Mask, uint64_t &BitLen,
uint64_t &BitIdx, const APInt &Zeroable) {
int Size = Mask.size();
int HalfSize = Size / 2;
assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
// Upper half must be undefined.
if (!isUndefInRange(Mask, HalfSize, HalfSize))
return false;
// Determine the extraction length from the part of the
// lower half that isn't zeroable.
int Len = HalfSize;
for (; Len > 0; --Len)
if (!Zeroable[Len - 1])
break;
assert(Len > 0 && "Zeroable shuffle mask");
// Attempt to match first Len sequential elements from the lower half.
SDValue Src;
int Idx = -1;
for (int i = 0; i != Len; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef)
continue;
SDValue &V = (M < Size ? V1 : V2);
M = M % Size;
// The extracted elements must start at a valid index and all mask
// elements must be in the lower half.
if (i > M || M >= HalfSize)
return false;
if (Idx < 0 || (Src == V && Idx == (M - i))) {
Src = V;
Idx = M - i;
continue;
}
return false;
}
if (!Src || Idx < 0)
return false;
assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
V1 = Src;
return true;
}
// INSERTQ: Extract lowest Len elements from lower half of second source and
// insert over first source, starting at Idx.
// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
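// For example, a v8i16 mask <0,8,9,3,u,u,u,u> inserts two elements of the
// second source over the first source at element 1, giving INSERTQI with
// BitLen = 2*16 = 32 and BitIdx = 1*16 = 16.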
static bool matchVectorShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
ArrayRef<int> Mask, uint64_t &BitLen,
uint64_t &BitIdx) {
int Size = Mask.size();
int HalfSize = Size / 2;
assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
// Upper half must be undefined.
if (!isUndefInRange(Mask, HalfSize, HalfSize))
return false;
for (int Idx = 0; Idx != HalfSize; ++Idx) {
SDValue Base;
// Attempt to match first source from mask before insertion point.
if (isUndefInRange(Mask, 0, Idx)) {
/* EMPTY */
} else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
Base = V1;
} else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
Base = V2;
} else {
continue;
}
// Extend the extraction length looking to match both the insertion of
// the second source and the remaining elements of the first.
for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
SDValue Insert;
int Len = Hi - Idx;
// Match insertion.
if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
Insert = V1;
} else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
Insert = V2;
} else {
continue;
}
// Match the remaining elements of the lower half.
if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
/* EMPTY */
} else if ((!Base || (Base == V1)) &&
isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
Base = V1;
} else if ((!Base || (Base == V2)) &&
isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
Size + Hi)) {
Base = V2;
} else {
continue;
}
BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
V1 = Base;
V2 = Insert;
return true;
}
}
return false;
}
/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
SelectionDAG &DAG) {
uint64_t BitLen, BitIdx;
if (matchVectorShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
DAG.getConstant(BitLen, DL, MVT::i8),
DAG.getConstant(BitIdx, DL, MVT::i8));
if (matchVectorShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
V2 ? V2 : DAG.getUNDEF(VT),
DAG.getConstant(BitLen, DL, MVT::i8),
DAG.getConstant(BitIdx, DL, MVT::i8));
return SDValue();
}
/// Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and can
/// start from an offset element index in the input; to avoid excess shuffling,
/// the offset must either be in the bottom lane or at the start of a higher
/// lane. All extended elements must come from the same lane.
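/// For example, with VT = v16i8, Scale = 4 and Offset = 0, the low four bytes
/// of the input are zero (or any) extended into the four i32 elements of the
/// result; with SSE4.1 this becomes a single PMOVZXBD. A non-zero Offset
/// starts the extension at element Offset of the input.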
static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(Scale > 1 && "Need a scale to extend.");
int EltBits = VT.getScalarSizeInBits();
int NumElements = VT.getVectorNumElements();
int NumEltsPerLane = 128 / EltBits;
int OffsetLane = Offset / NumEltsPerLane;
assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
"Only 8, 16, and 32 bit elements can be extended.");
assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
assert(0 <= Offset && "Extension offset must be non-negative.");
assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
"Extension offset must be in the first lane or start an upper lane.");
// Check that an index is in same lane as the base offset.
auto SafeOffset = [&](int Idx) {
return OffsetLane == (Idx / NumEltsPerLane);
};
// Shift along an input so that the offset base moves to the first element.
auto ShuffleOffset = [&](SDValue V) {
if (!Offset)
return V;
SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
for (int i = 0; i * Scale < NumElements; ++i) {
int SrcIdx = i + Offset;
ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
}
return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
};
// Found a valid zext mask! Try various lowering strategies based on the
// input type and available ISA extensions.
if (Subtarget.hasSSE41()) {
// Not worth offsetting 128-bit vectors if scale == 2; a pattern using
// PUNPCK will catch this in a later shuffle match.
if (Offset && Scale == 2 && VT.is128BitVector())
return SDValue();
MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
NumElements / Scale);
InputV = ShuffleOffset(InputV);
InputV = getExtendInVec(/*Signed*/false, DL, ExtVT, InputV, DAG);
return DAG.getBitcast(VT, InputV);
}
assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
// For any extends we can cheat for larger element sizes and use shuffle
// instructions that can fold with a load and/or copy.
if (AnyExt && EltBits == 32) {
int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
-1};
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
DAG.getBitcast(MVT::v4i32, InputV),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
if (AnyExt && EltBits == 16 && Scale > 2) {
int PSHUFDMask[4] = {Offset / 2, -1,
SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
DAG.getBitcast(MVT::v4i32, InputV),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
int PSHUFWMask[4] = {1, -1, -1, -1};
unsigned OddEvenOp = (Offset & 1 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW);
return DAG.getBitcast(
VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
DAG.getBitcast(MVT::v8i16, InputV),
getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
}
// The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
// to 64-bits.
if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
assert(VT.is128BitVector() && "Unexpected vector width!");
int LoIdx = Offset * EltBits;
SDValue Lo = DAG.getBitcast(
MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
DAG.getConstant(EltBits, DL, MVT::i8),
DAG.getConstant(LoIdx, DL, MVT::i8)));
if (isUndefInRange(Mask, NumElements / 2, NumElements / 2) ||
!SafeOffset(Offset + 1))
return DAG.getBitcast(VT, Lo);
int HiIdx = (Offset + 1) * EltBits;
SDValue Hi = DAG.getBitcast(
MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
DAG.getConstant(EltBits, DL, MVT::i8),
DAG.getConstant(HiIdx, DL, MVT::i8)));
return DAG.getBitcast(VT,
DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
}
// If this would require more than 2 unpack instructions to expand, use
// pshufb when available. We can only use more than 2 unpack instructions
// when zero extending i8 elements which also makes it easier to use pshufb.
if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
assert(NumElements == 16 && "Unexpected byte vector width!");
SDValue PSHUFBMask[16];
for (int i = 0; i < 16; ++i) {
int Idx = Offset + (i / Scale);
PSHUFBMask[i] = DAG.getConstant(
(i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
}
InputV = DAG.getBitcast(MVT::v16i8, InputV);
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
}
// If we are extending from an offset, ensure we start on a boundary that
// we can unpack from.
int AlignToUnpack = Offset % (NumElements / Scale);
if (AlignToUnpack) {
SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
for (int i = AlignToUnpack; i < NumElements; ++i)
ShMask[i - AlignToUnpack] = i;
InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
Offset -= AlignToUnpack;
}
// Otherwise emit a sequence of unpacks.
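// For example, zero extending i8 elements by Scale = 4 without SSE4.1 takes
// two rounds: unpack bytes with zero (PUNPCKLBW) to get i16 elements, then
// unpack those words with zero (PUNPCKLWD) to get i32 elements.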
do {
unsigned UnpackLoHi = X86ISD::UNPCKL;
if (Offset >= (NumElements / 2)) {
UnpackLoHi = X86ISD::UNPCKH;
Offset -= (NumElements / 2);
}
MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
: getZeroVector(InputVT, Subtarget, DAG, DL);
InputV = DAG.getBitcast(InputVT, InputV);
InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
Scale /= 2;
EltBits *= 2;
NumElements /= 2;
} while (Scale > 1);
return DAG.getBitcast(VT, InputV);
}
/// Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
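/// For example, a v8i16 mask <0,Z,1,Z,2,Z,3,Z> (Z = zeroable) is recognized
/// here as a zero extension of the low four i16 elements into i32 elements.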
static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
int Bits = VT.getSizeInBits();
int NumLanes = Bits / 128;
int NumElements = VT.getVectorNumElements();
int NumEltsPerLane = NumElements / NumLanes;
assert(VT.getScalarSizeInBits() <= 32 &&
"Exceeds 32-bit integer zero extension limit");
assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
// Define a helper function to check a particular ext-scale and lower to it if
// valid.
auto Lower = [&](int Scale) -> SDValue {
SDValue InputV;
bool AnyExt = true;
int Offset = 0;
int Matches = 0;
for (int i = 0; i < NumElements; ++i) {
int M = Mask[i];
if (M < 0)
continue; // Valid anywhere but doesn't tell us anything.
if (i % Scale != 0) {
// Each of the extended elements need to be zeroable.
if (!Zeroable[i])
return SDValue();
// We no longer are in the anyext case.
AnyExt = false;
continue;
}
// Each of the base elements needs to be consecutive indices into the
// same input vector.
SDValue V = M < NumElements ? V1 : V2;
M = M % NumElements;
if (!InputV) {
InputV = V;
Offset = M - (i / Scale);
} else if (InputV != V)
return SDValue(); // Flip-flopping inputs.
// Offset must start in the lowest 128-bit lane or at the start of an
// upper lane.
// FIXME: Is it ever worth allowing a negative base offset?
if (!((0 <= Offset && Offset < NumEltsPerLane) ||
(Offset % NumEltsPerLane) == 0))
return SDValue();
// If we are offsetting, all referenced entries must come from the same
// lane.
if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
return SDValue();
if ((M % NumElements) != (Offset + (i / Scale)))
return SDValue(); // Non-consecutive strided elements.
Matches++;
}
// If we fail to find an input, we have a zero-shuffle which should always
// have already been handled.
// FIXME: Maybe handle this here in case during blending we end up with one?
if (!InputV)
return SDValue();
// If we are offsetting, don't extend if we only match a single input; we
// can always do better by using a basic PSHUF or PUNPCK.
if (Offset != 0 && Matches < 2)
return SDValue();
return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
DL, VT, Scale, Offset, AnyExt, InputV, Mask, Subtarget, DAG);
};
// The widest scale possible for extending is to a 64-bit integer.
assert(Bits % 64 == 0 &&
"The number of bits in a vector must be divisible by 64 on x86!");
int NumExtElements = Bits / 64;
// Each iteration, try extending the elements half as much, but into twice as
// many elements.
for (; NumExtElements < NumElements; NumExtElements *= 2) {
assert(NumElements % NumExtElements == 0 &&
"The input vector size must be divisible by the extended size.");
if (SDValue V = Lower(NumElements / NumExtElements))
return V;
}
// General extends failed, but 128-bit vectors may be able to use MOVQ.
if (Bits != 128)
return SDValue();
// Returns one of the source operands if the shuffle can be reduced to a
// MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
auto CanZExtLowHalf = [&]() {
for (int i = NumElements / 2; i != NumElements; ++i)
if (!Zeroable[i])
return SDValue();
if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
return V1;
if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
return V2;
return SDValue();
};
if (SDValue V = CanZExtLowHalf()) {
V = DAG.getBitcast(MVT::v2i64, V);
V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
return DAG.getBitcast(VT, V);
}
// No viable ext lowering found.
return SDValue();
}
/// Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
SelectionDAG &DAG) {
MVT VT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
V = peekThroughBitcasts(V);
// If the bitcasts shift the element size, we can't extract an equivalent
// element from it.
MVT NewVT = V.getSimpleValueType();
if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
return SDValue();
if (V.getOpcode() == ISD::BUILD_VECTOR ||
(Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
// Ensure the scalar operand is the same size as the destination.
// FIXME: Add support for scalar truncation where possible.
SDValue S = V.getOperand(Idx);
if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
return DAG.getBitcast(EltVT, S);
}
return SDValue();
}
/// Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
V = peekThroughBitcasts(V);
return ISD::isNON_EXTLoad(V.getNode());
}
/// Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
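/// For example, a v4i32 mask <4,Z,Z,Z> (V2's low element into an otherwise
/// zero vector) lowers to X86ISD::VZEXT_MOVL of V2, which keeps the low
/// element and zeroes the rest.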
static SDValue lowerVectorShuffleAsElementInsertion(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT ExtVT = VT;
MVT EltVT = VT.getVectorElementType();
int V2Index =
find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
Mask.begin();
bool IsV1Zeroable = true;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
if (i != V2Index && !Zeroable[i]) {
IsV1Zeroable = false;
break;
}
// Check for a single input from a SCALAR_TO_VECTOR node.
// FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
// all the smarts here sunk into that routine. However, the current
// lowering of BUILD_VECTOR makes that nearly impossible until the old
// vector shuffle lowering is dead.
SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
DAG);
if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
// We need to zext the scalar if it is smaller than an i32.
V2S = DAG.getBitcast(EltVT, V2S);
if (EltVT == MVT::i8 || EltVT == MVT::i16) {
// Using zext to expand a narrow element won't work for non-zero
// insertions.
if (!IsV1Zeroable)
return SDValue();
// Zero-extend directly to i32.
ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
}
V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
} else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
EltVT == MVT::i16) {
// Either not inserting from the low element of the input or the input
// element size is too small to use VZEXT_MOVL to clear the high bits.
return SDValue();
}
if (!IsV1Zeroable) {
// If V1 can't be treated as a zero vector we have fewer options to lower
// this. We can't support integer vectors or non-zero targets cheaply, and
// the V1 elements can't be permuted in any way.
assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
if (!VT.isFloatingPoint() || V2Index != 0)
return SDValue();
SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
V1Mask[V2Index] = -1;
if (!isNoopShuffleMask(V1Mask))
return SDValue();
if (!VT.is128BitVector())
return SDValue();
// Otherwise, use MOVSD or MOVSS.
assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
"Only two types of floating point element types to handle!");
return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
ExtVT, V1, V2);
}
// This lowering only works for the low element with floating point vectors.
if (VT.isFloatingPoint() && V2Index != 0)
return SDValue();
V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
if (ExtVT != VT)
V2 = DAG.getBitcast(VT, V2);
if (V2Index != 0) {
// If we have 4 or fewer lanes we can cheaply shuffle the element into
// the desired position. Otherwise it is more efficient to do a vector
// shift left. We know that we can do a vector shift left because all
// the inputs are zero.
if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
V2Shuffle[V2Index] = 0;
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
} else {
V2 = DAG.getBitcast(MVT::v16i8, V2);
V2 = DAG.getNode(
X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
V2 = DAG.getBitcast(VT, V2);
}
}
return V2;
}
/// Try to lower a broadcast of a single, truncated integer element coming
/// from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
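/// For example, broadcasting i8 element 5 out of a v4i32 build_vector uses
/// source element 5/4 = 1, shifts it right by (5%4)*8 = 8 bits, truncates to
/// i8 and emits X86ISD::VBROADCAST of that scalar.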
static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT,
SDValue V0, int BroadcastIdx,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX2() &&
"We can only lower integer broadcasts with AVX2!");
EVT EltVT = VT.getVectorElementType();
EVT V0VT = V0.getValueType();
assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
EVT V0EltVT = V0VT.getVectorElementType();
if (!V0EltVT.isInteger())
return SDValue();
const unsigned EltSize = EltVT.getSizeInBits();
const unsigned V0EltSize = V0EltVT.getSizeInBits();
// This is only a truncation if the original element type is larger.
if (V0EltSize <= EltSize)
return SDValue();
assert(((V0EltSize % EltSize) == 0) &&
"Scalar type sizes must all be powers of 2 on x86!");
const unsigned V0Opc = V0.getOpcode();
const unsigned Scale = V0EltSize / EltSize;
const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
V0Opc != ISD::BUILD_VECTOR)
return SDValue();
SDValue Scalar = V0.getOperand(V0BroadcastIdx);
// If we're extracting non-least-significant bits, shift so we can truncate.
// Hopefully, we can fold away the trunc/srl/load into the broadcast.
// Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
// vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
if (const int OffsetIdx = BroadcastIdx % Scale)
Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
}
/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
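/// The mask must be a splat: every element is either undef or equal to the
/// broadcast index. Note that with AVX1, floating point broadcasts can only
/// be formed from a foldable load (VBROADCASTSS/SD gained register forms only
/// with AVX2), whereas MOVDDUP for v2f64 can broadcast from a register.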
static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
(Subtarget.hasAVX() && VT.isFloatingPoint()) ||
(Subtarget.hasAVX2() && VT.isInteger())))
return SDValue();
// With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
// we can only broadcast from a register with AVX2.
unsigned NumElts = Mask.size();
unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
? X86ISD::MOVDDUP
: X86ISD::VBROADCAST;
bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
// Check that the mask is a broadcast.
int BroadcastIdx = -1;
for (int i = 0; i != (int)NumElts; ++i) {
SmallVector<int, 8> BroadcastMask(NumElts, i);
if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
BroadcastIdx = i;
break;
}
}
if (BroadcastIdx < 0)
return SDValue();
assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
"a sorted mask where the broadcast "
"comes from V1.");
// Go up the chain of (vector) values to find a scalar load that we can
// combine with the broadcast.
SDValue V = V1;
for (;;) {
switch (V.getOpcode()) {
case ISD::BITCAST: {
// Peek through bitcasts as long as BroadcastIdx can be adjusted.
SDValue VSrc = V.getOperand(0);
unsigned NumEltBits = V.getScalarValueSizeInBits();
unsigned NumSrcBits = VSrc.getScalarValueSizeInBits();
if ((NumEltBits % NumSrcBits) == 0)
BroadcastIdx *= (NumEltBits / NumSrcBits);
else if ((NumSrcBits % NumEltBits) == 0 &&
(BroadcastIdx % (NumSrcBits / NumEltBits)) == 0)
BroadcastIdx /= (NumSrcBits / NumEltBits);
else
break;
V = VSrc;
continue;
}
case ISD::CONCAT_VECTORS: {
int OperandSize =
V.getOperand(0).getSimpleValueType().getVectorNumElements();
V = V.getOperand(BroadcastIdx / OperandSize);
BroadcastIdx %= OperandSize;
continue;
}
case ISD::INSERT_SUBVECTOR: {
SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
if (!ConstantIdx)
break;
int BeginIdx = (int)ConstantIdx->getZExtValue();
int EndIdx =
BeginIdx + (int)VInner.getSimpleValueType().getVectorNumElements();
if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
BroadcastIdx -= BeginIdx;
V = VInner;
} else {
V = VOuter;
}
continue;
}
}
break;
}
// Ensure the source vector and BroadcastIdx are for a suitable type.
if (VT.getScalarSizeInBits() != V.getScalarValueSizeInBits()) {
unsigned NumEltBits = VT.getScalarSizeInBits();
unsigned NumSrcBits = V.getScalarValueSizeInBits();
if ((NumSrcBits % NumEltBits) == 0)
BroadcastIdx *= (NumSrcBits / NumEltBits);
else if ((NumEltBits % NumSrcBits) == 0 &&
(BroadcastIdx % (NumEltBits / NumSrcBits)) == 0)
BroadcastIdx /= (NumEltBits / NumSrcBits);
else
return SDValue();
unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
MVT SrcVT = MVT::getVectorVT(VT.getScalarType(), NumSrcElts);
V = DAG.getBitcast(SrcVT, V);
}
// Check if this is a broadcast of a scalar. We special case lowering
// for scalars so that we can more effectively fold with loads.
// First, look through bitcast: if the original value has a larger element
// type than the shuffle, the broadcast element is in essence truncated.
// Make that explicit to ease folding.
if (V.getOpcode() == ISD::BITCAST && VT.isInteger())
if (SDValue TruncBroadcast = lowerVectorShuffleAsTruncBroadcast(
DL, VT, V.getOperand(0), BroadcastIdx, Subtarget, DAG))
return TruncBroadcast;
MVT BroadcastVT = VT;
// Peek through any bitcast (only useful for loads).
SDValue BC = peekThroughBitcasts(V);
// Also check the simpler case, where we can directly reuse the scalar.
if ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
(V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
V = V.getOperand(BroadcastIdx);
// If we can't broadcast from a register, check that the input is a load.
if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
return SDValue();
} else if (MayFoldLoad(BC) && !cast<LoadSDNode>(BC)->isVolatile()) {
// 32-bit targets need to load i64 as a f64 and then bitcast the result.
if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
? X86ISD::MOVDDUP
: Opcode;
}
// If we are broadcasting a load that is only used by the shuffle
// then we can reduce the vector load to the broadcasted scalar load.
LoadSDNode *Ld = cast<LoadSDNode>(BC);
SDValue BaseAddr = Ld->getOperand(1);
EVT SVT = BroadcastVT.getScalarType();
unsigned Offset = BroadcastIdx * SVT.getStoreSize();
SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
DAG.getMachineFunction().getMachineMemOperand(
Ld->getMemOperand(), Offset, SVT.getStoreSize()));
DAG.makeEquivalentMemoryOrdering(Ld, V);
} else if (!BroadcastFromReg) {
// We can't broadcast from a vector register.
return SDValue();
} else if (BroadcastIdx != 0) {
// We can only broadcast from the zero-element of a vector register,
// but it can be advantageous to broadcast from the zero-element of a
// subvector.
if (!VT.is256BitVector() && !VT.is512BitVector())
return SDValue();
// VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
if (VT == MVT::v4f64 || VT == MVT::v4i64)
return SDValue();
// Only broadcast the zero-element of a 128-bit subvector.
unsigned EltSize = VT.getScalarSizeInBits();
if (((BroadcastIdx * EltSize) % 128) != 0)
return SDValue();
// The shuffle input might have been a bitcast we looked through; look at
// the original input vector. Emit an EXTRACT_SUBVECTOR of that type; we'll
// later bitcast it to BroadcastVT.
assert(V.getScalarValueSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
"Unexpected vector element size");
assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
"Unexpected vector size");
V = extract128BitVector(V, BroadcastIdx, DAG, DL);
}
if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
DAG.getBitcast(MVT::f64, V));
// Bitcast back to the same scalar type as BroadcastVT.
MVT SrcVT = V.getSimpleValueType();
if (SrcVT.getScalarType() != BroadcastVT.getScalarType()) {
assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
"Unexpected vector element size");
if (SrcVT.isVector()) {
unsigned NumSrcElts = SrcVT.getVectorNumElements();
SrcVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
} else {
SrcVT = BroadcastVT.getScalarType();
}
V = DAG.getBitcast(SrcVT, V);
}
// 32-bit targets need to load i64 as a f64 and then bitcast the result.
if (!Subtarget.is64Bit() && SrcVT == MVT::i64) {
V = DAG.getBitcast(MVT::f64, V);
unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
}
// We only support broadcasting from 128-bit vectors to minimize the
// number of patterns we need to deal with in isel. So extract down to
// 128-bits, removing as many bitcasts as possible.
if (SrcVT.getSizeInBits() > 128) {
MVT ExtVT = MVT::getVectorVT(SrcVT.getScalarType(),
128 / SrcVT.getScalarSizeInBits());
V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
V = DAG.getBitcast(ExtVT, V);
}
return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
}
// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
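// The INSERTPS immediate encodes the source element in bits 7:6, the
// destination element in bits 5:4 and the zero mask in bits 3:0. For example,
// a v4f32 mask <0,5,2,3> with nothing zeroable inserts V2 element 1 into
// lane 1, giving an immediate of (1 << 6) | (1 << 4) = 0x50.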
static bool matchVectorShuffleAsInsertPS(SDValue &V1, SDValue &V2,
unsigned &InsertPSMask,
const APInt &Zeroable,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
// Attempt to match INSERTPS with one element from VA or VB being
// inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
// are updated.
auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
ArrayRef<int> CandidateMask) {
unsigned ZMask = 0;
int VADstIndex = -1;
int VBDstIndex = -1;
bool VAUsedInPlace = false;
for (int i = 0; i < 4; ++i) {
// Synthesize a zero mask from the zeroable elements (includes undefs).
if (Zeroable[i]) {
ZMask |= 1 << i;
continue;
}
// Flag if we use any VA inputs in place.
if (i == CandidateMask[i]) {
VAUsedInPlace = true;
continue;
}
// We can only insert a single non-zeroable element.
if (VADstIndex >= 0 || VBDstIndex >= 0)
return false;
if (CandidateMask[i] < 4) {
// VA input out of place for insertion.
VADstIndex = i;
} else {
// VB input for insertion.
VBDstIndex = i;
}
}
// Don't bother if we have no (non-zeroable) element for insertion.
if (VADstIndex < 0 && VBDstIndex < 0)
return false;
// Determine element insertion src/dst indices. The src index is from the
// start of the inserted vector, not the start of the concatenated vector.
unsigned VBSrcIndex = 0;
if (VADstIndex >= 0) {
// If we have a VA input out of place, we use VA as the V2 element
// insertion and don't use the original V2 at all.
VBSrcIndex = CandidateMask[VADstIndex];
VBDstIndex = VADstIndex;
VB = VA;
} else {
VBSrcIndex = CandidateMask[VBDstIndex] - 4;
}
// If no V1 inputs are used in place, then the result is created only from
// the zero mask and the V2 insertion - so remove V1 dependency.
if (!VAUsedInPlace)
VA = DAG.getUNDEF(MVT::v4f32);
// Update V1, V2 and InsertPSMask accordingly.
V1 = VA;
V2 = VB;
// Insert the V2 element into the desired position.
InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
return true;
};
if (matchAsInsertPS(V1, V2, Mask))
return true;
// Commute and try again.
SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
ShuffleVectorSDNode::commuteMask(CommutedMask);
if (matchAsInsertPS(V2, V1, CommutedMask))
return true;
return false;
}
static SDValue lowerVectorShuffleAsInsertPS(const SDLoc &DL, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
// Attempt to match the insertps pattern.
unsigned InsertPSMask;
if (!matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
return SDValue();
// Insert the V2 element into the desired position.
return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up with alternating between
/// the two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
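/// For example, a v4i32 mask <0,6,1,7> first permutes V2 so that its high
/// two elements move to the low half, then a single UNPCKLDQ interleaves the
/// two inputs to produce the final order.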
static SDValue lowerVectorShuffleAsPermuteAndUnpack(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(!VT.isFloatingPoint() &&
"This routine only supports integer vectors.");
assert(VT.is128BitVector() &&
"This routine only works on 128-bit vectors.");
assert(!V2.isUndef() &&
"This routine should only be used when blending two inputs.");
assert(Mask.size() >= 2 && "Single element masks are invalid.");
int Size = Mask.size();
int NumLoInputs =
count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
int NumHiInputs =
count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
bool UnpackLo = NumLoInputs >= NumHiInputs;
auto TryUnpack = [&](int ScalarSize, int Scale) {
SmallVector<int, 16> V1Mask((unsigned)Size, -1);
SmallVector<int, 16> V2Mask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
// Each element of the unpack contains Scale elements from this mask.
int UnpackIdx = i / Scale;
// We only handle the case where V1 feeds the first slots of the unpack.
// We rely on canonicalization to ensure this is the case.
if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
return SDValue();
// Setup the mask for this input. The indexing is tricky as we have to
// handle the unpack stride.
SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
Mask[i] % Size;
}
// If we will have to shuffle both inputs to use the unpack, check whether
// we can just unpack first and shuffle the result. If so, skip this unpack.
if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
!isNoopShuffleMask(V2Mask))
return SDValue();
// Shuffle the inputs into place.
V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
// Cast the inputs to the type we will use to unpack them.
MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
V1 = DAG.getBitcast(UnpackVT, V1);
V2 = DAG.getBitcast(UnpackVT, V2);
// Unpack the inputs and cast the result back to the desired type.
return DAG.getBitcast(
VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
UnpackVT, V1, V2));
};
// We try each unpack from the largest to the smallest to try and find one
// that fits this mask.
int OrigScalarSize = VT.getScalarSizeInBits();
for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
return Unpack;
// If we're shuffling with a zero vector then we're better off not doing
// VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
ISD::isBuildVectorAllZeros(V2.getNode()))
return SDValue();
// If none of the unpack-rooted lowerings worked (or were profitable) try an
// initial unpack.
if (NumLoInputs == 0 || NumHiInputs == 0) {
assert((NumLoInputs > 0 || NumHiInputs > 0) &&
"We have to have *some* inputs!");
int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
// FIXME: We could consider the total complexity of the permute of each
// possible unpacking. Or at the least we should consider how many
// half-crossings are created.
// FIXME: We could consider commuting the unpacks.
SmallVector<int, 32> PermMask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
PermMask[i] =
2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
}
return DAG.getVectorShuffle(
VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
DL, VT, V1, V2),
DAG.getUNDEF(VT), PermMask);
}
return SDValue();
}
/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
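/// For example, a v2f64 mask <1,2> cannot be handled by an insertion, blend
/// or unpack, so it typically falls through to SHUFPD with immediate 1,
/// selecting element 1 of V1 and element 0 of V2.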
static SDValue lowerV2F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
if (V2.isUndef()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. Simulate this by using the
// single input as both of the "inputs" to this instruction.
unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
if (Subtarget.hasAVX()) {
// If we have AVX, we can use VPERMILPD which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
return DAG.getNode(
X86ISD::SHUFP, DL, MVT::v2f64,
Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[0] < 2 && "We sort V1 to be the first input.");
assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
// When loading a scalar and then shuffling it into a vector we can often do
// the insertion cheaply.
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Insertion;
// Try inverting the insertion since for v2 masks it is easy to do and we
// can't reliably sort the mask one way or the other.
int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
return Insertion;
// Try to use one of the special instruction patterns to handle two common
// blend patterns if a zero-blend above didn't work.
if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
isShuffleEquivalent(V1, V2, Mask, {1, 3}))
if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
// We can either use a special instruction to load over the low double or
// to move just the low double.
return DAG.getNode(
X86ISD::MOVSD, DL, MVT::v2f64, V2,
DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
if (Subtarget.hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
return V;
unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
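/// For example, a single-input v2i64 mask <1,1> (when not matched as a
/// broadcast) is widened to the v4i32 mask <2,3,2,3> and lowered with PSHUFD,
/// avoiding any floating point domain crossing.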
static SDValue lowerV2I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
if (V2.isUndef()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. For everything from SSE2
// onward this has a single fast instruction with no scary immediates.
// We have to map the mask as it is actually a v4i32 shuffle instruction.
V1 = DAG.getBitcast(MVT::v4i32, V1);
int WidenedMask[4] = {
std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
return DAG.getBitcast(
MVT::v2i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
}
assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[0] < 2 && "We sort V1 to be the first input.");
assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// When loading a scalar and then shuffling it into a vector we can often do
// the insertion cheaply.
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Insertion;
// Try inverting the insertion since for v2 masks it is easy to do and we
// can't reliably sort the mask one way or the other.
int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
return Insertion;
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
return V;
// Try to use byte rotation instructions.
// It's more profitable for pre-SSSE3 to use shuffles/unpacks.
if (Subtarget.hasSSSE3()) {
if (Subtarget.hasVLX())
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v2i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Rotate;
}
// If we have direct support for blends, we should lower by decomposing into
// a permute. That will be faster than the domain cross.
if (IsBlendSupported)
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
Mask, Subtarget, DAG);
// We implement this with SHUFPD which is pretty lame because it will likely
// incur 2 cycles of stall for integer vectors on Nehalem and older chips.
// However, all the alternatives are still more cycles and newer chips don't
// have this problem. It would be really nice if x86 had better shuffles here.
V1 = DAG.getBitcast(MVT::v2f64, V1);
V2 = DAG.getBitcast(MVT::v2f64, V2);
return DAG.getBitcast(MVT::v2i64,
DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
/// Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
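/// For example, <0,1,4,5> needs only one SHUFPS (low half from V1, high half
/// from V2), whereas <0,4,1,5> does not because each half mixes both inputs.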
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
// This routine only handles 128-bit shufps.
assert(Mask.size() == 4 && "Unsupported mask size!");
assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
// To lower with a single SHUFPS we need to have the low half and high half
// each requiring a single input.
if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
return false;
if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
return false;
return true;
}
/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
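/// For example, a v4f32 mask <0,1,6,7> is handled by the NumV2Elements == 2
/// case below: the mask is rebased to <0,1,2,3> and emitted as a single
/// SHUFPS taking its low half from V1 and its high half from V2.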
static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
SDValue LowV = V1, HighV = V2;
int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 1) {
int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
// Compute the index adjacent to V2Index and in the same half by toggling
// the low bit.
int V2AdjIndex = V2Index ^ 1;
if (Mask[V2AdjIndex] < 0) {
// Handles all the cases where we have a single V2 element and an undef.
// This will only ever happen in the high lanes because we commute the
// vector otherwise.
if (V2Index < 2)
std::swap(LowV, HighV);
NewMask[V2Index] -= 4;
} else {
// Handle the case where the V2 element ends up adjacent to a V1 element.
// To make this work, blend them together as the first step.
int V1Index = V2AdjIndex;
int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
// Now proceed to reconstruct the final blend as we have the necessary
// high or low half formed.
if (V2Index < 2) {
LowV = V2;
HighV = V1;
} else {
HighV = V2;
}
NewMask[V1Index] = 2; // We put the V1 element in V2[2].
NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
}
} else if (NumV2Elements == 2) {
if (Mask[0] < 4 && Mask[1] < 4) {
// Handle the easy case where we have V1 in the low lanes and V2 in the
// high lanes.
NewMask[2] -= 4;
NewMask[3] -= 4;
} else if (Mask[2] < 4 && Mask[3] < 4) {
// We also handle the reversed case because this utility may get called
// when we detect a SHUFPS pattern but can't easily commute the shuffle to
// arrange things in the right direction.
NewMask[0] -= 4;
NewMask[1] -= 4;
HighV = V1;
LowV = V2;
} else {
// We have a mixture of V1 and V2 in both low and high lanes. Rather than
// trying to place elements directly, just blend them and set up the final
// shuffle to place them.
// The first two blend mask elements are for V1, the second two are for
// V2.
int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
Mask[2] < 4 ? Mask[2] : Mask[3],
(Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
(Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
// Now we do a normal shuffle of V1 by giving V1 as both operands to
// a blend.
LowV = HighV = V1;
NewMask[0] = Mask[0] < 4 ? 0 : 2;
NewMask[1] = Mask[0] < 4 ? 2 : 0;
NewMask[2] = Mask[2] < 4 ? 1 : 3;
NewMask[3] = Mask[2] < 4 ? 3 : 1;
}
}
return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}
/// Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
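/// For example, a single-input mask <3,2,1,0> becomes VPERMILPS (with AVX) or
/// SHUFPS with V1 used for both operands, using the usual 2-bits-per-element
/// immediate (0x1B here).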
static SDValue lowerV4F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v4f32, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Use even/odd duplicate instructions for masks that match their pattern.
if (Subtarget.hasSSE3()) {
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
}
if (Subtarget.hasAVX()) {
// If we have AVX, we can use VPERMILPS which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
// in SSE1 because otherwise they are widened to v2f64 and never get here.
if (!Subtarget.hasSSE2()) {
if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
}
// Otherwise, use a straight shuffle of a single input vector. We pass the
// input vector to both operands to simulate this with a SHUFPS.
return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// There are special ways we can lower some single-element blends. However, we
// have custom ways we can lower more complex single-element blends below that
// we defer to if both this and BLENDPS fail to match, so restrict this to
// the case where the V2 input targets element 0 of the mask -- that is the
// fast case here.
if (NumV2Elements == 1 && Mask[0] >= 4)
if (SDValue V = lowerVectorShuffleAsElementInsertion(
DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return V;
if (Subtarget.hasSSE41()) {
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use INSERTPS if we can complete the shuffle efficiently.
if (SDValue V =
lowerVectorShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
return V;
if (!isSingleSHUFPSMask(Mask))
if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
DL, MVT::v4f32, V1, V2, Mask, DAG))
return BlendPerm;
}
// Use low/high mov instructions. These are only valid in SSE1 because
// otherwise they are widened to v2f64 and never get here.
if (!Subtarget.hasSSE2()) {
if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
}
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
return V;
// Otherwise fall back to a SHUFPS lowering strategy.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
/// Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
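/// For example, a mask <0,Z,1,Z> (Z = zeroable) is caught by the zext lowering
/// below and becomes a v2i64 zero extension: PMOVZXDQ with SSE4.1, or a
/// PUNPCKLDQ against zero otherwise.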
static SDValue lowerV4I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. For everything from SSE2
// onward this has a single fast instruction with no scary immediates.
// We coerce the shuffle pattern to be compatible with UNPCK instructions
// but we aren't actually going to use the UNPCK instruction because doing
// so prevents folding a load into this instruction or making a copy.
const int UnpackLoMask[] = {0, 0, 1, 1};
const int UnpackHiMask[] = {2, 2, 3, 3};
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
Mask = UnpackLoMask;
else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
Mask = UnpackHiMask;
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// There are special ways we can lower some single-element blends.
if (NumV2Elements == 1)
if (SDValue V = lowerVectorShuffleAsElementInsertion(
DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return V;
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
Zeroable, DAG))
return Masked;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
return V;
// Try to use byte rotation instructions.
// It's more profitable for pre-SSSE3 to use shuffles/unpacks.
if (Subtarget.hasSSSE3()) {
if (Subtarget.hasVLX())
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v4i32, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
}
// Assume that a single SHUFPS is faster than an alternative sequence of
// multiple instructions (even if the CPU has a domain penalty).
// If some CPU is harmed by the domain switch, we can fix it in a later pass.
if (!isSingleSHUFPSMask(Mask)) {
// If we have direct support for blends, we should lower by decomposing into
// a permute. That will be faster than the domain cross.
if (IsBlendSupported)
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
Mask, Subtarget, DAG);
// Try to lower by permuting the inputs into an unpack instruction.
if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Unpack;
}
// We implement this with SHUFPS because it can blend from two vectors.
// Because we're going to eventually use SHUFPS, we use SHUFPS even to build
// up the inputs, bypassing domain shift penalties that we would incur if we
// directly used PSHUFD on Nehalem and older. For newer chips, this isn't
// relevant.
SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
return DAG.getBitcast(MVT::v4i32, ShufPS);
}
/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
///
/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
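/// For example, the v8 mask <3,2,1,0,4,5,6,7> is matched directly as a
/// PSHUFLW (the high four elements are already in place), while
/// <0,1,2,3,7,6,5,4> is matched as a PSHUFHW.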
static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
MutableArrayRef<int> LoMask = Mask.slice(0, 4);
MutableArrayRef<int> HiMask = Mask.slice(4, 4);
// Attempt to directly match PSHUFLW or PSHUFHW.
if (isUndefOrInRange(LoMask, 0, 4) &&
isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
}
if (isUndefOrInRange(HiMask, 4, 8) &&
isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
for (int i = 0; i != 4; ++i)
HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
}
SmallVector<int, 4> LoInputs;
copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
array_pod_sort(LoInputs.begin(), LoInputs.end());
LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
SmallVector<int, 4> HiInputs;
copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
array_pod_sort(HiInputs.begin(), HiInputs.end());
HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
int NumLToL =
std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
int NumHToL = LoInputs.size() - NumLToL;
int NumLToH =
std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
int NumHToH = HiInputs.size() - NumLToH;
MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
// If we are shuffling values from one half - check how many different DWORD
// pairs we need to create. If only 1 or 2 then we can perform this as a
// PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
V = DAG.getNode(ShufWOp, DL, VT, V,
getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
V = DAG.getBitcast(PSHUFDVT, V);
V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
return DAG.getBitcast(VT, V);
};
if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
int PSHUFDMask[4] = { -1, -1, -1, -1 };
SmallVector<std::pair<int, int>, 4> DWordPairs;
int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
// Collect the different DWORD pairs.
for (int DWord = 0; DWord != 4; ++DWord) {
int M0 = Mask[2 * DWord + 0];
int M1 = Mask[2 * DWord + 1];
M0 = (M0 >= 0 ? M0 % 4 : M0);
M1 = (M1 >= 0 ? M1 % 4 : M1);
if (M0 < 0 && M1 < 0)
continue;
bool Match = false;
for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
auto &DWordPair = DWordPairs[j];
if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
(M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
PSHUFDMask[DWord] = DOffset + j;
Match = true;
break;
}
}
if (!Match) {
PSHUFDMask[DWord] = DOffset + DWordPairs.size();
DWordPairs.push_back(std::make_pair(M0, M1));
}
}
if (DWordPairs.size() <= 2) {
DWordPairs.resize(2, std::make_pair(-1, -1));
int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
DWordPairs[1].first, DWordPairs[1].second};
if ((NumHToL + NumHToH) == 0)
return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
if ((NumLToL + NumLToH) == 0)
return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
}
}
// Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
// such inputs we can swap two of the dwords across the half mark and end up
// with at most 2 inputs to each half from each half. Once there, we can fall through
// to the generic code below. For example:
//
// Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
// Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
//
// However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
// and an existing 2-into-2 on the other half. In this case we may have to
// pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
// 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
// Fortunately, we don't have to handle anything but a 2-into-2 pattern
// because any other situation (including a 3-into-1 or 1-into-3 in the other
// half than the one we target for fixing) will be fixed when we re-enter this
// path. Any sequence of PSHUFD instructions that results will also be
// combined away into a single instruction. Here is an example of the tricky case:
//
// Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
// Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
//
// This now has a 1-into-3 in the high half! Instead, we do two shuffles:
//
// Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
// Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
//
// Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
// Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
//
// The result is fine to be handled by the generic logic.
auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
int AOffset, int BOffset) {
assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
"Must call this with A having 3 or 1 inputs from the A half.");
assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
"Must call this with B having 1 or 3 inputs from the B half.");
assert(AToAInputs.size() + BToAInputs.size() == 4 &&
"Must call this with either 3:1 or 1:3 inputs (summing to 4).");
bool ThreeAInputs = AToAInputs.size() == 3;
// Compute the index of dword with only one word among the three inputs in
// a half by taking the sum of the half with three inputs and subtracting
// the sum of the actual three inputs. The difference is the remaining
// slot.
int ADWord, BDWord;
int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
int TripleNonInputIdx =
TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
TripleDWord = TripleNonInputIdx / 2;
// We use xor with one to compute the adjacent DWord to whichever one the
// OneInput is in.
OneInputDWord = (OneInput / 2) ^ 1;
// Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
// and BToA inputs. If there is also such a problem with the BToB and AToB
// inputs, we don't try to fix it necessarily -- we'll recurse and see it in
// the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
// is essential that we don't *create* a 3<-1 as then we might oscillate.
if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
// Compute how many inputs will be flipped by swapping these DWords. We
// need to balance this to ensure we don't form a 3-1 shuffle in the
// other half.
int NumFlippedAToBInputs =
std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
int NumFlippedBToBInputs =
std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
if ((NumFlippedAToBInputs == 1 &&
(NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
(NumFlippedBToBInputs == 1 &&
(NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
// We choose whether to fix the A half or B half based on whether that
// half has zero flipped inputs. At zero, we may not be able to fix it
// with that half. We also bias towards fixing the B half because that
// will more commonly be the high half, and we have to bias one way.
auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
ArrayRef<int> Inputs) {
int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
// Determine whether the free index is in the flipped dword or the
// unflipped dword based on where the pinned index is. We use this bit
// in an xor to conditionally select the adjacent dword.
int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
if (IsFixIdxInput == IsFixFreeIdxInput)
FixFreeIdx += 1;
IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
assert(IsFixIdxInput != IsFixFreeIdxInput &&
"We need to be changing the number of flipped inputs!");
int PSHUFHalfMask[] = {0, 1, 2, 3};
std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
V = DAG.getNode(
FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
for (int &M : Mask)
if (M >= 0 && M == FixIdx)
M = FixFreeIdx;
else if (M >= 0 && M == FixFreeIdx)
M = FixIdx;
};
if (NumFlippedBToBInputs != 0) {
int BPinnedIdx =
BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
} else {
assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
}
}
}
int PSHUFDMask[] = {0, 1, 2, 3};
PSHUFDMask[ADWord] = BDWord;
PSHUFDMask[BDWord] = ADWord;
V = DAG.getBitcast(
VT,
DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
// Adjust the mask to match the new locations of A and B.
for (int &M : Mask)
if (M >= 0 && M/2 == ADWord)
M = 2 * BDWord + M % 2;
else if (M >= 0 && M/2 == BDWord)
M = 2 * ADWord + M % 2;
// Recurse back into this routine to re-compute state now that this isn't
// a 3 and 1 problem.
return lowerV8I16GeneralSingleInputVectorShuffle(DL, VT, V, Mask, Subtarget,
DAG);
};
if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
// At this point there are at most two inputs to the low and high halves from
// each half. That means the inputs can always be grouped into dwords and
// those dwords can then be moved to the correct half with a dword shuffle.
// We use at most one low and one high word shuffle to collect these paired
// inputs into dwords, and finally a dword shuffle to place them.
int PSHUFLMask[4] = {-1, -1, -1, -1};
int PSHUFHMask[4] = {-1, -1, -1, -1};
int PSHUFDMask[4] = {-1, -1, -1, -1};
// First fix the masks for all the inputs that are staying in their
// original halves. This will then dictate the targets of the cross-half
// shuffles.
auto fixInPlaceInputs =
[&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
MutableArrayRef<int> SourceHalfMask,
MutableArrayRef<int> HalfMask, int HalfOffset) {
if (InPlaceInputs.empty())
return;
if (InPlaceInputs.size() == 1) {
SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
InPlaceInputs[0] - HalfOffset;
PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
return;
}
if (IncomingInputs.empty()) {
// Just fix all of the in place inputs.
for (int Input : InPlaceInputs) {
SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
PSHUFDMask[Input / 2] = Input / 2;
}
return;
}
assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
InPlaceInputs[0] - HalfOffset;
// Put the second input next to the first so that they are packed into
// a dword. We find the adjacent index by toggling the low bit.
int AdjIndex = InPlaceInputs[0] ^ 1;
SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
};
fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
// Now gather the cross-half inputs and place them into a free dword of
// their target half.
// FIXME: This operation could almost certainly be simplified dramatically to
// look more like the 3-1 fixing operation.
auto moveInputsToRightHalf = [&PSHUFDMask](
MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
int DestOffset) {
auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
};
auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
int Word) {
int LowWord = Word & ~1;
int HighWord = Word | 1;
return isWordClobbered(SourceHalfMask, LowWord) ||
isWordClobbered(SourceHalfMask, HighWord);
};
if (IncomingInputs.empty())
return;
if (ExistingInputs.empty()) {
// Map any dwords with inputs from them into the right half.
for (int Input : IncomingInputs) {
// If the source half mask maps over the inputs, turn those into
// swaps and use the swapped lane.
if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
Input - SourceOffset;
// We have to swap the uses in our half mask in one sweep.
for (int &M : HalfMask)
if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
M = Input;
else if (M == Input)
M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
} else {
assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
Input - SourceOffset &&
"Previous placement doesn't match!");
}
// Note that this correctly re-maps both when we do a swap and when
// we observe the other side of the swap above. We rely on that to
// avoid swapping the members of the input list directly.
Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
}
// Map the input's dword into the correct half.
if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
else
assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
Input / 2 &&
"Previous placement doesn't match!");
}
// And just directly shift any other-half mask elements to be same-half
// as we will have mirrored the dword containing the element into the
// same position within that half.
for (int &M : HalfMask)
if (M >= SourceOffset && M < SourceOffset + 4) {
M = M - SourceOffset + DestOffset;
assert(M >= 0 && "This should never wrap below zero!");
}
return;
}
// Ensure we have the input in a viable dword of its current half. This
// is particularly tricky because the original position may be clobbered
// by inputs being moved and *staying* in that half.
if (IncomingInputs.size() == 1) {
if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
SourceOffset;
SourceHalfMask[InputFixed - SourceOffset] =
IncomingInputs[0] - SourceOffset;
std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
InputFixed);
IncomingInputs[0] = InputFixed;
}
} else if (IncomingInputs.size() == 2) {
if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
// We have two non-adjacent or clobbered inputs we need to extract from
// the source half. To do this, we need to map them into some adjacent
// dword slot in the source mask.
int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
IncomingInputs[1] - SourceOffset};
// If there is a free slot in the source half mask adjacent to one of
// the inputs, place the other input in it. We use (Index XOR 1) to
// compute an adjacent index.
if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
InputsFixed[1] = InputsFixed[0] ^ 1;
} else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
InputsFixed[0] = InputsFixed[1] ^ 1;
} else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
// The two inputs are in the same DWord but it is clobbered and the
// adjacent DWord isn't used at all. Move both inputs to the free
// slot.
SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
} else {
// The only way we hit this point is if there is no clobbering
// (because there are no off-half inputs to this half) and there is no
// free slot adjacent to one of the inputs. In this case, we have to
// swap an input with a non-input.
for (int i = 0; i < 4; ++i)
assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
"We can't handle any clobbers here!");
assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
"Cannot have adjacent inputs here!");
SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
// We also have to update the final source mask in this case because
// it may need to undo the above swap.
for (int &M : FinalSourceHalfMask)
if (M == (InputsFixed[0] ^ 1) + SourceOffset)
M = InputsFixed[1] + SourceOffset;
else if (M == InputsFixed[1] + SourceOffset)
M = (InputsFixed[0] ^ 1) + SourceOffset;
InputsFixed[1] = InputsFixed[0] ^ 1;
}
// Point everything at the fixed inputs.
for (int &M : HalfMask)
if (M == IncomingInputs[0])
M = InputsFixed[0] + SourceOffset;
else if (M == IncomingInputs[1])
M = InputsFixed[1] + SourceOffset;
IncomingInputs[0] = InputsFixed[0] + SourceOffset;
IncomingInputs[1] = InputsFixed[1] + SourceOffset;
}
} else {
llvm_unreachable("Unhandled input size!");
}
// Now hoist the DWord down to the right half.
int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
for (int &M : HalfMask)
for (int Input : IncomingInputs)
if (M == Input)
M = FreeDWord * 2 + Input % 2;
};
moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
/*SourceOffset*/ 4, /*DestOffset*/ 0);
moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
/*SourceOffset*/ 0, /*DestOffset*/ 4);
// Now enact all the shuffles we've computed to move the inputs into their
// target half.
if (!isNoopShuffleMask(PSHUFLMask))
V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
if (!isNoopShuffleMask(PSHUFHMask))
V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
if (!isNoopShuffleMask(PSHUFDMask))
V = DAG.getBitcast(
VT,
DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
// At this point, each half should contain all its inputs, and we can then
// just shuffle them into their final position.
assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
"Failed to lift all the high half inputs to the low mask!");
assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
"Failed to lift all the low half inputs to the high mask!");
// Do a half shuffle for the low mask.
if (!isNoopShuffleMask(LoMask))
V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
// Do a half shuffle with the high mask after shifting its values down.
for (int &M : HiMask)
if (M >= 0)
M -= 4;
if (!isNoopShuffleMask(HiMask))
V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
return V;
}
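// Worked example for the routine above (single input V = <a,b,c,d,e,f,g,h>,
// Mask = <4,5,0,1,2,3,6,7>): both half masks already pair their inputs, so
// the cross-half collection shuffles are no-ops, the dword shuffle becomes
// PSHUFD <0,2,1,3> (imm 0xD8), giving <a,b,e,f,c,d,g,h>, and the final low
// half shuffle is PSHUFLW <2,3,0,1> (imm 0x4E), giving <e,f,a,b,c,d,g,h> as
// required. The high half mask is the identity after shifting down, so no
// PSHUFHW is emitted.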
/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
/// blend if only one input is used.
static SDValue lowerVectorShuffleAsBlendOfPSHUFBs(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
"Lane crossing shuffle masks not supported");
int NumBytes = VT.getSizeInBits() / 8;
int Size = Mask.size();
int Scale = NumBytes / Size;
SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
V1InUse = false;
V2InUse = false;
for (int i = 0; i < NumBytes; ++i) {
int M = Mask[i / Scale];
if (M < 0)
continue;
const int ZeroMask = 0x80;
int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
if (Zeroable[i / Scale])
V1Idx = V2Idx = ZeroMask;
V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
V1InUse |= (ZeroMask != V1Idx);
V2InUse |= (ZeroMask != V2Idx);
}
MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
if (V1InUse)
V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
DAG.getBuildVector(ShufVT, DL, V1Mask));
if (V2InUse)
V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
DAG.getBuildVector(ShufVT, DL, V2Mask));
// If we need shuffled inputs from both, blend the two.
SDValue V;
if (V1InUse && V2InUse)
V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
else
V = V1InUse ? V1 : V2;
// Cast the result back to the correct type.
return DAG.getBitcast(VT, V);
}
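// Illustrative sketch, not used by the lowering itself: the ZeroMask value of
// 0x80 above relies on the PSHUFB byte semantics, where a set sign bit in a
// control byte forces the corresponding result byte to zero and the low four
// bits otherwise index into the 16-byte source lane. The hypothetical helper
// below models a single 128-bit lane.
static inline void pshufbLaneSketch(const unsigned char Src[16],
const unsigned char Ctl[16],
unsigned char Dst[16]) {
for (int i = 0; i != 16; ++i)
Dst[i] = (Ctl[i] & 0x80) ? 0 : Src[Ctl[i] & 0x0F];
}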
/// Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
static SDValue lowerV8I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
if (NumV2Inputs == 0) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2,
DAG, Subtarget))
return V;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i16, V1, V1,
Mask, Subtarget, DAG))
return Rotate;
// Make a copy of the mask so it can be modified.
SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1,
MutableMask, Subtarget,
DAG);
}
assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
"All single-input shuffles should be canonicalized to be V1-input "
"shuffles.");
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// See if we can use SSE4A Extraction / Insertion.
if (Subtarget.hasSSE4A())
if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, DAG))
return V;
// There are special ways we can lower some single-element blends.
if (NumV2Inputs == 1)
if (SDValue V = lowerVectorShuffleAsElementInsertion(
DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
return V;
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, DAG))
return Masked;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
if (SDValue BitBlend =
lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
return BitBlend;
// Try to lower by permuting the inputs into an unpack instruction.
if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return Unpack;
// If we can't directly blend but can use PSHUFB, that will be better as it
// can both shuffle and set up the inefficient blend.
if (!IsBlendSupported && Subtarget.hasSSSE3()) {
bool V1InUse, V2InUse;
return lowerVectorShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, DAG, V1InUse, V2InUse);
}
// We can always bit-blend if we have to so the fallback strategy is to
// decompose into single-input permutes and blends.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
Mask, Subtarget, DAG);
}
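// Note on the SSE4.1 blend path above: a v8i16 mask that is a pure word
// blend, e.g. <0,9,2,11,4,13,6,15>, can be expressed as a single PBLENDW
// whose immediate has bit i set when word i comes from V2; for that mask the
// immediate is 0b10101010 == 0xAA. This is one of the shapes
// lowerVectorShuffleAsBlend can match before the costlier fallbacks are tried.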
/// Check whether a compaction lowering can be done by dropping even
/// elements and compute how many times even elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
/// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
/// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
/// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
/// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
/// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
/// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
///
/// \returns N above, or the number of times even elements must be dropped if
/// there is such a number. Otherwise returns zero.
static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
bool IsSingleInput) {
// The modulus for the shuffle vector entries is based on whether this is
// a single input or not.
int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
"We should only be called with masks with a power-of-2 size!");
uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
// We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
// and 2^3 simultaneously. This is because we may have ambiguity with
// partially undef inputs.
bool ViableForN[3] = {true, true, true};
for (int i = 0, e = Mask.size(); i < e; ++i) {
// Ignore undef lanes, we'll optimistically collapse them to the pattern we
// want.
if (Mask[i] < 0)
continue;
bool IsAnyViable = false;
for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
if (ViableForN[j]) {
uint64_t N = j + 1;
// The shuffle mask must be equal to (i * 2^N) % M.
if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
IsAnyViable = true;
else
ViableForN[j] = false;
}
// Early exit if we exhaust the possible powers of two.
if (!IsAnyViable)
break;
}
for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
if (ViableForN[j])
return j + 1;
// Return 0 as there is no viable power of two.
return 0;
}
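// Worked check of the formula above: for the two-input N = 2 example in the
// header comment, Mask.size() == 16 and IsSingleInput is false, so
// ShuffleModulus == 32 and ModMask == 31; requiring Mask[i] == (i << 2) & 31
// reproduces exactly 0, 4, 8, ..., 28, 0, 4, ..., 28, so ViableForN[1] stays
// true and the routine returns 2.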
static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
}
/// Generic lowering of v16i8 shuffles.
///
/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
/// detect any complexity reducing interleaving. If that doesn't help, it uses
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use a zext lowering.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// See if we can use SSE4A Extraction / Insertion.
if (Subtarget.hasSSE4A())
if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
Zeroable, DAG))
return V;
int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
// For single-input shuffles, there are some nicer lowering tricks we can use.
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
return V;
// Check whether we can widen this to an i16 shuffle by duplicating bytes.
// Notably, this handles splat and partial-splat shuffles more efficiently.
// However, it only makes sense if the pre-duplication shuffle simplifies
// things significantly. Currently, this means we need to be able to
// express the pre-duplication shuffle as an i16 shuffle.
//
// FIXME: We should check for other patterns which can be widened into an
// i16 shuffle as well.
auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
for (int i = 0; i < 16; i += 2)
if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
return false;
return true;
};
auto tryToWidenViaDuplication = [&]() -> SDValue {
if (!canWidenViaDuplication(Mask))
return SDValue();
SmallVector<int, 4> LoInputs;
copy_if(Mask, std::back_inserter(LoInputs),
[](int M) { return M >= 0 && M < 8; });
array_pod_sort(LoInputs.begin(), LoInputs.end());
LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
LoInputs.end());
SmallVector<int, 4> HiInputs;
copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
array_pod_sort(HiInputs.begin(), HiInputs.end());
HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
HiInputs.end());
bool TargetLo = LoInputs.size() >= HiInputs.size();
ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
SmallDenseMap<int, int, 8> LaneMap;
for (int I : InPlaceInputs) {
PreDupI16Shuffle[I/2] = I/2;
LaneMap[I] = I;
}
int j = TargetLo ? 0 : 4, je = j + 4;
for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
// Check if j is already a shuffle of this input. This happens when
// there are two adjacent bytes after we move the low one.
if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
// If we haven't yet mapped the input, search for a slot into which
// we can map it.
while (j < je && PreDupI16Shuffle[j] >= 0)
++j;
if (j == je)
// We can't place the inputs into a single half with a simple i16 shuffle, so bail.
return SDValue();
// Map this input with the i16 shuffle.
PreDupI16Shuffle[j] = MovingInputs[i] / 2;
}
// Update the lane map based on the mapping we ended up with.
LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
}
V1 = DAG.getBitcast(
MVT::v16i8,
DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
// Unpack the bytes to form the i16s that will be shuffled into place.
V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
MVT::v16i8, V1, V1);
int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
for (int i = 0; i < 16; ++i)
if (Mask[i] >= 0) {
int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
if (PostDupI16Shuffle[i / 2] < 0)
PostDupI16Shuffle[i / 2] = MappedMask;
else
assert(PostDupI16Shuffle[i / 2] == MappedMask &&
"Conflicting entries in the original shuffle!");
}
return DAG.getBitcast(
MVT::v16i8,
DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
};
if (SDValue V = tryToWidenViaDuplication())
return V;
}
if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
Zeroable, DAG))
return Masked;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
return V;
// Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
// with PSHUFB. It is important to do this before we attempt to generate any
// blends but after all of the single-input lowerings. If the single input
// lowerings can find an instruction sequence that is faster than a PSHUFB, we
// want to preserve that and we can DAG combine any longer sequences into
// a PSHUFB in the end. But once we start blending from multiple inputs,
// the complexity of DAG combining bad patterns back into PSHUFB is too high,
// and there are *very* few patterns that would actually be faster than the
// PSHUFB approach because of its ability to zero lanes.
//
// FIXME: The only exceptions to the above are blends which are exact
// interleavings with direct instructions supporting them. We currently don't
// handle those well here.
if (Subtarget.hasSSSE3()) {
bool V1InUse = false;
bool V2InUse = false;
SDValue PSHUFB = lowerVectorShuffleAsBlendOfPSHUFBs(
DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
// If both V1 and V2 are in use and we can use a direct blend or an unpack,
// do so. This avoids using them to handle blends-with-zero which is
// important as a single pshufb is significantly faster for that.
if (V1InUse && V2InUse) {
if (Subtarget.hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(
DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Blend;
// We can use an unpack to do the blending rather than an or in some
// cases. Even though the OR may be (very slightly) more efficient, we
// prefer this lowering because there are common cases where part of
// the complexity of the shuffles goes away when we do the final blend as
// an unpack.
// FIXME: It might be worth trying to detect if the unpack-feeding
// shuffles will both be pshufb, in which case we shouldn't bother with
// this.
if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return Unpack;
// If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
if (Subtarget.hasVBMI() && Subtarget.hasVLX())
return lowerVectorShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
// Use PALIGNR+Permute if possible - permute might become PSHUFB but the
// PALIGNR will be cheaper than the second PSHUFB+OR.
if (SDValue V = lowerVectorShuffleAsByteRotateAndPermute(
DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return V;
}
return PSHUFB;
}
// There are special ways we can lower some single-element blends.
if (NumV2Elements == 1)
if (SDValue V = lowerVectorShuffleAsElementInsertion(
DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return V;
if (SDValue BitBlend =
lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
return BitBlend;
// Check whether a compaction lowering can be done. This handles shuffles
// which take every Nth element for some even N. See the helper function for
// details.
//
// We special case these as they can be particularly efficiently handled with
// the PACKUSWB instruction on x86 and they show up in common patterns of
// rearranging bytes to truncate wide elements.
bool IsSingleInput = V2.isUndef();
if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
// NumEvenDrops is the power of two stride of the elements. Another way of
// thinking about it is that we need to drop the even elements this many
// times to get the original input.
// First we need to zero all the dropped bytes.
assert(NumEvenDrops <= 3 &&
"No support for dropping even elements more than 3 times.");
// We use the mask type to pick which bytes are preserved based on how many
// elements are dropped.
MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
SDValue ByteClearMask = DAG.getBitcast(
MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
if (!IsSingleInput)
V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
// Now pack things back together.
V1 = DAG.getBitcast(MVT::v8i16, V1);
V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
for (int i = 1; i < NumEvenDrops; ++i) {
Result = DAG.getBitcast(MVT::v8i16, Result);
Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
}
return Result;
}
// Handle multi-input cases by blending single-input shuffles.
if (NumV2Elements > 0)
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
Mask, Subtarget, DAG);
// The fallback path for single-input shuffles widens this into two v8i16
// vectors with unpacks, shuffles those, and then pulls them back together
// with a pack.
SDValue V = V1;
std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
for (int i = 0; i < 16; ++i)
if (Mask[i] >= 0)
(i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
SDValue VLoHalf, VHiHalf;
// Check if any of the odd lanes in the v16i8 are used. If not, we can mask
// them out and avoid using UNPCK{L,H} to extract the elements of V as
// i16s.
if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
// Use a mask to drop the high bytes.
VLoHalf = DAG.getBitcast(MVT::v8i16, V);
VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
DAG.getConstant(0x00FF, DL, MVT::v8i16));
// This will be a single vector shuffle instead of a blend so nuke VHiHalf.
VHiHalf = DAG.getUNDEF(MVT::v8i16);
// Squash the masks to point directly into VLoHalf.
for (int &M : LoBlendMask)
if (M >= 0)
M /= 2;
for (int &M : HiBlendMask)
if (M >= 0)
M /= 2;
} else {
// Otherwise just unpack the low half of V into VLoHalf and the high half into
// VHiHalf so that we can blend them as i16s.
SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
VLoHalf = DAG.getBitcast(
MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
VHiHalf = DAG.getBitcast(
MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
}
SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
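// Worked example of the compaction path above: when the earlier PACK and
// PSHUFB paths do not apply (e.g. no SSSE3 and the inputs' upper bytes are
// not known to be zero), the two-input byte-truncate mask <0,2,4,...,30>
// gives NumEvenDrops == 1, so both inputs are ANDed with the repeating
// <0xFF,0x00> byte pattern (the v8i16 constant 0xFF bitcast to v16i8) and a
// single PACKUS then packs the surviving low byte of each of the 16 words
// into the v16i8 result; the unsigned saturation never fires because the
// high bytes were cleared first.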
/// Dispatching routine to lower various 128-bit x86 vector shuffles.
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
switch (VT.SimpleTy) {
case MVT::v2i64:
return lowerV2I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v2f64:
return lowerV2F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v4i32:
return lowerV4I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v4f32:
return lowerV4F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v8i16:
return lowerV8I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v16i8:
return lowerV16I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Unimplemented!");
}
}
/// Generic routine to split vector shuffle into half-sized shuffles.
///
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
/// AVX vector shuffle types.
static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(VT.getSizeInBits() >= 256 &&
"Only for 256-bit or wider vector shuffles!");
assert(V1.getSimpleValueType() == VT && "Bad operand type!");
assert(V2.getSimpleValueType() == VT && "Bad operand type!");
ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
int NumElements = VT.getVectorNumElements();
int SplitNumElements = NumElements / 2;
MVT ScalarVT = VT.getVectorElementType();
MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
// Rather than splitting build-vectors, just build two narrower build
// vectors. This helps shuffling with splats and zeros.
auto SplitVector = [&](SDValue V) {
V = peekThroughBitcasts(V);
MVT OrigVT = V.getSimpleValueType();
int OrigNumElements = OrigVT.getVectorNumElements();
int OrigSplitNumElements = OrigNumElements / 2;
MVT OrigScalarVT = OrigVT.getVectorElementType();
MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
SDValue LoV, HiV;
auto *BV = dyn_cast<BuildVectorSDNode>(V);
if (!BV) {
LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
DAG.getIntPtrConstant(0, DL));
HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
DAG.getIntPtrConstant(OrigSplitNumElements, DL));
} else {
SmallVector<SDValue, 16> LoOps, HiOps;
for (int i = 0; i < OrigSplitNumElements; ++i) {
LoOps.push_back(BV->getOperand(i));
HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
}
LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
}
return std::make_pair(DAG.getBitcast(SplitVT, LoV),
DAG.getBitcast(SplitVT, HiV));
};
SDValue LoV1, HiV1, LoV2, HiV2;
std::tie(LoV1, HiV1) = SplitVector(V1);
std::tie(LoV2, HiV2) = SplitVector(V2);
// Now create two 4-way blends of these half-width vectors.
auto HalfBlend = [&](ArrayRef<int> HalfMask) {
bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
for (int i = 0; i < SplitNumElements; ++i) {
int M = HalfMask[i];
if (M >= NumElements) {
if (M >= NumElements + SplitNumElements)
UseHiV2 = true;
else
UseLoV2 = true;
V2BlendMask[i] = M - NumElements;
BlendMask[i] = SplitNumElements + i;
} else if (M >= 0) {
if (M >= SplitNumElements)
UseHiV1 = true;
else
UseLoV1 = true;
V1BlendMask[i] = M;
BlendMask[i] = i;
}
}
// Because the lowering happens after all combining takes place, we need to
// manually combine these blend masks as much as possible so that we create
// a minimal number of high-level vector shuffle nodes.
// First try just blending the halves of V1 or V2.
if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
return DAG.getUNDEF(SplitVT);
if (!UseLoV2 && !UseHiV2)
return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
if (!UseLoV1 && !UseHiV1)
return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
SDValue V1Blend, V2Blend;
if (UseLoV1 && UseHiV1) {
V1Blend =
DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
} else {
// We only use half of V1 so map the usage down into the final blend mask.
V1Blend = UseLoV1 ? LoV1 : HiV1;
for (int i = 0; i < SplitNumElements; ++i)
if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
}
if (UseLoV2 && UseHiV2) {
V2Blend =
DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
} else {
// We only use half of V2 so map the usage down into the final blend mask.
V2Blend = UseLoV2 ? LoV2 : HiV2;
for (int i = 0; i < SplitNumElements; ++i)
if (BlendMask[i] >= SplitNumElements)
BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
}
return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
};
SDValue Lo = HalfBlend(LoMask);
SDValue Hi = HalfBlend(HiMask);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
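// Worked example for the split above: a v8i32 shuffle with mask
// <0,8,1,9,6,14,7,15> splits into HalfBlend(<0,8,1,9>), which becomes
// shuffle(LoV1, LoV2, <0,4,1,5>), and HalfBlend(<6,14,7,15>), which only uses
// the high halves and becomes shuffle(HiV1, HiV2, <2,6,3,7>); the two results
// are then concatenated back into a v8i32.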
/// Either split a vector in halves or decompose the shuffles and the
/// blend.
///
/// This is provided as a good fallback for many lowerings of non-single-input
/// shuffles with more than one 128-bit lane. In those cases, we want to select
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(!V2.isUndef() && "This routine must not be used to lower single-input "
"shuffles as it could then recurse on itself.");
int Size = Mask.size();
// If this can be modeled as a broadcast of two elements followed by a blend,
// prefer that lowering. This is especially important because broadcasts can
// often fold with memory operands.
auto DoBothBroadcast = [&] {
int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
for (int M : Mask)
if (M >= Size) {
if (V2BroadcastIdx < 0)
V2BroadcastIdx = M - Size;
else if (M - Size != V2BroadcastIdx)
return false;
} else if (M >= 0) {
if (V1BroadcastIdx < 0)
V1BroadcastIdx = M;
else if (M != V1BroadcastIdx)
return false;
}
return true;
};
if (DoBothBroadcast())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
Subtarget, DAG);
// If the inputs all stem from a single 128-bit lane of each input, then we
// split them rather than blending because the split will decompose to
// unusually few instructions.
int LaneCount = VT.getSizeInBits() / 128;
int LaneSize = Size / LaneCount;
SmallBitVector LaneInputs[2];
LaneInputs[0].resize(LaneCount, false);
LaneInputs[1].resize(LaneCount, false);
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
// Otherwise, just fall back to decomposed shuffles and a blend. This requires
// that the decomposed single-input shuffles don't end up here.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
Subtarget, DAG);
}
/// Lower a vector shuffle crossing multiple 128-bit lanes as
/// a lane permutation followed by a per-lane permutation.
///
/// This is mainly for cases where we can have non-repeating permutes
/// in each lane.
///
/// TODO: This is very similar to lowerVectorShuffleByMerging128BitLanes,
/// we should investigate merging them.
static SDValue lowerVectorShuffleAsLanePermuteAndPermute(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG, const X86Subtarget &Subtarget) {
int NumElts = VT.getVectorNumElements();
int NumLanes = VT.getSizeInBits() / 128;
int NumEltsPerLane = NumElts / NumLanes;
SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
for (int i = 0; i != NumElts; ++i) {
int M = Mask[i];
if (M < 0)
continue;
// Ensure that each lane comes from a single source lane.
int SrcLane = M / NumEltsPerLane;
int DstLane = i / NumEltsPerLane;
if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
return SDValue();
SrcLaneMask[DstLane] = SrcLane;
LaneMask[i] = (SrcLane * NumEltsPerLane) + (i % NumEltsPerLane);
PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
}
// If we're only shuffling a single lowest lane and the rest are identity
// then don't bother.
// TODO - isShuffleMaskInputInPlace could be extended to something like this.
int NumIdentityLanes = 0;
bool OnlyShuffleLowestLane = true;
for (int i = 0; i != NumLanes; ++i) {
if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
i * NumEltsPerLane))
NumIdentityLanes++;
else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
OnlyShuffleLowestLane = false;
}
if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
return SDValue();
SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
}
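// Worked example for the routine above: the v8i32 single-input mask
// <6,7,4,5,2,3,0,1> crosses lanes, so it is decomposed into the lane permute
// LaneMask = <4,5,6,7,0,1,2,3> followed by the in-lane permute
// PermMask = <2,3,0,1,6,7,4,5>, which together reproduce the original mask.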
/// Lower a vector shuffle crossing multiple 128-bit lanes as
/// a permutation and blend of those lanes.
///
/// This essentially blends the out-of-lane inputs to each lane into the lane
/// from a permuted copy of the vector. This lowering strategy results in four
/// instructions in the worst case for a single-input cross lane shuffle which
/// is lower than any other fully general cross-lane shuffle strategy I'm aware
/// of. Special cases for each particular shuffle pattern should be handled
/// prior to trying this lowering.
static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// FIXME: This should probably be generalized for 512-bit vectors as well.
assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
int Size = Mask.size();
int LaneSize = Size / 2;
// If there are only inputs from one 128-bit lane, splitting will in fact be
// less expensive. The flags track whether the given lane contains an element
// that crosses to another lane.
if (!Subtarget.hasAVX2()) {
bool LaneCrossing[2] = {false, false};
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
if (!LaneCrossing[0] || !LaneCrossing[1])
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
} else {
bool LaneUsed[2] = {false, false};
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
LaneUsed[(Mask[i] / LaneSize)] = true;
if (!LaneUsed[0] || !LaneUsed[1])
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}
assert(V2.isUndef() &&
"This last part of this routine only works on single input shuffles");
SmallVector<int, 32> FlippedBlendMask(Size);
for (int i = 0; i < Size; ++i)
FlippedBlendMask[i] =
Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
? Mask[i]
: Mask[i] % LaneSize +
(i / LaneSize) * LaneSize + Size);
// Flip the vector, and blend the results which should now be in-lane.
MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
SDValue Flipped = DAG.getBitcast(PVT, V1);
Flipped = DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT),
{ 2, 3, 0, 1 });
Flipped = DAG.getBitcast(VT, Flipped);
return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
}
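// Worked example for the single-input tail above: for a v4f64 shuffle with
// mask <2,1,0,3>, both 128-bit lanes contain a crossing element, the flipped
// copy of V1 = <a,b,c,d> is <c,d,a,b>, and FlippedBlendMask becomes
// <4,1,6,3>, so the final in-lane blend of V1 with the flipped copy yields
// <c,b,a,d> as required.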
/// Handle lowering 2-lane 128-bit shuffles.
static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
if (Subtarget.hasAVX2() && V2.isUndef())
return SDValue();
SmallVector<int, 4> WidenedMask;
if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask))
return SDValue();
bool IsLowZero = (Zeroable & 0x3) == 0x3;
bool IsHighZero = (Zeroable & 0xc) == 0xc;
// Try to use an insert into a zero vector.
if (WidenedMask[0] == 0 && IsHighZero) {
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
getZeroVector(VT, Subtarget, DAG, DL), LoV,
DAG.getIntPtrConstant(0, DL));
}
// TODO: If minimizing size and one of the inputs is a zero vector and the
// zero vector has only one use, we could use a VPERM2X128 to save the
// instruction bytes needed to explicitly generate the zero vector.
// Blends are faster and handle all the non-lane-crossing cases.
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// If either input operand is a zero vector, use VPERM2X128 because its mask
// allows us to replace the zero input with an implicit zero.
if (!IsLowZero && !IsHighZero) {
// Check for patterns which can be matched with a single insert of a 128-bit
// subvector.
bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
// With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
// this will likely become vinsertf128 which can't fold a 256-bit memop.
if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
OnlyUsesV1 ? V1 : V2,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
DAG.getIntPtrConstant(2, DL));
}
}
// Try to use SHUF128 if possible.
if (Subtarget.hasVLX()) {
if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
((WidenedMask[1] % 2) << 1);
return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
DAG.getConstant(PermMask, DL, MVT::i8));
}
}
}
// Otherwise form a 128-bit permutation. After accounting for undefs,
// convert the 64-bit shuffle mask selection values into 128-bit
// selection bits by dividing the indexes by 2 and shifting into positions
// defined by a vperm2*128 instruction's immediate control byte.
// The immediate permute control byte looks like this:
// [1:0] - select 128 bits from sources for low half of destination
// [2] - ignore
// [3] - zero low half of destination
// [5:4] - select 128 bits from sources for high half of destination
// [6] - ignore
// [7] - zero high half of destination
assert((WidenedMask[0] >= 0 || IsLowZero) &&
(WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
unsigned PermMask = 0;
PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
// Check the immediate mask and replace unused sources with undef.
if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
V1 = DAG.getUNDEF(VT);
if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
V2 = DAG.getUNDEF(VT);
return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
DAG.getConstant(PermMask, DL, MVT::i8));
}
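// Worked example of the immediate encoding above: the widened mask <0, 2>
// (low 128 bits of V1 followed by the low 128 bits of V2) has no zeroable
// halves and encodes as PermMask == (0 << 0) | (2 << 4) == 0x20, the classic
// vperm2f128 $0x20 pattern; as noted above, this shape is usually matched
// earlier as a 128-bit subvector insert unless V1 is a load.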
/// Lower a vector shuffle by first fixing the 128-bit lanes and then
/// shuffling each lane.
///
/// This attempts to create a repeated lane shuffle where each lane uses one
/// or two of the lanes of the inputs. The lanes of the input vectors are
/// shuffled in one or two independent shuffles to get the lanes into the
/// position needed by the final shuffle.
///
/// FIXME: This should be generalized to 512-bit shuffles.
static SDValue lowerVectorShuffleByMerging128BitLanes(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(!V2.isUndef() && "This is only useful with multiple inputs.");
if (is128BitLaneRepeatedShuffleMask(VT, Mask))
return SDValue();
int Size = Mask.size();
int LaneSize = 128 / VT.getScalarSizeInBits();
int NumLanes = Size / LaneSize;
assert(NumLanes == 2 && "Only handles 256-bit shuffles.");
SmallVector<int, 16> RepeatMask(LaneSize, -1);
int LaneSrcs[2][2] = { { -1, -1 }, { -1 , -1 } };
// First pass will try to fill in the RepeatMask from lanes that need two
// sources.
for (int Lane = 0; Lane != NumLanes; ++Lane) {
int Srcs[2] = { -1, -1 };
SmallVector<int, 16> InLaneMask(LaneSize, -1);
for (int i = 0; i != LaneSize; ++i) {
int M = Mask[(Lane * LaneSize) + i];
if (M < 0)
continue;
// Determine which of the 4 possible input lanes (2 from each source)
// this element comes from. Assign that as one of the sources for this
// lane. We can assign up to 2 sources for this lane. If we run out of
// sources we can't do anything.
int LaneSrc = M / LaneSize;
int Src;
if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
Src = 0;
else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
Src = 1;
else
return SDValue();
Srcs[Src] = LaneSrc;
InLaneMask[i] = (M % LaneSize) + Src * Size;
}
// If this lane has two sources, see if it fits with the repeat mask so far.
if (Srcs[1] < 0)
continue;
LaneSrcs[Lane][0] = Srcs[0];
LaneSrcs[Lane][1] = Srcs[1];
auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
assert(M1.size() == M2.size() && "Unexpected mask size");
for (int i = 0, e = M1.size(); i != e; ++i)
if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
return false;
return true;
};
auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
for (int i = 0, e = MergedMask.size(); i != e; ++i) {
int M = Mask[i];
if (M < 0)
continue;
assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
"Unexpected mask element");
MergedMask[i] = M;
}
};
if (MatchMasks(InLaneMask, RepeatMask)) {
// Merge this lane mask into the final repeat mask.
MergeMasks(InLaneMask, RepeatMask);
continue;
}
// Didn't find a match. Swap the operands and try again.
std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
ShuffleVectorSDNode::commuteMask(InLaneMask);
if (MatchMasks(InLaneMask, RepeatMask)) {
// Merge this lane mask into the final repeat mask.
MergeMasks(InLaneMask, RepeatMask);
continue;
}
// Couldn't find a match with the operands in either order.
return SDValue();
}
// Now handle any lanes with only one source.
for (int Lane = 0; Lane != NumLanes; ++Lane) {
// If this lane has already been processed, skip it.
if (LaneSrcs[Lane][0] >= 0)
continue;
for (int i = 0; i != LaneSize; ++i) {
int M = Mask[(Lane * LaneSize) + i];
if (M < 0)
continue;
// If RepeatMask isn't defined yet we can define it ourselves.
if (RepeatMask[i] < 0)
RepeatMask[i] = M % LaneSize;
if (RepeatMask[i] < Size) {
if (RepeatMask[i] != M % LaneSize)
return SDValue();
LaneSrcs[Lane][0] = M / LaneSize;
} else {
if (RepeatMask[i] != ((M % LaneSize) + Size))
return SDValue();
LaneSrcs[Lane][1] = M / LaneSize;
}
}
if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
return SDValue();
}
SmallVector<int, 16> NewMask(Size, -1);
for (int Lane = 0; Lane != NumLanes; ++Lane) {
int Src = LaneSrcs[Lane][0];
for (int i = 0; i != LaneSize; ++i) {
int M = -1;
if (Src >= 0)
M = Src * LaneSize + i;
NewMask[Lane * LaneSize + i] = M;
}
}
SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
// Ensure we didn't get back the shuffle we started with.
// FIXME: This is a hack to make up for some splat handling code in
// getVectorShuffle.
if (isa<ShuffleVectorSDNode>(NewV1) &&
cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
return SDValue();
for (int Lane = 0; Lane != NumLanes; ++Lane) {
int Src = LaneSrcs[Lane][1];
for (int i = 0; i != LaneSize; ++i) {
int M = -1;
if (Src >= 0)
M = Src * LaneSize + i;
NewMask[Lane * LaneSize + i] = M;
}
}
SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
// Ensure we didn't get back the shuffle we started with.
// FIXME: This is a hack to make up for some splat handling code in
// getVectorShuffle.
if (isa<ShuffleVectorSDNode>(NewV2) &&
cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
return SDValue();
for (int i = 0; i != Size; ++i) {
NewMask[i] = RepeatMask[i % LaneSize];
if (NewMask[i] < 0)
continue;
NewMask[i] += (i / LaneSize) * LaneSize;
}
return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
}
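
// Illustrative standalone sketch (not used by the lowering code): the final
// step above replays the per-lane RepeatMask across every 128-bit lane,
// offsetting each entry into the lane it lands in. Shown for plain int
// arrays.
static void sketchReplicateRepeatMask(const int *RepeatMask, int LaneSize,
                                      int NumLanes, int *NewMask) {
  for (int i = 0; i != LaneSize * NumLanes; ++i) {
    int M = RepeatMask[i % LaneSize];
    // Keep undef (negative) entries; otherwise add the offset of the lane
    // that position i belongs to.
    NewMask[i] = (M < 0) ? M : M + (i / LaneSize) * LaneSize;
  }
  // e.g. RepeatMask = {1, 0}, LaneSize = 2, NumLanes = 2
  //      -> NewMask = {1, 0, 3, 2}.
}
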
/// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
/// This allows for fast cases such as subvector extraction/insertion
/// or shuffling smaller vector types which can lower more efficiently.
static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert((VT.is256BitVector() || VT.is512BitVector()) &&
"Expected 256-bit or 512-bit vector");
unsigned NumElts = VT.getVectorNumElements();
unsigned HalfNumElts = NumElts / 2;
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
bool UndefLower = isUndefInRange(Mask, 0, HalfNumElts);
bool UndefUpper = isUndefInRange(Mask, HalfNumElts, HalfNumElts);
if (!UndefLower && !UndefUpper)
return SDValue();
// Upper half is undef and lower half is whole upper subvector.
// e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
if (UndefUpper &&
isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
DAG.getIntPtrConstant(HalfNumElts, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
DAG.getIntPtrConstant(0, DL));
}
// Lower half is undef and upper half is whole lower subvector.
// e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
if (UndefLower &&
isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
DAG.getIntPtrConstant(HalfNumElts, DL));
}
// If the shuffle only uses two of the four halves of the input operands,
// then extract them and perform the 'half' shuffle at half width.
// e.g. vector_shuffle <X, X, X, X, u, u, u, u> or <X, X, u, u>
int HalfIdx1 = -1, HalfIdx2 = -1;
SmallVector<int, 8> HalfMask(HalfNumElts);
unsigned Offset = UndefLower ? HalfNumElts : 0;
for (unsigned i = 0; i != HalfNumElts; ++i) {
int M = Mask[i + Offset];
if (M < 0) {
HalfMask[i] = M;
continue;
}
// Determine which of the 4 half vectors this element is from.
// i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
int HalfIdx = M / HalfNumElts;
// Determine the element index into its half vector source.
int HalfElt = M % HalfNumElts;
// We can shuffle with up to 2 half vectors; set the new 'half'
// shuffle mask accordingly.
if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
HalfMask[i] = HalfElt;
HalfIdx1 = HalfIdx;
continue;
}
if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
HalfMask[i] = HalfElt + HalfNumElts;
HalfIdx2 = HalfIdx;
continue;
}
// Too many half vectors referenced.
return SDValue();
}
assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
// Only shuffle the halves of the inputs when useful.
int NumLowerHalves =
(HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
int NumUpperHalves =
(HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
// uuuuXXXX - don't extract uppers just to insert again.
if (UndefLower && NumUpperHalves != 0)
return SDValue();
// XXXXuuuu - don't extract both uppers, instead shuffle and then extract.
if (UndefUpper && NumUpperHalves == 2)
return SDValue();
// AVX2 - XXXXuuuu - always extract lowers.
if (Subtarget.hasAVX2() && !(UndefUpper && NumUpperHalves == 0)) {
// AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
if (VT == MVT::v4f64 || VT == MVT::v4i64)
return SDValue();
// AVX2 supports variable 32-bit element cross-lane shuffles.
if (VT == MVT::v8f32 || VT == MVT::v8i32) {
// XXXXuuuu - don't extract lowers and uppers.
if (UndefUpper && NumLowerHalves != 0 && NumUpperHalves != 0)
return SDValue();
}
}
// AVX512 - XXXXuuuu - always extract lowers.
if (VT.is512BitVector() && !(UndefUpper && NumUpperHalves == 0))
return SDValue();
auto GetHalfVector = [&](int HalfIdx) {
if (HalfIdx < 0)
return DAG.getUNDEF(HalfVT);
SDValue V = (HalfIdx < 2 ? V1 : V2);
HalfIdx = (HalfIdx % 2) * HalfNumElts;
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
DAG.getIntPtrConstant(HalfIdx, DL));
};
SDValue Half1 = GetHalfVector(HalfIdx1);
SDValue Half2 = GetHalfVector(HalfIdx2);
SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
DAG.getIntPtrConstant(Offset, DL));
}
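
// Illustrative standalone sketch (not used by the lowering code): the index
// decomposition performed in the loop above. A mask element is split into
// "which of the four half vectors" (0 = lower V1, 1 = upper V1, 2 = lower V2,
// 3 = upper V2) and "which element inside that half".
static void sketchSplitHalfIndex(int M, int HalfNumElts, int &HalfIdx,
                                 int &HalfElt) {
  HalfIdx = M / HalfNumElts; // which half vector the element lives in
  HalfElt = M % HalfNumElts; // its position inside that half
  // e.g. for a v8f32 shuffle (HalfNumElts = 4), mask element 11 is element 3
  // of half vector 2, i.e. the lower half of V2.
}
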
/// Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
/// This returns true if the elements from a particular input are already in the
/// slot required by the given mask and require no permutation.
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
int Size = Mask.size();
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
return false;
return true;
}
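
// Illustrative standalone sketch (not used by the lowering code): a concrete
// reading of the "in place" test above for a hypothetical v4 two-input
// shuffle. Input 0 owns mask values [0, Size) and input 1 owns
// [Size, 2 * Size); an input is in place when every element it contributes
// already sits at its own index.
static bool sketchInPlaceExample() {
  const int Size = 4;
  const int Mask[Size] = {0, 5, 2, 7};
  bool Input0InPlace = true, Input1InPlace = true;
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
    bool FromInput1 = Mask[i] >= Size;
    bool AtOwnIndex = (Mask[i] % Size) == i;
    (FromInput1 ? Input1InPlace : Input0InPlace) &= AtOwnIndex;
  }
  // Elements 0 and 2 of input 0 sit at indices 0 and 2, and elements 1 and 3
  // of input 1 sit at indices 1 and 3, so both inputs are "in place".
  return Input0InPlace && Input1InPlace;
}
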
/// Handle case where shuffle sources are coming from the same 128-bit lane and
/// every lane can be represented as the same repeating mask - allowing us to
/// shuffle the sources with the repeating shuffle and then permute the result
/// to the destination lanes.
static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
int NumElts = VT.getVectorNumElements();
int NumLanes = VT.getSizeInBits() / 128;
int NumLaneElts = NumElts / NumLanes;
// On AVX2 we may be able to just shuffle the lowest elements and then
// broadcast the result.
if (Subtarget.hasAVX2()) {
for (unsigned BroadcastSize : {16, 32, 64}) {
if (BroadcastSize <= VT.getScalarSizeInBits())
continue;
int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
// Attempt to match a repeating pattern every NumBroadcastElts,
// accounting for UNDEFs but only referencing the lowest 128-bit
// lane of the inputs.
auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
for (int i = 0; i != NumElts; i += NumBroadcastElts)
for (int j = 0; j != NumBroadcastElts; ++j) {
int M = Mask[i + j];
if (M < 0)
continue;
int &R = RepeatMask[j];
if (0 != ((M % NumElts) / NumLaneElts))
return false;
if (0 <= R && R != M)
return false;
R = M;
}
return true;
};
SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
if (!FindRepeatingBroadcastMask(RepeatMask))
continue;
// Shuffle the (lowest) repeated elements in place for broadcast.
SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
// Shuffle the actual broadcast.
SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
for (int i = 0; i != NumElts; i += NumBroadcastElts)
for (int j = 0; j != NumBroadcastElts; ++j)
BroadcastMask[i + j] = j;
return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
BroadcastMask);
}
}
// Bail if the shuffle mask doesn't cross 128-bit lanes.
if (!is128BitLaneCrossingShuffleMask(VT, Mask))
return SDValue();
// Bail if we already have a repeated lane shuffle mask.
SmallVector<int, 8> RepeatedShuffleMask;
if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
return SDValue();
// On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
// (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
int NumSubLanes = NumLanes * SubLaneScale;
int NumSubLaneElts = NumLaneElts / SubLaneScale;
// Check that all the sources are coming from the same lane and see if we can
// form a repeating shuffle mask (local to each sub-lane). At the same time,
// determine the source sub-lane for each destination sub-lane.
int TopSrcSubLane = -1;
SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
// Extract the sub-lane mask, check that it all comes from the same lane
// and normalize the mask entries to come from the first lane.
int SrcLane = -1;
SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
if (M < 0)
continue;
int Lane = (M % NumElts) / NumLaneElts;
if ((0 <= SrcLane) && (SrcLane != Lane))
return SDValue();
SrcLane = Lane;
int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
SubLaneMask[Elt] = LocalM;
}
// Whole sub-lane is UNDEF.
if (SrcLane < 0)
continue;
// Attempt to match against the candidate repeated sub-lane masks.
for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
for (int i = 0; i != NumSubLaneElts; ++i) {
if (M1[i] < 0 || M2[i] < 0)
continue;
if (M1[i] != M2[i])
return false;
}
return true;
};
auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
continue;
// Merge the sub-lane mask into the matching repeated sub-lane mask.
for (int i = 0; i != NumSubLaneElts; ++i) {
int M = SubLaneMask[i];
if (M < 0)
continue;
assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
"Unexpected mask element");
RepeatedSubLaneMask[i] = M;
}
// Track the topmost source sub-lane - by setting the remaining to UNDEF
// we can greatly simplify shuffle matching.
int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
break;
}
// Bail if we failed to find a matching repeated sub-lane mask.
if (Dst2SrcSubLanes[DstSubLane] < 0)
return SDValue();
}
assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
"Unexpected source lane");
// Create a repeating shuffle mask for the entire vector.
SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
int Lane = SubLane / SubLaneScale;
auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
int M = RepeatedSubLaneMask[Elt];
if (M < 0)
continue;
int Idx = (SubLane * NumSubLaneElts) + Elt;
RepeatedMask[Idx] = M + (Lane * NumLaneElts);
}
}
SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
// Shuffle each source sub-lane to its destination.
SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
for (int i = 0; i != NumElts; i += NumSubLaneElts) {
int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
if (SrcSubLane < 0)
continue;
for (int j = 0; j != NumSubLaneElts; ++j)
SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
}
return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
SubLaneMask);
}
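
// Illustrative standalone sketch (not used by the lowering code): the final
// permute above moves each source sub-lane to its destination sub-lane using
// the Dst2SrcSubLanes mapping.
static void sketchSubLanePermuteMask(const int *Dst2SrcSubLanes,
                                     int NumSubLanes, int NumSubLaneElts,
                                     int *SubLaneMask) {
  for (int Dst = 0; Dst != NumSubLanes; ++Dst) {
    int Src = Dst2SrcSubLanes[Dst];
    for (int j = 0; j != NumSubLaneElts; ++j)
      SubLaneMask[Dst * NumSubLaneElts + j] =
          (Src < 0) ? -1 : Src * NumSubLaneElts + j;
  }
  // e.g. Dst2SrcSubLanes = {1, 0} with NumSubLaneElts = 4
  //      -> SubLaneMask = {4, 5, 6, 7, 0, 1, 2, 3}.
}
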
static bool matchVectorShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
unsigned &ShuffleImm,
ArrayRef<int> Mask) {
int NumElts = VT.getVectorNumElements();
assert(VT.getScalarSizeInBits() == 64 &&
(NumElts == 2 || NumElts == 4 || NumElts == 8) &&
"Unexpected data type for VSHUFPD");
// Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ...
// Mask for V4F64: 0/1, 4/5, 2/3, 6/7, ...
ShuffleImm = 0;
bool ShufpdMask = true;
bool CommutableMask = true;
for (int i = 0; i < NumElts; ++i) {
if (Mask[i] == SM_SentinelUndef)
continue;
if (Mask[i] < 0)
return false;
int Val = (i & 6) + NumElts * (i & 1);
int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
if (Mask[i] < Val || Mask[i] > Val + 1)
ShufpdMask = false;
if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
CommutableMask = false;
ShuffleImm |= (Mask[i] % 2) << i;
}
if (ShufpdMask)
return true;
if (CommutableMask) {
std::swap(V1, V2);
return true;
}
return false;
}
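
// Illustrative standalone sketch (not used by the lowering code): the SHUFPD
// pattern accepted above (ignoring the commuted case). Even destination
// elements must come from V1 and odd ones from V2, each staying inside its
// own 128-bit pair, and each mask element contributes its low bit to the
// immediate.
static bool sketchMatchSHUFPD(const int *Mask, int NumElts,
                              unsigned &ShuffleImm) {
  ShuffleImm = 0;
  for (int i = 0; i < NumElts; ++i) {
    if (Mask[i] < 0)
      continue; // undef matches anything
    // Element i may only pick element 2*(i/2) or 2*(i/2)+1 of its source,
    // where the source is V1 for even i and V2 for odd i.
    int Val = (i & 6) + NumElts * (i & 1);
    if (Mask[i] < Val || Mask[i] > Val + 1)
      return false;
    ShuffleImm |= (unsigned)(Mask[i] % 2) << i;
  }
  // e.g. the v4f64 mask <1, 5, 2, 7> matches with immediate 0b1011 = 0xB.
  return true;
}
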
static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
"Unexpected data type for VSHUFPD");
unsigned Immediate = 0;
if (!matchVectorShuffleWithSHUFPD(VT, V1, V2, Immediate, Mask))
return SDValue();
return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
DAG.getConstant(Immediate, DL, MVT::i8));
}
/// Handle lowering of 4-lane 64-bit floating point shuffles.
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV4F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return V;
if (V2.isUndef()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Use low duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
// Non-half-crossing single input shuffles can be lowered with an
// interleaved permutation.
unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
DAG.getConstant(VPERMILPMask, DL, MVT::i8));
}
// With AVX2 we have direct support for this permutation.
if (Subtarget.hasAVX2())
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return V;
// Try to permute the lanes and then use a per-lane permute.
if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute(
DL, MVT::v4f64, V1, V2, Mask, DAG, Subtarget))
return V;
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
DAG, Subtarget);
}
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check if the blend happens to exactly fit that of SHUFPD.
if (SDValue Op =
lowerVectorShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
return Op;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return V;
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either input is already in place,
// we will be able to shuffle the other input across lanes in a single
// instruction, so skip this pattern.
if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
isShuffleMaskInputInPlace(1, Mask))))
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Result;
// If we have VLX support, we can use VEXPAND.
if (Subtarget.hasVLX())
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
// If we have AVX2 then we always want to lower with a blend because at v4 we
// can fully permute the elements.
if (Subtarget.hasAVX2())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
Mask, Subtarget, DAG);
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
Subtarget, DAG);
}
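
// Illustrative standalone sketch (not used by the lowering code): the
// VPERMILPD immediate built above for a non-lane-crossing v4f64 single-input
// shuffle. Bit i of the immediate says whether result element i takes the
// high (1) or low (0) 64-bit element of its own 128-bit lane.
static unsigned sketchVPERMILPDImm(const int *Mask /* 4 in-lane elements */) {
  unsigned Imm = 0;
  Imm |= (Mask[0] == 1) << 0; // lane 0 holds elements {0, 1}
  Imm |= (Mask[1] == 1) << 1;
  Imm |= (Mask[2] == 3) << 2; // lane 1 holds elements {2, 3}
  Imm |= (Mask[3] == 3) << 3;
  // e.g. mask <1, 0, 3, 3> -> immediate 0b1101 = 0xD.
  return Imm;
}
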
/// Handle lowering of 4-lane 64-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
static SDValue lowerV4I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
if (V2.isUndef()) {
// When the shuffle is mirrored between the 128-bit lanes of the unit, we
// can use lower latency instructions that will operate on both lanes.
SmallVector<int, 2> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
SmallVector<int, 4> PSHUFDMask;
scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
return DAG.getBitcast(
MVT::v4i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
DAG.getBitcast(MVT::v8i32, V1),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
// AVX2 provides a direct instruction for permuting a single input across
// lanes.
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// If we have VLX support, we can use VALIGN or VEXPAND.
if (Subtarget.hasVLX()) {
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
}
// Try to use PALIGNR.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
return V;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
return V;
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either input is already in place,
// we will be able to shuffle the other input across lanes in a single
// instruction, so skip this pattern.
if (!isShuffleMaskInputInPlace(0, Mask) &&
!isShuffleMaskInputInPlace(1, Mask))
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
return Result;
// Otherwise fall back on generic blend lowering.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG);
}
/// Handle lowering of 8-lane 32-bit floating point shuffles.
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV8F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
// If the shuffle mask is repeated in each 128-bit lane, we have many more
// options to efficiently lower the shuffle.
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
assert(RepeatedMask.size() == 4 &&
"Repeated masks must be half the mask width!");
// Use even/odd duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
return V;
// Otherwise, fall back to a SHUFPS sequence. Here it is important that we
// have already handled any direct blends.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
}
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
return V;
// If we have a single input shuffle with different shuffle patterns in the
// two 128-bit lanes use the variable mask to VPERMILPS.
if (V2.isUndef()) {
SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
if (Subtarget.hasAVX2())
return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
DAG, Subtarget);
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
return Result;
// If we have VLX support, we can use VEXPAND.
if (Subtarget.hasVLX())
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
// For non-AVX512 targets, if the mask is an in-lane 16-bit unpack pattern,
// try to split, since after splitting we get more efficient code using
// vpunpcklwd and vpunpckhwd instructions than with vblend.
if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
if (SDValue V = lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2,
Mask, Subtarget, DAG))
return V;
// If we have AVX2 then we always want to lower with a blend because at v8 we
// can fully permute the elements.
if (Subtarget.hasAVX2())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
Mask, Subtarget, DAG);
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
Subtarget, DAG);
}
/// Handle lowering of 8-lane 32-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
static SDValue lowerV8I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v8i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// For non-AVX512 targets, if the mask is an in-lane 16-bit unpack pattern,
// try to split, since after splitting we get more efficient code than with
// vblend by using vpunpcklwd and vpunpckhwd instructions.
if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
!Subtarget.hasAVX512())
if (SDValue V = lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2,
Mask, Subtarget, DAG))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
// If the shuffle mask is repeated in each 128-bit lane we can use more
// efficient instructions that mirror the shuffles across the two 128-bit
// lanes.
SmallVector<int, 4> RepeatedMask;
bool Is128BitLaneRepeatedShuffle =
is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
if (Is128BitLaneRepeatedShuffle) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
if (V2.isUndef())
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
return V;
}
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// If we have VLX support, we can use VALIGN or EXPAND.
if (Subtarget.hasVLX()) {
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v8i32, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
}
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return V;
// If the shuffle patterns aren't repeated but it is a single input, directly
// generate a cross-lane VPERMD instruction.
if (V2.isUndef()) {
SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
}
// Assume that a single SHUFPS is faster than an alternative sequence of
// multiple instructions (even if the CPU has a domain penalty).
// If some CPU is harmed by the domain switch, we can fix it in a later pass.
if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
SDValue ShufPS = lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
CastV1, CastV2, DAG);
return DAG.getBitcast(MVT::v8i32, ShufPS);
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return Result;
// Otherwise fall back on generic blend lowering.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
Mask, Subtarget, DAG);
}
/// Handle lowering of 16-lane 16-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
static SDValue lowerV16I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return V;
if (V2.isUndef()) {
// There are no generalized cross-lane shuffle operations available on i16
// element types.
if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute(
DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
return V;
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
Mask, DAG, Subtarget);
}
SmallVector<int, 8> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
// As this is a single-input shuffle, the repeated mask should be
// a strictly valid v8i16 mask that we can pass through to the v8i16
// lowering to handle even the v16 case.
return lowerV8I16GeneralSingleInputVectorShuffle(
DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
}
}
if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
DL, MVT::v16i16, Mask, V1, V2, Zeroable, Subtarget, DAG))
return PSHUFB;
// AVX512BWVL can lower to VPERMW.
if (Subtarget.hasBWI() && Subtarget.hasVLX())
return lowerVectorShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return Result;
// Try to permute the lanes and then use a per-lane permute.
if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute(
DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
return V;
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
Subtarget, DAG);
}
/// Handle lowering of 32-lane 8-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
static SDValue lowerV32I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v32i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
return V;
// There are no generalized cross-lane shuffle operations available on i8
// element types.
if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute(
DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
return V;
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask,
DAG, Subtarget);
}
if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
DL, MVT::v32i8, Mask, V1, V2, Zeroable, Subtarget, DAG))
return PSHUFB;
// AVX512VBMIVL can lower to VPERMB.
if (Subtarget.hasVBMI() && Subtarget.hasVLX())
return lowerVectorShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
return Result;
// Try to permute the lanes and then use a per-lane permute.
if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute(
DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
return V;
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
Subtarget, DAG);
}
/// High-level routine to lower various 256-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower256BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
int NumElts = VT.getVectorNumElements();
int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
if (NumV2Elements == 1 && Mask[0] >= NumElts)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Insertion;
// Handle special cases where the lower or upper half is UNDEF.
if (SDValue V =
lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
return V;
// There is a really nice hard cut-over between AVX1 and AVX2 that means we
// can check for those subtargets here and avoid much of the subtarget
// querying in the per-vector-type lowering routines. With AVX1 we have
// essentially *zero* ability to manipulate a 256-bit vector with integer
// types. Since we'll use floating point types there eventually, just
// immediately cast everything to a float and operate entirely in that domain.
if (VT.isInteger() && !Subtarget.hasAVX2()) {
int ElementBits = VT.getScalarSizeInBits();
if (ElementBits < 32) {
// No floating point type available, if we can't use the bit operations
// for masking/blending then decompose into 128-bit vectors.
if (SDValue V =
lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, DAG))
return V;
if (SDValue V = lowerVectorShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
return V;
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}
MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
VT.getVectorNumElements());
V1 = DAG.getBitcast(FpVT, V1);
V2 = DAG.getBitcast(FpVT, V2);
return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
}
switch (VT.SimpleTy) {
case MVT::v4f64:
return lowerV4F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v4i64:
return lowerV4I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v8f32:
return lowerV8F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v8i32:
return lowerV8I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v16i16:
return lowerV16I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v32i8:
return lowerV32I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Not a valid 256-bit x86 vector type!");
}
}
/// Try to lower a vector shuffle as a 128-bit shuffles.
static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(VT.getScalarSizeInBits() == 64 &&
"Unexpected element type size for 128bit shuffle.");
// Handling a 256-bit vector here would require VLX, and the function
// lowerV2X128VectorShuffle() is most probably the better solution for it.
assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
// TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
SmallVector<int, 4> WidenedMask;
if (!canWidenShuffleElements(Mask, WidenedMask))
return SDValue();
// Try to use an insert into a zero vector.
if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
(WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
getZeroVector(VT, Subtarget, DAG, DL), LoV,
DAG.getIntPtrConstant(0, DL));
}
// Check for patterns which can be matched with a single insert of a 256-bit
// subvector.
bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
{0, 1, 2, 3, 0, 1, 2, 3});
if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
{0, 1, 2, 3, 8, 9, 10, 11})) {
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
OnlyUsesV1 ? V1 : V2,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
DAG.getIntPtrConstant(4, DL));
}
assert(WidenedMask.size() == 4);
// See if this is an insertion of the lower 128-bits of V2 into V1.
bool IsInsert = true;
int V2Index = -1;
for (int i = 0; i < 4; ++i) {
assert(WidenedMask[i] >= -1);
if (WidenedMask[i] < 0)
continue;
// Make sure all V1 subvectors are in place.
if (WidenedMask[i] < 4) {
if (WidenedMask[i] != i) {
IsInsert = false;
break;
}
} else {
// Make sure we only have a single V2 index and it's the lowest 128 bits.
if (V2Index >= 0 || WidenedMask[i] != 4) {
IsInsert = false;
break;
}
V2Index = i;
}
}
if (IsInsert && V2Index >= 0) {
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
DAG.getIntPtrConstant(0, DL));
return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
}
// Try to lower to vshuf64x2/vshuf32x4.
SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
unsigned PermMask = 0;
// Ensure elements came from the same Op.
for (int i = 0; i < 4; ++i) {
assert(WidenedMask[i] >= -1);
if (WidenedMask[i] < 0)
continue;
SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
unsigned OpIndex = i / 2;
if (Ops[OpIndex].isUndef())
Ops[OpIndex] = Op;
else if (Ops[OpIndex] != Op)
return SDValue();
// Convert the 128-bit shuffle mask selection values into 128-bit selection
// bits defined by a vshuf64x2 instruction's immediate control byte.
PermMask |= (WidenedMask[i] % 4) << (i * 2);
}
return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
DAG.getConstant(PermMask, DL, MVT::i8));
}
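
// Illustrative standalone sketch (not used by the lowering code): the
// vshuf64x2/vshuf32x4 immediate built above. Each 2-bit field i selects which
// 128-bit chunk of the operand feeding destination position i (Ops[0] for the
// two low positions, Ops[1] for the two high positions) ends up there.
static unsigned sketchSHUF128Imm(const int *WidenedMask /* 4 entries */) {
  unsigned PermMask = 0;
  for (int i = 0; i < 4; ++i)
    if (WidenedMask[i] >= 0)
      PermMask |= ((unsigned)WidenedMask[i] % 4) << (i * 2);
  // e.g. WidenedMask {0, 1, 4, 5} (low 256 bits of V1, low 256 bits of V2)
  //      -> immediate 0b01000100 = 0x44.
  return PermMask;
}
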
/// Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (V2.isUndef()) {
// Use low duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
// Non-half-crossing single input shuffles can be lowered with an
// interleaved permutation.
unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
DAG.getConstant(VPERMILPMask, DL, MVT::i8));
}
SmallVector<int, 4> RepeatedMask;
if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
}
if (SDValue Shuf128 =
lowerV4X128VectorShuffle(DL, MVT::v8f64, Mask, Zeroable, V1, V2,
Subtarget, DAG))
return Shuf128;
if (SDValue Unpck =
lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
return Unpck;
// Check if the blend happens to exactly fit that of SHUFPD.
if (SDValue Op =
lowerVectorShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG))
return Op;
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1,
V2, DAG, Subtarget))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
}
/// Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// If the shuffle mask is repeated in each 128-bit lane, we have many more
// options to efficiently lower the shuffle.
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
// Use even/odd duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue Unpck =
lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
return Unpck;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Otherwise, fall back to a SHUFPS sequence.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
}
// If we have a single input shuffle with different shuffle patterns in the
// 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
if (V2.isUndef() &&
!is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
}
// If we have AVX512F support, we can use VEXPAND.
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
}
/// Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (V2.isUndef()) {
// When the shuffle is mirrored between the 128-bit lanes of the unit, we
// can use lower latency instructions that will operate on all four
// 128-bit lanes.
SmallVector<int, 2> Repeated128Mask;
if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
SmallVector<int, 4> PSHUFDMask;
scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
return DAG.getBitcast(
MVT::v8i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
DAG.getBitcast(MVT::v16i32, V1),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
SmallVector<int, 4> Repeated256Mask;
if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
}
if (SDValue Shuf128 =
lowerV4X128VectorShuffle(DL, MVT::v8i64, Mask, Zeroable,
V1, V2, Subtarget, DAG))
return Shuf128;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use VALIGN.
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v8i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
// Try to use PALIGNR.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue Unpck =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
return Unpck;
// If we have AVX512F support, we can use VEXPAND.
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1,
V2, DAG, Subtarget))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
return lowerVectorShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
}
/// Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// If the shuffle mask is repeated in each 128-bit lane we can use more
// efficient instructions that mirror the shuffles across the four 128-bit
// lanes.
SmallVector<int, 4> RepeatedMask;
bool Is128BitLaneRepeatedShuffle =
is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
if (Is128BitLaneRepeatedShuffle) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
if (V2.isUndef())
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
return V;
}
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use VALIGN.
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v16i32, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
// Try to use byte rotation instructions.
if (Subtarget.hasBWI())
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Assume that a single SHUFPS is faster than using a permv shuffle.
// If some CPU is harmed by the domain switch, we can fix it in a later pass.
if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
SDValue ShufPS = lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
CastV1, CastV2, DAG);
return DAG.getBitcast(MVT::v16i32, ShufPS);
}
// If we have AVX512F support, we can use VEXPAND.
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
}
/// Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
return V;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
if (V2.isUndef()) {
SmallVector<int, 8> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
// As this is a single-input shuffle, the repeated mask should be
// a strictly valid v8i16 mask that we can pass through to the v8i16
// lowering to handle even the v32 case.
return lowerV8I16GeneralSingleInputVectorShuffle(
DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
}
}
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
DL, MVT::v32i16, Mask, V1, V2, Zeroable, Subtarget, DAG))
return PSHUFB;
return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
}
/// Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
DL, MVT::v64i8, Mask, V1, V2, Zeroable, Subtarget, DAG))
return PSHUFB;
// VBMI can use VPERMV/VPERMV3 byte shuffles.
if (Subtarget.hasVBMI())
return lowerVectorShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}
/// High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/ basic ISA!");
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
int NumElts = Mask.size();
int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
if (NumV2Elements == 1 && Mask[0] >= NumElts)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Insertion;
// Handle special cases where the lower or upper half is UNDEF.
if (SDValue V =
lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
return V;
// Check for being able to broadcast a single element.
if (SDValue Broadcast =
lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Dispatch to each element type for lowering. If we don't have support for
// specific element type shuffles at 512 bits, immediately split them and
// lower them. Each lowering routine of a given type is allowed to assume that
// the requisite ISA extensions for that element type are available.
switch (VT.SimpleTy) {
case MVT::v8f64:
return lowerV8F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v16f32:
return lowerV16F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v8i64:
return lowerV8I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v16i32:
return lowerV16I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v32i16:
return lowerV32I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v64i8:
return lowerV64I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Not a valid 512-bit x86 vector type!");
}
}
// Determine if this shuffle can be implemented with a KSHIFT instruction.
// Returns the shift amount if possible or -1 if not. This is a simplified
// version of matchVectorShuffleAsShift.
static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
int MaskOffset, const APInt &Zeroable) {
int Size = Mask.size();
auto CheckZeros = [&](int Shift, bool Left) {
for (int j = 0; j < Shift; ++j)
if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
return false;
return true;
};
auto MatchShift = [&](int Shift, bool Left) {
unsigned Pos = Left ? Shift : 0;
unsigned Low = Left ? 0 : Shift;
unsigned Len = Size - Shift;
return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
};
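// Search every candidate shift amount in both directions and take the first
// (smallest) one where the vacated positions are all zeroable and the
// remaining mask elements form a sequential (or undef) run from the source
// operand selected by MaskOffset.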
for (int Shift = 1; Shift != Size; ++Shift)
for (bool Left : {true, false})
if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
return Shift;
}
return -1;
}
// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle the bits is to sign-extend the mask vector to a SIMD
// vector, shuffle it, and then truncate the result back to a mask.
static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/o basic ISA!");
unsigned NumElts = Mask.size();
// Try to recognize shuffles that are just padding a subvector with zeros.
unsigned SubvecElts = 0;
for (int i = 0; i != (int)NumElts; ++i) {
if (Mask[i] >= 0 && Mask[i] != i)
break;
++SubvecElts;
}
assert(SubvecElts != NumElts && "Identity shuffle?");
// Clip to a power of 2.
SubvecElts = PowerOf2Floor(SubvecElts);
// Make sure the number of zeroable bits in the top at least covers the bits
// not covered by the subvector.
if (Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
V1, DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
getZeroVector(VT, Subtarget, DAG, DL),
Extract, DAG.getIntPtrConstant(0, DL));
}
// Try to match KSHIFTs.
// TODO: Support narrower than legal shifts by widening and extracting.
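// KSHIFTL/KSHIFTR shift the whole k-register and fill the vacated mask bits
// with zeros, so a shuffle that merely slides a contiguous run of elements of
// one operand (with the rest zeroable) becomes a single shift of that operand.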
if (NumElts >= 16 || (Subtarget.hasDQI() && NumElts == 8)) {
unsigned Offset = 0;
for (SDValue V : { V1, V2 }) {
unsigned Opcode;
int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
if (ShiftAmt >= 0)
return DAG.getNode(Opcode, DL, VT, V,
DAG.getConstant(ShiftAmt, DL, MVT::i8));
Offset += NumElts; // Increment for next iteration.
}
}
MVT ExtVT;
switch (VT.SimpleTy) {
default:
llvm_unreachable("Expected a vector of i1 elements");
case MVT::v2i1:
ExtVT = MVT::v2i64;
break;
case MVT::v4i1:
ExtVT = MVT::v4i32;
break;
case MVT::v8i1:
// Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
// shuffle.
ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
break;
case MVT::v16i1:
// Take 512-bit type, unless we are avoiding 512-bit types and have the
// 256-bit operation available.
ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
break;
case MVT::v32i1:
// Take 512-bit type, unless we are avoiding 512-bit types and have the
// 256-bit operation available.
assert(Subtarget.hasBWI() && "Expected AVX512BW support");
ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
break;
case MVT::v64i1:
ExtVT = MVT::v64i8;
break;
}
V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
// The i1 elements were sign-extended, so when a native compare-to-mask is
// available the mask can be rebuilt by testing the sign of each shuffled lane.
int NumElems = VT.getVectorNumElements();
if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
(Subtarget.hasDQI() && (NumElems < 32)))
return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
Shuffle, ISD::SETGT);
return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
}
/// Helper function that returns true if the shuffle mask should be
/// commuted to improve canonicalization.
static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
int NumElements = Mask.size();
int NumV1Elements = 0, NumV2Elements = 0;
for (int M : Mask)
if (M < 0)
continue;
else if (M < NumElements)
++NumV1Elements;
else
++NumV2Elements;
// Commute the shuffle as needed such that more elements come from V1 than
// V2. This allows us to match the shuffle pattern strictly on how many
// elements come from V1 without handling the symmetric cases.
if (NumV2Elements > NumV1Elements)
return true;
assert(NumV1Elements > 0 && "No V1 indices");
if (NumV2Elements == 0)
return false;
// When the number of V1 and V2 elements are the same, try to minimize the
// number of uses of V2 in the low half of the vector. When that is tied,
// ensure that the sum of indices for V1 is equal to or lower than the sum of
// indices for V2. When those are equal, try to ensure that the number of odd
// indices for V1 is lower than the number of odd indices for V2.
if (NumV1Elements == NumV2Elements) {
int LowV1Elements = 0, LowV2Elements = 0;
for (int M : Mask.slice(0, NumElements / 2))
if (M >= NumElements)
++LowV2Elements;
else if (M >= 0)
++LowV1Elements;
if (LowV2Elements > LowV1Elements)
return true;
if (LowV2Elements == LowV1Elements) {
int SumV1Indices = 0, SumV2Indices = 0;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
if (Mask[i] >= NumElements)
SumV2Indices += i;
else if (Mask[i] >= 0)
SumV1Indices += i;
if (SumV2Indices < SumV1Indices)
return true;
if (SumV2Indices == SumV1Indices) {
int NumV1OddIndices = 0, NumV2OddIndices = 0;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
if (Mask[i] >= NumElements)
NumV2OddIndices += i % 2;
else if (Mask[i] >= 0)
NumV1OddIndices += i % 2;
if (NumV2OddIndices < NumV1OddIndices)
return true;
}
}
}
return false;
}
/// Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
ArrayRef<int> Mask = SVOp->getMask();
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
MVT VT = Op.getSimpleValueType();
int NumElements = VT.getVectorNumElements();
SDLoc DL(Op);
bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
"Can't lower MMX shuffles");
bool V1IsUndef = V1.isUndef();
bool V2IsUndef = V2.isUndef();
if (V1IsUndef && V2IsUndef)
return DAG.getUNDEF(VT);
// When we create a shuffle node we put the UNDEF node as the second operand,
// but in some cases the first operand may be transformed to UNDEF.
// In this case we should just commute the node.
if (V1IsUndef)
return DAG.getCommutedVectorShuffle(*SVOp);
// Check for non-undef masks pointing at an undef vector and make the masks
// undef as well. This makes it easier to match the shuffle based solely on
// the mask.
if (V2IsUndef)
for (int M : Mask)
if (M >= NumElements) {
SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
for (int &M : NewMask)
if (M >= NumElements)
M = -1;
return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
}
// Check for illegal shuffle mask element index values.
int MaskUpperLimit = Mask.size() * (V2IsUndef ? 1 : 2); (void)MaskUpperLimit;
assert(llvm::all_of(Mask,
[&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
"Out of bounds shuffle index");
// We actually see shuffles that are entirely re-arrangements of a set of
// zero inputs. This mostly happens while decomposing complex shuffles into
// simple ones. Directly lower these as a buildvector of zeros.
APInt Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
if (Zeroable.isAllOnesValue())
return getZeroVector(VT, Subtarget, DAG, DL);
bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
// Create an alternative mask with info about zeroable elements.
// Here we do not set undef elements as zeroable.
SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
if (V2IsZero) {
assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
for (int i = 0; i != NumElements; ++i)
if (Mask[i] != SM_SentinelUndef && Zeroable[i])
ZeroableMask[i] = SM_SentinelZero;
}
// Try to collapse shuffles into using a vector type with fewer elements but
// wider element types. We cap this to not form integers or floating point
// elements wider than 64 bits, but it might be interesting to form i128
// integers to handle flipping the low and high halves of AVX 256-bit vectors.
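// For example, a v8i32 mask <0,1,4,5,2,3,6,7> widens to the v4i64 mask
// <0,2,1,3>, letting the 64-bit element lowering handle it.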
SmallVector<int, 16> WidenedMask;
if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
canWidenShuffleElements(ZeroableMask, WidenedMask)) {
// Shuffle mask widening should not interfere with a broadcast opportunity
// by obfuscating the operands with bitcasts.
// TODO: Avoid lowering directly from this top-level function: make this
// a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
if (SDValue Broadcast =
lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
MVT NewEltVT = VT.isFloatingPoint()
? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
: MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
int NewNumElts = NumElements / 2;
MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
// Make sure that the new vector type is legal. For example, v2f64 isn't
// legal on SSE1.
if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
if (V2IsZero) {
// Modify the new Mask to take all zeros from the all-zero vector.
// Choose indices that are blend-friendly.
bool UsedZeroVector = false;
assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
"V2's non-undef elements are used?!");
for (int i = 0; i != NewNumElts; ++i)
if (WidenedMask[i] == SM_SentinelZero) {
WidenedMask[i] = i + NewNumElts;
UsedZeroVector = true;
}
// Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
// some elements to be undef.
if (UsedZeroVector)
V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
}
V1 = DAG.getBitcast(NewVT, V1);
V2 = DAG.getBitcast(NewVT, V2);
return DAG.getBitcast(
VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
}
}
// Commute the shuffle if it will improve canonicalization.
if (canonicalizeShuffleMaskWithCommute(Mask))
return DAG.getCommutedVectorShuffle(*SVOp);
if (SDValue V =
lowerVectorShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
return V;
// For each vector width, delegate to a specialized lowering routine.
if (VT.is128BitVector())
return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
DAG);
if (VT.is256BitVector())
return lower256BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
DAG);
if (VT.is512BitVector())
return lower512BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
DAG);
if (Is1BitVector)
return lower1BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
DAG);
llvm_unreachable("Unimplemented!");
}
/// Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Cond = Op.getOperand(0);
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
MVT VT = Op.getSimpleValueType();
// Only non-legal VSELECTs reach this lowering; convert those into generic
// shuffles and re-use the shuffle lowering path for blends.
SmallVector<int, 32> Mask;
if (createShuffleMaskFromVSELECT(Mask, Cond))
return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
return SDValue();
}
SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue Cond = Op.getOperand(0);
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
// A vselect where all conditions and data are constants can be optimized into
// a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
return SDValue();
// Try to lower this to a blend-style vector shuffle. This can handle all
// constant condition cases.
if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
return BlendOp;
// If this VSELECT has a vector of i1 as its mask, it will be directly matched
// with patterns on the mask registers on AVX-512.
MVT CondVT = Cond.getSimpleValueType();
unsigned CondEltSize = Cond.getScalarValueSizeInBits();
if (CondEltSize == 1)
return Op;
// Variable blends are only legal from SSE4.1 onward.
if (!Subtarget.hasSSE41())
return SDValue();
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
unsigned EltSize = VT.getScalarSizeInBits();
unsigned NumElts = VT.getVectorNumElements();
// If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
// into an i1 condition so that we can use the mask-based 512-bit blend
// instructions.
if (VT.getSizeInBits() == 512) {
// Build a mask by testing the condition against zero.
MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
DAG.getConstant(0, dl, CondVT),
ISD::SETNE);
// Now return a new VSELECT using the mask.
return DAG.getSelect(dl, VT, Mask, LHS, RHS);
}
// SEXT/TRUNC cases where the mask doesn't match the destination size.
if (CondEltSize != EltSize) {
// If we don't have a sign splat, rely on the expansion.
if (CondEltSize != DAG.ComputeNumSignBits(Cond))
return SDValue();
MVT NewCondSVT = MVT::getIntegerVT(EltSize);
MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
}
// Only some types will be legal on some subtargets. If we can emit a legal
// VSELECT-matching blend, return Op; but if we need to expand, return
// a null value.
switch (VT.SimpleTy) {
default:
// Most of the vector types have blends past SSE4.1.
return Op;
case MVT::v32i8:
// The byte blends for AVX vectors were introduced only in AVX2.
if (Subtarget.hasAVX2())
return Op;
return SDValue();
case MVT::v8i16:
case MVT::v16i16: {
// Bitcast everything to the vXi8 type and use a vXi8 vselect.
MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
Cond = DAG.getBitcast(CastVT, Cond);
LHS = DAG.getBitcast(CastVT, LHS);
RHS = DAG.getBitcast(CastVT, RHS);
SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
return DAG.getBitcast(VT, Select);
}
}
}
static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
return SDValue();
if (VT.getSizeInBits() == 8) {
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
Op.getOperand(0), Op.getOperand(1));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
}
if (VT == MVT::f32) {
// EXTRACTPS outputs to a GPR32 register which will require a movd to copy
// the result back to FR32 register. It's only worth matching if the
// result has a single use which is a store or a bitcast to i32. And in
// the case of a store, it's not worth it if the index is a constant 0,
// because a MOVSSmr can be used instead, which is smaller and faster.
if (!Op.hasOneUse())
return SDValue();
SDNode *User = *Op.getNode()->use_begin();
if ((User->getOpcode() != ISD::STORE ||
isNullConstant(Op.getOperand(1))) &&
(User->getOpcode() != ISD::BITCAST ||
User->getValueType(0) != MVT::i32))
return SDValue();
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
Op.getOperand(1));
return DAG.getBitcast(MVT::f32, Extract);
}
if (VT == MVT::i32 || VT == MVT::i64) {
// EXTRACTPS/PEXTRQ work with a constant index.
if (isa<ConstantSDNode>(Op.getOperand(1)))
return Op;
}
return SDValue();
}
/// Extract one bit from mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Vec = Op.getOperand(0);
SDLoc dl(Vec);
MVT VecVT = Vec.getSimpleValueType();
SDValue Idx = Op.getOperand(1);
MVT EltVT = Op.getSimpleValueType();
assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
"Unexpected vector type in ExtractBitFromMaskVector");
// A variable index can't be handled in mask registers,
// so extend the vector to VR512/VR128.
if (!isa<ConstantSDNode>(Idx)) {
unsigned NumElts = VecVT.getVectorNumElements();
// Extending v8i1/v16i1 to 512 bits gets better performance on KNL
// than extending to 128/256 bits.
MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
}
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
if (IdxVal == 0) // the operation is legal
return Op;
// Extend to natively supported kshift.
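// KSHIFT of an 8-bit mask (KSHIFTB) requires AVX512DQ; without DQI, or for
// masks narrower than 8 bits, widen so a legal KSHIFTW/KSHIFTB can be used.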
unsigned NumElems = VecVT.getVectorNumElements();
MVT WideVecVT = VecVT;
if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
DAG.getUNDEF(WideVecVT), Vec,
DAG.getIntPtrConstant(0, dl));
}
// Use kshiftr instruction to move to the lower element.
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
DAG.getConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
DAG.getIntPtrConstant(0, dl));
}
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
MVT VecVT = Vec.getSimpleValueType();
SDValue Idx = Op.getOperand(1);
if (VecVT.getVectorElementType() == MVT::i1)
return ExtractBitFromMaskVector(Op, DAG, Subtarget);
if (!isa<ConstantSDNode>(Idx)) {
// It's more profitable to go through memory (1 cycle throughput)
// than using VMOVD + VPERMV/PSHUFB sequence ( 2/3 cycles throughput)
// IACA tool was used to get performance estimation
// (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
//
// example : extractelement <16 x i8> %a, i32 %i
//
// Block Throughput: 3.00 Cycles
// Throughput Bottleneck: Port5
//
// | Num Of | Ports pressure in cycles | |
// | Uops | 0 - DV | 5 | 6 | 7 | |
// ---------------------------------------------
// | 1 | | 1.0 | | | CP | vmovd xmm1, edi
// | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
// | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
// Total Num Of Uops: 4
//
//
// Block Throughput: 1.00 Cycles
// Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
//
// | | Ports pressure in cycles | |
// |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
// ---------------------------------------------------------
// |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
// |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
// |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
// Total Num Of Uops: 4
return SDValue();
}
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
// If this is a 256-bit vector result, first extract the 128-bit vector and
// then extract the element from the 128-bit vector.
if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
// Get the 128-bit vector.
Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
MVT EltVT = VecVT.getVectorElementType();
unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
// Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
// this can be done with a mask.
IdxVal &= ElemsPerChunk - 1;
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
DAG.getConstant(IdxVal, dl, MVT::i32));
}
assert(VecVT.is128BitVector() && "Unexpected vector length");
MVT VT = Op.getSimpleValueType();
if (VT.getSizeInBits() == 16) {
// If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
// we're going to zero extend the register or fold the store (SSE41 only).
if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
!(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Vec), Idx));
// Transform it so it matches pextrw, which produces a 32-bit result.
SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
Op.getOperand(0), Op.getOperand(1));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
}
if (Subtarget.hasSSE41())
if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
return Res;
// TODO: We only extract a single element from v16i8, we can probably afford
// to be more aggressive here before using the default approach of spilling to
// stack.
if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
// Extract either the lowest i32 or any i16, and extract the sub-byte.
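// For example, byte 5 lives in word 2 of the v8i16 view; extract that word
// and shift right by 8 bits to reach the odd byte.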
int DWordIdx = IdxVal / 4;
if (DWordIdx == 0) {
SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Vec),
DAG.getIntPtrConstant(DWordIdx, dl));
int ShiftVal = (IdxVal % 4) * 8;
if (ShiftVal != 0)
Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
DAG.getConstant(ShiftVal, dl, MVT::i8));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}
int WordIdx = IdxVal / 2;
SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
DAG.getBitcast(MVT::v8i16, Vec),
DAG.getIntPtrConstant(WordIdx, dl));
int ShiftVal = (IdxVal % 2) * 8;
if (ShiftVal != 0)
Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
DAG.getConstant(ShiftVal, dl, MVT::i8));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}
if (VT.getSizeInBits() == 32) {
if (IdxVal == 0)
return Op;
// SHUFPS the element to the lowest double word, then movss.
int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0, dl));
}
if (VT.getSizeInBits() == 64) {
// FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
// FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
// to match extract_elt for f64.
if (IdxVal == 0)
return Op;
// UNPCKHPD the element to the lowest double word, then movsd.
// Note if the lower 64 bits of the result of the UNPCKHPD is then stored
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
int Mask[2] = { 1, -1 };
Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0, dl));
}
return SDValue();
}
/// Insert one bit to mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
SDValue Elt = Op.getOperand(1);
SDValue Idx = Op.getOperand(2);
MVT VecVT = Vec.getSimpleValueType();
if (!isa<ConstantSDNode>(Idx)) {
// Non-constant index. Extend the source and destination,
// insert the element, and then truncate the result.
unsigned NumElts = VecVT.getVectorNumElements();
MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
}
// Copy into a k-register, extract to v1i1 and insert_subvector.
SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
Op.getOperand(2));
}
SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElts = VT.getVectorNumElements();
if (EltVT == MVT::i1)
return InsertBitToMaskVector(Op, DAG, Subtarget);
SDLoc dl(Op);
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
SDValue N2 = Op.getOperand(2);
if (!isa<ConstantSDNode>(N2))
return SDValue();
auto *N2C = cast<ConstantSDNode>(N2);
unsigned IdxVal = N2C->getZExtValue();
bool IsZeroElt = X86::isZeroNode(N1);
bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
// If we are inserting an element, see if we can do this more efficiently with
// a blend shuffle against a rematerializable vector than with a costly integer
// insertion.
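// For example, inserting zero into element 2 of a v4i32 becomes the shuffle
// <0,1,6,3> with an all-zeros second operand, which lowers to a cheap blend.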
if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
16 <= EltVT.getSizeInBits()) {
SmallVector<int, 8> BlendMask;
for (unsigned i = 0; i != NumElts; ++i)
BlendMask.push_back(i == IdxVal ? i + NumElts : i);
SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
: getOnesVector(VT, DAG, dl);
return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
}
// If the vector is wider than 128 bits, extract the 128-bit subvector, insert
// into that, and then insert the subvector back into the result.
if (VT.is256BitVector() || VT.is512BitVector()) {
// With a 256-bit vector, we can insert into the zero element efficiently
// using a blend if we have AVX or AVX2 and the right data type.
if (VT.is256BitVector() && IdxVal == 0) {
// TODO: It is worthwhile to cast integer to floating point and back
// and incur a domain crossing penalty if that's what we'll end up
// doing anyway after extracting to a 128-bit vector.
if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
(Subtarget.hasAVX2() && EltVT == MVT::i32)) {
SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
N2 = DAG.getIntPtrConstant(1, dl);
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
}
}
// Get the desired 128-bit vector chunk.
SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
// Insert the element into the desired chunk.
unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
assert(isPowerOf2_32(NumEltsIn128));
// Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
DAG.getConstant(IdxIn128, dl, MVT::i32));
// Insert the changed part back into the bigger vector
return insert128BitVector(N0, V, IdxVal, DAG, dl);
}
assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
// Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
// argument. SSE41 required for pinsrb.
if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
unsigned Opc;
if (VT == MVT::v8i16) {
assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
Opc = X86ISD::PINSRW;
} else {
assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
Opc = X86ISD::PINSRB;
}
if (N1.getValueType() != MVT::i32)
N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
if (N2.getValueType() != MVT::i32)
N2 = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(Opc, dl, VT, N0, N1, N2);
}
if (Subtarget.hasSSE41()) {
if (EltVT == MVT::f32) {
// Bits [7:6] of the constant are the source select. This will always be
// zero here. The DAG Combiner may combine an extract_elt index into
// these bits. For example (insert (extract, 3), 2) could be matched by
// putting the '3' into bits [7:6] of X86ISD::INSERTPS.
// Bits [5:4] of the constant are the destination select. This is the
// value of the incoming immediate.
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather
// than an insertps. Blends are simpler operations in hardware and so
// will always have equal or better performance than insertps.
// But if optimizing for size and there's a load folding opportunity,
// generate insertps because blendps does not have a 32-bit memory
// operand form.
N2 = DAG.getIntPtrConstant(1, dl);
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2);
}
N2 = DAG.getIntPtrConstant(IdxVal << 4, dl);
// Create this as a scalar-to-vector.
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
}
// PINSR* works with constant index.
if (EltVT == MVT::i32 || EltVT == MVT::i64)
return Op;
}
return SDValue();
}
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
MVT OpVT = Op.getSimpleValueType();
// It's always cheaper to replace a xor+movd with xorps, and it simplifies
// further combines.
if (X86::isZeroNode(Op.getOperand(0)))
return getZeroVector(OpVT, Subtarget, DAG, dl);
// If this is a 256-bit vector result, first insert into a 128-bit
// vector and then insert into the 256-bit vector.
if (!OpVT.is128BitVector()) {
// Insert into a 128-bit vector.
unsigned SizeFactor = OpVT.getSizeInBits() / 128;
MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
OpVT.getVectorNumElements() / SizeFactor);
Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
// Insert the 128-bit vector.
return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
}
assert(OpVT.is128BitVector() && "Expected an SSE type!");
// Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
if (OpVT == MVT::v4i32)
return Op;
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
return DAG.getBitcast(
OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
}
// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
return insert1BitVector(Op, DAG, Subtarget);
}
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
"Only vXi1 extract_subvectors need custom lowering");
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
SDValue Idx = Op.getOperand(1);
if (!isa<ConstantSDNode>(Idx))
return SDValue();
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
if (IdxVal == 0) // the operation is legal
return Op;
MVT VecVT = Vec.getSimpleValueType();
unsigned NumElems = VecVT.getVectorNumElements();
// Extend to natively supported kshift.
MVT WideVecVT = VecVT;
if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
DAG.getUNDEF(WideVecVT), Vec,
DAG.getIntPtrConstant(0, dl));
}
// Shift to the LSB.
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
DAG.getConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
DAG.getIntPtrConstant(0, dl));
}
// Returns the appropriate wrapper opcode for a global reference.
unsigned X86TargetLowering::getGlobalWrapperKind(
const GlobalValue *GV, const unsigned char OpFlags) const {
// References to absolute symbols are never PC-relative.
if (GV && GV->isAbsoluteSymbolRef())
return X86ISD::Wrapper;
CodeModel::Model M = getTargetMachine().getCodeModel();
if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
return X86ISD::WrapperRIP;
// GOTPCREL references must always use RIP.
if (OpFlags == X86II::MO_GOTPCREL)
return X86ISD::WrapperRIP;
return X86ISD::Wrapper;
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetConstantPool(
CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
SDLoc DL(CP);
Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag) {
Result =
DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
}
return Result;
}
SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
SDLoc DL(JT);
Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag)
Result =
DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
return Result;
}
SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
SDLoc DL(Op);
Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag) {
Result =
DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
}
// For symbols that require a load from a stub to get the address, emit the
// load.
if (isGlobalStubReference(OpFlag))
Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
return Result;
}
SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// Create the TargetBlockAddressAddress node.
unsigned char OpFlags =
Subtarget.classifyBlockAddressReference();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
SDLoc dl(Op);
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (isGlobalRelativeToPICBase(OpFlags)) {
Result = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
}
return Result;
}
SDValue X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
const SDLoc &dl, int64_t Offset,
SelectionDAG &DAG) const {
// Create the TargetGlobalAddress node, folding in the constant
// offset if it is legal.
unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
CodeModel::Model M = DAG.getTarget().getCodeModel();
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result;
if (OpFlags == X86II::MO_NO_FLAG &&
X86::isOffsetSuitableForCodeModel(Offset, M)) {
// A direct static reference to a global.
Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
Offset = 0;
} else {
Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, OpFlags);
}
Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (isGlobalRelativeToPICBase(OpFlags)) {
Result = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
}
// For globals that require a load from a stub to get the address, emit the
// load.
if (isGlobalStubReference(OpFlags))
Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
// If there was a non-zero offset that we didn't fold, create an explicit
// addition for it.
if (Offset != 0)
Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
DAG.getConstant(Offset, dl, PtrVT));
return Result;
}
SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
}
static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
unsigned char OperandFlags, bool LocalDynamic = false) {
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDLoc dl(GA);
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(),
OperandFlags);
X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
: X86ISD::TLSADDR;
if (InFlag) {
SDValue Ops[] = { Chain, TGA, *InFlag };
Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
} else {
SDValue Ops[] = { Chain, TGA };
Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
}
// TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
MFI.setAdjustsStack(true);
MFI.setHasCalls(true);
SDValue Flag = Chain.getValue(1);
return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT) {
SDValue InFlag;
SDLoc dl(GA); // ? function entry point might be better
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg,
SDLoc(), PtrVT), InFlag);
InFlag = Chain.getValue(1);
return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT) {
return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
X86::RAX, X86II::MO_TLSGD);
}
static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
SelectionDAG &DAG,
const EVT PtrVT,
bool is64Bit) {
SDLoc dl(GA);
// Get the start address of the TLS block for this module.
X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
.getInfo<X86MachineFunctionInfo>();
MFI->incNumLocalDynamicTLSAccesses();
SDValue Base;
if (is64Bit) {
Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
X86II::MO_TLSLD, /*LocalDynamic=*/true);
} else {
SDValue InFlag;
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
InFlag = Chain.getValue(1);
Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
X86II::MO_TLSLDM, /*LocalDynamic=*/true);
}
// Note: the CleanupLocalDynamicTLSPass will remove redundant computations
// of Base.
// Build x@dtpoff.
unsigned char OperandFlags = X86II::MO_DTPOFF;
unsigned WrapperKind = X86ISD::Wrapper;
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
// Add x@dtpoff with the base.
return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}
// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT, TLSModel::Model model,
bool is64Bit, bool isPIC) {
SDLoc dl(GA);
// Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
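// In the X86 backend, address space 256 selects the %gs segment and 257
// selects %fs, which is how the segment-relative access is tagged here.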
Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
is64Bit ? 257 : 256));
SDValue ThreadPointer =
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
MachinePointerInfo(Ptr));
unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP-relative, even on x86-64. One exception is
// the initial-exec model.
unsigned WrapperKind = X86ISD::Wrapper;
if (model == TLSModel::LocalExec) {
OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
} else if (model == TLSModel::InitialExec) {
if (is64Bit) {
OperandFlags = X86II::MO_GOTTPOFF;
WrapperKind = X86ISD::WrapperRIP;
} else {
OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
}
} else {
llvm_unreachable("Unexpected model");
}
// emit "addl x@ntpoff,%eax" (local exec)
// or "addl x@indntpoff,%eax" (initial exec)
// or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
SDValue TGA =
DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
if (model == TLSModel::InitialExec) {
if (isPIC && !is64Bit) {
Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
Offset);
}
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
// The address of the thread local variable is the add of the thread
// pointer with the offset of the variable.
return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
if (DAG.getTarget().useEmulatedTLS())
return LowerToTLSEmulatedModel(GA, DAG);
const GlobalValue *GV = GA->getGlobal();
auto PtrVT = getPointerTy(DAG.getDataLayout());
bool PositionIndependent = isPositionIndependent();
if (Subtarget.isTargetELF()) {
TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
switch (model) {
case TLSModel::GeneralDynamic:
if (Subtarget.is64Bit())
return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
case TLSModel::LocalDynamic:
return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
Subtarget.is64Bit());
case TLSModel::InitialExec:
case TLSModel::LocalExec:
return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
PositionIndependent);
}
llvm_unreachable("Unknown TLS model.");
}
if (Subtarget.isTargetDarwin()) {
// Darwin only has one model of TLS. Lower to that.
unsigned char OpFlag = 0;
unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
X86ISD::WrapperRIP : X86ISD::Wrapper;
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
if (PIC32)
OpFlag = X86II::MO_TLVP_PIC_BASE;
else
OpFlag = X86II::MO_TLVP;
SDLoc DL(Op);
SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
GA->getValueType(0),
GA->getOffset(), OpFlag);
SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
// With PIC32, the address is actually $g + Offset.
if (PIC32)
Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
Offset);
// Lowering the machine ISD node will make sure everything is in the right
// location.
SDValue Chain = DAG.getEntryNode();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
SDValue Args[] = { Chain, Offset };
Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
DAG.getIntPtrConstant(0, DL, true),
Chain.getValue(1), DL);
// TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
MFI.setAdjustsStack(true);
// And our return value (tls address) is in the standard call return value
// location.
unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
}
if (Subtarget.isTargetKnownWindowsMSVC() ||
Subtarget.isTargetWindowsItanium() ||
Subtarget.isTargetWindowsGNU()) {
// Just use the implicit TLS architecture
// Need to generate something similar to:
// mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
// ; from TEB
// mov ecx, dword [rel _tls_index]: Load index (from C runtime)
// mov rcx, qword [rdx+rcx*8]
// mov eax, .tls$:tlsvar
// [rax+rcx] contains the address
// Windows 64bit: gs:0x58
// Windows 32bit: fs:__tls_array
SDLoc dl(GA);
SDValue Chain = DAG.getEntryNode();
// Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
// %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
// use its literal value of 0x2C.
Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
? Type::getInt8PtrTy(*DAG.getContext(),
256)
: Type::getInt32PtrTy(*DAG.getContext(),
257));
SDValue TlsArray = Subtarget.is64Bit()
? DAG.getIntPtrConstant(0x58, dl)
: (Subtarget.isTargetWindowsGNU()
? DAG.getIntPtrConstant(0x2C, dl)
: DAG.getExternalSymbol("_tls_array", PtrVT));
SDValue ThreadPointer =
DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
SDValue res;
if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
res = ThreadPointer;
} else {
// Load the _tls_index variable
SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
if (Subtarget.is64Bit())
IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
MachinePointerInfo(), MVT::i32);
else
IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
auto &DL = DAG.getDataLayout();
SDValue Scale =
DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
}
res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
// Get the offset of start of .tls section
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(), X86II::MO_SECREL);
SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
// The address of the thread local variable is the add of the thread
// pointer with the offset of the variable.
return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
}
llvm_unreachable("TLS not implemented for this target.");
}
/// Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
/// TODO: Can this be moved to general expansion code?
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
MVT VT = Op.getSimpleValueType();
unsigned VTBits = VT.getSizeInBits();
SDLoc dl(Op);
bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
// ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and
// ISD::SRA/L nodes haven't. Insert an AND to be safe, it's optimized away
// during isel.
SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
DAG.getConstant(VTBits - 1, dl, MVT::i8));
SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
DAG.getConstant(VTBits - 1, dl, MVT::i8))
: DAG.getConstant(0, dl, VT);
SDValue Tmp2, Tmp3;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
} else {
Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
}
// If the shift amount is larger than or equal to the width of a part, we can't
// rely on the results of shld/shrd. Insert a test and select the appropriate
// values for large shift amounts.
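// For example, with 32-bit parts and a shift amount of 40, the correct
// SHL_PARTS result is Hi = Lo << 8 and Lo = 0; the funnel-shift value (Tmp2)
// is only valid for amounts below the part width, so the selects below route
// Tmp3/Tmp1 instead for the large-amount case.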
SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
DAG.getConstant(VTBits, dl, MVT::i8));
SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
SDValue Hi, Lo;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
} else {
Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
}
return DAG.getMergeValues({ Lo, Hi }, dl);
}
static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
"Unexpected funnel shift opcode!");
SDLoc DL(Op);
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue Amt = Op.getOperand(2);
bool IsFSHR = Op.getOpcode() == ISD::FSHR;
if (VT.isVector()) {
assert(Subtarget.hasVBMI2() && "Expected VBMI2");
if (IsFSHR)
std::swap(Op0, Op1);
APInt APIntShiftAmt;
if (isConstantSplat(Amt, APIntShiftAmt)) {
uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
Op0, Op1, DAG.getConstant(ShiftAmt, DL, MVT::i8));
}
return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
Op0, Op1, Amt);
}
assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
"Unexpected funnel shift type!");
// Expand slow SHLD/SHRD cases if we are not optimizing for size.
bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
if (!OptForSize && Subtarget.isSHLDSlow())
return SDValue();
if (IsFSHR)
std::swap(Op0, Op1);
// i16 needs its shift amount reduced modulo 16, but i32/i64 have an implicit modulo.
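// The hardware masks the SHLD/SHRD count mod 32/64, which matches the i32/i64
// semantics, but a 16-bit SHLD/SHRD with a count of 16 or more is undefined,
// so clamp it explicitly.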
if (VT == MVT::i16)
Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
DAG.getConstant(15, DL, Amt.getValueType()));
unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
}
// Try to use a packed vector operation to handle i64 on 32-bit targets when
// AVX512DQ is enabled.
static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert((Op.getOpcode() == ISD::SINT_TO_FP ||
Op.getOpcode() == ISD::UINT_TO_FP) && "Unexpected opcode!");
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
MVT VT = Op.getSimpleValueType();
if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
(VT != MVT::f32 && VT != MVT::f64))
return SDValue();
// Pack the i64 into a vector, do the operation, and extract.
// Use a 256-bit source to ensure the result is 128 bits for the f32 case.
unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
MVT VecVT = MVT::getVectorVT(VT, NumElts);
SDLoc dl(Op);
SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
DAG.getIntPtrConstant(0, dl));
}
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
if (SrcVT.isVector()) {
if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
DAG.getUNDEF(SrcVT)));
}
return SDValue();
}
assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
"Unknown SINT_TO_FP to lower!");
// These are really Legal; return the operand so the caller accepts it as
// Legal.
if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(VT))
return Op;
if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit())
return Op;
if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
return V;
SDValue ValueToStore = Op.getOperand(0);
if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) &&
!Subtarget.is64Bit())
// Bitcasting to f64 here allows us to do a single 64-bit store from
// an SSE register, avoiding the store forwarding penalty that would come
// with two 32-bit stores.
ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
unsigned Size = SrcVT.getSizeInBits()/8;
MachineFunction &MF = DAG.getMachineFunction();
auto PtrVT = getPointerTy(MF.getDataLayout());
int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
SDValue Chain = DAG.getStore(
DAG.getEntryNode(), dl, ValueToStore, StackSlot,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}
SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
SDValue StackSlot,
SelectionDAG &DAG) const {
// Build the FILD
SDLoc DL(Op);
SDVTList Tys;
bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
if (useSSE)
Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
else
Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
unsigned ByteSize = SrcVT.getSizeInBits()/8;
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
MachineMemOperand *MMO;
if (FI) {
int SSFI = FI->getIndex();
MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
MachineMemOperand::MOLoad, ByteSize, ByteSize);
} else {
MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
StackSlot = StackSlot.getOperand(1);
}
SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
X86ISD::FILD, DL,
Tys, Ops, SrcVT, MMO);
if (useSSE) {
Chain = Result.getValue(1);
SDValue InFlag = Result.getValue(2);
// FIXME: Currently the FST is glued to the FILD_FLAG. This
// shouldn't be necessary except that RFP cannot be live across
// multiple blocks. When stackifier is fixed, they can be uncoupled.
MachineFunction &MF = DAG.getMachineFunction();
unsigned SSFISize = Op.getValueSizeInBits()/8;
int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
auto PtrVT = getPointerTy(MF.getDataLayout());
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
Tys = DAG.getVTList(MVT::Other);
SDValue Ops[] = {
Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
};
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
MachineMemOperand::MOStore, SSFISize, SSFISize);
Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
Ops, Op.getValueType(), MMO);
Result = DAG.getLoad(
Op.getValueType(), DL, Chain, StackSlot,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
}
return Result;
}
/// 64-bit unsigned integer to double expansion.
static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// This algorithm is not obvious. Here is what we're trying to output:
/*
movq %rax, %xmm0
punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
#ifdef __SSE3__
haddpd %xmm0, %xmm0
#else
pshufd $0x4e, %xmm0, %xmm1
addpd %xmm1, %xmm0
#endif
*/
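// For reference, here is why the constants above work: if the unsigned input
// is x = Hi * 2^32 + Lo, the punpckldq produces the two doubles with bit
// patterns { Lo, 0x43300000 } and { Hi, 0x45300000 }, i.e. the values
// (2^52 + Lo) and (2^84 + Hi * 2^32). Subtracting c1 = { 2^52, 2^84 } leaves
// { Lo, Hi * 2^32 }, and the final horizontal add produces Lo + Hi * 2^32 = x
// (with a single rounding step).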
SDLoc dl(Op);
LLVMContext *Context = DAG.getContext();
// Build some magic constants.
static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
Constant *C0 = ConstantDataVector::get(*Context, CV0);
auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
SmallVector<Constant*,2> CV1;
CV1.push_back(
ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
APInt(64, 0x4330000000000000ULL))));
CV1.push_back(
ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
APInt(64, 0x4530000000000000ULL))));
Constant *C1 = ConstantVector::get(CV1);
SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
// Load the 64-bit value into an XMM register.
SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
Op.getOperand(0));
SDValue CLod0 =
DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
/* Alignment = */ 16);
SDValue Unpck1 =
getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
SDValue CLod1 =
DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
/* Alignment = */ 16);
SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
// TODO: Are there any fast-math-flags to propagate here?
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
SDValue Result;
if (Subtarget.hasSSE3()) {
// FIXME: The 'haddpd' instruction may be slower than 'shuffle + addsd'.
Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
} else {
SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
}
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
DAG.getIntPtrConstant(0, dl));
}
/// 32-bit unsigned integer to float expansion.
static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
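// The trick below: OR the zero-extended 32-bit value into the mantissa of the
// double 2^52 (bias bit pattern 0x4330000000000000), giving 2^52 + x exactly,
// then subtract the bias. For example, an input of 7 yields the bit pattern
// 0x4330000000000007 == 2^52 + 7.0, and subtracting 2^52 leaves exactly 7.0.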
// FP constant to bias correct the final result.
SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
MVT::f64);
// Load the 32-bit value into an XMM register.
SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
Op.getOperand(0));
// Zero out the upper parts of the register.
Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getBitcast(MVT::v2f64, Load),
DAG.getIntPtrConstant(0, dl));
// Or the load with the bias.
SDValue Or = DAG.getNode(
ISD::OR, dl, MVT::v2i64,
DAG.getBitcast(MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
DAG.getBitcast(MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
Or =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
// Subtract the bias.
// TODO: Are there any fast-math-flags to propagate here?
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
// Handle final rounding.
return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
}
static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
if (Op.getSimpleValueType() != MVT::v2f64)
return SDValue();
SDValue N0 = Op.getOperand(0);
assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
// Legalize to v4i32 type.
N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
DAG.getUNDEF(MVT::v2i32));
if (Subtarget.hasAVX512())
return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
// Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT,
// but using v2i32 to v2f64 with X86ISD::CVTSI2P.
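// The expansion below relies on the exact decomposition
//   u == (u >> 16) * 2^16 + (u & 0xFFFF)
// where both halves are in [0, 65535] and therefore convert exactly with the
// signed CVTSI2P; e.g. u = 0xFFFFFFFF gives 65535.0 * 65536.0 + 65535.0 ==
// 4294967295.0, which is exact in double precision.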
SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32);
SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32);
// Two to the power of half-word-size.
SDValue TWOHW = DAG.getConstantFP(1 << 16, DL, MVT::v2f64);
// Clear upper part of LO, lower HI.
SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord);
SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask);
SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI);
fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW);
SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO);
// Add the two halves.
return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO);
}
static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// The algorithm is the following:
// #ifdef __SSE4_1__
// uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
// uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
// (uint4) 0x53000000, 0xaa);
// #else
// uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
// uint4 hi = (v >> 16) | (uint4) 0x53000000;
// #endif
// float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
// return (float4) lo + fhi;
// We shouldn't use it when unsafe-fp-math is enabled though: we might later
// reassociate the two FADDs, and if we do that, the algorithm fails
// spectacularly (PR24512).
// FIXME: If we ever have some kind of Machine FMF, this should be marked
// as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
// there's also the MachineCombiner reassociations happening on Machine IR.
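// For reference: bitcast as floats, lo == 2^23 + (v & 0xFFFF) and
// hi == 2^39 + (v >> 16) * 2^16, so fhi == (v >> 16) * 2^16 - 2^23 and
// lo + fhi == (v & 0xFFFF) + (v >> 16) * 2^16 == v, with only the final FADD
// rounding.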
if (DAG.getTarget().Options.UnsafeFPMath)
return SDValue();
SDLoc DL(Op);
SDValue V = Op->getOperand(0);
MVT VecIntVT = V.getSimpleValueType();
bool Is128 = VecIntVT == MVT::v4i32;
MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
// If we are converting to something other than the supported type, e.g. to
// v4f64, abort early.
if (VecFloatVT != Op->getSimpleValueType(0))
return SDValue();
assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
"Unsupported custom type");
// In the #ifdef/#else code above, we have in common:
// - The vector of constants:
// -- 0x4b000000
// -- 0x53000000
// - A shift:
// -- v >> 16
// Create the splat vector for 0x4b000000.
SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
// Create the splat vector for 0x53000000.
SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
// Create the right shift.
SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
SDValue Low, High;
if (Subtarget.hasSSE41()) {
MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
// uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
// Low will be bitcasted right away, so do not bother bitcasting back to its
// original type.
Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
// uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
// (uint4) 0x53000000, 0xaa);
SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
// High will be bitcasted right away, so do not bother bitcasting back to
// its original type.
High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
} else {
SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
// uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
// uint4 hi = (v >> 16) | (uint4) 0x53000000;
High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
}
// Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
SDValue VecCstFAdd = DAG.getConstantFP(
APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);
// float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
// TODO: Are there any fast-math-flags to propagate here?
SDValue FHigh =
DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
// return (float4) lo + fhi;
SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}
static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue N0 = Op.getOperand(0);
MVT SrcVT = N0.getSimpleValueType();
SDLoc dl(Op);
switch (SrcVT.SimpleTy) {
default:
llvm_unreachable("Custom UINT_TO_FP is not supported!");
case MVT::v2i32:
return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
case MVT::v4i32:
case MVT::v8i32:
assert(!Subtarget.hasAVX512());
return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
}
}
SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
SDValue N0 = Op.getOperand(0);
SDLoc dl(Op);
auto PtrVT = getPointerTy(DAG.getDataLayout());
if (Op.getSimpleValueType().isVector())
return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
MVT SrcVT = N0.getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
(SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
// Conversions from unsigned i32 to f32/f64 are legal,
// using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
return Op;
}
if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
return V;
if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
if (SrcVT == MVT::i32 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
return SDValue();
// Make a 64-bit buffer, and use it to build an FILD.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
if (SrcVT == MVT::i32) {
SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
StackSlot, MachinePointerInfo());
SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
OffsetSlot, MachinePointerInfo());
SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
return Fild;
}
assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
SDValue ValueToStore = Op.getOperand(0);
if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
// Bitcasting to f64 here allows us to do a single 64-bit store from
// an SSE register, avoiding the store forwarding penalty that would come
// with two 32-bit stores.
ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
MachinePointerInfo());
// For i64 source, we need to add the appropriate power of 2 if the input
// was negative. This is the same as the optimization in
// DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
// we must be careful to do the computation in x87 extended precision, not
// in SSE. (The generic code can't know it's OK to do this, or how to.)
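// The fudge constant below is 2^64 encoded as an f32 (0x5F800000). FILD
// reinterprets the stored 64 bits as signed, so an input with the top bit set
// comes back as (x - 2^64); adding 2^64 when the sign bit was set restores the
// unsigned value. E.g. x = 2^63 is loaded as -2^63 and corrected to 2^63.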
int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
MachineMemOperand::MOLoad, 8, 8);
SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
MVT::i64, MMO);
APInt FF(32, 0x5F800000ULL);
// Check whether the sign bit is set.
SDValue SignSet = DAG.getSetCC(
dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
// Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
SDValue FudgePtr = DAG.getConstantPool(
ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);
// Get a pointer to FF if the sign bit was set, or to 0 otherwise.
SDValue Zero = DAG.getIntPtrConstant(0, dl);
SDValue Four = DAG.getIntPtrConstant(4, dl);
SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
// Load the value out, extending it from f32 to f80.
// FIXME: Avoid the extend by constructing the right constant pool?
SDValue Fudge = DAG.getExtLoad(
ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
/* Alignment = */ 4);
// Extend everything to 80 bits to force it to be done on x87.
// TODO: Are there any fast-math-flags to propagate here?
SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
DAG.getIntPtrConstant(0, dl));
}
// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
// just return an <SDValue(), SDValue()> pair.
// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
// to i16, i32 or i64, and we lower it to a legal sequence.
// If lowered to the final integer result we return a <result, SDValue()> pair.
// Otherwise we lower it to a sequence ending with a FIST, return a
// <FIST, StackSlot> pair, and the caller is responsible for loading
// the final integer result from StackSlot.
std::pair<SDValue,SDValue>
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
bool IsSigned, bool IsReplace) const {
SDLoc DL(Op);
EVT DstTy = Op.getValueType();
EVT TheVT = Op.getOperand(0).getValueType();
auto PtrVT = getPointerTy(DAG.getDataLayout());
if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
// f16 must be promoted before using the lowering in this routine.
// fp128 does not use this lowering.
return std::make_pair(SDValue(), SDValue());
}
// If using FIST to compute an unsigned i64, we'll need some fixup
// to handle values above the maximum signed i64. A FIST is always
// used for the 32-bit subtarget, but also for f80 on a 64-bit target.
bool UnsignedFixup = !IsSigned &&
DstTy == MVT::i64 &&
(!Subtarget.is64Bit() ||
!isScalarFPTypeInSSEReg(TheVT));
if (!IsSigned && DstTy != MVT::i64 && !Subtarget.hasAVX512()) {
// Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
// The low 32 bits of the FIST result will hold the correct uint32 result.
assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
DstTy = MVT::i64;
}
assert(DstTy.getSimpleVT() <= MVT::i64 &&
DstTy.getSimpleVT() >= MVT::i16 &&
"Unknown FP_TO_INT to lower!");
// These are really Legal.
if (DstTy == MVT::i32 &&
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
if (Subtarget.is64Bit() &&
DstTy == MVT::i64 &&
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
// We lower FP->int64 into FISTP64 followed by a load from a temporary
// stack slot.
MachineFunction &MF = DAG.getMachineFunction();
unsigned MemSize = DstTy.getSizeInBits()/8;
int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
unsigned Opc;
switch (DstTy.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
}
SDValue Chain = DAG.getEntryNode();
SDValue Value = Op.getOperand(0);
SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
if (UnsignedFixup) {
//
// Conversion to unsigned i64 is implemented with a select,
// depending on whether the source value fits in the range
// of a signed i64. Let Thresh be the FP equivalent of
// 0x8000000000000000ULL.
//
// Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
// FistSrc = (Value < Thresh) ? Value : (Value - Thresh);
// Fist-to-mem64 FistSrc
// Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
// to XOR'ing the high 32 bits with Adjust.
//
// Being a power of 2, Thresh is exactly representable in all FP formats.
// For X87 we'd like to use the smallest FP type for this constant, but
// for DAG type consistency we have to match the FP operand type.
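// For example, with Value == 2^63 (exactly the threshold): FistSrc == 0.0, the
// FIST stores the i64 value 0, and XOR'ing the high dword with Adjust ==
// 0x80000000 produces 0x8000000000000000 == 2^63 as an unsigned i64.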
APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
bool LosesInfo = false;
if (TheVT == MVT::f64)
// The rounding mode is irrelevant as the conversion should be exact.
Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
&LosesInfo);
else if (TheVT == MVT::f80)
Status = Thresh.convert(APFloat::x87DoubleExtended(),
APFloat::rmNearestTiesToEven, &LosesInfo);
assert(Status == APFloat::opOK && !LosesInfo &&
"FP conversion should have been exact");
SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
SDValue Cmp = DAG.getSetCC(DL,
getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), TheVT),
Value, ThreshVal, ISD::SETLT);
Adjust = DAG.getSelect(DL, MVT::i32, Cmp,
DAG.getConstant(0, DL, MVT::i32),
DAG.getConstant(0x80000000, DL, MVT::i32));
SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), TheVT),
Value, ThreshVal, ISD::SETLT);
Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
}
// FIXME: This causes a redundant load/store if the SSE-class value is already
// in memory, such as when it is on the call stack.
if (isScalarFPTypeInSSEReg(TheVT)) {
assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, DL, Value, StackSlot,
MachinePointerInfo::getFixedStack(MF, SSFI));
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
SDValue Ops[] = {
Chain, StackSlot, DAG.getValueType(TheVT)
};
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
MachineMemOperand::MOLoad, MemSize, MemSize);
Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
Chain = Value.getValue(1);
SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
}
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
MachineMemOperand::MOStore, MemSize, MemSize);
if (UnsignedFixup) {
// Insert the FIST, load its result as two i32's,
// and XOR the high i32 with Adjust.
SDValue FistOps[] = { Chain, Value, StackSlot };
SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
FistOps, DstTy, MMO);
SDValue Low32 =
DAG.getLoad(MVT::i32, DL, FIST, StackSlot, MachinePointerInfo());
SDValue HighAddr = DAG.getMemBasePlusOffset(StackSlot, 4, DL);
SDValue High32 =
DAG.getLoad(MVT::i32, DL, FIST, HighAddr, MachinePointerInfo());
High32 = DAG.getNode(ISD::XOR, DL, MVT::i32, High32, Adjust);
if (Subtarget.is64Bit()) {
// Join High32 and Low32 into a 64-bit result.
// (High32 << 32) | Low32
Low32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Low32);
High32 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, High32);
High32 = DAG.getNode(ISD::SHL, DL, MVT::i64, High32,
DAG.getConstant(32, DL, MVT::i8));
SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i64, High32, Low32);
return std::make_pair(Result, SDValue());
}
SDValue ResultOps[] = { Low32, High32 };
SDValue pair = IsReplace
? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResultOps)
: DAG.getMergeValues(ResultOps, DL);
return std::make_pair(pair, SDValue());
} else {
// Build the FP_TO_INT*_IN_MEM
SDValue Ops[] = { Chain, Value, StackSlot };
SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
Ops, DstTy, MMO);
return std::make_pair(FIST, StackSlot);
}
}
static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
SDLoc dl(Op);
assert(VT.isVector() && InVT.isVector() && "Expected vector type");
assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
"Expected same number of elements");
assert((VT.getVectorElementType() == MVT::i16 ||
VT.getVectorElementType() == MVT::i32 ||
VT.getVectorElementType() == MVT::i64) &&
"Unexpected element type");
assert((InVT.getVectorElementType() == MVT::i8 ||
InVT.getVectorElementType() == MVT::i16 ||
InVT.getVectorElementType() == MVT::i32) &&
"Unexpected element type");
// Custom legalize v8i8->v8i64 on CPUs without avx512bw.
if (InVT == MVT::v8i8) {
if (!ExperimentalVectorWideningLegalization || VT != MVT::v8i64)
return SDValue();
In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
// FIXME: This should be ANY_EXTEND_VECTOR_INREG for ANY_EXTEND input.
return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, dl, VT, In);
}
if (Subtarget.hasInt256())
return Op;
// Optimize vectors in AVX mode:
//
// v8i16 -> v8i32
// Use vpmovzwd for 4 lower elements v8i16 -> v4i32.
// Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
// Concat upper and lower parts.
//
// v4i32 -> v4i64
// Use vpmovzdq for 4 lower elements v4i32 -> v2i64.
// Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
// Concat upper and lower parts.
//
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() / 2);
SDValue OpLo = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, dl, HalfVT, In);
SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
SDValue Undef = DAG.getUNDEF(InVT);
bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
OpHi = DAG.getBitcast(HalfVT, OpHi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
const SDLoc &dl, SelectionDAG &DAG) {
assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
DAG.getIntPtrConstant(0, dl));
SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
DAG.getIntPtrConstant(8, dl));
Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}
static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
SDLoc DL(Op);
unsigned NumElts = VT.getVectorNumElements();
// For all vectors but vXi8, we can just emit a sign_extend and a shift. This
// avoids a constant pool load.
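// E.g. for v8i1 -> v8i32, a set mask bit sign-extends to 0xFFFFFFFF and the
// logical shift right by 31 turns it into 1, while a clear bit stays 0.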
if (VT.getVectorElementType() != MVT::i8) {
SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
return DAG.getNode(ISD::SRL, DL, VT, Extend,
DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
}
// Extend VT if BWI is not supported.
MVT ExtVT = VT;
if (!Subtarget.hasBWI()) {
// If v16i32 is to be avoided, we'll need to split and concatenate.
if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
}
// Widen to 512-bits if VLX is not supported.
MVT WideVT = ExtVT;
if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
NumElts *= 512 / ExtVT.getSizeInBits();
InVT = MVT::getVectorVT(MVT::i1, NumElts);
In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
In, DAG.getIntPtrConstant(0, DL));
WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
NumElts);
}
SDValue One = DAG.getConstant(1, DL, WideVT);
SDValue Zero = DAG.getConstant(0, DL, WideVT);
SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
// Truncate if we had to extend above.
if (VT != ExtVT) {
WideVT = MVT::getVectorVT(MVT::i8, NumElts);
SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
}
// Extract back to 128/256-bit if we widened.
if (WideVT != VT)
SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
DAG.getIntPtrConstant(0, DL));
return SelectedVal;
}
static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op.getOperand(0);
MVT SVT = In.getSimpleValueType();
if (SVT.getVectorElementType() == MVT::i1)
return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
assert(Subtarget.hasAVX() && "Expected AVX support");
return LowerAVXExtend(Op, DAG, Subtarget);
}
/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
/// It makes use of the fact that vectors with enough leading sign/zero bits
/// prevent the PACKSS/PACKUS from saturating the results.
/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
/// within each 128-bit lane.
static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
const SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
"Unexpected PACK opcode");
assert(DstVT.isVector() && "VT not a vector?");
// Requires SSE2 but AVX512 has fast vector truncate.
if (!Subtarget.hasSSE2())
return SDValue();
EVT SrcVT = In.getValueType();
// No truncation required; we might get here due to recursive calls.
if (SrcVT == DstVT)
return In;
// We only support vector truncation to 64bits or greater from a
// 128bits or greater source.
unsigned DstSizeInBits = DstVT.getSizeInBits();
unsigned SrcSizeInBits = SrcVT.getSizeInBits();
if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
return SDValue();
unsigned NumElems = SrcVT.getVectorNumElements();
if (!isPowerOf2_32(NumElems))
return SDValue();
LLVMContext &Ctx = *DAG.getContext();
assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
// Pack to the largest type possible:
// vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
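// For example, a v8i32 -> v8i8 truncation packs dwords to words first and
// then recurses to pack words to bytes; each PACK halves the element size,
// which is why the caller must provide enough leading sign/zero bits so the
// saturation never changes the values.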
EVT InVT = MVT::i16, OutVT = MVT::i8;
if (SrcVT.getScalarSizeInBits() > 16 &&
(Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
InVT = MVT::i32;
OutVT = MVT::i16;
}
// 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
if (SrcVT.is128BitVector()) {
InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
In = DAG.getBitcast(InVT, In);
SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
Res = extractSubVector(Res, 0, DAG, DL, 64);
return DAG.getBitcast(DstVT, Res);
}
// Extract lower/upper subvectors.
unsigned NumSubElts = NumElems / 2;
SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
unsigned SubSizeInBits = SrcSizeInBits / 2;
InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
// 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
Lo = DAG.getBitcast(InVT, Lo);
Hi = DAG.getBitcast(InVT, Hi);
SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
return DAG.getBitcast(DstVT, Res);
}
// AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
// AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
Lo = DAG.getBitcast(InVT, Lo);
Hi = DAG.getBitcast(InVT, Hi);
SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
// 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
// so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
Res = DAG.getBitcast(MVT::v4i64, Res);
Res = DAG.getVectorShuffle(MVT::v4i64, DL, Res, Res, {0, 2, 1, 3});
if (DstVT.is256BitVector())
return DAG.getBitcast(DstVT, Res);
// If 512bit -> 128bit truncate another stage.
EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
Res = DAG.getBitcast(PackedVT, Res);
return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
}
// Recursively pack lower/upper subvectors, concat result and pack again.
assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
}
static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT InVT = In.getSimpleValueType();
assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
// Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
if (InVT.getScalarSizeInBits() <= 16) {
if (Subtarget.hasBWI()) {
// legal, will go to VPMOVB2M, VPMOVW2M
if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
// We need to shift to get the lsb into sign position.
// Shifting packed bytes is not supported natively, so bitcast to words.
MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
In = DAG.getNode(ISD::SHL, DL, ExtVT,
DAG.getBitcast(ExtVT, In),
DAG.getConstant(ShiftInx, DL, ExtVT));
In = DAG.getBitcast(InVT, In);
}
return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
In, ISD::SETGT);
}
// Use TESTD/Q after extending the vector to packed dwords/qwords.
assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
"Unexpected vector type.");
unsigned NumElts = InVT.getVectorNumElements();
assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
// We need to change to a wider element type that we have support for.
// For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
// For 16 element vectors we extend to v16i32 unless we are explicitly
// trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
// we need to split into two 8 element vectors which we can extend to v8i32,
// truncate and concat the results. There's an additional complication if
// the original type is v16i8. In that case we can't split the v16i8 so
// first we pre-extend it to v16i16 which we can split to v8i16, then extend
// to v8i32, truncate that to v8i1 and concat the two halves.
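// Sketch of the v16i8 path without 512-bit vectors:
//   v16i8 --sext--> v16i16 --split--> 2 x v8i16 --truncate--> 2 x v8i1
//   --concat--> v16i1
// where each v8i16 truncate re-enters this lowering and extends to v8i32.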
if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
if (InVT == MVT::v16i8) {
// First we need to sign extend up to 256-bits so we can split that.
InVT = MVT::v16i16;
In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
}
SDValue Lo = extract128BitVector(In, 0, DAG, DL);
SDValue Hi = extract128BitVector(In, 8, DAG, DL);
// We're split now, just emit two truncates and a concat. The two
// truncates will trigger legalization to come back to this function.
Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
// We either have 8 elements or we're allowed to use 512-bit vectors.
// If we have VLX, we want to use the narrowest vector that can get the
// job done so we use vXi32.
MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
InVT = ExtVT;
ShiftInx = InVT.getScalarSizeInBits() - 1;
}
if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
// We need to shift to get the lsb into sign position.
In = DAG.getNode(ISD::SHL, DL, InVT, In,
DAG.getConstant(ShiftInx, DL, InVT));
}
// If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
if (Subtarget.hasDQI())
return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
}
SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT InVT = In.getSimpleValueType();
unsigned InNumEltBits = InVT.getScalarSizeInBits();
assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
"Invalid TRUNCATE operation");
// If called by the legalizer just return.
if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
return SDValue();
if (VT.getVectorElementType() == MVT::i1)
return LowerTruncateVecI1(Op, DAG, Subtarget);
// vpmovqb/w/d, vpmovdb/w, vpmovwb
if (Subtarget.hasAVX512()) {
// Word to byte is only legal under BWI. Otherwise we have to promote to
// v16i32 and then truncate that. But we should only do that if we haven't been
// asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
// handled by isel patterns.
if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
Subtarget.canExtendTo512DQ())
return Op;
}
unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
// Truncate with PACKUS if we are truncating a vector with leading zero bits
// that extend all the way to the packed/truncated value.
// Pre-SSE41 we can only use PACKUSWB.
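// E.g. a v8i32 -> v8i16 truncation can use PACKUSDW when the top 16 bits of
// every element are known zero (unsigned saturation to [0, 65535] is then a
// no-op), and PACKSSDW when each element has at least 17 sign bits (signed
// saturation to [-32768, 32767] is then a no-op).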
KnownBits Known = DAG.computeKnownBits(In);
if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
if (SDValue V =
truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
return V;
// Truncate with PACKSS if we are truncating a vector with sign-bits that
// extend all the way to the packed/truncated value.
if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
if (SDValue V =
truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
return V;
if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
// On AVX2, v4i64 -> v4i32 becomes VPERMD.
if (Subtarget.hasInt256()) {
static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
In = DAG.getBitcast(MVT::v8i32, In);
In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
DAG.getIntPtrConstant(0, DL));
}
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(0, DL));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(2, DL));
OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
static const int ShufMask[] = {0, 2, 4, 6};
return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
}
if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
// On AVX2, v8i32 -> v8i16 becomes PSHUFB.
if (Subtarget.hasInt256()) {
In = DAG.getBitcast(MVT::v32i8, In);
// The PSHUFB mask:
static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
-1, -1, -1, -1, -1, -1, -1, -1,
16, 17, 20, 21, 24, 25, 28, 29,
-1, -1, -1, -1, -1, -1, -1, -1 };
In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
In = DAG.getBitcast(MVT::v4i64, In);
static const int ShufMask2[] = {0, 2, -1, -1};
In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(0, DL));
return DAG.getBitcast(VT, In);
}
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
DAG.getIntPtrConstant(0, DL));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
DAG.getIntPtrConstant(4, DL));
OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
// The PSHUFB mask:
static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
-1, -1, -1, -1, -1, -1, -1, -1};
OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
// The MOVLHPS Mask:
static const int ShufMask2[] = {0, 1, 4, 5};
SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
return DAG.getBitcast(MVT::v8i16, res);
}
if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
// Use an AND to zero the upper bits for PACKUS.
In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
DAG.getIntPtrConstant(0, DL));
SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
DAG.getIntPtrConstant(8, DL));
return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
}
// Handle truncation of V256 to V128 using shuffles.
assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
assert(Subtarget.hasAVX() && "256-bit vector without AVX!");
unsigned NumElems = VT.getVectorNumElements();
MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
SmallVector<int, 16> MaskVec(NumElems * 2, -1);
// Prepare truncation shuffle mask
for (unsigned i = 0; i != NumElems; ++i)
MaskVec[i] = i * 2;
In = DAG.getBitcast(NVT, In);
SDValue V = DAG.getVectorShuffle(NVT, DL, In, In, MaskVec);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
DAG.getIntPtrConstant(0, DL));
}
SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
MVT VT = Op.getSimpleValueType();
if (VT.isVector()) {
SDValue Src = Op.getOperand(0);
SDLoc dl(Op);
if (VT == MVT::v2i1 && Src.getSimpleValueType() == MVT::v2f64) {
MVT ResVT = MVT::v4i32;
MVT TruncVT = MVT::v4i1;
unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
if (!IsSigned && !Subtarget.hasVLX()) {
// Widen to 512-bits.
ResVT = MVT::v8i32;
TruncVT = MVT::v8i1;
Opc = ISD::FP_TO_UINT;
Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
DAG.getUNDEF(MVT::v8f64),
Src, DAG.getIntPtrConstant(0, dl));
}
SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
DAG.getIntPtrConstant(0, dl));
}
assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
if (VT == MVT::v2i64 && Src.getSimpleValueType() == MVT::v2f32) {
return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
DAG.getUNDEF(MVT::v2f32)));
}
return SDValue();
}
assert(!VT.isVector());
std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
IsSigned, /*IsReplace=*/ false);
SDValue FIST = Vals.first, StackSlot = Vals.second;
// If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
if (!FIST.getNode())
return Op;
if (StackSlot.getNode())
// Load the result.
return DAG.getLoad(VT, SDLoc(Op), FIST, StackSlot, MachinePointerInfo());
// The node is the result.
return FIST;
}
static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT SVT = In.getSimpleValueType();
assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
return DAG.getNode(X86ISD::VFPEXT, DL, VT,
DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
In, DAG.getUNDEF(SVT)));
}
/// Horizontal vector math instructions may be slower than normal math with
/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
/// implementation, and likely shuffle complexity of the alternate sequence.
static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize();
bool HasFastHOps = Subtarget.hasFastHorizontalOps();
return !IsSingleSource || IsOptimizingSize || HasFastHOps;
}
/// Depending on uarch and/or optimizing for size, we might prefer to use a
/// vector operation in place of the typical scalar operation.
static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// If both operands have other uses, this is probably not profitable.
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
if (!LHS.hasOneUse() && !RHS.hasOneUse())
return Op;
// FP horizontal add/sub were added with SSE3. Integer with SSSE3.
bool IsFP = Op.getSimpleValueType().isFloatingPoint();
if (IsFP && !Subtarget.hasSSE3())
return Op;
if (!IsFP && !Subtarget.hasSSSE3())
return Op;
// Defer forming the minimal horizontal op if the vector source has more than
// the 2 extract element uses that we're matching here. In that case, we might
// form a horizontal op that includes more than 1 add/sub op.
if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
LHS.getOperand(0) != RHS.getOperand(0) ||
!LHS.getOperand(0)->hasNUsesOfValue(2, 0))
return Op;
if (!isa<ConstantSDNode>(LHS.getOperand(1)) ||
!isa<ConstantSDNode>(RHS.getOperand(1)) ||
!shouldUseHorizontalOp(true, DAG, Subtarget))
return Op;
// Allow commuted 'hadd' ops.
// TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
unsigned HOpcode;
switch (Op.getOpcode()) {
case ISD::ADD: HOpcode = X86ISD::HADD; break;
case ISD::SUB: HOpcode = X86ISD::HSUB; break;
case ISD::FADD: HOpcode = X86ISD::FHADD; break;
case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
default:
llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
}
unsigned LExtIndex = LHS.getConstantOperandVal(1);
unsigned RExtIndex = RHS.getConstantOperandVal(1);
if (LExtIndex == 1 && RExtIndex == 0 &&
(HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
std::swap(LExtIndex, RExtIndex);
// TODO: This can be extended to handle other adjacent extract pairs.
if (LExtIndex != 0 || RExtIndex != 1)
return Op;
SDValue X = LHS.getOperand(0);
EVT VecVT = X.getValueType();
unsigned BitWidth = VecVT.getSizeInBits();
assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
"Not expecting illegal vector widths here");
// Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
// equivalent, so extract the 256/512-bit source op to 128-bit.
// This is free: ymm/zmm -> xmm.
SDLoc DL(Op);
if (BitWidth == 256 || BitWidth == 512)
X = extract128BitVector(X, 0, DAG, DL);
// add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
// add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
// sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
DAG.getIntPtrConstant(0, DL));
}
/// Depending on uarch and/or optimizing for size, we might prefer to use a
/// vector operation in place of the typical scalar operation.
static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
"Only expecting float/double");
return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
}
/// The only differences between FABS and FNEG are the mask and the logic op.
/// FNEG also has a folding opportunity for FNEG(FABS(x)).
static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
"Wrong opcode for lowering FABS or FNEG.");
bool IsFABS = (Op.getOpcode() == ISD::FABS);
// If this is a FABS and it has an FNEG user, bail out to fold the combination
// into an FNABS. We'll lower the FABS after that if it is still in use.
if (IsFABS)
for (SDNode *User : Op->uses())
if (User->getOpcode() == ISD::FNEG)
return Op;
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
bool IsF128 = (VT == MVT::f128);
assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
"Unexpected type in LowerFABSorFNEG");
// FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
// decide if we should generate a 16-byte constant mask when we only need 4 or
// 8 bytes for the scalar case.
// There are no scalar bitwise logical SSE/AVX instructions, so we
// generate a 16-byte vector constant and logic op even for the scalar case.
// Using a 16-byte mask allows folding the load of the mask with
// the logic op, so it can save (~4 bytes) on code size.
bool IsFakeVector = !VT.isVector() && !IsF128;
MVT LogicVT = VT;
if (IsFakeVector)
LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
unsigned EltBits = VT.getScalarSizeInBits();
// For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
APInt::getSignMask(EltBits);
const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
SDValue Op0 = Op.getOperand(0);
bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
unsigned LogicOp = IsFABS ? X86ISD::FAND :
IsFNABS ? X86ISD::FOR :
X86ISD::FXOR;
SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
if (VT.isVector() || IsF128)
return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
// For the scalar case extend to a 128-bit vector, perform the logic op,
// and extract the scalar result back out.
Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
DAG.getIntPtrConstant(0, dl));
}
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue Mag = Op.getOperand(0);
SDValue Sign = Op.getOperand(1);
SDLoc dl(Op);
// If the sign operand is smaller, extend it first.
MVT VT = Op.getSimpleValueType();
if (Sign.getSimpleValueType().bitsLT(VT))
Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
// And if it is bigger, shrink it first.
if (Sign.getSimpleValueType().bitsGT(VT))
Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
// At this point the operands and the result should have the same
// type, and that won't be f80 since that is not custom lowered.
bool IsF128 = (VT == MVT::f128);
assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
"Unexpected type in LowerFCOPYSIGN");
const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
// Perform all scalar logic operations as 16-byte vectors because there are no
// scalar FP logic instructions in SSE.
// TODO: This isn't necessary. If we used scalar types, we might avoid some
// unnecessary splats, but we might miss load folding opportunities. Should
// this decision be based on OptimizeForSize?
bool IsFakeVector = !VT.isVector() && !IsF128;
MVT LogicVT = VT;
if (IsFakeVector)
LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
// The mask constants are automatically splatted for vector types.
unsigned EltSizeInBits = VT.getScalarSizeInBits();
SDValue SignMask = DAG.getConstantFP(
APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
SDValue MagMask = DAG.getConstantFP(
APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
// First, clear all bits but the sign bit from the second operand (sign).
if (IsFakeVector)
Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
// Next, clear the sign bit from the first operand (magnitude).
// TODO: If we had general constant folding for FP logic ops, this check
// wouldn't be necessary.
SDValue MagBits;
if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
APFloat APF = Op0CN->getValueAPF();
APF.clearSign();
MagBits = DAG.getConstantFP(APF, dl, LogicVT);
} else {
// If the magnitude operand wasn't a constant, we need to AND out the sign.
if (IsFakeVector)
Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
}
// OR the magnitude value with the sign bit.
SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
DAG.getIntPtrConstant(0, dl));
}
static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue N0 = Op.getOperand(0);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT OpVT = N0.getSimpleValueType();
assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
"Unexpected type for FGETSIGN");
// Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
Res = DAG.getZExtOrTrunc(Res, dl, VT);
Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
return Res;
}
/// Helper for creating a X86ISD::SETCC node.
static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
SelectionDAG &DAG) {
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(Cond, dl, MVT::i8), EFLAGS);
}
// Check whether an OR'd tree is PTEST-able.
static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
const X86Subtarget &Subtarget,
SelectionDAG &DAG,
SDValue &X86CC) {
assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
if (!Subtarget.hasSSE41())
return SDValue();
if (!Op->hasOneUse())
return SDValue();
SDNode *N = Op.getNode();
SDLoc DL(N);
SmallVector<SDValue, 8> Opnds;
DenseMap<SDValue, unsigned> VecInMap;
SmallVector<SDValue, 8> VecIns;
EVT VT = MVT::Other;
// Recognize a special case where a vector is cast into a wide integer to
// test for all zeros.
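// E.g. an all-zeros test of the form
//   (or (extractelt v, 0), (or (extractelt v, 1), ...)) == 0
// covering every lane of v is turned into a single PTEST of v against itself,
// which sets ZF exactly when every element of v is zero.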
Opnds.push_back(N->getOperand(0));
Opnds.push_back(N->getOperand(1));
for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
// BFS traverse all OR'd operands.
if (I->getOpcode() == ISD::OR) {
Opnds.push_back(I->getOperand(0));
Opnds.push_back(I->getOperand(1));
// Re-evaluate the number of nodes to be traversed.
e += 2; // 2 more nodes (LHS and RHS) are pushed.
continue;
}
// Quit if this is not an EXTRACT_VECTOR_ELT.
if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
// Quit if the extract does not have a constant index.
SDValue Idx = I->getOperand(1);
if (!isa<ConstantSDNode>(Idx))
return SDValue();
SDValue ExtractedFromVec = I->getOperand(0);
DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
if (M == VecInMap.end()) {
VT = ExtractedFromVec.getValueType();
// Quit if not 128/256-bit vector.
if (!VT.is128BitVector() && !VT.is256BitVector())
return SDValue();
// Quit if not the same type.
if (VecInMap.begin() != VecInMap.end() &&
VT != VecInMap.begin()->first.getValueType())
return SDValue();
M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
VecIns.push_back(ExtractedFromVec);
}
M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
}
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Not extracted from 128-/256-bit vector.");
unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
for (DenseMap<SDValue, unsigned>::const_iterator
I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
// Quit if not all elements are used.
if (I->second != FullMask)
return SDValue();
}
MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
// Cast all vectors into TestVT for PTEST.
for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
// If more than one full vector is evaluated, OR them first before PTEST.
for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
// Each iteration will OR 2 nodes and append the result until there is only
// 1 node left, i.e. the final OR'd value of all vectors.
SDValue LHS = VecIns[Slot];
SDValue RHS = VecIns[Slot + 1];
VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
}
X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
DL, MVT::i8);
return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
VecIns.back(), VecIns.back());
}
/// Return true if \c Op has a use that doesn't just read flags.
static bool hasNonFlagsUse(SDValue Op) {
for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
++UI) {
SDNode *User = *UI;
unsigned UOpNo = UI.getOperandNo();
if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
// Look past the truncate.
UOpNo = User->use_begin().getOperandNo();
User = *User->use_begin();
}
if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
!(User->getOpcode() == ISD::SELECT && UOpNo == 0))
return true;
}
return false;
}
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG, const X86Subtarget &Subtarget) {
// CF and OF aren't always set the way we want. Determine which
// of these we need.
bool NeedCF = false;
bool NeedOF = false;
switch (X86CC) {
default: break;
case X86::COND_A: case X86::COND_AE:
case X86::COND_B: case X86::COND_BE:
NeedCF = true;
break;
case X86::COND_G: case X86::COND_GE:
case X86::COND_L: case X86::COND_LE:
case X86::COND_O: case X86::COND_NO: {
// Check if we really need to set the Overflow flag. If NoSignedWrap is
// present, that is not actually needed.
switch (Op->getOpcode()) {
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::SHL:
if (Op.getNode()->getFlags().hasNoSignedWrap())
break;
LLVM_FALLTHROUGH;
default:
NeedOF = true;
break;
}
break;
}
}
// See if we can use the EFLAGS value from the operand instead of
// doing a separate TEST. TEST always sets OF and CF to 0, so unless
// we prove that the arithmetic won't overflow, we can't use OF or CF.
if (Op.getResNo() != 0 || NeedOF || NeedCF) {
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
}
unsigned Opcode = 0;
unsigned NumOperands = 0;
SDValue ArithOp = Op;
// NOTICE: In the code below we use ArithOp to hold the arithmetic operation
// which may be the result of a CAST. We use the variable 'Op', which is the
// non-casted variable when we check for possible users.
switch (ArithOp.getOpcode()) {
case ISD::AND:
// If the primary 'and' result isn't used, don't bother using X86ISD::AND,
// because a TEST instruction will be better.
if (!hasNonFlagsUse(Op))
break;
LLVM_FALLTHROUGH;
case ISD::ADD:
case ISD::SUB:
case ISD::OR:
case ISD::XOR:
// Transform to an x86-specific ALU node with flags if there is a chance of
// using an RMW op or only the flags are used. Otherwise, leave
// the node alone and emit a 'test' instruction.
for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
UE = Op.getNode()->use_end(); UI != UE; ++UI)
if (UI->getOpcode() != ISD::CopyToReg &&
UI->getOpcode() != ISD::SETCC &&
UI->getOpcode() != ISD::STORE)
goto default_case;
// Otherwise use a regular EFLAGS-setting instruction.
switch (ArithOp.getOpcode()) {
default: llvm_unreachable("unexpected operator!");
case ISD::ADD: Opcode = X86ISD::ADD; break;
case ISD::SUB: Opcode = X86ISD::SUB; break;
case ISD::XOR: Opcode = X86ISD::XOR; break;
case ISD::AND: Opcode = X86ISD::AND; break;
case ISD::OR: Opcode = X86ISD::OR; break;
}
NumOperands = 2;
break;
case X86ISD::ADD:
case X86ISD::SUB:
case X86ISD::OR:
case X86ISD::XOR:
case X86ISD::AND:
return SDValue(Op.getNode(), 1);
default:
default_case:
break;
}
if (Opcode == 0) {
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
}
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
return SDValue(New.getNode(), 1);
}
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
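/// As a hedged example: a 16-bit compare against an immediate that does not
/// fit in a signed 8-bit field (say x == 1000) is widened here to a 32-bit
/// compare, avoiding a length-changing 16-bit immediate prefix, except when
/// optimizing for minimum size or targeting Atom.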
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
const SDLoc &dl, SelectionDAG &DAG) const {
if (isNullConstant(Op1))
return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
// Only promote the compare up to i32 if it is a 16-bit operation
// with an immediate. 16-bit immediates are to be avoided.
if (Op0.getValueType() == MVT::i16 &&
((isa<ConstantSDNode>(Op0) &&
!cast<ConstantSDNode>(Op0)->getAPIntValue().isSignedIntN(8)) ||
(isa<ConstantSDNode>(Op1) &&
!cast<ConstantSDNode>(Op1)->getAPIntValue().isSignedIntN(8))) &&
!DAG.getMachineFunction().getFunction().optForMinSize() &&
!Subtarget.isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
}
// Use SUB instead of CMP to enable CSE between SUB and CMP.
SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
return SDValue(Sub.getNode(), 1);
}
assert(Op0.getValueType().isFloatingPoint() && "Unexpected VT!");
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}
/// Convert a comparison if required by the subtarget.
SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
SelectionDAG &DAG) const {
// If the subtarget does not support the FUCOMI instruction, floating-point
// comparisons have to be converted.
if (Subtarget.hasCMov() ||
Cmp.getOpcode() != X86ISD::CMP ||
!Cmp.getOperand(0).getValueType().isFloatingPoint() ||
!Cmp.getOperand(1).getValueType().isFloatingPoint())
return Cmp;
// The instruction selector will select an FUCOM instruction instead of
// FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
// build an SDNode sequence that transfers the result from FPSW into EFLAGS:
// (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
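// As a rough illustration (editor's sketch), the selected code typically looks
// something like:
//   fucom  %st(1)
//   fnstsw %ax
//   sahf
// which copies C0/C2/C3 from the FPU status word into CF/PF/ZF in EFLAGS.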
SDLoc dl(Cmp);
SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
DAG.getConstant(8, dl, MVT::i8));
SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
// Some 64-bit targets lack SAHF support, but they do support FCOMI.
assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
/// Check if replacement of SQRT with RSQRT should be disabled.
bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
// We never want to use both SQRT and RSQRT instructions for the same input.
if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
return false;
if (VT.isVector())
return Subtarget.hasFastVectorFSQRT();
return Subtarget.hasFastScalarFSQRT();
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
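/// As a reference sketch, one Newton-Raphson step for y ~= 1/sqrt(x) is
///   y1 = y0 * (1.5 - 0.5 * x * y0 * y0)
/// which roughly doubles the number of correct bits per step.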
SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
SelectionDAG &DAG, int Enabled,
int &RefinementSteps,
bool &UseOneConstNR,
bool Reciprocal) const {
EVT VT = Op.getValueType();
// SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
// It is likely not profitable to do this for f64 because a double-precision
// rsqrt estimate with refinement on x86 prior to FMA requires at least 16
// instructions: convert to single, rsqrtss, convert back to double, refine
// (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
// along with FMA, this could be a throughput win.
// TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
// type after legalize types.
if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
(VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
(VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
(VT == MVT::v8f32 && Subtarget.hasAVX()) ||
(VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
if (RefinementSteps == ReciprocalEstimate::Unspecified)
RefinementSteps = 1;
UseOneConstNR = false;
// There is no FRSQRT for 512-bits, but there is RSQRT14.
unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
}
return SDValue();
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
int Enabled,
int &RefinementSteps) const {
EVT VT = Op.getValueType();
// SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
// It is likely not profitable to do this for f64 because a double-precision
// reciprocal estimate with refinement on x86 prior to FMA requires
// 15 instructions: convert to single, rcpss, convert back to double, refine
// (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
// along with FMA, this could be a throughput win.
if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
(VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
(VT == MVT::v8f32 && Subtarget.hasAVX()) ||
(VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
// Enable estimate codegen with 1 refinement step for vector division.
// Scalar division estimates are disabled because they break too much
// real-world code. These defaults are intended to match GCC behavior.
if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
return SDValue();
if (RefinementSteps == ReciprocalEstimate::Unspecified)
RefinementSteps = 1;
// There is no FRCP for 512-bits, but there is RCP14.
unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
}
return SDValue();
}
/// If we have at least two divisions that use the same divisor, convert to
/// multiplication by a reciprocal. This may need to be adjusted for a given
/// CPU if a division's cost is not at least twice the cost of a multiplication.
/// This is because we still need one division to calculate the reciprocal and
/// then we need two multiplies by that reciprocal as replacements for the
/// original divisions.
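/// Illustrative sketch (not from the original source): with this threshold,
///   x = a / d; y = b / d;
/// may be rewritten by the generic combine into
///   r = 1.0 / d; x = a * r; y = b * r;
/// trading two divides for one divide plus two multiplies.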
unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
return 2;
}
/// Result of 'and' is compared against zero. Change to a BT node if possible.
/// Returns the BT node and the condition code needed to use it.
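/// Hedged example: for the pattern
///   %m = and i32 (shl i32 1, %n), %x
///   %c = icmp eq i32 %m, 0
/// this returns (X86ISD::BT %x, %n) with X86CC = X86::COND_AE (bit clear means
/// CF == 0), which selects to a "bt" instruction plus a carry-based setcc.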
static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
const SDLoc &dl, SelectionDAG &DAG,
SDValue &X86CC) {
assert(And.getOpcode() == ISD::AND && "Expected AND node!");
SDValue Op0 = And.getOperand(0);
SDValue Op1 = And.getOperand(1);
if (Op0.getOpcode() == ISD::TRUNCATE)
Op0 = Op0.getOperand(0);
if (Op1.getOpcode() == ISD::TRUNCATE)
Op1 = Op1.getOperand(0);
SDValue Src, BitNo;
if (Op1.getOpcode() == ISD::SHL)
std::swap(Op0, Op1);
if (Op0.getOpcode() == ISD::SHL) {
if (isOneConstant(Op0.getOperand(0))) {
// If we looked past a truncate, check that it's only truncating away
// known zeros.
unsigned BitWidth = Op0.getValueSizeInBits();
unsigned AndBitWidth = And.getValueSizeInBits();
if (BitWidth > AndBitWidth) {
KnownBits Known = DAG.computeKnownBits(Op0);
if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
return SDValue();
}
Src = Op1;
BitNo = Op0.getOperand(1);
}
} else if (Op1.getOpcode() == ISD::Constant) {
ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
uint64_t AndRHSVal = AndRHS->getZExtValue();
SDValue AndLHS = Op0;
if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
Src = AndLHS.getOperand(0);
BitNo = AndLHS.getOperand(1);
} else {
// Use BT if the immediate can't be encoded in a TEST instruction or we
// are optimizing for size and the immediate won't fit in a byte.
bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
isPowerOf2_64(AndRHSVal)) {
Src = AndLHS;
BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
Src.getValueType());
}
}
}
// No patterns found, give up.
if (!Src.getNode())
return SDValue();
// If Src is i8, promote it to i32 with any_extend. There is no i8 BT
// instruction. Since the shift amount is in-range-or-undefined, we know
// that doing a bittest on the i32 value is ok. We extend to i32 because
// the encoding for the i16 version is larger than the i32 version.
// Also promote i16 to i32 for performance / code size reasons.
if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
// See if we can use the 32-bit instruction instead of the 64-bit one for a
// shorter encoding. Since the former takes the modulo 32 of BitNo and the
// latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
// known to be zero.
if (Src.getValueType() == MVT::i64 &&
DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
// If the operand types disagree, extend the shift amount to match. Since
// BT ignores high bits (like shifts) we can use anyextend.
if (Src.getValueType() != BitNo.getValueType())
BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
dl, MVT::i8);
return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
}
/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
/// CMPs.
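/// For instance (illustrative only): ISD::SETOLT maps to immediate 1 (LT),
/// while ISD::SETOGT swaps the operands and also uses 1, because SSE encodes
/// only the "less-than" direction directly (e.g. cmpltps/cmpltss).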
static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
SDValue &Op1) {
unsigned SSECC;
bool Swap = false;
// SSE Condition code mapping:
// 0 - EQ
// 1 - LT
// 2 - LE
// 3 - UNORD
// 4 - NEQ
// 5 - NLT
// 6 - NLE
// 7 - ORD
switch (SetCCOpcode) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETOEQ:
case ISD::SETEQ: SSECC = 0; break;
case ISD::SETOGT:
case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETLT:
case ISD::SETOLT: SSECC = 1; break;
case ISD::SETOGE:
case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETLE:
case ISD::SETOLE: SSECC = 2; break;
case ISD::SETUO: SSECC = 3; break;
case ISD::SETUNE:
case ISD::SETNE: SSECC = 4; break;
case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETUGE: SSECC = 5; break;
case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETUGT: SSECC = 6; break;
case ISD::SETO: SSECC = 7; break;
case ISD::SETUEQ: SSECC = 8; break;
case ISD::SETONE: SSECC = 12; break;
}
if (Swap)
std::swap(Op0, Op1);
return SSECC;
}
/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
/// concatenate the result back.
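/// E.g. (sketch) a v8i32 compare on an AVX1-only target becomes two v4i32
/// compares on the low and high 128-bit halves whose results are rejoined
/// with CONCAT_VECTORS.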
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
"Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
SDLoc dl(Op);
SDValue CC = Op.getOperand(2);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
// Issue the operation on the smaller types and concatenate the result back
MVT EltVT = VT.getVectorElementType();
MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}
static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
assert(VT.getVectorElementType() == MVT::i1 &&
"Cannot set masked compare for this operation");
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
// If this is a seteq make sure any build vectors of all zeros are on the RHS.
// This helps with vptestm matching.
// TODO: Should we just canonicalize the setcc during DAG combine?
if ((SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE) &&
ISD::isBuildVectorAllZeros(Op0.getNode()))
std::swap(Op0, Op1);
// Prefer SETGT over SETLT.
if (SetCCOpcode == ISD::SETLT) {
SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
std::swap(Op0, Op1);
}
return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
}
/// Given a simple buildvector constant, return a new vector constant with each
/// element decremented. If decrementing would result in underflow or this
/// is not a simple vector constant, return an empty value.
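/// For example (editor's sketch), <4 x i16> <1, 2, 3, 4> becomes
/// <0, 1, 2, 3>, while any vector containing a zero element returns an empty
/// SDValue because decrementing it would underflow.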
static SDValue decrementVectorConstant(SDValue V, SelectionDAG &DAG) {
auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
if (!BV)
return SDValue();
MVT VT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElts = VT.getVectorNumElements();
SmallVector<SDValue, 8> NewVecC;
SDLoc DL(V);
for (unsigned i = 0; i < NumElts; ++i) {
auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
return SDValue();
// Avoid underflow.
if (Elt->getAPIntValue().isNullValue())
return SDValue();
NewVecC.push_back(DAG.getConstant(Elt->getAPIntValue() - 1, DL, EltVT));
}
return DAG.getBuildVector(VT, DL, NewVecC);
}
/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
/// Op0 u<= Op1:
/// t = psubus Op0, Op1
/// pcmpeq t, <0..0>
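/// A hedged example: for a v8i16 "x u< splat(10)" on a pre-AVX SSE2 target,
/// the constant is first decremented to splat(9) so the compare becomes
/// "x u<= splat(9)", i.e. (pcmpeq (psubus x, splat(9)), 0), which is all-ones
/// exactly when x <= 9.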
static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
ISD::CondCode Cond, const SDLoc &dl,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (!Subtarget.hasSSE2())
return SDValue();
MVT VET = VT.getVectorElementType();
if (VET != MVT::i8 && VET != MVT::i16)
return SDValue();
switch (Cond) {
default:
return SDValue();
case ISD::SETULT: {
// If the comparison is against a constant we can turn this into a
// setule. With psubus, setule does not require a swap. This is
// beneficial because the constant in the register is no longer
// clobbered as the destination, so it can be hoisted out of a loop.
// Only do this pre-AVX, since vpcmp* is no longer destructive.
if (Subtarget.hasAVX())
return SDValue();
SDValue ULEOp1 = decrementVectorConstant(Op1, DAG);
if (!ULEOp1)
return SDValue();
Op1 = ULEOp1;
break;
}
// Psubus is better than flip-sign because it requires no inversion.
case ISD::SETUGE:
std::swap(Op0, Op1);
break;
case ISD::SETULE:
break;
}
SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
DAG.getConstant(0, dl, VT));
}
static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
MVT VT = Op.getSimpleValueType();
ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
SDLoc dl(Op);
if (isFP) {
#ifndef NDEBUG
MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif
unsigned Opc;
if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
assert(VT.getVectorNumElements() <= 16);
Opc = X86ISD::CMPM;
} else {
Opc = X86ISD::CMPP;
// The SSE/AVX packed FP comparison nodes are defined with a
// floating-point vector result that matches the operand type. This allows
// them to work with an SSE1 target (integer vector types are not legal).
VT = Op0.getSimpleValueType();
}
// In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
// emit two comparisons and a logic op to tie them together.
SDValue Cmp;
unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
if (SSECC >= 8 && !Subtarget.hasAVX()) {
// LLVM predicate is SETUEQ or SETONE.
unsigned CC0, CC1;
unsigned CombineOpc;
if (Cond == ISD::SETUEQ) {
CC0 = 3; // UNORD
CC1 = 0; // EQ
CombineOpc = X86ISD::FOR;
} else {
assert(Cond == ISD::SETONE);
CC0 = 7; // ORD
CC1 = 4; // NEQ
CombineOpc = X86ISD::FAND;
}
SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC0, dl, MVT::i8));
SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC1, dl, MVT::i8));
Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
} else {
// Handle all other FP comparisons here.
Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(SSECC, dl, MVT::i8));
}
// If this is SSE/AVX CMPP, bitcast the result back to integer to match the
// result type of SETCC. The bitcast is expected to be optimized away
// during combining/isel.
if (Opc == X86ISD::CMPP)
Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
return Cmp;
}
MVT VTOp0 = Op0.getSimpleValueType();
assert(VTOp0 == Op1.getSimpleValueType() &&
"Expected operands with same type!");
assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
"Invalid number of packed elements for source and destination!");
// This is being called by type legalization because v2i32 is marked custom
// for result type legalization for v2f32.
if (VTOp0 == MVT::v2i32)
return SDValue();
// The non-AVX512 code below works under the assumption that source and
// destination types are the same.
assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
"Value types for source and destination must be the same!");
// Break 256-bit integer vector compare into smaller ones.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntVSETCC(Op, DAG);
// The result is boolean, but operands are int/float
if (VT.getVectorElementType() == MVT::i1) {
// In the AVX-512 architecture, setcc returns a mask with i1 elements,
// but there is no compare instruction for i8 and i16 elements in KNL.
assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
"Unexpected operand type");
return LowerIntVSETCC_AVX512(Op, DAG);
}
// Lower using XOP integer comparisons.
if (VT.is128BitVector() && Subtarget.hasXOP()) {
// Translate compare code to XOP PCOM compare mode.
unsigned CmpMode = 0;
switch (Cond) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETULT:
case ISD::SETLT: CmpMode = 0x00; break;
case ISD::SETULE:
case ISD::SETLE: CmpMode = 0x01; break;
case ISD::SETUGT:
case ISD::SETGT: CmpMode = 0x02; break;
case ISD::SETUGE:
case ISD::SETGE: CmpMode = 0x03; break;
case ISD::SETEQ: CmpMode = 0x04; break;
case ISD::SETNE: CmpMode = 0x05; break;
}
// Are we comparing unsigned or signed integers?
unsigned Opc =
ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
return DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CmpMode, dl, MVT::i8));
}
// (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
// Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
SDValue BC0 = peekThroughBitcasts(Op0);
if (BC0.getOpcode() == ISD::AND) {
APInt UndefElts;
SmallVector<APInt, 64> EltBits;
if (getTargetConstantBitsFromNode(BC0.getOperand(1),
VT.getScalarSizeInBits(), UndefElts,
EltBits, false, false)) {
if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
Cond = ISD::SETEQ;
Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
}
}
}
}
// If this is a SETNE against the signed minimum value, change it to SETGT.
// If this is a SETNE against the signed maximum value, change it to SETLT,
// which will be swapped to SETGT.
// Otherwise we use PCMPEQ+invert.
APInt ConstValue;
if (Cond == ISD::SETNE &&
ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
if (ConstValue.isMinSignedValue())
Cond = ISD::SETGT;
else if (ConstValue.isMaxSignedValue())
Cond = ISD::SETLT;
}
// If both operands are known non-negative, then an unsigned compare is the
// same as a signed compare and there's no need to flip signbits.
// TODO: We could check for more general simplifications here since we're
// computing known bits.
bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
!(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
// Special case: Use min/max operations for unsigned compares.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (ISD::isUnsignedIntSetCC(Cond) &&
(FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
TLI.isOperationLegal(ISD::UMIN, VT)) {
// If we have a constant operand, increment/decrement it and change the
// condition to avoid an invert.
// TODO: This could be extended to handle a non-splat constant by checking
// that each element of the constant is not the max/null value.
APInt C;
if (Cond == ISD::SETUGT && isConstantSplat(Op1, C) && !C.isMaxValue()) {
// X > C --> X >= (C+1) --> X == umax(X, C+1)
Op1 = DAG.getConstant(C + 1, dl, VT);
Cond = ISD::SETUGE;
}
if (Cond == ISD::SETULT && isConstantSplat(Op1, C) && !C.isNullValue()) {
// X < C --> X <= (C-1) --> X == umin(X, C-1)
Op1 = DAG.getConstant(C - 1, dl, VT);
Cond = ISD::SETULE;
}
bool Invert = false;
unsigned Opc;
switch (Cond) {
default: llvm_unreachable("Unexpected condition code");
case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
case ISD::SETULE: Opc = ISD::UMIN; break;
case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
case ISD::SETUGE: Opc = ISD::UMAX; break;
}
SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
// If the logical-not of the result is required, perform that now.
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
return Result;
}
// Try to use SUBUS and PCMPEQ.
if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
return V;
// We are handling one of the integer comparisons here. Since SSE only has
// GT and EQ comparisons for integer, swapping operands and multiple
// operations may be required for some comparisons.
unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
: X86ISD::PCMPGT;
bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
Cond == ISD::SETGE || Cond == ISD::SETUGE;
bool Invert = Cond == ISD::SETNE ||
(Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
if (Swap)
std::swap(Op0, Op1);
// Check that the operation in question is available (most are plain SSE2,
// but PCMPGTQ and PCMPEQQ have different requirements).
if (VT == MVT::v2i64) {
if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
assert(Subtarget.hasSSE2() && "Don't know how to lower!");
// Since SSE has no unsigned integer comparisons, we need to flip the sign
// bits of the inputs before performing those operations. The lower
// compare is always unsigned.
SDValue SB;
if (FlipSigns) {
SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
} else {
SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
}
Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
// Cast everything to the right type.
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
Op1 = DAG.getBitcast(MVT::v4i32, Op1);
// Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
// Create masks for only the low parts/high parts of the 64 bit integers.
static const int MaskHi[] = { 1, 1, 3, 3 };
static const int MaskLo[] = { 0, 0, 2, 2 };
SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
if (Invert)
Result = DAG.getNOT(dl, Result, MVT::v4i32);
return DAG.getBitcast(VT, Result);
}
if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
// If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
// pcmpeqd + pshufd + pand.
assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
// First cast everything to the right type.
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
Op1 = DAG.getBitcast(MVT::v4i32, Op1);
// Do the compare.
SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
// Make sure the lower and upper halves are both all-ones.
static const int Mask[] = { 1, 0, 3, 2 };
SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
if (Invert)
Result = DAG.getNOT(dl, Result, MVT::v4i32);
return DAG.getBitcast(VT, Result);
}
}
// Since SSE has no unsigned integer comparisons, we need to flip the sign
// bits of the inputs before performing those operations.
if (FlipSigns) {
MVT EltVT = VT.getVectorElementType();
SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
VT);
Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
}
SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
// If the logical-not of the result is required, perform that now.
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
return Result;
}
// Try to select this as a KORTEST+SETCC if possible.
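// For example (editor's sketch), (setcc (bitcast v16i1 %k to i16), 0, seteq)
// can be selected as "kortestw %k, %k; sete %al": KORTEST sets ZF when the
// OR of its operands is zero, and sets CF instead when the result is all
// ones (used for the compare-against-all-ones form).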
static SDValue EmitKORTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
const SDLoc &dl, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
SDValue &X86CC) {
// Only support equality comparisons.
if (CC != ISD::SETEQ && CC != ISD::SETNE)
return SDValue();
// Must be a bitcast from vXi1.
if (Op0.getOpcode() != ISD::BITCAST)
return SDValue();
Op0 = Op0.getOperand(0);
MVT VT = Op0.getSimpleValueType();
if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
!(Subtarget.hasDQI() && VT == MVT::v8i1) &&
!(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
return SDValue();
X86::CondCode X86Cond;
if (isNullConstant(Op1)) {
X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
} else if (isAllOnesConstant(Op1)) {
// C flag is set for all ones.
X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
} else
return SDValue();
// If the input is an OR, we can combine its operands into the KORTEST.
SDValue LHS = Op0;
SDValue RHS = Op0;
if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
LHS = Op0.getOperand(0);
RHS = Op0.getOperand(1);
}
X86CC = DAG.getConstant(X86Cond, dl, MVT::i8);
return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
}
/// Emit flags for the given setcc condition and operands. Also returns the
/// corresponding X86 condition code constant in X86CC.
SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
ISD::CondCode CC, const SDLoc &dl,
SelectionDAG &DAG,
SDValue &X86CC) const {
// Optimize to BT if possible.
// Lower (X & (1 << N)) == 0 to BT(X, N).
// Lower ((X >>u N) & 1) != 0 to BT(X, N).
// Lower ((X >>s N) & 1) != 0 to BT(X, N).
if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
return BT;
}
// Try to use PTEST for a tree of ORs compared for equality with 0.
// TODO: We could handle an AND tree with all 1s as well by using the C flag.
if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
return PTEST;
}
// Try to lower using KORTEST.
if (SDValue KORTEST = EmitKORTEST(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
return KORTEST;
// Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
// these.
if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
// If the input is a setcc, then reuse the input setcc or use a new one with
// the inverted condition.
if (Op0.getOpcode() == X86ISD::SETCC) {
bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
X86CC = Op0.getOperand(0);
if (Invert) {
X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
X86CC = DAG.getConstant(CCode, dl, MVT::i8);
}
return Op0.getOperand(1);
}
}
bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
if (CondCode == X86::COND_INVALID)
return SDValue();
SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG);
EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
X86CC = DAG.getConstant(CondCode, dl, MVT::i8);
return EFLAGS;
}
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDLoc dl(Op);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
SDValue X86CC;
SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
if (!EFLAGS)
return SDValue();
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
}
SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue Carry = Op.getOperand(2);
SDValue Cond = Op.getOperand(3);
SDLoc DL(Op);
assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
// Recreate the carry if needed.
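// (Illustrative note) Adding an all-ones constant to the materialized carry
// value (0 or 1) produces a carry-out exactly when the value was 1, so the
// SBB below observes the original carry in EFLAGS.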
EVT CarryVT = Carry.getValueType();
APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
Carry, DAG.getConstant(NegOne, DL, CarryVT));
SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
return getSETCC(CC, Cmp.getValue(1), DL, DAG);
}
// This function returns a pair: the arithmetic computation itself (Value) and
// an EFLAGS result (Overflow). It also sets the condition code (Cond); the
// flag and the condition code together define the case in which the
// arithmetic computation overflows.
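// For instance (sketch), ISD::SADDO on i32 yields an X86ISD::ADD whose second
// result is EFLAGS, with Cond = X86::COND_O, so overflow can be materialized
// with "seto" or branched on with "jo".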
static std::pair<SDValue, SDValue>
getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
assert(Op.getResNo() == 0 && "Unexpected result number!");
SDValue Value, Overflow;
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
unsigned BaseOp = 0;
SDLoc DL(Op);
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown ovf instruction!");
case ISD::SADDO:
BaseOp = X86ISD::ADD;
Cond = X86::COND_O;
break;
case ISD::UADDO:
BaseOp = X86ISD::ADD;
Cond = X86::COND_B;
break;
case ISD::SSUBO:
BaseOp = X86ISD::SUB;
Cond = X86::COND_O;
break;
case ISD::USUBO:
BaseOp = X86ISD::SUB;
Cond = X86::COND_B;
break;
case ISD::SMULO:
BaseOp = X86ISD::SMUL;
Cond = X86::COND_O;
break;
case ISD::UMULO:
BaseOp = X86ISD::UMUL;
Cond = X86::COND_O;
break;
}
if (BaseOp) {
// Also sets EFLAGS.
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
Overflow = Value.getValue(1);
}
return std::make_pair(Value, Overflow);
}
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
// Lower the "add/sub/mul with overflow" instruction into a regular ins plus
// a "setcc" instruction that checks the overflow flag. The "brcond" lowering
// looks for this combo and may remove the "setcc" instruction if the "setcc"
// has only one use.
SDLoc DL(Op);
X86::CondCode Cond;
SDValue Value, Overflow;
std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
}
/// Return true if opcode is an X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
unsigned Opc = Op.getOpcode();
if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
Opc == X86ISD::SAHF)
return true;
if (Op.getResNo() == 1 &&
(Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
return true;
return false;
}
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
if (V.getOpcode() != ISD::TRUNCATE)
return false;
SDValue VOp0 = V.getOperand(0);
unsigned InBits = VOp0.getValueSizeInBits();
unsigned Bits = V.getValueSizeInBits();
return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
bool AddTest = true;
SDValue Cond = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue Op2 = Op.getOperand(2);
SDLoc DL(Op);
MVT VT = Op1.getSimpleValueType();
SDValue CC;
// Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
// are available, or into VBLENDV when AVX is available.
// Otherwise FP cmovs get lowered into a less efficient branch sequence later.
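// For instance (a hedged sketch), "select (fcmp olt float %a, %b), %x, %y"
// with SSE1 becomes roughly:
//   %c = cmpltss %a, %b
//   (%c & %x) | (~%c & %y)
// while with AVX a variable-blend (VBLENDV-style) select may be used instead.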
if (Cond.getOpcode() == ISD::SETCC &&
((Subtarget.hasSSE2() && VT == MVT::f64) ||
(Subtarget.hasSSE1() && VT == MVT::f32)) &&
VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
unsigned SSECC = translateX86FSETCC(
cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
if (Subtarget.hasAVX512()) {
SDValue Cmp = DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0,
CondOp1, DAG.getConstant(SSECC, DL, MVT::i8));
assert(!VT.isVector() && "Not a scalar type?");
return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
}
if (SSECC < 8 || Subtarget.hasAVX()) {
SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
DAG.getConstant(SSECC, DL, MVT::i8));
// If we have AVX, we can use a variable vector select (VBLENDV) instead
// of 3 logic instructions for size savings and potentially speed.
// Unfortunately, there is no scalar form of VBLENDV.
// If either operand is a +0.0 constant, don't try this. We can expect to
// optimize away at least one of the logic instructions later in that
// case, so that sequence would be faster than a variable blend.
// BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
// uses XMM0 as the selection register. That may need just as many
// instructions as the AND/ANDN/OR sequence due to register moves, so
// don't bother.
if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
!isNullFPConstant(Op2)) {
// Convert to vectors, do a VSELECT, and convert back to scalar.
// All of the conversions should be optimized away.
MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
VCmp = DAG.getBitcast(VCmpVT, VCmp);
SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
VSel, DAG.getIntPtrConstant(0, DL));
}
SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
}
}
// AVX512 fallback is to lower selects of scalar floats to masked moves.
if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
}
// For v64i1 without 64-bit support we need to split and rejoin.
if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
assert(Subtarget.hasBWI() && "Expected BWI to be legal");
SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
SDValue Op1Scalar;
if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
Op1Scalar = Op1.getOperand(0);
SDValue Op2Scalar;
if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
Op2Scalar = Op2.getOperand(0);
if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
Op1Scalar, Op2Scalar);
if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
return DAG.getBitcast(VT, newSelect);
SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
DAG.getIntPtrConstant(0, DL));
}
}
if (Cond.getOpcode() == ISD::SETCC) {
if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
Cond = NewCond;
// If the condition was updated, it's possible that the operands of the
// select were also updated (for example, EmitTest has a RAUW). Refresh
// the local references to the select operands in case they got stale.
Op1 = Op.getOperand(1);
Op2 = Op.getOperand(2);
}
}
// (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
// (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
// (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
// (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
// (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
// (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
if (Cond.getOpcode() == X86ISD::SETCC &&
Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
isNullConstant(Cond.getOperand(1).getOperand(1))) {
SDValue Cmp = Cond.getOperand(1);
unsigned CondCode =
cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
(CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
SDValue CmpOp0 = Cmp.getOperand(0);
// Apply further optimizations for special cases
// (select (x != 0), -1, 0) -> neg & sbb
// (select (x == 0), 0, -1) -> neg & sbb
if (isNullConstant(Y) &&
(isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
SDValue Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
Zero = DAG.getConstant(0, DL, Op.getValueType());
return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
}
Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
SDValue Res = // Res = 0 or -1.
DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
Res = DAG.getNOT(DL, Res, Res.getValueType());
if (!isNullConstant(Op2))
Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
return Res;
} else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
Cmp.getOperand(0).getOpcode() == ISD::AND &&
isOneConstant(Cmp.getOperand(0).getOperand(1))) {
SDValue CmpOp0 = Cmp.getOperand(0);
SDValue Src1, Src2;
// Returns true if Op2 is an XOR or OR operator and one of its operands
// is equal to Op1, i.e. the pattern is
// (a, a op b) or (b, a op b).
auto isOrXorPattern = [&]() {
if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
(Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
Src1 =
Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
Src2 = Op1;
return true;
}
return false;
};
if (isOrXorPattern()) {
SDValue Neg;
unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
// We need a mask of all zeros or all ones with the same size as the other
// operands.
if (CmpSz > VT.getSizeInBits())
Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
else if (CmpSz < VT.getSizeInBits())
Neg = DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
DAG.getConstant(1, DL, VT));
else
Neg = CmpOp0;
SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
Neg); // -(and (x, 0x1))
SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
}
}
}
// Look past (and (setcc_carry (cmp ...)), 1).
if (Cond.getOpcode() == ISD::AND &&
Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
isOneConstant(Cond.getOperand(1)))
Cond = Cond.getOperand(0);
// If the condition flag is set by an X86ISD::CMP, then use it as the
// condition-setting operand in place of the X86ISD::SETCC.
unsigned CondOpcode = Cond.getOpcode();
if (CondOpcode == X86ISD::SETCC ||
CondOpcode == X86ISD::SETCC_CARRY) {
CC = Cond.getOperand(0);
SDValue Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
MVT VT = Op.getSimpleValueType();
bool IllegalFPCMov = false;
if (VT.isFloatingPoint() && !VT.isVector() &&
!isScalarFPTypeInSSEReg(VT)) // FPStack?
IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
Opc == X86ISD::BT) { // FIXME
Cond = Cmp;
AddTest = false;
}
} else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
SDValue Value;
X86::CondCode X86Cond;
std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
CC = DAG.getConstant(X86Cond, DL, MVT::i8);
AddTest = false;
}
if (AddTest) {
// Look past the truncate if the high bits are known zero.
if (isTruncWithZeroHighBitsInput(Cond, DAG))
Cond = Cond.getOperand(0);
// We know the result of AND is compared against zero. Try to match
// it to BT.
if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
SDValue BTCC;
if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
CC = BTCC;
Cond = BT;
AddTest = false;
}
}
}
if (AddTest) {
CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
Cond = EmitCmp(Cond, DAG.getConstant(0, DL, Cond.getValueType()),
X86::COND_NE, DL, DAG);
}
// a < b ? -1 : 0 -> RES = ~setcc_carry
// a < b ? 0 : -1 -> RES = setcc_carry
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == X86ISD::SUB) {
Cond = ConvertCmpIfNecessary(Cond, DAG);
unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
(isNullConstant(Op1) || isNullConstant(Op2))) {
SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
DAG.getConstant(X86::COND_B, DL, MVT::i8),
Cond);
if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
return DAG.getNOT(DL, Res, Res.getValueType());
return Res;
}
}
// X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
// widen the cmov and push the truncate through. This avoids introducing a new
// branch during isel and doesn't add any extensions.
if (Op.getValueType() == MVT::i8 &&
Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
if (T1.getValueType() == T2.getValueType() &&
// Blacklist CopyFromReg to avoid partial register stalls.
T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
CC, Cond);
return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
}
}
// Promote i16 cmovs if it won't prevent folding a load.
if (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) && !MayFoldLoad(Op2)) {
Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
SDValue Ops[] = { Op2, Op1, CC, Cond };
SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
}
// X86ISD::CMOV means set the result (which is operand 1) to the RHS if
// condition is true.
SDValue Ops[] = { Op2, Op1, CC, Cond };
return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
}
static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
MVT VTElt = VT.getVectorElementType();
SDLoc dl(Op);
unsigned NumElts = VT.getVectorNumElements();
// Extend VT if the scalar type is i8/i16 and BWI is not supported.
MVT ExtVT = VT;
if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
// If v16i32 is to be avoided, we'll need to split and concatenate.
if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
}
// Widen to 512-bits if VLX is not supported.
MVT WideVT = ExtVT;
if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
NumElts *= 512 / ExtVT.getSizeInBits();
InVT = MVT::getVectorVT(MVT::i1, NumElts);
In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
In, DAG.getIntPtrConstant(0, dl));
WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
}
SDValue V;
MVT WideEltVT = WideVT.getVectorElementType();
if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
(Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
} else {
SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
SDValue Zero = DAG.getConstant(0, dl, WideVT);
V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
}
// Truncate if we had to extend i16/i8 above.
if (VT != ExtVT) {
WideVT = MVT::getVectorVT(VTElt, NumElts);
V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
}
// Extract back to 128/256-bit if we widened.
if (WideVT != VT)
V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
DAG.getIntPtrConstant(0, dl));
return V;
}
static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
if (InVT.getVectorElementType() == MVT::i1)
return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
assert(Subtarget.hasAVX() && "Expected AVX support");
return LowerAVXExtend(Op, DAG, Subtarget);
}
// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
// For sign extend this needs to handle all vector sizes and SSE4.1 and
// non-SSE4.1 targets. For zero extend this should only handle inputs of
// MVT::v64i8 when BWI is not supported, but AVX512 is.
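// A hedged example: on a pre-SSE4.1 target, a v4i32 sign_extend_vector_inreg
// from v16i8 is lowered below as a shuffle that places each input byte in the
// high byte of its 32-bit lane, followed by an arithmetic shift right by 24
// (psrad $24) to replicate the sign bit.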
static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op->getOperand(0);
MVT VT = Op->getSimpleValueType(0);
MVT InVT = In.getSimpleValueType();
MVT SVT = VT.getVectorElementType();
MVT InSVT = InVT.getVectorElementType();
assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
return SDValue();
if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
return SDValue();
if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
!(VT.is256BitVector() && Subtarget.hasAVX()) &&
!(VT.is512BitVector() && Subtarget.hasAVX512()))
return SDValue();
SDLoc dl(Op);
unsigned Opc = Op.getOpcode();
unsigned NumElts = VT.getVectorNumElements();
// For 256-bit vectors, we only need the lower (128-bit) half of the input.
// For 512-bit vectors, we need 128-bits or 256-bits.
if (InVT.getSizeInBits() > 128) {
// Input needs to be at least the same number of elements as output, and
// at least 128-bits.
int InSize = InSVT.getSizeInBits() * NumElts;
In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
InVT = In.getSimpleValueType();
}
// SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
// results, so those cases are legal and shouldn't occur here. AVX2/AVX512
// pmovsx* instructions still need to be handled here for 256/512-bit results.
if (Subtarget.hasInt256()) {
assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
if (InVT.getVectorNumElements() != NumElts)
return DAG.getNode(Op.getOpcode(), dl, VT, In);
// FIXME: Apparently we create inreg operations that could be regular
// extends.
unsigned ExtOpc =
Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND;
return DAG.getNode(ExtOpc, dl, VT, In);
}
// pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
if (Subtarget.hasAVX()) {
assert(VT.is256BitVector() && "256-bit vector expected");
int HalfNumElts = NumElts / 2;
MVT HalfVT = MVT::getVectorVT(SVT, HalfNumElts);
unsigned NumSrcElts = InVT.getVectorNumElements();
SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
for (int i = 0; i != HalfNumElts; ++i)
HiMask[i] = HalfNumElts + i;
SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
}
// We should only get here for sign extend.
assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
// pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
SDValue Curr = In;
SDValue SignExt = Curr;
// As SRAI is only available on i16/i32 types, we expand only up to i32
// and handle i64 separately.
if (InVT != MVT::v4i32) {
MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
unsigned DestWidth = DestVT.getScalarSizeInBits();
unsigned Scale = DestWidth / InSVT.getSizeInBits();
unsigned InNumElts = InVT.getVectorNumElements();
unsigned DestElts = DestVT.getVectorNumElements();
// Build a shuffle mask that takes each input element and places it in the
// MSBs of the new element size.
SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
for (unsigned i = 0; i != DestElts; ++i)
Mask[i * Scale + (Scale - 1)] = i;
Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
Curr = DAG.getBitcast(DestVT, Curr);
unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
DAG.getConstant(SignExtShift, dl, MVT::i8));
}
if (VT == MVT::v2i64) {
assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
SignExt = DAG.getBitcast(VT, SignExt);
}
return SignExt;
}
static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
SDLoc dl(Op);
if (InVT.getVectorElementType() == MVT::i1)
return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
assert(VT.isVector() && InVT.isVector() && "Expected vector type");
assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
"Expected same number of elements");
assert((VT.getVectorElementType() == MVT::i16 ||
VT.getVectorElementType() == MVT::i32 ||
VT.getVectorElementType() == MVT::i64) &&
"Unexpected element type");
assert((InVT.getVectorElementType() == MVT::i8 ||
InVT.getVectorElementType() == MVT::i16 ||
InVT.getVectorElementType() == MVT::i32) &&
"Unexpected element type");
// Custom legalize v8i8->v8i64 on CPUs without avx512bw.
if (InVT == MVT::v8i8) {
if (!ExperimentalVectorWideningLegalization || VT != MVT::v8i64)
return SDValue();
In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
}
if (Subtarget.hasInt256())
return Op;
// Optimize vectors in AVX mode:
// sign extend v8i16 to v8i32 and v4i32 to v4i64.
//
// Divide the input vector into two parts;
// for v4i32 the high shuffle mask will be {2, 3, -1, -1}.
// Use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
// then concat the vectors back to the original VT.
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() / 2);
SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
unsigned NumElems = InVT.getVectorNumElements();
SmallVector<int,8> ShufMask(NumElems, -1);
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask[i] = i + NumElems/2;
SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
SDLoc dl(St);
SDValue StoredVal = St->getValue();
// Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
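// (Illustrative) a v8i1 mask store then becomes: insert the mask into a
// v16i1, bitcast to i16, truncate to i8, and emit an ordinary one-byte
// integer store.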
if (StoredVal.getValueType().isVector() &&
StoredVal.getValueType().getVectorElementType() == MVT::i1) {
assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
"Unexpected VT");
assert(!St->isTruncatingStore() && "Expected non-truncating store");
assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
"Expected AVX512F without AVX512DQI");
StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
DAG.getUNDEF(MVT::v16i1), StoredVal,
DAG.getIntPtrConstant(0, dl));
StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
if (St->isTruncatingStore())
return SDValue();
MVT StoreVT = StoredVal.getSimpleValueType();
assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
"Unexpected VT");
if (DAG.getTargetLoweringInfo().getTypeAction(*DAG.getContext(), StoreVT) !=
TargetLowering::TypeWidenVector)
return SDValue();
// Widen the vector, cast to a v2x64 type, extract the single 64-bit element
// and store it.
MVT WideVT = MVT::getVectorVT(StoreVT.getVectorElementType(),
StoreVT.getVectorNumElements() * 2);
StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
DAG.getUNDEF(StoreVT));
MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
MVT CastVT = MVT::getVectorVT(StVT, 2);
StoredVal = DAG.getBitcast(CastVT, StoredVal);
StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
DAG.getIntPtrConstant(0, dl));
return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
// Lower vector extended loads using a shuffle. If SSSE3 is not available, we
// may emit an illegal shuffle, but the expansion is still better than scalar
// code. We generate sext/sext_invec for SEXTLOADs if it's available; otherwise
// we'll emit a shuffle and an arithmetic shift.
// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
// TODO: It is possible to support ZExt by zeroing the undef values during
// the shuffle phase or after the shuffle.
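// Rough example (editor's sketch): a v4i64 sextload from a v4i32 memory type
// on AVX1 (no AVX2) is emitted below as a plain 128-bit v4i32 load followed
// by a normal sign_extend to v4i64, which is then legalized separately.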
static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT RegVT = Op.getSimpleValueType();
assert(RegVT.isVector() && "We only custom lower vector loads.");
assert(RegVT.isInteger() &&
"We only custom lower integer vector loads.");
LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
SDLoc dl(Ld);
EVT MemVT = Ld->getMemoryVT();
// Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
if (RegVT.getVectorElementType() == MVT::i1) {
assert(EVT(RegVT) == MemVT && "Expected non-extending load");
assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
"Expected AVX512F without AVX512DQI");
SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
Ld->getPointerInfo(), Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
// Replace chain users with the new chain.
assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
DAG.getBitcast(MVT::v16i1, Val),
DAG.getIntPtrConstant(0, dl));
return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
}
// Nothing useful we can do without SSE2 shuffles.
assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2.");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned RegSz = RegVT.getSizeInBits();
ISD::LoadExtType Ext = Ld->getExtensionType();
assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
&& "Only anyext and sext are currently implemented.");
assert(MemVT != RegVT && "Cannot extend to the same type");
assert(MemVT.isVector() && "Must load a vector from memory");
unsigned NumElems = RegVT.getVectorNumElements();
unsigned MemSz = MemVT.getSizeInBits();
assert(RegSz > MemSz && "Register size must be greater than the mem size");
if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) {
// The only way in which we have a legal 256-bit vector result but not the
// integer 256-bit operations needed to directly lower a sextload is if we
// have AVX1 but not AVX2. In that case, we can always emit a sextload to
// a 128-bit vector and a normal sign_extend to 256-bits that should get
// correctly legalized. We do this late to allow the canonical form of
// sextload to persist throughout the rest of the DAG combiner -- it wants
// to fold together any extensions it can, and so will fuse a sign_extend
// of an sextload into a sextload targeting a wider value.
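// For example (roughly): a sextload from <8 x i16> memory into a v8i32
// register on AVX1 becomes a plain v8i16 load plus a sign_extend to
// v8i32, which later legalization splits into two 128-bit halves.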
SDValue Load;
if (MemSz == 128) {
// Just switch this to a normal load.
assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
"it must be a legal 128-bit vector "
"type!");
Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
Ld->getPointerInfo(), Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
} else {
assert(MemSz < 128 &&
"Can't extend a type wider than 128 bits to a 256 bit vector!");
// Do an sext load to a 128-bit vector type. We want to use the same
// number of elements, but elements half as wide. This will end up being
// recursively lowered by this routine, but will succeed as we definitely
// have all the necessary features if we're using AVX1.
EVT HalfEltVT =
EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
Load =
DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
}
// Replace chain users with the new chain.
assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
// Finally, do a normal sign-extend to the desired register.
SDValue SExt = DAG.getSExtOrTrunc(Load, dl, RegVT);
return DAG.getMergeValues({SExt, Load.getValue(1)}, dl);
}
// All sizes must be a power of two.
assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
"Non-power-of-two elements are not custom lowered!");
// Attempt to load the original value using scalar loads.
// Find the largest scalar type that divides the total loaded size.
MVT SclrLoadTy = MVT::i8;
for (MVT Tp : MVT::integer_valuetypes()) {
if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
SclrLoadTy = Tp;
}
}
// On 32-bit systems i64 is not a legal type, so we can't load 64-bit integers
// directly. Try bitcasting to f64.
if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
(64 <= MemSz))
SclrLoadTy = MVT::f64;
// Calculate the number of scalar loads that we need to perform
// in order to load our vector from memory.
unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
"Can only lower sext loads with a single scalar load!");
unsigned loadRegSize = RegSz;
if (Ext == ISD::SEXTLOAD && RegSz >= 256)
loadRegSize = 128;
// If we don't have BWI we won't be able to create the shuffle needed for
// v8i8->v8i64.
if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 &&
MemVT == MVT::v8i8)
loadRegSize = 128;
// Represent our vector as a sequence of elements which are the
// largest scalar that we can load.
EVT LoadUnitVecVT = EVT::getVectorVT(
*DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
// Represent the data using the same element type that is stored in
// memory. In practice, we ''widen'' MemVT.
EVT WideVecVT =
EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
loadRegSize / MemVT.getScalarSizeInBits());
assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
"Invalid vector type");
// We can't shuffle using an illegal type.
assert(TLI.isTypeLegal(WideVecVT) &&
"We only lower types that form legal widened vector types");
SmallVector<SDValue, 8> Chains;
SDValue Ptr = Ld->getBasePtr();
unsigned OffsetInc = SclrLoadTy.getSizeInBits() / 8;
SDValue Increment = DAG.getConstant(OffsetInc, dl,
TLI.getPointerTy(DAG.getDataLayout()));
SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
unsigned Offset = 0;
for (unsigned i = 0; i < NumLoads; ++i) {
unsigned NewAlign = MinAlign(Ld->getAlignment(), Offset);
// Perform a single load.
SDValue ScalarLoad =
DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr,
Ld->getPointerInfo().getWithOffset(Offset),
NewAlign, Ld->getMemOperand()->getFlags());
Chains.push_back(ScalarLoad.getValue(1));
// Create the first element type using SCALAR_TO_VECTOR in order to avoid
// another round of DAGCombining.
if (i == 0)
Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
else
Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
ScalarLoad, DAG.getIntPtrConstant(i, dl));
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
Offset += OffsetInc;
}
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
// Bitcast the loaded value to a vector of the original element type, in
// the size of the target vector type.
SDValue SlicedVec = DAG.getBitcast(WideVecVT, Res);
unsigned SizeRatio = RegSz / MemSz;
if (Ext == ISD::SEXTLOAD) {
SDValue Sext = getExtendInVec(/*Signed*/true, dl, RegVT, SlicedVec, DAG);
return DAG.getMergeValues({Sext, TF}, dl);
}
if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 &&
MemVT == MVT::v8i8) {
SDValue Sext = getExtendInVec(/*Signed*/false, dl, RegVT, SlicedVec, DAG);
return DAG.getMergeValues({Sext, TF}, dl);
}
// Redistribute the loaded elements into the different locations.
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i * SizeRatio] = i;
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
DAG.getUNDEF(WideVecVT), ShuffleVec);
// Bitcast to the requested type.
Shuff = DAG.getBitcast(RegVT, Shuff);
return DAG.getMergeValues({Shuff, TF}, dl);
}
/// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
/// each of which has no other use apart from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
Opc = Op.getOpcode();
if (Opc != ISD::OR && Opc != ISD::AND)
return false;
return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
Op.getOperand(0).hasOneUse() &&
Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
Op.getOperand(1).hasOneUse());
}
/// Return true if node is an ISD::XOR of a X86ISD::SETCC and 1 and that the
/// SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
if (Op.getOpcode() != ISD::XOR)
return false;
if (isOneConstant(Op.getOperand(1)))
return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
Op.getOperand(0).hasOneUse();
return false;
}
SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
bool addTest = true;
SDValue Chain = Op.getOperand(0);
SDValue Cond = Op.getOperand(1);
SDValue Dest = Op.getOperand(2);
SDLoc dl(Op);
SDValue CC;
bool Inverted = false;
if (Cond.getOpcode() == ISD::SETCC) {
// Check for setcc([su]{add,sub,mul}o == 0).
if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
isNullConstant(Cond.getOperand(1)) &&
Cond.getOperand(0).getResNo() == 1 &&
(Cond.getOperand(0).getOpcode() == ISD::SADDO ||
Cond.getOperand(0).getOpcode() == ISD::UADDO ||
Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
Cond.getOperand(0).getOpcode() == ISD::USUBO ||
Cond.getOperand(0).getOpcode() == ISD::SMULO ||
Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
Inverted = true;
Cond = Cond.getOperand(0);
} else {
if (SDValue NewCond = LowerSETCC(Cond, DAG))
Cond = NewCond;
}
}
#if 0
// FIXME: LowerXALUO doesn't handle these!!
else if (Cond.getOpcode() == X86ISD::ADD ||
Cond.getOpcode() == X86ISD::SUB ||
Cond.getOpcode() == X86ISD::SMUL ||
Cond.getOpcode() == X86ISD::UMUL)
Cond = LowerXALUO(Cond, DAG);
#endif
// Look past (and (setcc_carry (cmp ...)), 1).
if (Cond.getOpcode() == ISD::AND &&
Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
isOneConstant(Cond.getOperand(1)))
Cond = Cond.getOperand(0);
// If condition flag is set by a X86ISD::CMP, then use it as the condition
// setting operand in place of the X86ISD::SETCC.
unsigned CondOpcode = Cond.getOpcode();
if (CondOpcode == X86ISD::SETCC ||
CondOpcode == X86ISD::SETCC_CARRY) {
CC = Cond.getOperand(0);
SDValue Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
// FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
Cond = Cmp;
addTest = false;
} else {
switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
default: break;
case X86::COND_O:
case X86::COND_B:
// These can only come from an arithmetic instruction with overflow,
// e.g. SADDO, UADDO.
Cond = Cond.getOperand(1);
addTest = false;
break;
}
}
}
CondOpcode = Cond.getOpcode();
if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
SDValue Value;
X86::CondCode X86Cond;
std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
if (Inverted)
X86Cond = X86::GetOppositeBranchCondition(X86Cond);
CC = DAG.getConstant(X86Cond, dl, MVT::i8);
addTest = false;
} else {
unsigned CondOpc;
if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
SDValue Cmp = Cond.getOperand(0).getOperand(1);
if (CondOpc == ISD::OR) {
// Also, recognize the pattern generated by an FCMP_UNE. We can emit
// two branches instead of an explicit OR instruction with a
// separate test.
if (Cmp == Cond.getOperand(1).getOperand(1) &&
isX86LogicalCmp(Cmp)) {
CC = Cond.getOperand(0).getOperand(0);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
CC = Cond.getOperand(1).getOperand(0);
Cond = Cmp;
addTest = false;
}
} else { // ISD::AND
// Also, recognize the pattern generated by an FCMP_OEQ. We can emit
// two branches instead of an explicit AND instruction with a
// separate test. However, we only do this if this block doesn't
// have a fall-through edge, because this requires an explicit
// jmp when the condition is false.
if (Cmp == Cond.getOperand(1).getOperand(1) &&
isX86LogicalCmp(Cmp) &&
Op.getNode()->hasOneUse()) {
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, dl, MVT::i8);
SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
// to implement FCMP_OEQ.
if (User->getOpcode() == ISD::BR) {
SDValue FalseBB = User->getOperand(1);
SDNode *NewBR =
DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
assert(NewBR == User);
(void)NewBR;
Dest = FalseBB;
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
}
}
} else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
// Recognize the xorb (setcc), 1 pattern. The xor inverts the condition.
// It should be transformed by the DAG combiner except when the condition
// is set by an arithmetic-with-overflow node.
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, dl, MVT::i8);
Cond = Cond.getOperand(0).getOperand(1);
addTest = false;
} else if (Cond.getOpcode() == ISD::SETCC &&
cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
// For FCMP_OEQ, we can emit
// two branches instead of an explicit AND instruction with a
// separate test. However, we only do this if this block doesn't
// have a fall-through edge, because this requires an explicit
// jmp when the condition is false.
if (Op.getNode()->hasOneUse()) {
SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
// to implement FCMP_OEQ.
if (User->getOpcode() == ISD::BR) {
SDValue FalseBB = User->getOperand(1);
SDNode *NewBR =
DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
assert(NewBR == User);
(void)NewBR;
Dest = FalseBB;
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
}
} else if (Cond.getOpcode() == ISD::SETCC &&
cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
// For FCMP_UNE, we can emit
// two branches instead of an explicit OR instruction with a
// separate test.
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
}
if (addTest) {
// Look past the truncate if the high bits are known zero.
if (isTruncWithZeroHighBitsInput(Cond, DAG))
Cond = Cond.getOperand(0);
// We know the result of AND is compared against zero. Try to match
// it to BT.
if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
SDValue BTCC;
if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
CC = BTCC;
Cond = BT;
addTest = false;
}
}
}
if (addTest) {
X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
CC = DAG.getConstant(X86Cond, dl, MVT::i8);
Cond = EmitCmp(Cond, DAG.getConstant(0, dl, Cond.getValueType()),
X86Cond, dl, DAG);
}
Cond = ConvertCmpIfNecessary(Cond, DAG);
return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cond);
}
// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4K
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// the correct sequence.
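// For example (illustrative): an alloca of 12 KiB must touch at least one
// byte in each of the three 4 KiB pages it spans, in order, so that each
// guard-page fault commits the next page before code references memory
// beyond it.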
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool SplitStack = MF.shouldSplitStack();
bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
SplitStack || EmitStackProbe;
SDLoc dl(Op);
// Get the inputs.
SDNode *Node = Op.getNode();
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
EVT VT = Node->getValueType(0);
// Chain the dynamic stack allocation so that it doesn't modify the stack
// pointer when other instructions are using the stack.
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
bool Is64Bit = Subtarget.is64Bit();
MVT SPTy = getPointerTy(DAG.getDataLayout());
SDValue Result;
if (!Lower) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!");
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlign = TFI.getStackAlignment();
Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
if (Align > StackAlign)
Result = DAG.getNode(ISD::AND, dl, VT, Result,
DAG.getConstant(-(uint64_t)Align, dl, VT));
Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
} else if (SplitStack) {
MachineRegisterInfo &MRI = MF.getRegInfo();
if (Is64Bit) {
// The 64-bit implementation of segmented stacks needs to clobber both r10 and
// r11. This makes it impossible to use it along with nested parameters.
const Function &F = MF.getFunction();
for (const auto &A : F.args()) {
if (A.hasNestAttr())
report_fatal_error("Cannot use segmented stacks with functions that "
"have nested arguments.");
}
}
const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
DAG.getRegister(Vreg, SPTy));
} else {
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned SPReg = RegInfo->getStackRegister();
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
Chain = SP.getValue(1);
if (Align) {
SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
DAG.getConstant(-(uint64_t)Align, dl, VT));
Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
}
Result = SP;
}
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
SDValue Ops[2] = {Result, Chain};
return DAG.getMergeValues(Ops, dl);
}
SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
auto PtrVT = getPointerTy(MF.getDataLayout());
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
SDLoc DL(Op);
if (!Subtarget.is64Bit() ||
Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
MachinePointerInfo(SV));
}
// __va_list_tag:
// gp_offset (0 - 6 * 8)
// fp_offset (48 - 48 + 8 * 16)
// overflow_arg_area (points to parameters passed in memory).
// reg_save_area
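// In C terms the SysV AMD64 va_list element looks roughly like:
//   typedef struct {
//     unsigned int gp_offset;   // byte offset 0
//     unsigned int fp_offset;   // byte offset 4
//     void *overflow_arg_area;  // byte offset 8
//     void *reg_save_area;      // byte offset 16 (LP64)
//   } __va_list_tag;
// The four stores below fill these fields in that order.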
SmallVector<SDValue, 8> MemOps;
SDValue FIN = Op.getOperand(1);
// Store gp_offset
SDValue Store = DAG.getStore(
Op.getOperand(0), DL,
DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
MachinePointerInfo(SV));
MemOps.push_back(Store);
// Store fp_offset
FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
Store = DAG.getStore(
Op.getOperand(0), DL,
DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
MachinePointerInfo(SV, 4));
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
Store =
DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
MemOps.push_back(Store);
// Store ptr to reg_save_area.
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
Store = DAG.getStore(
Op.getOperand(0), DL, RSFIN, FIN,
MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
MemOps.push_back(Store);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget.is64Bit() &&
"LowerVAARG only handles 64-bit va_arg!");
assert(Op.getNumOperands() == 4);
MachineFunction &MF = DAG.getMachineFunction();
if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
// The Win64 ABI uses char* instead of a structure.
return DAG.expandVAArg(Op.getNode());
SDValue Chain = Op.getOperand(0);
SDValue SrcPtr = Op.getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
unsigned Align = Op.getConstantOperandVal(3);
SDLoc dl(Op);
EVT ArgVT = Op.getNode()->getValueType(0);
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
uint8_t ArgMode;
// Decide which area this value should be read from.
// TODO: Implement the AMD64 ABI in its entirety. This simple
// selection mechanism works only for the basic types.
if (ArgVT == MVT::f80) {
llvm_unreachable("va_arg for f80 not yet implemented");
} else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
} else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
} else {
llvm_unreachable("Unhandled argument type in LowerVAARG");
}
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!Subtarget.useSoftFloat() &&
!(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
Subtarget.hasSSE1());
}
// Insert VAARG_64 node into the DAG
// VAARG_64 returns two values: Variable Argument Address, Chain
SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
DAG.getConstant(ArgMode, dl, MVT::i8),
DAG.getConstant(Align, dl, MVT::i32)};
SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
SDValue VAARG = DAG.getMemIntrinsicNode(
X86ISD::VAARG_64, dl,
VTs, InstOps, MVT::i64,
MachinePointerInfo(SV),
/*Align=*/0,
MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
Chain = VAARG.getValue(1);
// Load the next argument and return it
return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
}
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
// where a va_list is still an i8*.
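// That struct is 24 bytes (4 + 4 + 8 + 8) with 8-byte alignment, which is
// why the memcpy emitted below copies exactly 24 bytes.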
assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
if (Subtarget.isCallingConvWin64(
DAG.getMachineFunction().getFunction().getCallingConv()))
// Probably a Win64 va_copy.
return DAG.expandVACopy(Op.getNode());
SDValue Chain = Op.getOperand(0);
SDValue DstPtr = Op.getOperand(1);
SDValue SrcPtr = Op.getOperand(2);
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
SDLoc DL(Op);
return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
false, false,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
// Helper to get immediate/variable SSE shift opcode from other shift opcodes.
static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
switch (Opc) {
case ISD::SHL:
case X86ISD::VSHL:
case X86ISD::VSHLI:
return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
case ISD::SRL:
case X86ISD::VSRL:
case X86ISD::VSRLI:
return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
case ISD::SRA:
case X86ISD::VSRA:
case X86ISD::VSRAI:
return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
}
llvm_unreachable("Unknown target vector shift node");
}
/// Handle vector element shifts where the shift amount is a constant.
/// Takes immediate version of shift as input.
static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue SrcOp, uint64_t ShiftAmt,
SelectionDAG &DAG) {
MVT ElementType = VT.getVectorElementType();
// Bitcast the source vector to the output type; this is mainly necessary for
// vXi8/vXi64 shifts.
if (VT != SrcOp.getSimpleValueType())
SrcOp = DAG.getBitcast(VT, SrcOp);
// Fold this packed shift into its first operand if ShiftAmt is 0.
if (ShiftAmt == 0)
return SrcOp;
// Check for ShiftAmt >= element width
if (ShiftAmt >= ElementType.getSizeInBits()) {
if (Opc == X86ISD::VSRAI)
ShiftAmt = ElementType.getSizeInBits() - 1;
else
return DAG.getConstant(0, dl, VT);
}
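// E.g. a logical shift of a v4i32 lane by 32 or more always yields 0, while
// an arithmetic shift is clamped to 31 so each lane keeps only its sign.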
assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
&& "Unknown target vector shift-by-constant node");
// Fold this packed vector shift into a build vector if SrcOp is a
// vector of Constants or UNDEFs.
if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
SmallVector<SDValue, 8> Elts;
unsigned NumElts = SrcOp->getNumOperands();
ConstantSDNode *ND;
switch(Opc) {
default: llvm_unreachable("Unknown opcode!");
case X86ISD::VSHLI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
}
break;
case X86ISD::VSRLI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
}
break;
case X86ISD::VSRAI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
}
break;
}
return DAG.getBuildVector(VT, dl, Elts);
}
return DAG.getNode(Opc, dl, VT, SrcOp,
DAG.getConstant(ShiftAmt, dl, MVT::i8));
}
/// Handle vector element shifts where the shift amount may or may not be a
/// constant. Takes immediate version of shift as input.
static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue SrcOp, SDValue ShAmt,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT SVT = ShAmt.getSimpleValueType();
assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
// Catch shift-by-constant.
if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
CShAmt->getZExtValue(), DAG);
// Change opcode to non-immediate version.
Opc = getTargetVShiftUniformOpcode(Opc, true);
// Need to build a vector containing the shift amount.
// SSE/AVX packed shifts only use the lower 64 bits of the shift count.
// +====================+============+=======================================+
// | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
// +====================+============+=======================================+
// | i64 | Yes, No | Use ShAmt as lowest elt |
// | i32 | Yes | zero-extend in-reg |
// | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
// | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
// | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
// +====================+============+=======================================+
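// For example (roughly): on plain SSE2, a uniform i32 shift amount coming
// straight from a GPR is built as build_vector(ShAmt, 0, undef, undef), so
// only the low 64 bits of the 128-bit shift-count vector are defined.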
if (SVT == MVT::i64)
ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
(ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
ShAmt = ShAmt.getOperand(0);
MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
if (Subtarget.hasSSE41())
ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
MVT::v2i64, ShAmt);
else {
SDValue ByteShift = DAG.getConstant(
(128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
ByteShift);
ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
ByteShift);
}
} else if (Subtarget.hasSSE41() &&
ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
MVT::v2i64, ShAmt);
} else {
SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
DAG.getUNDEF(SVT)};
ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
}
// The return type has to be a 128-bit type with the same element
// type as the input type.
MVT EltVT = VT.getVectorElementType();
MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
ShAmt = DAG.getBitcast(ShVT, ShAmt);
return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}
/// Return Mask with the necessary casting or extending
/// for \p Mask according to \p MaskVT when lowering masking intrinsics
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
const X86Subtarget &Subtarget, SelectionDAG &DAG,
const SDLoc &dl) {
if (isAllOnesConstant(Mask))
return DAG.getConstant(1, dl, MaskVT);
if (X86::isZeroNode(Mask))
return DAG.getConstant(0, dl, MaskVT);
assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
// In 32-bit mode a bitcast of i64 is illegal, so split it into two i32 halves.
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
DAG.getConstant(0, dl, MVT::i32));
Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
DAG.getConstant(1, dl, MVT::i32));
Lo = DAG.getBitcast(MVT::v32i1, Lo);
Hi = DAG.getBitcast(MVT::v32i1, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
} else {
MVT BitcastVT = MVT::getVectorVT(MVT::i1,
Mask.getSimpleValueType().getSizeInBits());
// When MaskVT is v2i1 or v4i1, the low 2 or 4 elements are
// extracted by EXTRACT_SUBVECTOR.
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getBitcast(BitcastVT, Mask),
DAG.getIntPtrConstant(0, dl));
}
}
/// Return (and \p Op, \p Mask) for compare instructions or
/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
/// necessary casting or extending for \p Mask when lowering masking intrinsics
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
unsigned OpcodeSelect = ISD::VSELECT;
SDLoc dl(Op);
if (isAllOnesConstant(Mask))
return Op;
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
if (PreservedSrc.isUndef())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
}
/// Creates an SDNode for a predicated scalar operation.
/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask comes in as MVT::i8 and should be transformed
/// to MVT::v1i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is using
/// "X86select" instead of "vselect". We just can't create the "vselect" node
/// for a scalar instruction.
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
if (MaskConst->getZExtValue() & 0x1)
return Op;
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
DAG.getBitcast(MVT::v8i1, Mask),
DAG.getIntPtrConstant(0, dl));
if (Op.getOpcode() == X86ISD::FSETCCM ||
Op.getOpcode() == X86ISD::FSETCCM_RND ||
Op.getOpcode() == X86ISD::VFPCLASSS)
return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
if (PreservedSrc.isUndef())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
}
static int getSEHRegistrationNodeSize(const Function *Fn) {
if (!Fn->hasPersonalityFn())
report_fatal_error(
"querying registration node size for function without personality");
// The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
// WinEHStatePass for the full struct definition.
switch (classifyEHPersonality(Fn->getPersonalityFn())) {
case EHPersonality::MSVC_X86SEH: return 24;
case EHPersonality::MSVC_CXX: return 16;
default: break;
}
report_fatal_error(
"can only recover FP for 32-bit MSVC EH personality functions");
}
/// When the MSVC runtime transfers control to us, either to an outlined
/// function or when returning to a parent frame after catching an exception, we
/// recover the parent frame pointer by doing arithmetic on the incoming EBP.
/// Here's the math:
/// RegNodeBase = EntryEBP - RegNodeSize
/// ParentFP = RegNodeBase - ParentFrameOffset
/// Subtracting RegNodeSize takes us to the offset of the registration node, and
/// subtracting the offset (negative on x86) takes us back to the parent FP.
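/// Hypothetical numbers (illustration only): with EntryEBP = 0x1000,
/// RegNodeSize = 24 (32-bit SEH) and ParentFrameOffset = -64, this gives
/// RegNodeBase = 0xfe8 and ParentFP = 0xfe8 - (-64) = 0x1028.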
static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
SDValue EntryEBP) {
MachineFunction &MF = DAG.getMachineFunction();
SDLoc dl;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
// It's possible that the parent function no longer has a personality function
// if the exceptional code was optimized away, in which case we just return
// the incoming EBP.
if (!Fn->hasPersonalityFn())
return EntryEBP;
// Get an MCSymbol that will ultimately resolve to the frame offset of the EH
// registration, or the .set_setframe offset.
MCSymbol *OffsetSym =
MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
GlobalValue::dropLLVMManglingEscape(Fn->getName()));
SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
SDValue ParentFrameOffset =
DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
// Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
// prologue to RBP in the parent function.
const X86Subtarget &Subtarget =
static_cast<const X86Subtarget &>(DAG.getSubtarget());
if (Subtarget.is64Bit())
return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
int RegNodeSize = getSEHRegistrationNodeSize(Fn);
// RegNodeBase = EntryEBP - RegNodeSize
// ParentFP = RegNodeBase - ParentFrameOffset
SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
DAG.getConstant(RegNodeSize, dl, PtrVT));
return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
}
SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
// Helper to detect if the operand is CUR_DIRECTION rounding mode.
auto isRoundModeCurDirection = [](SDValue Rnd) {
if (!isa<ConstantSDNode>(Rnd))
return false;
unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
return Round == X86::STATIC_ROUNDING::CUR_DIRECTION;
};
SDLoc dl(Op);
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
MVT VT = Op.getSimpleValueType();
const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
if (IntrData) {
switch(IntrData->Type) {
case INTR_TYPE_1OP: {
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(2);
if (!isRoundModeCurDirection(Rnd)) {
return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
Op.getOperand(1), Rnd);
}
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
}
case INTR_TYPE_2OP: {
SDValue Src2 = Op.getOperand(2);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(3);
if (!isRoundModeCurDirection(Rnd)) {
return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
Op.getOperand(1), Src2, Rnd);
}
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Op.getOperand(1), Src2);
}
case INTR_TYPE_3OP:
case INTR_TYPE_3OP_IMM8: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
if (IntrData->Type == INTR_TYPE_3OP_IMM8)
Src3 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src3);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd)) {
return DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src1, Src2, Src3, Rnd);
}
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Src1, Src2, Src3);
}
case INTR_TYPE_4OP:
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
case INTR_TYPE_1OP_MASK_RM: {
SDValue Src = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
SDValue RoundingMode;
// We always add rounding mode to the Node.
// If the rounding mode is not specified, we add the
// "current direction" mode.
if (Op.getNumOperands() == 4)
RoundingMode =
DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
else
RoundingMode = Op.getOperand(4);
assert(IntrData->Opc1 == 0 && "Unexpected second opcode!");
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
RoundingMode),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_1OP_MASK: {
SDValue Src = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
// We add rounding mode to the Node when
// - RM Opcode is specified and
// - RM is not "current direction".
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd)) {
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src, Rnd),
Mask, PassThru, Subtarget, DAG);
}
}
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_SCALAR_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue passThru = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
// There are 2 kinds of intrinsics in this group:
// (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
// (2) With rounding mode and sae - 7 operands.
bool HasRounding = IntrWithRoundingModeOpcode != 0;
if (Op.getNumOperands() == (5U + HasRounding)) {
if (HasRounding) {
SDValue Rnd = Op.getOperand(5);
if (!isRoundModeCurDirection(Rnd))
return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, VT, Src1, Src2, Rnd),
Mask, passThru, Subtarget, DAG);
}
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
Src2),
Mask, passThru, Subtarget, DAG);
}
assert(Op.getNumOperands() == (6U + HasRounding) &&
"Unexpected intrinsic form");
SDValue RoundingMode = Op.getOperand(5);
if (HasRounding) {
SDValue Sae = Op.getOperand(6);
if (!isRoundModeCurDirection(Sae))
return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, VT, Src1, Src2,
RoundingMode, Sae),
Mask, passThru, Subtarget, DAG);
}
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
Src2, RoundingMode),
Mask, passThru, Subtarget, DAG);
}
case INTR_TYPE_SCALAR_MASK_RM: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src0 = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
// There are 2 kinds of intrinsics in this group:
// (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
// (2) With rounding mode and sae - 7 operands.
if (Op.getNumOperands() == 6) {
SDValue Sae = Op.getOperand(5);
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
Sae),
Mask, Src0, Subtarget, DAG);
}
assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form");
SDValue RoundingMode = Op.getOperand(5);
SDValue Sae = Op.getOperand(6);
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
RoundingMode, Sae),
Mask, Src0, Subtarget, DAG);
}
case INTR_TYPE_2OP_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue PassThru = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(5);
if (!isRoundModeCurDirection(Rnd)) {
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src1, Src2, Rnd),
Mask, PassThru, Subtarget, DAG);
}
}
// TODO: Intrinsics should have fast-math-flags to propagate.
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,Src1,Src2),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_2OP_MASK_RM: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue PassThru = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
// We specify 2 possible modes for intrinsics, with/without rounding
// modes.
// First, we check if the intrinsic has a rounding mode (6 operands);
// if not, we set the rounding mode to "current".
SDValue Rnd;
if (Op.getNumOperands() == 6)
Rnd = Op.getOperand(5);
else
Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
Src1, Src2, Rnd),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_3OP_SCALAR_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
SDValue PassThru = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(6);
if (!isRoundModeCurDirection(Rnd))
return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, VT, Src1, Src2, Src3, Rnd),
Mask, PassThru, Subtarget, DAG);
}
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_3OP_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
SDValue PassThru = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(6);
if (!isRoundModeCurDirection(Rnd)) {
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src1, Src2, Src3, Rnd),
Mask, PassThru, Subtarget, DAG);
}
}
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
Src1, Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
case VPERM_2OP : {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
// Swap Src1 and Src2 in the node creation
return DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1);
}
case IFMA_OP:
// NOTE: We need to swizzle the operands to pass the multiply operands
// first.
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
case CVTPD2PS:
// ISD::FP_ROUND has a second argument that indicates if the truncation
// does not change the value. Set it to 0 since it can change.
return DAG.getNode(IntrData->Opc0, dl, VT, Op.getOperand(1),
DAG.getIntPtrConstant(0, dl));
case CVTPD2PS_RND_MASK: {
SDValue Src = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
// We add rounding mode to the Node when
// - RM Opcode is specified and
// - RM is not "current direction".
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd)) {
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src, Rnd),
Mask, PassThru, Subtarget, DAG);
}
}
assert(IntrData->Opc0 == ISD::FP_ROUND && "Unexpected opcode!");
// ISD::FP_ROUND has a second argument that indicates if the truncation
// does not change the value. Set it to 0 since it can change.
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
DAG.getIntPtrConstant(0, dl)),
Mask, PassThru, Subtarget, DAG);
}
case FPCLASSS: {
SDValue Src1 = Op.getOperand(1);
SDValue Imm = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
Subtarget, DAG);
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
DAG.getConstant(0, dl, MVT::v8i1),
FPclassMask, DAG.getIntPtrConstant(0, dl));
return DAG.getBitcast(MVT::i8, Ins);
}
case CMP_MASK_CC: {
MVT MaskVT = Op.getSimpleValueType();
SDValue Cmp;
SDValue CC = Op.getOperand(3);
CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CC);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
if (IntrData->Opc1 != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd))
Cmp = DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
Op.getOperand(2), CC, Rnd);
}
// Default rounding mode.
if (!Cmp.getNode())
Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
Op.getOperand(2), CC);
return Cmp;
}
case CMP_MASK_SCALAR_CC: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(3));
SDValue Mask = Op.getOperand(4);
SDValue Cmp;
if (IntrData->Opc1 != 0) {
SDValue Rnd = Op.getOperand(5);
if (!isRoundModeCurDirection(Rnd))
Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Rnd);
}
// Default rounding mode.
if (!Cmp.getNode())
Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
Subtarget, DAG);
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
DAG.getConstant(0, dl, MVT::v8i1),
CmpMask, DAG.getIntPtrConstant(0, dl));
return DAG.getBitcast(MVT::i8, Ins);
}
case COMI: { // Comparison intrinsics
ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
SDValue SetCC;
switch (CC) {
case ISD::SETEQ: { // (ZF = 0 and PF = 0)
SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
break;
}
case ISD::SETNE: { // (ZF = 1 or PF = 1)
SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
break;
}
case ISD::SETGT: // (CF = 0 and ZF = 0)
SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
break;
case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
break;
}
case ISD::SETGE: // CF = 0
SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
break;
case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
break;
default:
llvm_unreachable("Unexpected illegal condition!");
}
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
case COMI_RM: { // Comparison intrinsics with Sae
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
SDValue Sae = Op.getOperand(4);
SDValue FCmp;
if (isRoundModeCurDirection(Sae))
FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
DAG.getConstant(CondVal, dl, MVT::i8));
else
FCmp = DAG.getNode(X86ISD::FSETCCM_RND, dl, MVT::v1i1, LHS, RHS,
DAG.getConstant(CondVal, dl, MVT::i8), Sae);
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
DAG.getConstant(0, dl, MVT::v16i1),
FCmp, DAG.getIntPtrConstant(0, dl));
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
DAG.getBitcast(MVT::i16, Ins));
}
case VSHIFT:
return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
Op.getOperand(1), Op.getOperand(2), Subtarget,
DAG);
case COMPRESS_EXPAND_IN_REG: {
SDValue Mask = Op.getOperand(3);
SDValue DataToCompress = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
if (isAllOnesConstant(Mask)) // return data as is
return Op.getOperand(1);
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
DataToCompress),
Mask, PassThru, Subtarget, DAG);
}
case FIXUPIMMS:
case FIXUPIMMS_MASKZ:
case FIXUPIMM:
case FIXUPIMM_MASKZ:{
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
SDValue Imm = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
SDValue Passthru = (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMMS ) ?
Src1 : getZeroVector(VT, Subtarget, DAG, dl);
// We specify 2 possible modes for intrinsics, with/without rounding
// modes.
// First, we check if the intrinsic has a rounding mode (7 operands);
// if not, we set the rounding mode to "current".
SDValue Rnd;
if (Op.getNumOperands() == 7)
Rnd = Op.getOperand(6);
else
Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
if (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMM_MASKZ)
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
Src1, Src2, Src3, Imm, Rnd),
Mask, Passthru, Subtarget, DAG);
else // Scalar - FIXUPIMMS, FIXUPIMMS_MASKZ
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
Src1, Src2, Src3, Imm, Rnd),
Mask, Passthru, Subtarget, DAG);
}
case ROUNDP: {
assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
// Clear the upper bits of the rounding immediate so that the legacy
// intrinsic can't trigger the scaling behavior of VRNDSCALE.
SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32,
Op.getOperand(2),
DAG.getConstant(0xf, dl, MVT::i32));
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Op.getOperand(1), RoundingMode);
}
case ROUNDS: {
assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
// Clear the upper bits of the rounding immediate so that the legacy
// intrinsic can't trigger the scaling behavior of VRNDSCALE.
SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32,
Op.getOperand(3),
DAG.getConstant(0xf, dl, MVT::i32));
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), RoundingMode);
}
// ADC/ADCX/SBB
case ADX: {
SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
SDValue Res;
// If the carry in is zero, then we should just use ADD/SUB instead of
// ADC/SBB.
if (isNullConstant(Op.getOperand(1))) {
Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
Op.getOperand(3));
} else {
SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
DAG.getConstant(-1, dl, MVT::i8));
Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
Op.getOperand(3), GenCF.getValue(1));
}
SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
SDValue Results[] = { SetCC, Res };
return DAG.getMergeValues(Results, dl);
}
case CVTPD2PS_MASK:
case CVTPD2I_MASK:
case TRUNCATE_TO_REG: {
SDValue Src = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
if (isAllOnesConstant(Mask))
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
MVT SrcVT = Src.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
Mask);
}
case CVTPS2PH_MASK: {
SDValue Src = Op.getOperand(1);
SDValue Rnd = Op.getOperand(2);
SDValue PassThru = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
if (isAllOnesConstant(Mask))
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
MVT SrcVT = Src.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
PassThru, Mask);
}
default:
break;
}
}
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
// ptest and testp intrinsics. The intrinsics these come from are designed to
// return an integer value, not just an instruction, so lower them to the ptest
// or testp pattern and a setcc for the result.
case Intrinsic::x86_avx512_ktestc_b:
case Intrinsic::x86_avx512_ktestc_w:
case Intrinsic::x86_avx512_ktestc_d:
case Intrinsic::x86_avx512_ktestc_q:
case Intrinsic::x86_avx512_ktestz_b:
case Intrinsic::x86_avx512_ktestz_w:
case Intrinsic::x86_avx512_ktestz_d:
case Intrinsic::x86_avx512_ktestz_q:
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_sse41_ptestnzc:
case Intrinsic::x86_avx_ptestz_256:
case Intrinsic::x86_avx_ptestc_256:
case Intrinsic::x86_avx_ptestnzc_256:
case Intrinsic::x86_avx_vtestz_ps:
case Intrinsic::x86_avx_vtestc_ps:
case Intrinsic::x86_avx_vtestnzc_ps:
case Intrinsic::x86_avx_vtestz_pd:
case Intrinsic::x86_avx_vtestc_pd:
case Intrinsic::x86_avx_vtestnzc_pd:
case Intrinsic::x86_avx_vtestz_ps_256:
case Intrinsic::x86_avx_vtestc_ps_256:
case Intrinsic::x86_avx_vtestnzc_ps_256:
case Intrinsic::x86_avx_vtestz_pd_256:
case Intrinsic::x86_avx_vtestc_pd_256:
case Intrinsic::x86_avx_vtestnzc_pd_256: {
unsigned TestOpc = X86ISD::PTEST;
X86::CondCode X86CC;
switch (IntNo) {
default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
case Intrinsic::x86_avx512_ktestc_b:
case Intrinsic::x86_avx512_ktestc_w:
case Intrinsic::x86_avx512_ktestc_d:
case Intrinsic::x86_avx512_ktestc_q:
// CF = 1
TestOpc = X86ISD::KTEST;
X86CC = X86::COND_B;
break;
case Intrinsic::x86_avx512_ktestz_b:
case Intrinsic::x86_avx512_ktestz_w:
case Intrinsic::x86_avx512_ktestz_d:
case Intrinsic::x86_avx512_ktestz_q:
TestOpc = X86ISD::KTEST;
X86CC = X86::COND_E;
break;
case Intrinsic::x86_avx_vtestz_ps:
case Intrinsic::x86_avx_vtestz_pd:
case Intrinsic::x86_avx_vtestz_ps_256:
case Intrinsic::x86_avx_vtestz_pd_256:
TestOpc = X86ISD::TESTP;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_avx_ptestz_256:
// ZF = 1
X86CC = X86::COND_E;
break;
case Intrinsic::x86_avx_vtestc_ps:
case Intrinsic::x86_avx_vtestc_pd:
case Intrinsic::x86_avx_vtestc_ps_256:
case Intrinsic::x86_avx_vtestc_pd_256:
TestOpc = X86ISD::TESTP;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_avx_ptestc_256:
// CF = 1
X86CC = X86::COND_B;
break;
case Intrinsic::x86_avx_vtestnzc_ps:
case Intrinsic::x86_avx_vtestnzc_pd:
case Intrinsic::x86_avx_vtestnzc_ps_256:
case Intrinsic::x86_avx_vtestnzc_pd_256:
TestOpc = X86ISD::TESTP;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestnzc:
case Intrinsic::x86_avx_ptestnzc_256:
// ZF and CF = 0
X86CC = X86::COND_A;
break;
}
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
case Intrinsic::x86_sse42_pcmpistria128:
case Intrinsic::x86_sse42_pcmpestria128:
case Intrinsic::x86_sse42_pcmpistric128:
case Intrinsic::x86_sse42_pcmpestric128:
case Intrinsic::x86_sse42_pcmpistrio128:
case Intrinsic::x86_sse42_pcmpestrio128:
case Intrinsic::x86_sse42_pcmpistris128:
case Intrinsic::x86_sse42_pcmpestris128:
case Intrinsic::x86_sse42_pcmpistriz128:
case Intrinsic::x86_sse42_pcmpestriz128: {
unsigned Opcode;
X86::CondCode X86CC;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::x86_sse42_pcmpistria128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_A;
break;
case Intrinsic::x86_sse42_pcmpestria128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_A;
break;
case Intrinsic::x86_sse42_pcmpistric128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_B;
break;
case Intrinsic::x86_sse42_pcmpestric128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_B;
break;
case Intrinsic::x86_sse42_pcmpistrio128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_O;
break;
case Intrinsic::x86_sse42_pcmpestrio128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_O;
break;
case Intrinsic::x86_sse42_pcmpistris128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_S;
break;
case Intrinsic::x86_sse42_pcmpestris128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_S;
break;
case Intrinsic::x86_sse42_pcmpistriz128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_E;
break;
case Intrinsic::x86_sse42_pcmpestriz128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_E;
break;
}
SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
case Intrinsic::x86_sse42_pcmpistri128:
case Intrinsic::x86_sse42_pcmpestri128: {
unsigned Opcode;
if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
Opcode = X86ISD::PCMPISTR;
else
Opcode = X86ISD::PCMPESTR;
SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
return DAG.getNode(Opcode, dl, VTs, NewOps);
}
case Intrinsic::x86_sse42_pcmpistrm128:
case Intrinsic::x86_sse42_pcmpestrm128: {
unsigned Opcode;
if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
Opcode = X86ISD::PCMPISTR;
else
Opcode = X86ISD::PCMPESTR;
SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
}
case Intrinsic::eh_sjlj_lsda: {
MachineFunction &MF = DAG.getMachineFunction();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
auto &Context = MF.getMMI().getContext();
MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
Twine(MF.getFunctionNumber()));
return DAG.getNode(getGlobalWrapperKind(), dl, VT,
DAG.getMCSymbol(S, PtrVT));
}
case Intrinsic::x86_seh_lsda: {
// Compute the symbol for the LSDA. We know it'll get emitted later.
MachineFunction &MF = DAG.getMachineFunction();
SDValue Op1 = Op.getOperand(1);
auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
GlobalValue::dropLLVMManglingEscape(Fn->getName()));
// Generate a simple absolute symbol reference. This intrinsic is only
// supported on 32-bit Windows, which isn't PIC.
SDValue Result = DAG.getMCSymbol(LSDASym, VT);
return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
}
case Intrinsic::eh_recoverfp: {
SDValue FnOp = Op.getOperand(1);
SDValue IncomingFPOp = Op.getOperand(2);
GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
if (!Fn)
report_fatal_error(
"llvm.eh.recoverfp must take a function as the first argument");
return recoverFramePointer(DAG, Fn, IncomingFPOp);
}
case Intrinsic::localaddress: {
// Returns one of the stack, base, or frame pointer registers, depending on
// which is used to reference local variables.
MachineFunction &MF = DAG.getMachineFunction();
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned Reg;
if (RegInfo->hasBasePointer(MF))
Reg = RegInfo->getBaseRegister();
else // This function handles the SP or FP case.
Reg = RegInfo->getPtrSizedFrameRegister(MF);
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
}
}
static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
// Scale must be constant.
if (!C)
return SDValue();
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
EVT MaskVT = Mask.getValueType();
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
// If source is undef or we know it won't be used, use a zero vector
// to break register dependency.
// TODO: use undef instead and let BreakFalseDeps deal with it?
if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
SDValue Ops[] = {Src, Base, Scale, Index, Disp, Segment, Mask, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
return DAG.getMergeValues(RetOps, dl);
}
static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
// Scale must be constant.
if (!C)
return SDValue();
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
VT.getVectorNumElements());
MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
// We support two versions of the gather intrinsics: one with a scalar mask
// and one with a vXi1 mask. Convert a scalar mask to vXi1 if necessary.
if (Mask.getValueType() != MaskVT)
Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
// If source is undef or we know it won't be used, use a zero vector
// to break register dependency.
// TODO: use undef instead and let BreakFalseDeps deal with it?
if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
SDValue Ops[] = {Src, Mask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
return DAG.getMergeValues(RetOps, dl);
}
static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
// Scale must be constant.
if (!C)
return SDValue();
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
Src.getSimpleValueType().getVectorNumElements());
MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
// We support two versions of the scatter intrinsics: one with a scalar mask
// and one with a vXi1 mask. Convert a scalar mask to vXi1 if necessary.
if (Mask.getValueType() != MaskVT)
Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
SDValue Ops[] = {Base, Scale, Index, Disp, Segment, Mask, Src, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
return SDValue(Res, 1);
}
static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Mask, SDValue Base, SDValue Index,
SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
// Scale must be constant.
if (!C)
return SDValue();
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
MVT MaskVT =
MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
return SDValue(Res, 0);
}
/// Handles the lowering of builtin intrinsics that return the value
/// of the extended control register.
static void getExtendedControlRegister(SDNode *N, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
SmallVectorImpl<SDValue> &Results) {
assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue LO, HI;
// The ECX register is used to select the index of the XCR register to
// return.
SDValue Chain =
DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX, N->getOperand(2));
SDNode *N1 = DAG.getMachineNode(X86::XGETBV, DL, Tys, Chain);
Chain = SDValue(N1, 0);
// Reads the content of XCR and returns it in registers EDX:EAX.
if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
} else {
LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
LO.getValue(2));
}
Chain = HI.getValue(1);
if (Subtarget.is64Bit()) {
// Merge the two 32-bit values into a 64-bit one.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
DAG.getConstant(32, DL, MVT::i8));
Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
Results.push_back(Chain);
return;
}
// Use a buildpair to merge the two 32-bit values into a 64-bit one.
SDValue Ops[] = { LO, HI };
SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
Results.push_back(Pair);
Results.push_back(Chain);
}
/// Handles the lowering of builtin intrinsics that read performance monitor
/// counters (x86_rdpmc).
static void getReadPerformanceCounter(SDNode *N, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
SmallVectorImpl<SDValue> &Results) {
assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue LO, HI;
// The ECX register is used to select the index of the performance counter
// to read.
SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
N->getOperand(2));
SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
// Reads the content of a 64-bit performance counter and returns it in the
// registers EDX:EAX.
if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
} else {
LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
LO.getValue(2));
}
Chain = HI.getValue(1);
if (Subtarget.is64Bit()) {
// The EAX register is loaded with the low-order 32 bits. The EDX register
// is loaded with the supported high-order bits of the counter.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
DAG.getConstant(32, DL, MVT::i8));
Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
Results.push_back(Chain);
return;
}
// Use a buildpair to merge the two 32-bit values into a 64-bit one.
SDValue Ops[] = { LO, HI };
SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
Results.push_back(Pair);
Results.push_back(Chain);
}
/// Handles the lowering of builtin intrinsics that read the time stamp counter
/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
/// READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
SmallVectorImpl<SDValue> &Results) {
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
SDValue LO, HI;
// The processor's time-stamp counter (a 64-bit MSR) is stored into the
// EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
// and the EAX register is loaded with the low-order 32 bits.
if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
} else {
LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
LO.getValue(2));
}
SDValue Chain = HI.getValue(1);
SDValue TSC;
if (Subtarget.is64Bit()) {
// The EDX register is loaded with the high-order 32 bits of the MSR, and
// the EAX register is loaded with the low-order 32 bits.
TSC = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
DAG.getConstant(32, DL, MVT::i8));
TSC = DAG.getNode(ISD::OR, DL, MVT::i64, LO, TSC);
} else {
// Use a buildpair to merge the two 32-bit values into a 64-bit one.
TSC = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, { LO, HI });
}
if (Opcode == X86ISD::RDTSCP_DAG) {
assert(N->getNumOperands() == 2 && "Unexpected number of operands!");
// Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
// the ECX register. Add 'ecx' explicitly to the chain.
SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
HI.getValue(2));
Results.push_back(TSC);
Results.push_back(ecx);
Results.push_back(ecx.getValue(1));
return;
}
Results.push_back(TSC);
Results.push_back(Chain);
}
static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SmallVector<SDValue, 3> Results;
SDLoc DL(Op);
getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
Results);
return DAG.getMergeValues(Results, DL);
}
static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
SDValue RegNode = Op.getOperand(2);
WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
if (!EHInfo)
report_fatal_error("EH registrations only live in functions using WinEH");
// Cast the operand to an alloca, and remember the frame index.
auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
if (!FINode)
report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
// Return the chain operand without making any DAG nodes.
return Chain;
}
static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
SDValue EHGuard = Op.getOperand(2);
WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
if (!EHInfo)
report_fatal_error("EHGuard only live in functions using WinEH");
// Cast the operand to an alloca, and remember the frame index.
auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
if (!FINode)
report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
EHInfo->EHGuardFrameIndex = FINode->getIndex();
// Return the chain operand without making any DAG nodes.
return Chain;
}
/// Emit Truncating Store with signed or unsigned saturation.
static SDValue
EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
SelectionDAG &DAG) {
SDVTList VTs = DAG.getVTList(MVT::Other);
SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
return SignedSat ?
DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}
/// Emit Masked Truncating Store with signed or unsigned saturation.
static SDValue
EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
MachineMemOperand *MMO, SelectionDAG &DAG) {
SDVTList VTs = DAG.getVTList(MVT::Other);
SDValue Ops[] = { Chain, Val, Ptr, Mask };
return SignedSat ?
DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
if (!IntrData) {
switch (IntNo) {
case llvm::Intrinsic::x86_seh_ehregnode:
return MarkEHRegistrationNode(Op, DAG);
case llvm::Intrinsic::x86_seh_ehguard:
return MarkEHGuard(Op, DAG);
case llvm::Intrinsic::x86_flags_read_u32:
case llvm::Intrinsic::x86_flags_read_u64:
case llvm::Intrinsic::x86_flags_write_u32:
case llvm::Intrinsic::x86_flags_write_u64: {
// We need a frame pointer because this will get lowered to a PUSH/POP
// sequence.
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
MFI.setHasCopyImplyingStackAdjustment(true);
// Don't do anything here, we will expand these intrinsics out later
// during ExpandISelPseudos in EmitInstrWithCustomInserter.
return SDValue();
}
case Intrinsic::x86_lwpins32:
case Intrinsic::x86_lwpins64:
case Intrinsic::x86_umwait:
case Intrinsic::x86_tpause: {
SDLoc dl(Op);
SDValue Chain = Op->getOperand(0);
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
unsigned Opcode;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic");
case Intrinsic::x86_umwait:
Opcode = X86ISD::UMWAIT;
break;
case Intrinsic::x86_tpause:
Opcode = X86ISD::TPAUSE;
break;
case Intrinsic::x86_lwpins32:
case Intrinsic::x86_lwpins64:
Opcode = X86ISD::LWPINS;
break;
}
SDValue Operation =
DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
Op->getOperand(3), Op->getOperand(4));
SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, SetCC);
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
Operation.getValue(1));
}
}
return SDValue();
}
SDLoc dl(Op);
switch(IntrData->Type) {
default: llvm_unreachable("Unknown Intrinsic Type");
case RDSEED:
case RDRAND: {
// Emit the node with the right value type.
SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
// If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
// Otherwise return the value from Rand, which is always 0, cast to i32.
SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
DAG.getConstant(1, dl, Op->getValueType(1)),
DAG.getConstant(X86::COND_B, dl, MVT::i8),
SDValue(Result.getNode(), 1) };
SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
// Return { result, isValid, chain }.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
SDValue(Result.getNode(), 2));
}
case GATHER_AVX2: {
SDValue Chain = Op.getOperand(0);
SDValue Src = Op.getOperand(2);
SDValue Base = Op.getOperand(3);
SDValue Index = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
Scale, Chain, Subtarget);
}
case GATHER: {
//gather(v1, mask, index, base, scale);
SDValue Chain = Op.getOperand(0);
SDValue Src = Op.getOperand(2);
SDValue Base = Op.getOperand(3);
SDValue Index = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale,
Chain, Subtarget);
}
case SCATTER: {
//scatter(base, mask, index, v1, scale);
SDValue Chain = Op.getOperand(0);
SDValue Base = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
SDValue Index = Op.getOperand(4);
SDValue Src = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
Scale, Chain, Subtarget);
}
case PREFETCH: {
SDValue Hint = Op.getOperand(6);
unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
assert((HintVal == 2 || HintVal == 3) &&
"Wrong prefetch hint in intrinsic: should be 2 or 3");
unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
SDValue Chain = Op.getOperand(0);
SDValue Mask = Op.getOperand(2);
SDValue Index = Op.getOperand(3);
SDValue Base = Op.getOperand(4);
SDValue Scale = Op.getOperand(5);
return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
Subtarget);
}
// Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
case RDTSC: {
SmallVector<SDValue, 2> Results;
getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
Results);
return DAG.getMergeValues(Results, dl);
}
// Read Performance Monitoring Counters.
case RDPMC: {
SmallVector<SDValue, 2> Results;
getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
return DAG.getMergeValues(Results, dl);
}
// Get Extended Control Register.
case XGETBV: {
SmallVector<SDValue, 2> Results;
getExtendedControlRegister(Op.getNode(), dl, DAG, Subtarget, Results);
return DAG.getMergeValues(Results, dl);
}
// XTEST intrinsics.
case XTEST: {
SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
Ret, SDValue(InTrans.getNode(), 1));
}
case TRUNCATE_TO_MEM_VI8:
case TRUNCATE_TO_MEM_VI16:
case TRUNCATE_TO_MEM_VI32: {
SDValue Mask = Op.getOperand(4);
SDValue DataToTruncate = Op.getOperand(3);
SDValue Addr = Op.getOperand(2);
SDValue Chain = Op.getOperand(0);
MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
assert(MemIntr && "Expected MemIntrinsicSDNode!");
EVT MemVT = MemIntr->getMemoryVT();
uint16_t TruncationOp = IntrData->Opc0;
switch (TruncationOp) {
case X86ISD::VTRUNC: {
if (isAllOnesConstant(Mask)) // return just a truncate store
return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
MemIntr->getMemOperand());
MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT,
MemIntr->getMemOperand(), true /* truncating */);
}
case X86ISD::VTRUNCUS:
case X86ISD::VTRUNCS: {
bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
if (isAllOnesConstant(Mask))
return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
MemIntr->getMemOperand(), DAG);
MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
VMask, MemVT, MemIntr->getMemOperand(), DAG);
}
default:
llvm_unreachable("Unsupported truncstore intrinsic");
}
}
}
}
SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
MFI.setReturnAddressIsTaken(true);
if (verifyReturnAddressArgumentIsConstant(Op, DAG))
return SDValue();
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDLoc dl(Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
MachinePointerInfo());
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
MachinePointerInfo());
}
SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
return getReturnAddressFrameIndex(DAG);
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
EVT VT = Op.getValueType();
MFI.setFrameAddressIsTaken(true);
if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
// Depth > 0 makes no sense on targets which use Windows unwind codes. It
// is not possible to crawl up the stack without looking at the unwind codes
// simultaneously.
int FrameAddrIndex = FuncInfo->getFAIndex();
if (!FrameAddrIndex) {
// Set up a frame object for the return address.
unsigned SlotSize = RegInfo->getSlotSize();
FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
SlotSize, /*Offset=*/0, /*IsImmutable=*/false);
FuncInfo->setFAIndex(FrameAddrIndex);
}
return DAG.getFrameIndex(FrameAddrIndex, VT);
}
unsigned FrameReg =
RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
SDLoc dl(Op); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
(FrameReg == X86::EBP && VT == MVT::i32)) &&
"Invalid Frame Register!");
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
MachinePointerInfo());
return FrameAddr;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
SelectionDAG &DAG) const {
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
const MachineFunction &MF = DAG.getMachineFunction();
unsigned Reg = StringSwitch<unsigned>(RegName)
.Case("esp", X86::ESP)
.Case("rsp", X86::RSP)
.Case("ebp", X86::EBP)
.Case("rbp", X86::RBP)
.Default(0);
if (Reg == X86::EBP || Reg == X86::RBP) {
if (!TFI.hasFP(MF))
report_fatal_error("register " + StringRef(RegName) +
" is allocatable: function has no frame pointer");
#ifndef NDEBUG
else {
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FrameReg =
RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
"Invalid Frame Register!");
}
#endif
}
if (Reg)
return Reg;
report_fatal_error("Invalid register name global variable");
}
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}
unsigned X86TargetLowering::getExceptionPointerRegister(
const Constant *PersonalityFn) const {
if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
}
unsigned X86TargetLowering::getExceptionSelectorRegister(
const Constant *PersonalityFn) const {
// Funclet personalities don't use selectors (the runtime does the selection).
assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
}
bool X86TargetLowering::needsFixedCatchObjects() const {
return Subtarget.isTargetWin64();
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
SDLoc dl (Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
(FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
"Invalid Frame Register!");
SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
DAG.getIntPtrConstant(RegInfo->getSlotSize(),
dl));
StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
DAG.getRegister(StoreAddrReg, PtrVT));
}
SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
// If the subtarget is not 64-bit, we may need the global base reg
// after isel pseudo expansion, i.e., after the CGBR pass has run.
// Therefore, ask for the GlobalBaseReg now, so that the pass
// inserts the code for us in case we need it.
// Otherwise, we will end up in a situation where we will
// reference a virtual register that is not defined!
if (!Subtarget.is64Bit()) {
const X86InstrInfo *TII = Subtarget.getInstrInfo();
(void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
}
return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
DAG.getVTList(MVT::i32, MVT::Other),
Op.getOperand(0), Op.getOperand(1));
}
SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
Op.getOperand(0), Op.getOperand(1));
}
SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
Op.getOperand(0));
}
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
return Op.getOperand(0);
}
SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const {
SDValue Root = Op.getOperand(0);
SDValue Trmp = Op.getOperand(1); // trampoline
SDValue FPtr = Op.getOperand(2); // nested function
SDValue Nest = Op.getOperand(3); // 'nest' parameter value
SDLoc dl (Op);
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
if (Subtarget.is64Bit()) {
SDValue OutChains[6];
// Large code-model.
const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
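// For reference, the stores below assemble the following trampoline
// (operands shown symbolically):
//   movabsq $<FPtr>, %r11
//   movabsq $<Nest>, %r10
//   jmpq    *%r11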
// Load the pointer to the nested function into R11.
unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
SDValue Addr = Trmp;
OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(2, dl, MVT::i64));
OutChains[1] =
DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
/* Alignment = */ 2);
// Load the 'nest' parameter value into R10.
// R10 is specified in X86CallingConv.td
OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(10, dl, MVT::i64));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr, 10));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(12, dl, MVT::i64));
OutChains[3] =
DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
/* Alignment = */ 2);
// Jump to the nested function.
OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(20, dl, MVT::i64));
OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr, 20));
unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(22, dl, MVT::i64));
OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
Addr, MachinePointerInfo(TrmpAddr, 22));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
} else {
const Function *Func =
cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
CallingConv::ID CC = Func->getCallingConv();
unsigned NestReg;
switch (CC) {
default:
llvm_unreachable("Unsupported calling convention");
case CallingConv::C:
case CallingConv::X86_StdCall: {
// Pass 'nest' parameter in ECX.
// Must be kept in sync with X86CallingConv.td
NestReg = X86::ECX;
// Check that ECX wasn't needed by an 'inreg' parameter.
FunctionType *FTy = Func->getFunctionType();
const AttributeList &Attrs = Func->getAttributes();
if (!Attrs.isEmpty() && !Func->isVarArg()) {
unsigned InRegCount = 0;
unsigned Idx = 1;
for (FunctionType::param_iterator I = FTy->param_begin(),
E = FTy->param_end(); I != E; ++I, ++Idx)
if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
auto &DL = DAG.getDataLayout();
// FIXME: should only count parameters that are lowered to integers.
InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
}
if (InRegCount > 2) {
report_fatal_error("Nest register in use - reduce number of inreg"
" parameters!");
}
}
break;
}
case CallingConv::X86_FastCall:
case CallingConv::X86_ThisCall:
case CallingConv::Fast:
// Pass 'nest' parameter in EAX.
// Must be kept in sync with X86CallingConv.td
NestReg = X86::EAX;
break;
}
SDValue OutChains[4];
SDValue Addr, Disp;
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(10, dl, MVT::i32));
Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
// This is storing the opcode for MOV32ri.
const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
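// For reference, the stores below assemble:
//   movl $<Nest>, %<NestReg>
//   jmp  <FPtr>          ; encoded as rel32 = FPtr - (Trmp + 10)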
OutChains[0] =
DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
Trmp, MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(1, dl, MVT::i32));
OutChains[1] =
DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
/* Alignment = */ 1);
const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(5, dl, MVT::i32));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
Addr, MachinePointerInfo(TrmpAddr, 5),
/* Alignment = */ 1);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(6, dl, MVT::i32));
OutChains[3] =
DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
/* Alignment = */ 1);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
}
SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SelectionDAG &DAG) const {
/*
The rounding mode is in bits 11:10 of FPSR, and has the following
settings:
00 Round to nearest
01 Round to -inf
10 Round to +inf
11 Round to 0
FLT_ROUNDS, on the other hand, expects the following:
-1 Undefined
0 Round to 0
1 Round to nearest
2 Round to +inf
3 Round to -inf
To perform the conversion, we do:
(((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
*/
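// A quick sanity check of the mapping above: with FPSR bits 11:10 = 01
// (round toward -inf), ((FPSR & 0x800) >> 11) = 0 and ((FPSR & 0x400) >> 9) = 2,
// so (0 | 2) + 1 = 3 and 3 & 3 = 3, which is FLT_ROUNDS "Round to -inf".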
MachineFunction &MF = DAG.getMachineFunction();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
// Save FP Control Word to stack slot
int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false);
SDValue StackSlot =
DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
MachineMemOperand::MOStore, 2, 2);
SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
DAG.getVTList(MVT::Other),
Ops, MVT::i16, MMO);
// Load FP Control Word from stack slot
SDValue CWD =
DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
// Transform as necessary
SDValue CWD1 =
DAG.getNode(ISD::SRL, DL, MVT::i16,
DAG.getNode(ISD::AND, DL, MVT::i16,
CWD, DAG.getConstant(0x800, DL, MVT::i16)),
DAG.getConstant(11, DL, MVT::i8));
SDValue CWD2 =
DAG.getNode(ISD::SRL, DL, MVT::i16,
DAG.getNode(ISD::AND, DL, MVT::i16,
CWD, DAG.getConstant(0x400, DL, MVT::i16)),
DAG.getConstant(9, DL, MVT::i8));
SDValue RetVal =
DAG.getNode(ISD::AND, DL, MVT::i16,
DAG.getNode(ISD::ADD, DL, MVT::i16,
DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
DAG.getConstant(1, DL, MVT::i16)),
DAG.getConstant(3, DL, MVT::i16));
return DAG.getNode((VT.getSizeInBits() < 16 ?
ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
// Split a unary integer op into 2 half-sized ops.
static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
unsigned NumElems = VT.getVectorNumElements();
unsigned SizeInBits = VT.getSizeInBits();
MVT EltVT = VT.getVectorElementType();
SDValue Src = Op.getOperand(0);
assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
"Src and Op should have the same element type!");
// Extract the Lo/Hi vectors
SDLoc dl(Op);
SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
}
// Decompose 256-bit ops into smaller 128-bit ops.
static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return LowerVectorIntUnary(Op, DAG);
}
// Decompose 512-bit ops into smaller 256-bit ops.
static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
assert(Op.getSimpleValueType().is512BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 512-bit vector integer operation");
return LowerVectorIntUnary(Op, DAG);
}
/// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
//
// i8/i16 vectors are implemented using the dword LZCNT vector instruction
// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert(Op.getOpcode() == ISD::CTLZ);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElems = VT.getVectorNumElements();
assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
"Unsupported element type");
// Split the vector; its Lo and Hi parts will be handled in the next iteration.
if (NumElems > 16 ||
(NumElems == 16 && !Subtarget.canExtendTo512DQ()))
return LowerVectorIntUnary(Op, DAG);
MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
"Unsupported value type for operation");
// Use the natively supported vector instruction vplzcntd.
Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
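// E.g. for i8 elements, zext32(x) adds 32 - 8 = 24 leading zeros, so the
// dword lzcnt result is corrected by subtracting that delta below.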
SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
}
// Lower CTLZ using a PSHUFB lookup table implementation.
static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
int NumElts = VT.getVectorNumElements();
int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
// Per-nibble leading zero PSHUFB lookup table.
const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
/* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
/* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
/* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
SmallVector<SDValue, 64> LUTVec;
for (int i = 0; i < NumBytes; ++i)
LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
// Begin by bitcasting the input to a byte vector, then split those bytes
// into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
// If the hi input nibble is zero then we add both results together, otherwise
// we just take the hi result (by masking the lo result to zero before the
// add).
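// For example, byte 0x05 has a zero hi nibble, so LUT[0] + LUT[5] = 4 + 1 = 5
// leading zeros; byte 0x1C has hi nibble 1, so only LUT[1] = 3 is used.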
SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
SDValue Zero = DAG.getConstant(0, DL, CurrVT);
SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
SDValue Lo = Op0;
SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
SDValue HiZ;
if (CurrVT.is512BitVector()) {
MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
} else {
HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
}
Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
// Merge result back from vXi8 back to VT, working on the lo/hi halves
// of the current vector width in the same way we did for the nibbles.
// If the upper half of the input element is zero then add the halves'
// leading zero counts together, otherwise just use the upper half's.
// Double the width of the result until we are at target width.
while (CurrVT != VT) {
int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
int CurrNumElts = CurrVT.getVectorNumElements();
MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
// Check if the upper half of the input element is zero.
if (CurrVT.is512BitVector()) {
MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
} else {
HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
}
HiZ = DAG.getBitcast(NextVT, HiZ);
// Move the upper/lower halves to the lower bits as we'll be extending to
// NextVT. Mask the lower result to zero if HiZ is true and add the results
// together.
SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
CurrVT = NextVT;
}
return Res;
}
static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (Subtarget.hasCDI() &&
// vXi8 vectors need to be promoted to 512-bits for vXi32.
(Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntUnary(Op, DAG);
// Decompose 512-bit ops into smaller 256-bit ops.
if (VT.is512BitVector() && !Subtarget.hasBWI())
return Lower512IntUnary(Op, DAG);
assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
}
static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
unsigned Opc = Op.getOpcode();
if (VT.isVector())
return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
Op = Op.getOperand(0);
if (VT == MVT::i8) {
// Zero extend to i32 since there is no i8 bsr.
OpVT = MVT::i32;
Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
}
// Issue a bsr (scan bits in reverse) which also sets EFLAGS.
SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
if (Opc == ISD::CTLZ) {
// If src is zero (i.e. bsr sets ZF), returns NumBits.
SDValue Ops[] = {
Op,
DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
DAG.getConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)
};
Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
}
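// For a nonzero input, ctlz(x) = (NumBits - 1) - BSR(x), which the XOR below
// computes since NumBits is a power of two. The CMOV constant 2*NumBits-1
// also XORs down to NumBits, giving NumBits for a zero input.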
// Finally xor with NumBits-1.
Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
DAG.getConstant(NumBits - 1, dl, OpVT));
if (VT == MVT::i8)
Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
return Op;
}
static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
unsigned NumBits = VT.getScalarSizeInBits();
SDValue N0 = Op.getOperand(0);
SDLoc dl(Op);
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntUnary(Op, DAG);
assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
"Only scalar CTTZ requires custom lowering");
// Issue a bsf (scan bits forward) which also sets EFLAGS.
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
// If src is zero (i.e. bsf sets ZF), returns NumBits.
SDValue Ops[] = {
Op,
DAG.getConstant(NumBits, dl, VT),
DAG.getConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)
};
return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
/// Break a 256-bit integer operation into two new 128-bit ones and then
/// concatenate the result back.
static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is256BitVector() && VT.isInteger() &&
"Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
SDLoc dl(Op);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
MVT EltVT = VT.getVectorElementType();
MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
/// Break a 512-bit integer operation into two new 256-bit ones and then
/// concatenate the result back.
static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is512BitVector() && VT.isInteger() &&
"Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
SDLoc dl(Op);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
MVT EltVT = VT.getVectorElementType();
MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
if (VT == MVT::i16 || VT == MVT::i32)
return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
if (VT.getScalarType() == MVT::i1)
return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
Op.getOperand(0), Op.getOperand(1));
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return split256IntArith(Op, DAG);
}
static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (VT.getScalarType() == MVT::i1) {
SDLoc dl(Op);
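// With i1 elements the saturation bounds collapse: saturating add becomes OR
// (overflow clamps to the all-ones value) and saturating sub becomes AND with
// the complement (clamping at zero), for both the signed and unsigned forms.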
switch (Op.getOpcode()) {
default: llvm_unreachable("Expected saturated arithmetic opcode");
case ISD::UADDSAT:
case ISD::SADDSAT:
return DAG.getNode(ISD::OR, dl, VT, Op.getOperand(0), Op.getOperand(1));
case ISD::USUBSAT:
case ISD::SSUBSAT:
return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
DAG.getNOT(dl, Op.getOperand(1), VT));
}
}
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return split256IntArith(Op, DAG);
}
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
// Since X86 does not have CMOV for 8-bit integer, we don't convert
// 8-bit integer abs to NEG and CMOV.
SDLoc DL(Op);
SDValue N0 = Op.getOperand(0);
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
DAG.getConstant(0, DL, VT), N0);
SDValue Ops[] = {N0, Neg, DAG.getConstant(X86::COND_GE, DL, MVT::i8),
SDValue(Neg.getNode(), 1)};
return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
}
// ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
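// The blend selects per lane on the sign of X: negative lanes take 0-X and
// non-negative lanes keep X, i.e. |X|.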
if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
SDLoc DL(Op);
SDValue Src = Op.getOperand(0);
SDValue Sub =
DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
}
if (VT.is256BitVector() && !Subtarget.hasInt256()) {
assert(VT.isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntUnary(Op, DAG);
}
// Default to expand.
return SDValue();
}
static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
// For AVX1 cases, split to use legal ops (everything but v4i64).
if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
return split256IntArith(Op, DAG);
SDLoc DL(Op);
unsigned Opcode = Op.getOpcode();
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
// For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
// using the SMIN/SMAX instructions and flipping the signbit back.
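// XORing both operands with 0x8000 maps unsigned order onto signed order,
// so umin(a, b) == smin(a ^ 0x8000, b ^ 0x8000) ^ 0x8000 (likewise for max).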
if (VT == MVT::v8i16) {
assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
"Unexpected MIN/MAX opcode");
SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
}
// Else, expand to a compare/select.
ISD::CondCode CC;
switch (Opcode) {
case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
default: llvm_unreachable("Unknown MINMAX opcode");
}
SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
return DAG.getSelect(DL, VT, Cond, N0, N1);
}
static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
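// An i1 multiply is just AND: the product is 1 only when both inputs are 1.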
if (VT.getScalarType() == MVT::i1)
return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
// Decompose 256-bit ops into 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return split256IntArith(Op, DAG);
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
// Lower v16i8/v32i8/v64i8 mul as extension to v8i16/v16i16/v32i16
// vector pairs, multiply and truncate.
if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
unsigned NumElts = VT.getVectorNumElements();
if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
(VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
return DAG.getNode(
ISD::TRUNCATE, dl, VT,
DAG.getNode(ISD::MUL, dl, ExVT,
DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
}
MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
// Extract the lo/hi parts and any-extend them to i16.
// We're going to mask off the low byte of each result element of the
// pmullw, so it doesn't matter what's in the high byte of each 16-bit
// element.
SDValue Undef = DAG.getUNDEF(VT);
SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
SDValue BLo, BHi;
if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
// If the RHS (B) is a constant, manually unpackl/unpackh it.
SmallVector<SDValue, 16> LoOps, HiOps;
for (unsigned i = 0; i != NumElts; i += 16) {
for (unsigned j = 0; j != 8; ++j) {
LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
MVT::i16));
HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
MVT::i16));
}
}
BLo = DAG.getBuildVector(ExVT, dl, LoOps);
BHi = DAG.getBuildVector(ExVT, dl, HiOps);
} else {
BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
}
// Multiply, mask the lower 8 bits of the lo/hi results and pack.
SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
// Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
if (VT == MVT::v4i32) {
assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
"Should not custom lower when pmulld is available!");
// Extract the odd parts.
static const int UnpackMask[] = { 1, -1, 3, -1 };
SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
// Multiply the even parts.
SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
DAG.getBitcast(MVT::v2i64, A),
DAG.getBitcast(MVT::v2i64, B));
// Now multiply odd parts.
SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
DAG.getBitcast(MVT::v2i64, Aodds),
DAG.getBitcast(MVT::v2i64, Bodds));
Evens = DAG.getBitcast(VT, Evens);
Odds = DAG.getBitcast(VT, Odds);
// Merge the two vectors back together with a shuffle. This expands into 2
// shuffles.
static const int ShufMask[] = { 0, 4, 2, 6 };
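// {0, 4, 2, 6} picks the low 32-bit halves ae_lo, bf_lo, cg_lo, dh_lo, i.e.
// the four i32 products back in their original order.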
return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
}
assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
"Only know how to lower V2I64/V4I64/V8I64 multiply");
assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
// Ahi = psrlqi(a, 32);
// Bhi = psrlqi(b, 32);
//
// AloBlo = pmuludq(a, b);
// AloBhi = pmuludq(a, Bhi);
// AhiBlo = pmuludq(Ahi, b);
//
// Hi = psllqi(AloBhi + AhiBlo, 32);
// return AloBlo + Hi;
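// This follows from
//   (a_hi*2^32 + a_lo) * (b_hi*2^32 + b_lo) mod 2^64
//     = a_lo*b_lo + 2^32 * (a_lo*b_hi + a_hi*b_lo),
// the a_hi*b_hi term being shifted entirely out of the low 64 bits.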
KnownBits AKnown = DAG.computeKnownBits(A);
KnownBits BKnown = DAG.computeKnownBits(B);
APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
SDValue Zero = DAG.getConstant(0, dl, VT);
// Only multiply lo/hi halves that aren't known to be zero.
SDValue AloBlo = Zero;
if (!ALoIsZero && !BLoIsZero)
AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
SDValue AloBhi = Zero;
if (!ALoIsZero && !BHiIsZero) {
SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
}
SDValue AhiBlo = Zero;
if (!AHiIsZero && !BLoIsZero) {
SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
}
SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
}
static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
bool IsSigned = Op->getOpcode() == ISD::MULHS;
unsigned NumElts = VT.getVectorNumElements();
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
// Decompose 256-bit ops into 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return split256IntArith(Op, DAG);
if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
(VT == MVT::v8i32 && Subtarget.hasInt256()) ||
(VT == MVT::v16i32 && Subtarget.hasAVX512()));
// PMULxD operations multiply each even value (starting at 0) of LHS with
// the related value of RHS and produce a widened result.
// E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
// => <2 x i64> <ae|cg>
//
// In other words, to have all the results, we need to perform two PMULxD:
// 1. one with the even values.
// 2. one with the odd values.
// To achieve #2, we need to place the odd values at an even position.
//
// Place the odd value at an even position (basically, shift all values 1
// step to the left):
const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
9, -1, 11, -1, 13, -1, 15, -1};
// <a|b|c|d> => <b|undef|d|undef>
SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
makeArrayRef(&Mask[0], NumElts));
// <e|f|g|h> => <f|undef|h|undef>
SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
makeArrayRef(&Mask[0], NumElts));
// Emit two multiplies, one for the even elements and one for the odd
// elements.
MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
unsigned Opcode =
(IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
// PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
// => <2 x i64> <ae|cg>
SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
DAG.getBitcast(MulVT, A),
DAG.getBitcast(MulVT, B)));
// PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
// => <2 x i64> <bf|dh>
SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
DAG.getBitcast(MulVT, Odd0),
DAG.getBitcast(MulVT, Odd1)));
// Shuffle it back into the right order.
SmallVector<int, 16> ShufMask(NumElts);
for (int i = 0; i != (int)NumElts; ++i)
ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
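// For example, with NumElts == 4 this evaluates to {1, 5, 3, 7}: element i
// takes the high (odd) i32 half of the corresponding 64-bit product,
// alternating between Mul1 and Mul2.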
SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
// If we have a signed multiply but no PMULDQ fix up the result of an
// unsigned multiply.
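// Sketch of why this works: reinterpreting a signed element x as unsigned
// adds 2^32 when x is negative, so
//   mulhu(a, b) == mulhs(a, b) + (a < 0 ? b : 0) + (b < 0 ? a : 0) (mod 2^32)
// and the T1 + T2 subtraction below undoes exactly that bias.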
if (IsSigned && !Subtarget.hasSSE41()) {
SDValue Zero = DAG.getConstant(0, dl, VT);
SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
}
return Res;
}
// Only i8 vectors should need custom lowering after this.
assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
(VT == MVT::v64i8 && Subtarget.hasBWI())) &&
"Unsupported vector type");
// Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
// logical shift down the upper half and pack back to i8.
// With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
// and then ashr/lshr the upper bits down to the lower bits before multiply.
unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
(VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
}
// For signed 512-bit vectors, split into 256-bit vectors to allow the
// sign-extension to occur.
if (VT == MVT::v64i8 && IsSigned)
return split512IntArith(Op, DAG);
// Signed AVX2 implementation - extend xmm subvectors to ymm.
if (VT == MVT::v32i8 && IsSigned) {
SDValue Lo = DAG.getIntPtrConstant(0, dl);
SDValue Hi = DAG.getIntPtrConstant(NumElts / 2, dl);
MVT ExVT = MVT::v16i16;
SDValue ALo = extract128BitVector(A, 0, DAG, dl);
SDValue BLo = extract128BitVector(B, 0, DAG, dl);
SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
// Bitcast back to VT and then pack all the even elements from Lo and Hi.
// Shuffle lowering should turn this into PACKUS+PERMQ
Lo = DAG.getBitcast(VT, Lo);
Hi = DAG.getBitcast(VT, Hi);
return DAG.getVectorShuffle(VT, dl, Lo, Hi,
{ 0, 2, 4, 6, 8, 10, 12, 14,
16, 18, 20, 22, 24, 26, 28, 30,
32, 34, 36, 38, 40, 42, 44, 46,
48, 50, 52, 54, 56, 58, 60, 62});
}
// For signed v16i8 and all unsigned vXi8 we will unpack the low and high
// half of each 128 bit lane to widen to a vXi16 type. Do the multiplies,
// shift the results and pack the half lane results back together.
MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
static const int PSHUFDMask[] = { 8, 9, 10, 11, 12, 13, 14, 15,
-1, -1, -1, -1, -1, -1, -1, -1};
// Extract the lo parts and zero/sign extend to i16.
// Only use SSE4.1 instructions for signed v16i8 where using unpack requires
// shifts to sign extend. Using unpack for unsigned only requires an xor to
// create zeros and a copy due to tied register constraints pre-AVX. But using
// zero_extend_vector_inreg would require an additional pshufd for the high
// part.
SDValue ALo, AHi;
if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
} else if (IsSigned) {
ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
} else {
ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
DAG.getConstant(0, dl, VT)));
AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
DAG.getConstant(0, dl, VT)));
}
SDValue BLo, BHi;
if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
// If the RHS is a constant, manually unpackl/unpackh and extend.
SmallVector<SDValue, 16> LoOps, HiOps;
for (unsigned i = 0; i != NumElts; i += 16) {
for (unsigned j = 0; j != 8; ++j) {
SDValue LoOp = B.getOperand(i + j);
SDValue HiOp = B.getOperand(i + j + 8);
if (IsSigned) {
LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
} else {
LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
}
LoOps.push_back(LoOp);
HiOps.push_back(HiOp);
}
}
BLo = DAG.getBuildVector(ExVT, dl, LoOps);
BHi = DAG.getBuildVector(ExVT, dl, HiOps);
} else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
} else if (IsSigned) {
BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
} else {
BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
DAG.getConstant(0, dl, VT)));
BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
DAG.getConstant(0, dl, VT)));
}
// Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results and
// pack back to vXi8.
SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
// Bitcast back to VT and then pack all the even elements from Lo and Hi.
return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget.isTargetWin64() && "Unexpected target");
EVT VT = Op.getValueType();
assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
"Unexpected return type for lowering");
RTLIB::Libcall LC;
bool isSigned;
switch (Op->getOpcode()) {
default: llvm_unreachable("Unexpected request for libcall!");
case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
}
SDLoc dl(Op);
SDValue InChain = DAG.getEntryNode();
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
EVT ArgVT = Op->getOperand(i).getValueType();
assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
"Unexpected argument type for lowering");
SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
Entry.Node = StackPtr;
InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
MachinePointerInfo(), /* Alignment = */ 16);
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Ty = PointerType::get(ArgTy,0);
Entry.IsSExt = false;
Entry.IsZExt = false;
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
getPointerTy(DAG.getDataLayout()));
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
.setChain(InChain)
.setLibCallee(
getLibcallCallingConv(LC),
static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
std::move(Args))
.setInRegister()
.setSExtResult(isSigned)
.setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
return DAG.getBitcast(VT, CallInfo.first);
}
// Return true if the required (according to Opcode) shift-imm form is natively
// supported by the Subtarget
static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
if (VT.getScalarSizeInBits() < 16)
return false;
if (VT.is512BitVector() && Subtarget.hasAVX512() &&
(VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
return true;
bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
(VT.is256BitVector() && Subtarget.hasInt256());
bool AShift = LShift && (Subtarget.hasAVX512() ||
(VT != MVT::v2i64 && VT != MVT::v4i64));
return (Opcode == ISD::SRA) ? AShift : LShift;
}
// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static
bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}
// Return true if the required (according to Opcode) variable-shift form is
// natively supported by the Subtarget
static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
return false;
// vXi16 supported only on AVX-512, BWI
if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
return false;
if (Subtarget.hasAVX512())
return true;
bool LShift = VT.is128BitVector() || VT.is256BitVector();
bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
return (Opcode == ISD::SRA) ? AShift : LShift;
}
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
SDValue Ex = DAG.getBitcast(ExVT, R);
// ashr(R, 63) === cmp_slt(R, 0)
if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
"Unsupported PCMPGT op");
return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
}
if (ShiftAmt >= 32) {
// Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
SDValue Upper =
getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
ShiftAmt - 32, DAG);
if (VT == MVT::v2i64)
Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
if (VT == MVT::v4i64)
Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
{9, 1, 11, 3, 13, 5, 15, 7});
} else {
// SRA upper i32, SRL whole i64 and select lower i32.
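// For v2i64 the {4, 1, 6, 3} mask below takes the low i32 of each lane
// from Lower (the logically shifted whole i64) and the high i32 from Upper
// (the arithmetically shifted copy), which together form the 64-bit
// arithmetic shift result.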
SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
ShiftAmt, DAG);
SDValue Lower =
getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
Lower = DAG.getBitcast(ExVT, Lower);
if (VT == MVT::v2i64)
Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
if (VT == MVT::v4i64)
Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
{8, 1, 10, 3, 12, 5, 14, 7});
}
return DAG.getBitcast(VT, Ex);
};
// Optimize shl/srl/sra with constant shift amount.
APInt APIntShiftAmt;
if (!isConstantSplat(Amt, APIntShiftAmt))
return SDValue();
uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
// i64 SRA needs to be performed as partial shifts.
if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
(Subtarget.hasInt256() && VT == MVT::v4i64)) &&
Op.getOpcode() == ISD::SRA)
return ArithmeticShiftRight64(ShiftAmt);
if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
VT == MVT::v64i8) {
unsigned NumElts = VT.getVectorNumElements();
MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
// Simple i8 add case
if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
return DAG.getNode(ISD::ADD, dl, VT, R, R);
// ashr(R, 7) === cmp_slt(R, 0)
if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
SDValue Zeros = DAG.getConstant(0, dl, VT);
if (VT.is512BitVector()) {
assert(VT == MVT::v64i8 && "Unexpected element type!");
SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
}
return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
}
// XOP can shift v16i8 directly instead of as shift v8i16 + mask.
if (VT == MVT::v16i8 && Subtarget.hasXOP())
return SDValue();
if (Op.getOpcode() == ISD::SHL) {
// Make a large shift.
SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
ShiftAmt, DAG);
SHL = DAG.getBitcast(VT, SHL);
// Zero out the rightmost bits.
return DAG.getNode(ISD::AND, dl, VT, SHL,
DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
}
if (Op.getOpcode() == ISD::SRL) {
// Make a large shift.
SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
ShiftAmt, DAG);
SRL = DAG.getBitcast(VT, SRL);
// Zero out the leftmost bits.
return DAG.getNode(ISD::AND, dl, VT, SRL,
DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
}
if (Op.getOpcode() == ISD::SRA) {
// ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
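// Worked example for intuition (i8, ShiftAmt == 3): Mask == 0x10. For
// R == 0x80 (-128), lshr gives 0x10, xor gives 0x00 and sub gives 0xF0
// (-16), matching -128 >> 3 arithmetically; for non-negative R the xor and
// sub cancel, leaving the plain logical-shift result.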
SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
}
llvm_unreachable("Unknown shift opcode.");
}
return SDValue();
}
// If V is a splat value, return the source vector and splat index.
static SDValue IsSplatVector(SDValue V, int &SplatIdx, SelectionDAG &DAG) {
V = peekThroughEXTRACT_SUBVECTORs(V);
EVT VT = V.getValueType();
unsigned Opcode = V.getOpcode();
switch (Opcode) {
default: {
APInt UndefElts;
APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
if (DAG.isSplatValue(V, DemandedElts, UndefElts)) {
// Handle case where all demanded elements are UNDEF.
if (DemandedElts.isSubsetOf(UndefElts)) {
SplatIdx = 0;
return DAG.getUNDEF(VT);
}
SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
return V;
}
break;
}
case ISD::VECTOR_SHUFFLE: {
// Check if this is a shuffle node doing a splat.
// TODO - remove this and rely purely on SelectionDAG::isSplatValue,
// getTargetVShiftNode currently struggles without the splat source.
auto *SVN = cast<ShuffleVectorSDNode>(V);
if (!SVN->isSplat())
break;
int Idx = SVN->getSplatIndex();
int NumElts = V.getValueType().getVectorNumElements();
SplatIdx = Idx % NumElts;
return V.getOperand(Idx / NumElts);
}
}
return SDValue();
}
static SDValue GetSplatValue(SDValue V, const SDLoc &dl,
SelectionDAG &DAG) {
int SplatIdx;
if (SDValue SrcVector = IsSplatVector(V, SplatIdx, DAG))
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
SrcVector.getValueType().getScalarType(), SrcVector,
DAG.getIntPtrConstant(SplatIdx, dl));
return SDValue();
}
static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
unsigned Opcode = Op.getOpcode();
unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
if (SDValue BaseShAmt = GetSplatValue(Amt, dl, DAG)) {
if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
MVT EltVT = VT.getVectorElementType();
assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
else if (EltVT.bitsLT(MVT::i32))
BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
}
// vXi8 shifts - shift as v8i16 + mask result.
if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
(VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
VT == MVT::v64i8) &&
!Subtarget.hasXOP()) {
unsigned NumElts = VT.getVectorNumElements();
MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
// Create the mask using vXi16 shifts. For shift-rights we need to move
// the upper byte down before splatting the vXi8 mask.
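// Sketch: shifting an all-ones vXi16 by the byte amount leaves 0xFF << s in
// each low byte for SHL; for right shifts the extra VSRLI by 8 moves the
// corresponding 0xFF >> s byte down to byte 0. That byte is splatted and
// used to clear the bits that leaked across byte boundaries in the vXi16
// shift of R.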
SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
BaseShAmt, Subtarget, DAG);
if (Opcode != ISD::SHL)
BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
8, DAG);
BitMask = DAG.getBitcast(VT, BitMask);
BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
SmallVector<int, 64>(NumElts, 0));
SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
DAG.getBitcast(ExtVT, R), BaseShAmt,
Subtarget, DAG);
Res = DAG.getBitcast(VT, Res);
Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
if (Opcode == ISD::SRA) {
// ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
// SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
BaseShAmt, Subtarget, DAG);
SignMask = DAG.getBitcast(VT, SignMask);
Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
}
return Res;
}
}
}
// Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
Amt = Amt.getOperand(0);
unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
std::vector<SDValue> Vals(Ratio);
for (unsigned i = 0; i != Ratio; ++i)
Vals[i] = Amt.getOperand(i);
for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
for (unsigned j = 0; j != Ratio; ++j)
if (Vals[j] != Amt.getOperand(i + j))
return SDValue();
}
if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
}
return SDValue();
}
// Convert a shift/rotate left amount to a multiplication scale factor.
static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Amt.getSimpleValueType();
if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
(Subtarget.hasInt256() && VT == MVT::v16i16) ||
(!Subtarget.hasAVX512() && VT == MVT::v16i8)))
return SDValue();
if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
SmallVector<SDValue, 8> Elts;
MVT SVT = VT.getVectorElementType();
unsigned SVTBits = SVT.getSizeInBits();
APInt One(SVTBits, 1);
unsigned NumElems = VT.getVectorNumElements();
for (unsigned i = 0; i != NumElems; ++i) {
SDValue Op = Amt->getOperand(i);
if (Op->isUndef()) {
Elts.push_back(Op);
continue;
}
ConstantSDNode *ND = cast<ConstantSDNode>(Op);
APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
uint64_t ShAmt = C.getZExtValue();
if (ShAmt >= SVTBits) {
Elts.push_back(DAG.getUNDEF(SVT));
continue;
}
Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
}
return DAG.getBuildVector(VT, dl, Elts);
}
// If the target doesn't support variable shifts, use either FP conversion
// or integer multiplication to avoid shifting each element individually.
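// Sketch of the v4i32 FP trick below: 0x3f800000 is the bit pattern of
// 1.0f and its exponent field starts at bit 23, so adding (Amt << 23)
// yields the float 2^Amt; converting back with FP_TO_SINT recovers the
// integer scale 1 << Amt for in-range shift amounts.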
if (VT == MVT::v4i32) {
Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
DAG.getConstant(0x3f800000U, dl, VT));
Amt = DAG.getBitcast(MVT::v4f32, Amt);
return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
}
// AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
SDValue Z = DAG.getConstant(0, dl, VT);
SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
if (Subtarget.hasSSE41())
return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
DAG.getBitcast(VT, Hi),
{0, 2, 4, 6, 8, 10, 12, 14});
}
return SDValue();
}
static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
unsigned EltSizeInBits = VT.getScalarSizeInBits();
bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
unsigned Opc = Op.getOpcode();
unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
assert(VT.isVector() && "Custom lowering only for vector shifts!");
assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
return V;
if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
return V;
if (SupportedVectorVarShift(VT, Subtarget, Opc))
return Op;
// XOP has 128-bit variable logical/arithmetic shifts.
// +ve/-ve Amt = shift left/right.
if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
VT == MVT::v8i16 || VT == MVT::v16i8)) {
if (Opc == ISD::SRL || Opc == ISD::SRA) {
SDValue Zero = DAG.getConstant(0, dl, VT);
Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
}
if (Opc == ISD::SHL || Opc == ISD::SRL)
return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
if (Opc == ISD::SRA)
return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
}
// v2i64 vector logical shifts can efficiently avoid scalarization - do the
// shifts per-lane and then shuffle the partial results back together.
if (VT == MVT::v2i64 && Opc != ISD::SRA) {
// Splat the shift amounts so the scalar shifts above will catch it.
SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
}
// i64 vector arithmetic shift can be emulated with the transform:
// M = lshr(SIGN_MASK, Amt)
// ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
Opc == ISD::SRA) {
SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
R = DAG.getNode(ISD::XOR, dl, VT, R, M);
R = DAG.getNode(ISD::SUB, dl, VT, R, M);
return R;
}
// If possible, lower this shift as a sequence of two shifts by
// constant plus a BLENDing shuffle instead of scalarizing it.
// Example:
// (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
//
// Could be rewritten as:
// (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
//
// The advantage is that the two shifts from the example would be
// lowered as X86ISD::VSRLI nodes in parallel before blending.
if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
(VT == MVT::v16i16 && Subtarget.hasInt256()))) {
SDValue Amt1, Amt2;
unsigned NumElts = VT.getVectorNumElements();
SmallVector<int, 8> ShuffleMask;
for (unsigned i = 0; i != NumElts; ++i) {
SDValue A = Amt->getOperand(i);
if (A.isUndef()) {
ShuffleMask.push_back(SM_SentinelUndef);
continue;
}
if (!Amt1 || Amt1 == A) {
ShuffleMask.push_back(i);
Amt1 = A;
continue;
}
if (!Amt2 || Amt2 == A) {
ShuffleMask.push_back(i + NumElts);
Amt2 = A;
continue;
}
break;
}
// Only perform this blend if we can perform it without loading a mask.
if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
(VT != MVT::v16i16 ||
is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
(VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
canWidenShuffleElements(ShuffleMask))) {
auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
Cst2->getAPIntValue().ult(EltSizeInBits)) {
SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
Cst1->getZExtValue(), DAG);
SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
Cst2->getZExtValue(), DAG);
return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
}
}
}
// If possible, lower this packed shift into a vector multiply instead of
// expanding it into a sequence of scalar shifts.
if (Opc == ISD::SHL)
if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
// Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
// can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
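// Sketch: for 1 <= s <= 15, mulhu(x, 1 << (16 - s)) == (x * 2^(16-s)) >> 16
// == x >> s. The s == 0 case would need the scale 2^16, which wraps to 0 in
// i16, so it is handled by the ZAmt select below.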
if (Opc == ISD::SRL && ConstantAmt &&
(VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
SDValue Zero = DAG.getConstant(0, dl, VT);
SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
return DAG.getSelect(dl, VT, ZAmt, R, Res);
}
}
// Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
// can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
// TODO: Special case handling for shift by 0/1, really we can afford either
// of these cases in pre-SSE41/XOP/AVX512 but not both.
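// Sketch: for 2 <= s <= 15, mulhs(x, 1 << (16 - s)) == x >> s (arithmetic).
// s == 1 would need the scale 2^15, which is negative as an i16, and s == 0
// would need 2^16, so both are patched up via the Amt0/Amt1 selects below.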
if (Opc == ISD::SRA && ConstantAmt &&
(VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
!Subtarget.hasAVX512()) ||
DAG.isKnownNeverZero(Amt))) {
SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
SDValue Amt0 =
DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
SDValue Amt1 =
DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
SDValue Sra1 =
getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
Res = DAG.getSelect(dl, VT, Amt0, R, Res);
return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
}
}
// v4i32 Non Uniform Shifts.
// If the shift amount is constant we can shift each lane using the SSE2
// immediate shifts, else we need to zero-extend each lane to the lower i64
// and shift using the SSE2 variable shifts.
// The separate results can then be blended together.
if (VT == MVT::v4i32) {
SDValue Amt0, Amt1, Amt2, Amt3;
if (ConstantAmt) {
Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
} else {
// The SSE2 shifts use the lower i64 as the same shift amount for
// all lanes and the upper i64 is ignored. On AVX we're better off
// just zero-extending, but for SSE just duplicating the top 16-bits is
// cheaper and has the same effect for out of range values.
if (Subtarget.hasAVX()) {
SDValue Z = DAG.getConstant(0, dl, VT);
Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
} else {
SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
{4, 5, 6, 7, -1, -1, -1, -1});
Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
{0, 1, 1, 1, -1, -1, -1, -1});
Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
{2, 3, 3, 3, -1, -1, -1, -1});
Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
{0, 1, 1, 1, -1, -1, -1, -1});
Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
{2, 3, 3, 3, -1, -1, -1, -1});
}
}
unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
// Merge the shifted lane results optimally with/without PBLENDW.
// TODO - ideally shuffle combining would handle this.
if (Subtarget.hasSSE41()) {
SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
}
SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
}
// It's worth extending once and using the vXi16/vXi32 shifts for smaller
// types, but without AVX512 the extra overheads to get from vXi8 to vXi32
// make the existing SSE solution better.
// NOTE: We honor the preferred vector width before promoting to 512-bits.
if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
(Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
(Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
(Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
(Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
"Unexpected vector type");
MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
R = DAG.getNode(ExtOpc, dl, ExtVT, R);
Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
return DAG.getNode(ISD::TRUNCATE, dl, VT,
DAG.getNode(Opc, dl, ExtVT, R, Amt));
}
// Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
// extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
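// Sketch: with scale == 1 << (8 - s), extending each byte x to i16 and
// computing (x_ext * scale) >> 8 leaves bits [s, s+7] of x_ext in the low
// byte, i.e. x >> s - logical for the zero-extended path and arithmetic for
// the sign-extended path.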
if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
(VT == MVT::v16i8 || VT == MVT::v64i8 ||
(VT == MVT::v32i8 && Subtarget.hasInt256())) &&
!Subtarget.hasXOP()) {
int NumElts = VT.getVectorNumElements();
SDValue Cst8 = DAG.getConstant(8, dl, MVT::i8);
// Extend constant shift amount to vXi16 (it doesn't matter if the type
// isn't legal).
MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
"Constant build vector expected");
if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
: DAG.getZExtOrTrunc(R, dl, ExVT);
R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
return DAG.getZExtOrTrunc(R, dl, VT);
}
SmallVector<SDValue, 16> LoAmt, HiAmt;
for (int i = 0; i != NumElts; i += 16) {
for (int j = 0; j != 8; ++j) {
LoAmt.push_back(Amt.getOperand(i + j));
HiAmt.push_back(Amt.getOperand(i + j + 8));
}
}
MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
}
if (VT == MVT::v16i8 ||
(VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
(VT == MVT::v64i8 && Subtarget.hasBWI())) {
MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
if (VT.is512BitVector()) {
// On AVX512BW targets we make use of the fact that VSELECT lowers
// to a masked blend which selects bytes based just on the sign bit
// extracted to a mask.
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
ISD::SETGT);
return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
} else if (Subtarget.hasSSE41()) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
}
// On pre-SSE41 targets we test for the sign bit by comparing to
// zero - a negative value will set all bits of the lanes to true
// and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
SDValue Z = DAG.getConstant(0, dl, SelVT);
SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
return DAG.getSelect(dl, SelVT, C, V0, V1);
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 5;
// We can safely do this using i16 shifts as we're only interested in
// the 3 lower bits of each byte.
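// For intuition: after the shift by 5, bit 2 of the original amount sits in
// each byte's sign bit, so the first select keys off the "shift by 4" step;
// every subsequent a += a moves the next lower amount bit into the sign
// position for the following select.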
Amt = DAG.getBitcast(ExtVT, Amt);
Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
Amt = DAG.getBitcast(VT, Amt);
if (Opc == ISD::SHL || Opc == ISD::SRL) {
// r = VSELECT(r, shift(r, 4), a);
SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// r = VSELECT(r, shift(r, 2), a);
M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// return VSELECT(r, shift(r, 1), a);
M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
return R;
}
if (Opc == ISD::SRA) {
// For SRA we need to unpack each byte to the higher byte of a i16 vector
// so we can correctly sign extend. We don't care what happens to the
// lower byte.
SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
ALo = DAG.getBitcast(ExtVT, ALo);
AHi = DAG.getBitcast(ExtVT, AHi);
RLo = DAG.getBitcast(ExtVT, RLo);
RHi = DAG.getBitcast(ExtVT, RHi);
// r = VSELECT(r, shift(r, 4), a);
SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// a += a
ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
// r = VSELECT(r, shift(r, 2), a);
MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// a += a
ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
// r = VSELECT(r, shift(r, 1), a);
MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// Logical shift the result back to the lower byte, leaving a zero upper
// byte meaning that we can safely pack with PACKUSWB.
RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
}
if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
MVT ExtVT = MVT::v8i32;
SDValue Z = DAG.getConstant(0, dl, VT);
SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
ALo = DAG.getBitcast(ExtVT, ALo);
AHi = DAG.getBitcast(ExtVT, AHi);
RLo = DAG.getBitcast(ExtVT, RLo);
RHi = DAG.getBitcast(ExtVT, RHi);
SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
}
if (VT == MVT::v8i16) {
// If we have a constant shift amount, the non-SSE41 path is best as
// avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
bool UseSSE41 = Subtarget.hasSSE41() &&
!ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
if (UseSSE41) {
MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
V0 = DAG.getBitcast(ExtVT, V0);
V1 = DAG.getBitcast(ExtVT, V1);
Sel = DAG.getBitcast(ExtVT, Sel);
return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
}
// On pre-SSE41 targets we splat the sign bit - a negative value will
// set all bits of the lanes to true and VSELECT uses that in
// its OR(AND(V0,C),AND(V1,~C)) lowering.
SDValue C =
getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
return DAG.getSelect(dl, VT, C, V0, V1);
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 12;
if (UseSSE41) {
// On SSE41 targets we need to replicate the shift mask in both
// bytes for PBLENDVB.
Amt = DAG.getNode(
ISD::OR, dl, VT,
getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
} else {
Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
}
// r = VSELECT(r, shift(r, 8), a);
SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// r = VSELECT(r, shift(r, 4), a);
M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// r = VSELECT(r, shift(r, 2), a);
M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// return VSELECT(r, shift(r, 1), a);
M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
R = SignBitSelect(Amt, M, R);
return R;
}
// Decompose 256-bit shifts into 128-bit shifts.
if (VT.is256BitVector())
return split256IntArith(Op, DAG);
return SDValue();
}
static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.isVector() && "Custom lowering only for vector rotates!");
SDLoc DL(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
unsigned Opcode = Op.getOpcode();
unsigned EltSizeInBits = VT.getScalarSizeInBits();
int NumElts = VT.getVectorNumElements();
// Check for constant splat rotation amount.
APInt UndefElts;
SmallVector<APInt, 32> EltBits;
int CstSplatIndex = -1;
if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
for (int i = 0; i != NumElts; ++i)
if (!UndefElts[i]) {
if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
CstSplatIndex = i;
continue;
}
CstSplatIndex = -1;
break;
}
// AVX512 implicitly uses modulo rotation amounts.
if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
// Attempt to rotate by immediate.
if (0 <= CstSplatIndex) {
unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
return DAG.getNode(Op, DL, VT, R,
DAG.getConstant(RotateAmt, DL, MVT::i8));
}
// Else, fall-back on VPROLV/VPRORV.
return Op;
}
assert((Opcode == ISD::ROTL) && "Only ROTL supported");
// XOP has 128-bit vector variable + immediate rotates.
// +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
// XOP implicitly uses modulo rotation amounts.
if (Subtarget.hasXOP()) {
if (VT.is256BitVector())
return split256IntArith(Op, DAG);
assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
// Attempt to rotate by immediate.
if (0 <= CstSplatIndex) {
uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
DAG.getConstant(RotateAmt, DL, MVT::i8));
}
// Use general rotate by variable (per-element).
return Op;
}
// Split 256-bit integers on pre-AVX2 targets.
if (VT.is256BitVector() && !Subtarget.hasAVX2())
return split256IntArith(Op, DAG);
assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
Subtarget.hasAVX2())) &&
"Only vXi32/vXi16/vXi8 vector rotates supported");
// Rotate by a uniform constant - expand back to shifts.
if (0 <= CstSplatIndex)
return SDValue();
bool IsSplatAmt = DAG.isSplatValue(Amt);
// v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
// the amount bit.
if (EltSizeInBits == 8 && !IsSplatAmt) {
if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
return SDValue();
// We don't need ModuloAmt here as we just peek at individual bits.
MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
if (Subtarget.hasSSE41()) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
}
// On pre-SSE41 targets we test for the sign bit by comparing to
// zero - a negative value will set all bits of the lanes to true
// and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
SDValue Z = DAG.getConstant(0, DL, SelVT);
SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
return DAG.getSelect(DL, SelVT, C, V0, V1);
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 5;
// We can safely do this using i16 shifts as we're only interested in
// the 3 lower bits of each byte.
Amt = DAG.getBitcast(ExtVT, Amt);
Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
Amt = DAG.getBitcast(VT, Amt);
// r = VSELECT(r, rot(r, 4), a);
SDValue M;
M = DAG.getNode(
ISD::OR, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
R = SignBitSelect(VT, Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
// r = VSELECT(r, rot(r, 2), a);
M = DAG.getNode(
ISD::OR, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
R = SignBitSelect(VT, Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
// return VSELECT(r, rot(r, 1), a);
M = DAG.getNode(
ISD::OR, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
return SignBitSelect(VT, Amt, M, R);
}
// ISD::ROT* uses modulo rotate amounts.
Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
DAG.getConstant(EltSizeInBits - 1, DL, VT));
bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
// Fallback for splats + all supported variable shifts.
// Fallback for non-constant AVX2 vXi16 as well.
if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
}
// As with shifts, convert the rotation amount to a multiplication factor.
SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
assert(Scale && "Failed to convert ROTL amount to scale");
// v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
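// Sketch: with Scale == 1 << s (s already reduced modulo 16), the low half
// of x * Scale is x << s and mulhu(x, Scale) is x >> (16 - s), so OR-ing
// the two reconstructs rotl(x, s); for s == 0 the MULHU term is simply 0.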
if (EltSizeInBits == 16) {
SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
}
// v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
// to v2i64 results at a time. The upper 32-bits contain the wrapped bits
// that can then be OR'd with the lower 32-bits.
assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
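// Sketch: each 32x32->64 product x * 2^s holds (x << s) in its low 32 bits
// and the wrapped-around x >> (32 - s) bits in its high 32 bits, so the
// {0, 4, 2, 6} shuffle below gathers the shifted halves and {1, 5, 3, 7}
// the wrapped halves before the final OR.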
static const int OddMask[] = {1, -1, 3, -1};
SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
DAG.getBitcast(MVT::v2i64, R),
DAG.getBitcast(MVT::v2i64, Scale));
SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
DAG.getBitcast(MVT::v2i64, R13),
DAG.getBitcast(MVT::v2i64, Scale13));
Res02 = DAG.getBitcast(VT, Res02);
Res13 = DAG.getBitcast(VT, Res13);
return DAG.getNode(ISD::OR, DL, VT,
DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
}
/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
unsigned OpWidth = MemType->getPrimitiveSizeInBits();
if (OpWidth == 64)
return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
else if (OpWidth == 128)
return Subtarget.hasCmpxchg16b();
else
return false;
}
bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
return needsCmpXchgNb(SI->getValueOperand()->getType());
}
// Note: this turns large loads into lock cmpxchg8b/16b.
// FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
auto PTy = cast<PointerType>(LI->getPointerOperandType());
return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
}
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
Type *MemType = AI->getType();
// If the operand is too big, we must see if cmpxchg8/16b is available
// and default to library calls otherwise.
if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
}
AtomicRMWInst::BinOp Op = AI->getOperation();
switch (Op) {
default:
llvm_unreachable("Unknown atomic operation");
case AtomicRMWInst::Xchg:
case AtomicRMWInst::Add:
case AtomicRMWInst::Sub:
// It's better to use xadd, xsub or xchg for these in all cases.
return AtomicExpansionKind::None;
case AtomicRMWInst::Or:
case AtomicRMWInst::And:
case AtomicRMWInst::Xor:
// If the atomicrmw's result isn't actually used, we can just add a "lock"
// prefix to a normal instruction for these operations.
return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
case AtomicRMWInst::Nand:
case AtomicRMWInst::Max:
case AtomicRMWInst::Min:
case AtomicRMWInst::UMax:
case AtomicRMWInst::UMin:
// These always require a non-trivial set of data operations on x86. We must
// use a cmpxchg loop.
return AtomicExpansionKind::CmpXChg;
}
}
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
Type *MemType = AI->getType();
// Accesses larger than the native width are turned into cmpxchg/libcalls, so
// there is no benefit in turning such RMWs into loads, and it is actually
// harmful as it introduces a mfence.
if (MemType->getPrimitiveSizeInBits() > NativeWidth)
return nullptr;
auto Builder = IRBuilder<>(AI);
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
auto SSID = AI->getSyncScopeID();
// We must restrict the ordering to avoid generating loads with Release or
// ReleaseAcquire orderings.
auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
auto Ptr = AI->getPointerOperand();
// Before the load we need a fence. Here is an example lifted from
// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
// is required:
// Thread 0:
// x.store(1, relaxed);
// r1 = y.fetch_add(0, release);
// Thread 1:
// y.fetch_add(42, acquire);
// r2 = x.load(relaxed);
// r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
// lowered to just a load without a fence. An mfence flushes the store buffer,
// making the optimization clearly correct.
// FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
// otherwise, we might be able to be more aggressive on relaxed idempotent
// rmw. In practice, they do not look useful, so we don't try to be
// especially clever.
if (SSID == SyncScope::SingleThread)
// FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
// the IR level, so we must wrap it in an intrinsic.
return nullptr;
if (!Subtarget.hasMFence())
// FIXME: it might make sense to use a locked operation here but on a
// different cache-line to prevent cache-line bouncing. In practice it
// is probably a small win, and x86 processors without mfence are rare
// enough that we do not bother.
return nullptr;
Function *MFence =
llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
Builder.CreateCall(MFence, {});
// Finally we can emit the atomic load.
LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
AI->getType()->getPrimitiveSizeInBits());
Loaded->setAtomic(Order, SSID);
AI->replaceAllUsesWith(Loaded);
AI->eraseFromParent();
return Loaded;
}
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
FenceSSID == SyncScope::System) {
if (Subtarget.hasMFence())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
SDValue Chain = Op.getOperand(0);
SDValue Zero = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Ops[] = {
DAG.getRegister(X86::ESP, MVT::i32), // Base
DAG.getTargetConstant(1, dl, MVT::i8), // Scale
DAG.getRegister(0, MVT::i32), // Index
DAG.getTargetConstant(0, dl, MVT::i32), // Disp
DAG.getRegister(0, MVT::i32), // Segment.
Zero,
Chain
};
SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, dl, MVT::Other, Ops);
return SDValue(Res, 0);
}
// MEMBARRIER is a compiler barrier; it codegens to a no-op.
return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT T = Op.getSimpleValueType();
SDLoc DL(Op);
unsigned Reg = 0;
unsigned size = 0;
switch(T.SimpleTy) {
default: llvm_unreachable("Invalid value type!");
case MVT::i8: Reg = X86::AL; size = 1; break;
case MVT::i16: Reg = X86::AX; size = 2; break;
case MVT::i32: Reg = X86::EAX; size = 4; break;
case MVT::i64:
assert(Subtarget.is64Bit() && "Node not type legal!");
Reg = X86::RAX; size = 8;
break;
}
SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
Op.getOperand(2), SDValue());
SDValue Ops[] = { cpIn.getValue(0),
Op.getOperand(1),
Op.getOperand(3),
DAG.getTargetConstant(size, DL, MVT::i8),
cpIn.getValue(1) };
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
Ops, T, MMO);
SDValue cpOut =
DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
MVT::i32, cpOut.getValue(2));
SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
return SDValue();
}
// Create MOVMSKB, taking into account whether we need to split for AVX1.
static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT InVT = V.getSimpleValueType();
if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
DAG.getConstant(16, DL, MVT::i8));
return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
}
return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
}
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
// Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
// half to v32i1 and concatenating the result.
if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
assert(Subtarget.hasBWI() && "Expected BWI target");
SDLoc dl(Op);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
DAG.getIntPtrConstant(0, dl));
Lo = DAG.getBitcast(MVT::v32i1, Lo);
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
DAG.getIntPtrConstant(1, dl));
Hi = DAG.getBitcast(MVT::v32i1, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
}
// Custom splitting for BWI types when AVX512F is available but BWI isn't.
if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
SDLoc dl(Op);
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
EVT CastVT = MVT::getVectorVT(DstVT.getVectorElementType(),
DstVT.getVectorNumElements() / 2);
Lo = DAG.getBitcast(CastVT, Lo);
Hi = DAG.getBitcast(CastVT, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
}
// Use MOVMSK for vector to scalar conversion to prevent scalarization.
if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
SDLoc DL(Op);
SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
V = getPMOVMSKB(DL, V, DAG, Subtarget);
return DAG.getZExtOrTrunc(V, DL, DstVT);
}
if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
SrcVT == MVT::i64) {
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
if (DstVT != MVT::f64 && DstVT != MVT::i64 &&
!(DstVT == MVT::x86mmx && SrcVT.isVector()))
// This conversion needs to be expanded.
return SDValue();
SDLoc dl(Op);
if (SrcVT.isVector()) {
// Widen the input vector in the case of MVT::v2i32.
// Example: from MVT::v2i32 to MVT::v4i32.
MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
SrcVT.getVectorNumElements() * 2);
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
DAG.getUNDEF(SrcVT));
} else {
assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
"Unexpected source type in LowerBITCAST");
Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
}
MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
if (DstVT == MVT::x86mmx)
return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
DAG.getIntPtrConstant(0, dl));
}
assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() &&
Subtarget.hasMMX() && "Unexpected custom BITCAST");
assert((DstVT == MVT::i64 ||
(DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
"Unexpected custom BITCAST");
// i64 <=> MMX conversions are Legal.
if (SrcVT==MVT::i64 && DstVT.isVector())
return Op;
if (DstVT==MVT::i64 && SrcVT.isVector())
return Op;
// MMX <=> MMX conversions are Legal.
if (SrcVT.isVector() && DstVT.isVector())
return Op;
// All other conversions need to be expanded.
return SDValue();
}
/// Compute the horizontal sum of bytes in V for the elements of VT.
///
/// Requires V to be a byte vector and VT to be an integer vector type with
/// wider elements than V's type. The width of the elements of VT determines
/// how many bytes of V are summed horizontally to produce each element of the
/// result.
static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc DL(V);
MVT ByteVecVT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
"Expected value to have byte element type.");
assert(EltVT != MVT::i8 &&
"Horizontal byte sum only makes sense for wider elements!");
unsigned VecSize = VT.getSizeInBits();
assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
// The PSADBW instruction horizontally adds all bytes and leaves the result in
// i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
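// For example, for v2i64 each 8-byte chunk of per-byte counts [c0..c7] is
// reduced by PSADBW against zero to the single i64 value c0+c1+...+c7, which
// is the pop count of the corresponding 64-bit element.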
if (EltVT == MVT::i64) {
SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
return DAG.getBitcast(VT, V);
}
if (EltVT == MVT::i32) {
// We unpack the low half and high half into i32s interleaved with zeros so
// that we can use PSADBW to horizontally sum them. The most useful part of
// this is that it lines up the results of two PSADBW instructions to be
// two v2i64 vectors which concatenated are the 4 population counts. We can
// then use PACKUSWB to shrink and concatenate them into a v4i32 again.
SDValue Zeros = DAG.getConstant(0, DL, VT);
SDValue V32 = DAG.getBitcast(VT, V);
SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
// Do the horizontal sums into two v2i64s.
Zeros = DAG.getConstant(0, DL, ByteVecVT);
MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
DAG.getBitcast(ByteVecVT, Low), Zeros);
High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
DAG.getBitcast(ByteVecVT, High), Zeros);
// Merge them together.
MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
DAG.getBitcast(ShortVecVT, Low),
DAG.getBitcast(ShortVecVT, High));
return DAG.getBitcast(VT, V);
}
// The only element type left is i16.
assert(EltVT == MVT::i16 && "Unknown how to handle type");
// To obtain pop count for each i16 element starting from the pop count for
// i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
// right by 8. It is important to shift as i16s because an i8 vector shift
// isn't directly supported.
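// Worked example for one i16 lane holding per-byte counts 0x0503 (low-byte
// count 3, high-byte count 5): shifting left by 8 gives 0x0300, adding as i8s
// gives 0x0803, and shifting right by 8 as an i16 gives 0x0008 = 3 + 5.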
SDValue ShifterV = DAG.getConstant(8, DL, VT);
SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
DAG.getBitcast(ByteVecVT, V));
return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
}
static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
int NumElts = VT.getVectorNumElements();
(void)EltVT;
assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
// Implement a lookup table in register by using an algorithm based on:
// http://wm.ite.pl/articles/sse-popcount.html
//
// The general idea is that every nibble in the input vector is an index into
// an in-register pre-computed pop count table. We split the input vector into
// two new ones: (1) a vector with only the shifted-right higher nibbles of
// each byte and (2) a vector with only the lower nibbles (higher ones masked
// out) of each byte. PSHUFB is used separately with both to index the
// in-register table. Finally, both are added, giving an i8 vector where each
// element contains the pop count for the corresponding input byte.
const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
/* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
/* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
/* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
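// For example, for an input byte of 0xB7 the low nibble 0x7 maps to LUT[0x7]
// = 3 and the high nibble 0xB maps to LUT[0xb] = 3, giving a pop count of 6.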
SmallVector<SDValue, 64> LUTVec;
for (int i = 0; i < NumElts; ++i)
LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
SDValue M0F = DAG.getConstant(0x0F, DL, VT);
// High nibbles
SDValue FourV = DAG.getConstant(4, DL, VT);
SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
// Low nibbles
SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
// The nibble vectors are used as shuffle masks that index elements in the
// LUT. After counting low and high nibbles, add the vectors to obtain the
// final pop count per i8 element.
SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
}
// Please ensure that any codegen change from LowerVectorCTPOP is reflected in
// updated cost models in X86TTIImpl::getIntrinsicInstrCost.
static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
"Unknown CTPOP type to handle");
SDLoc DL(Op.getNode());
SDValue Op0 = Op.getOperand(0);
// TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
if (Subtarget.hasVPOPCNTDQ()) {
unsigned NumElems = VT.getVectorNumElements();
assert((VT.getVectorElementType() == MVT::i8 ||
VT.getVectorElementType() == MVT::i16) && "Unexpected type");
if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
}
}
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntUnary(Op, DAG);
// Decompose 512-bit ops into smaller 256-bit ops.
if (VT.is512BitVector() && !Subtarget.hasBWI())
return Lower512IntUnary(Op, DAG);
// For element types greater than i8, do vXi8 pop counts and a bytesum.
if (VT.getScalarType() != MVT::i8) {
MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
}
// We can't use the fast LUT approach, so fall back on LegalizeDAG.
if (!Subtarget.hasSSSE3())
return SDValue();
return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
}
static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getSimpleValueType().isVector() &&
"We only do custom lowering for vector population count.");
return LowerVectorCTPOP(Op, Subtarget, DAG);
}
static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
SDLoc DL(Op);
// For scalars, it's still beneficial to transfer to/from the SIMD unit to
// perform the BITREVERSE.
if (!VT.isVector()) {
MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
DAG.getIntPtrConstant(0, DL));
}
int NumElts = VT.getVectorNumElements();
int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector())
return Lower256IntUnary(Op, DAG);
assert(VT.is128BitVector() &&
"Only 128-bit vector bitreverse lowering supported.");
// VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
// perform the BSWAP in the shuffle.
// It's best to shuffle using the second operand, as this will implicitly allow
// memory folding for multiple vectors.
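// Each mask byte is (2 << 5) | source byte index: indices 16-31 select bytes
// from the second source operand, and op value 2 in bits 5-7 asks VPPERM to
// bit-reverse the selected byte.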
SmallVector<SDValue, 16> MaskElts;
for (int i = 0; i != NumElts; ++i) {
for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
int PermuteByte = SourceByte | (2 << 5);
MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
}
}
SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
SDValue Res = DAG.getBitcast(MVT::v16i8, In);
Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
Res, Mask);
return DAG.getBitcast(VT, Res);
}
static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (Subtarget.hasXOP() && !VT.is512BitVector())
return LowerBITREVERSE_XOP(Op, DAG);
assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
SDValue In = Op.getOperand(0);
SDLoc DL(Op);
unsigned NumElts = VT.getVectorNumElements();
assert(VT.getScalarType() == MVT::i8 &&
"Only byte vector BITREVERSE supported");
// Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntUnary(Op, DAG);
// Perform BITREVERSE using PSHUFB lookups. Each byte is split into
// two nibbles, and a PSHUFB lookup finds the bit reverse of each
// 0-15 value (moved to the other nibble).
SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
const int LoLUT[16] = {
/* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
/* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
/* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
/* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
const int HiLUT[16] = {
/* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
/* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
/* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
/* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
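// For example, bit-reversing 0x2D: LoLUT[0xd] = 0xB0, HiLUT[0x2] = 0x04, and
// OR'ing them gives 0xB4, which is 0x2D with its bits reversed.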
SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
for (unsigned i = 0; i < NumElts; ++i) {
LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
}
SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
}
static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
unsigned NewOpc = 0;
switch (N->getOpcode()) {
case ISD::ATOMIC_LOAD_ADD:
NewOpc = X86ISD::LADD;
break;
case ISD::ATOMIC_LOAD_SUB:
NewOpc = X86ISD::LSUB;
break;
case ISD::ATOMIC_LOAD_OR:
NewOpc = X86ISD::LOR;
break;
case ISD::ATOMIC_LOAD_XOR:
NewOpc = X86ISD::LXOR;
break;
case ISD::ATOMIC_LOAD_AND:
NewOpc = X86ISD::LAND;
break;
default:
llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
}
MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
return DAG.getMemIntrinsicNode(
NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
{N->getOperand(0), N->getOperand(1), N->getOperand(2)},
/*MemVT=*/N->getSimpleValueType(0), MMO);
}
/// Lower atomic_load_ops into LOCK-prefixed operations.
static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Chain = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
unsigned Opc = N->getOpcode();
MVT VT = N->getSimpleValueType(0);
SDLoc DL(N);
// We can lower atomic_load_add into LXADD. However, any other atomicrmw op
// can only be lowered when the result is unused. They should have already
// been transformed into a cmpxchg loop in AtomicExpand.
if (N->hasAnyUseOfValue(0)) {
// Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
// select LXADD if LOCK_SUB can't be selected.
if (Opc == ISD::ATOMIC_LOAD_SUB) {
AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
RHS, AN->getMemOperand());
}
assert(Opc == ISD::ATOMIC_LOAD_ADD &&
"Used AtomicRMW ops other than Add should have been expanded!");
return N;
}
SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
// RAUW the chain, but don't worry about the result, as it's unused.
assert(!N->hasAnyUseOfValue(0));
DAG.ReplaceAllUsesOfValueWith(N.getValue(1), LockOp.getValue(1));
return SDValue();
}
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
SDNode *Node = Op.getNode();
SDLoc dl(Node);
EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
// Convert seq_cst store -> xchg
// Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
// FIXME: On 32-bit, store -> fist or movq would be more efficient
// (The only way to get a 16-byte store is cmpxchg16b)
// FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
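// An XCHG with a memory operand carries implicit LOCK semantics and thus a
// full fence, which a plain MOV store does not provide for seq_cst ordering.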
if (cast<AtomicSDNode>(Node)->getOrdering() ==
AtomicOrdering::SequentiallyConsistent ||
!DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
Node->getOperand(0),
Node->getOperand(1), Node->getOperand(2),
cast<AtomicSDNode>(Node)->getMemOperand());
return Swap.getValue(1);
}
// Other atomic stores have a simple pattern.
return Op;
}
static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
SDNode *N = Op.getNode();
MVT VT = N->getSimpleValueType(0);
// Let legalize expand this if it isn't a legal type yet.
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
SDLoc DL(N);
// Set the carry flag.
SDValue Carry = Op.getOperand(2);
EVT CarryVT = Carry.getValueType();
APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
Carry, DAG.getConstant(NegOne, DL, CarryVT));
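// Adding all-ones to the incoming carry value sets CF exactly when that value
// is nonzero, so the flags result of this ADD feeds the ADC/SBB below.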
unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
Op.getOperand(1), Carry.getValue(1));
SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
if (N->getValueType(1) == MVT::i1)
SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
// For MacOSX, we want to call an alternative entry point: __sincos_stret,
// which returns the values as { float, float } (in XMM0) or
// { double, double } (which is returned in XMM0, XMM1).
SDLoc dl(Op);
SDValue Arg = Op.getOperand(0);
EVT ArgVT = Arg.getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Arg;
Entry.Ty = ArgTy;
Entry.IsSExt = false;
Entry.IsZExt = false;
Args.push_back(Entry);
bool isF64 = ArgVT == MVT::f64;
// Only optimize x86_64 for now. i386 is a bit messy. For f32,
// the small struct {f32, f32} is returned in (eax, edx). For f64,
// the results are returned via SRet in memory.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
const char *LibcallName = TLI.getLibcallName(LC);
SDValue Callee =
DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
: (Type *)VectorType::get(ArgTy, 4);
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
.setChain(DAG.getEntryNode())
.setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
if (isF64)
// Returned in xmm0 and xmm1.
return CallResult.first;
// Returned in bits 0:31 and 32:63 of xmm0.
SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
CallResult.first, DAG.getIntPtrConstant(0, dl));
SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
CallResult.first, DAG.getIntPtrConstant(1, dl));
SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
}
/// Widen a vector input to a vector of NVT. The
/// input vector must have the same element type as NVT.
static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
bool FillWithZeroes = false) {
// Check if InOp already has the right width.
MVT InVT = InOp.getSimpleValueType();
if (InVT == NVT)
return InOp;
if (InOp.isUndef())
return DAG.getUNDEF(NVT);
assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
"input and widen element type must match");
unsigned InNumElts = InVT.getVectorNumElements();
unsigned WidenNumElts = NVT.getVectorNumElements();
assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
"Unexpected request for vector widening");
SDLoc dl(InOp);
if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
InOp.getNumOperands() == 2) {
SDValue N1 = InOp.getOperand(1);
if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
N1.isUndef()) {
InOp = InOp.getOperand(0);
InVT = InOp.getSimpleValueType();
InNumElts = InVT.getVectorNumElements();
}
}
if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
SmallVector<SDValue, 16> Ops;
for (unsigned i = 0; i < InNumElts; ++i)
Ops.push_back(InOp.getOperand(i));
EVT EltVT = InOp.getOperand(0).getValueType();
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
DAG.getUNDEF(EltVT);
for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
Ops.push_back(FillVal);
return DAG.getBuildVector(NVT, dl, Ops);
}
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
DAG.getUNDEF(NVT);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
InOp, DAG.getIntPtrConstant(0, dl));
}
static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX512() &&
"MGATHER/MSCATTER are supported on AVX-512 arch only");
MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
SDValue Src = N->getValue();
MVT VT = Src.getSimpleValueType();
assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
SDLoc dl(Op);
SDValue Scale = N->getScale();
SDValue Index = N->getIndex();
SDValue Mask = N->getMask();
SDValue Chain = N->getChain();
SDValue BasePtr = N->getBasePtr();
if (VT == MVT::v2f32) {
assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
// If the index is v2i64 and we have VLX we can use xmm for data and index.
if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
DAG.getUNDEF(MVT::v2f32));
SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
return SDValue(NewScatter.getNode(), 1);
}
return SDValue();
}
if (VT == MVT::v2i32) {
assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
DAG.getUNDEF(MVT::v2i32));
// If the index is v2i64 and we have VLX we can use xmm for data and index.
if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
return SDValue(NewScatter.getNode(), 1);
}
// Custom widen all the operands to avoid promotion.
EVT NewIndexVT = EVT::getVectorVT(
*DAG.getContext(), Index.getValueType().getVectorElementType(), 4);
Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index,
DAG.getUNDEF(Index.getValueType()));
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
DAG.getConstant(0, dl, MVT::v2i1));
SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), N->getMemoryVT(), dl,
Ops, N->getMemOperand());
}
MVT IndexVT = Index.getSimpleValueType();
MVT MaskVT = Mask.getSimpleValueType();
// If the index is v2i32, we're being called by type legalization and we
// should just let the default handling take care of it.
if (IndexVT == MVT::v2i32)
return SDValue();
// If we don't have VLX and neither the data nor the index is 512 bits, we
// need to widen until one is.
if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
!Index.getSimpleValueType().is512BitVector()) {
// Determine how much we need to widen by to get a 512-bit type.
unsigned Factor = std::min(512/VT.getSizeInBits(),
512/IndexVT.getSizeInBits());
unsigned NumElts = VT.getVectorNumElements() * Factor;
VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
Src = ExtendToType(Src, VT, DAG);
Index = ExtendToType(Index, IndexVT, DAG);
Mask = ExtendToType(Mask, MaskVT, DAG, true);
}
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
return SDValue(NewScatter.getNode(), 1);
}
static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
MVT VT = Op.getSimpleValueType();
MVT ScalarVT = VT.getScalarType();
SDValue Mask = N->getMask();
SDLoc dl(Op);
assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
"Expanding masked load is supported on AVX-512 target only!");
assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
"Expanding masked load is supported for 32 and 64-bit types only!");
assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
"Cannot lower masked load op.");
assert((ScalarVT.getSizeInBits() >= 32 ||
(Subtarget.hasBWI() &&
(ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
"Unsupported masked load op.");
// This operation is legal for targets with VLX, but without
// VLX the vector should be widened to 512 bits.
unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
SDValue PassThru = ExtendToType(N->getPassThru(), WideDataVT, DAG);
// Mask element has to be i1.
assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
"Unexpected mask type");
MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
N->getBasePtr(), Mask, PassThru,
N->getMemoryVT(), N->getMemOperand(),
N->getExtensionType(),
N->isExpandingLoad());
SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
NewLoad.getValue(0),
DAG.getIntPtrConstant(0, dl));
SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
return DAG.getMergeValues(RetOps, dl);
}
static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
SDValue DataToStore = N->getValue();
MVT VT = DataToStore.getSimpleValueType();
MVT ScalarVT = VT.getScalarType();
SDValue Mask = N->getMask();
SDLoc dl(Op);
assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
"Compressing masked store is supported on AVX-512 target only!");
assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
"Compressing masked store is supported for 32 and 64-bit types only!");
assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
"Cannot lower masked store op.");
assert((ScalarVT.getSizeInBits() >= 32 ||
(Subtarget.hasBWI() &&
(ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
"Unsupported masked store op.");
// This operation is legal for targets with VLX, but without
// VLX the vector should be widened to 512 bits.
unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
// Mask element has to be i1.
assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
"Unexpected mask type");
MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
Mask, N->getMemoryVT(), N->getMemOperand(),
N->isTruncatingStore(), N->isCompressingStore());
}
static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX2() &&
"MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
SDValue Index = N->getIndex();
SDValue Mask = N->getMask();
SDValue PassThru = N->getPassThru();
MVT IndexVT = Index.getSimpleValueType();
MVT MaskVT = Mask.getSimpleValueType();
assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
// If the index is v2i32, we're being called by type legalization.
if (IndexVT == MVT::v2i32)
return SDValue();
// If we don't have VLX and neither the passthru nor the index is 512 bits, we
// need to widen until one is.
MVT OrigVT = VT;
if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
!IndexVT.is512BitVector()) {
// Determine how much we need to widen by to get a 512-bit type.
unsigned Factor = std::min(512/VT.getSizeInBits(),
512/IndexVT.getSizeInBits());
unsigned NumElts = VT.getVectorNumElements() * Factor;
VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
PassThru = ExtendToType(PassThru, VT, DAG);
Index = ExtendToType(Index, IndexVT, DAG);
Mask = ExtendToType(Mask, MaskVT, DAG, true);
}
SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
N->getScale() };
SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
N->getMemOperand());
SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
NewGather, DAG.getIntPtrConstant(0, dl));
return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
}
SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
SelectionDAG &DAG) const {
// TODO: Eventually, the lowering of these nodes should be informed by or
// deferred to the GC strategy for the function in which they appear. For
// now, however, they must be lowered to something. Since they are logically
// no-ops in the case of a null GC strategy (or a GC strategy which does not
// require special handling for these nodes), lower them as literal NOOPs for
// the time being.
SmallVector<SDValue, 2> Ops;
Ops.push_back(Op.getOperand(0));
if (Op->getGluedNode())
Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
SDLoc OpDL(Op);
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
return NOOP;
}
SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
SelectionDAG &DAG) const {
// TODO: Eventually, the lowering of these nodes should be informed by or
// deferred to the GC strategy for the function in which they appear. For
// now, however, they must be lowered to something. Since they are logically
// no-ops in the case of a null GC strategy (or a GC strategy which does not
// require special handling for these nodes), lower them as literal NOOPs for
// the time being.
SmallVector<SDValue, 2> Ops;
Ops.push_back(Op.getOperand(0));
if (Op->getGluedNode())
Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
SDLoc OpDL(Op);
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
return NOOP;
}
/// Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
return LowerCMP_SWAP(Op, Subtarget, DAG);
case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
case ISD::VSELECT: return LowerVSELECT(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::SHL_PARTS:
case ISD::SRA_PARTS:
case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
case ISD::FSHL:
case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
case ISD::ZERO_EXTEND_VECTOR_INREG:
case ISD::SIGN_EXTEND_VECTOR_INREG:
return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
case ISD::FADD:
case ISD::FSUB: return lowerFaddFsub(Op, DAG, Subtarget);
case ISD::FABS:
case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::FRAME_TO_ARGS_OFFSET:
return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
case ISD::EH_SJLJ_SETUP_DISPATCH:
return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
case ISD::MULHS:
case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
case ISD::ROTL:
case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
case ISD::SRA:
case ISD::SRL:
case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
case ISD::SADDO:
case ISD::UADDO:
case ISD::SSUBO:
case ISD::USUBO:
case ISD::SMULO:
case ISD::UMULO: return LowerXALUO(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
case ISD::ADDCARRY:
case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
case ISD::ADD:
case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
case ISD::UADDSAT:
case ISD::SADDSAT:
case ISD::USUBSAT:
case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG);
case ISD::SMAX:
case ISD::SMIN:
case ISD::UMAX:
case ISD::UMIN: return LowerMINMAX(Op, DAG);
case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
case ISD::GC_TRANSITION_START:
return LowerGC_TRANSITION_START(Op, DAG);
case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
}
}
/// Places new result values for the node in Results (their number
/// and types must exactly match those of the original return values of
/// the node), or leaves Results empty, which indicates that the node is not
/// to be custom lowered after all.
void X86TargetLowering::LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
SDValue Res = LowerOperation(SDValue(N, 0), DAG);
if (!Res.getNode())
return;
assert((N->getNumValues() <= Res->getNumValues()) &&
"Lowering returned the wrong number of results!");
// Places new result values based on N's result number.
// In some cases (LowerSINT_TO_FP for example) Res has more result values
// than the original node; the chain (last value) should be dropped.
for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
Results.push_back(Res.getValue(I));
}
/// Replace a node with an illegal result type with a new node built out of
/// custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const {
SDLoc dl(N);
switch (N->getOpcode()) {
default:
llvm_unreachable("Do not know how to custom type legalize this operation!");
case ISD::MUL: {
EVT VT = N->getValueType(0);
assert(VT.isVector() && "Unexpected VT");
if (getTypeAction(*DAG.getContext(), VT) == TypePromoteInteger &&
VT.getVectorNumElements() == 2) {
// Promote to a pattern that will be turned into PMULUDQ.
SDValue N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
N->getOperand(0));
SDValue N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
N->getOperand(1));
SDValue Mul = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, N0, N1);
Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, VT, Mul));
} else if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
VT.getVectorElementType() == MVT::i8) {
// Pre-promote these to vXi16 to avoid op legalization thinking all 16
// elements are needed.
MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
unsigned NumConcats = 16 / VT.getVectorNumElements();
SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
ConcatOps[0] = Res;
Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
Results.push_back(Res);
}
return;
}
case ISD::UADDSAT:
case ISD::SADDSAT:
case ISD::USUBSAT:
case ISD::SSUBSAT:
case X86ISD::VPMADDWD:
case X86ISD::AVG: {
// Legalize types for ISD::UADDSAT/SADDSAT/USUBSAT/SSUBSAT and
// X86ISD::AVG/VPMADDWD by widening.
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
EVT VT = N->getValueType(0);
EVT InVT = N->getOperand(0).getValueType();
assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
"Expected a VT that divides into 128 bits.");
unsigned NumConcat = 128 / InVT.getSizeInBits();
EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
InVT.getVectorElementType(),
NumConcat * InVT.getVectorNumElements());
EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
VT.getVectorElementType(),
NumConcat * VT.getVectorNumElements());
SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
Ops[0] = N->getOperand(0);
SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
Ops[0] = N->getOperand(1);
SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
return;
}
case ISD::SETCC: {
// Widen v2i32 (setcc v2f32). This is really needed for AVX512VL when
// setCC result type is v2i1 because type legalization will end up with
// a v4i1 setcc plus an extend.
assert(N->getValueType(0) == MVT::v2i32 && "Unexpected type");
if (N->getOperand(0).getValueType() != MVT::v2f32 ||
getTypeAction(*DAG.getContext(), MVT::v2i32) == TypeWidenVector)
return;
SDValue UNDEF = DAG.getUNDEF(MVT::v2f32);
SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
N->getOperand(0), UNDEF);
SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
N->getOperand(1), UNDEF);
SDValue Res = DAG.getNode(ISD::SETCC, dl, MVT::v4i32, LHS, RHS,
N->getOperand(2));
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
return;
}
// We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
case X86ISD::FMINC:
case X86ISD::FMIN:
case X86ISD::FMAXC:
case X86ISD::FMAX: {
EVT VT = N->getValueType(0);
assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
SDValue UNDEF = DAG.getUNDEF(VT);
SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
N->getOperand(0), UNDEF);
SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
N->getOperand(1), UNDEF);
Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
return;
}
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM: {
EVT VT = N->getValueType(0);
if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector) {
// If this RHS is a constant splat vector we can widen this and let
// division/remainder by constant optimize it.
// TODO: Can we do something for non-splat?
APInt SplatVal;
if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
unsigned NumConcats = 128 / VT.getSizeInBits();
SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
Ops0[0] = N->getOperand(0);
EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
Results.push_back(Res);
}
return;
}
if (VT == MVT::v2i32) {
// Legalize v2i32 div/rem by unrolling. Otherwise we promote to
// v2i64 and unroll later. But then we create i64 scalar ops which
// might be slow in 64-bit mode or require a libcall in 32-bit mode.
Results.push_back(DAG.UnrollVectorOp(N));
return;
}
if (VT.isVector())
return;
LLVM_FALLTHROUGH;
}
case ISD::SDIVREM:
case ISD::UDIVREM: {
SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
Results.push_back(V);
return;
}
case ISD::TRUNCATE: {
MVT VT = N->getSimpleValueType(0);
if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
return;
// The generic legalizer will try to widen the input type to the same
// number of elements as the widened result type. But this isn't always
// the best thing, so do some custom legalization to avoid some cases.
MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
SDValue In = N->getOperand(0);
EVT InVT = In.getValueType();
unsigned InBits = InVT.getSizeInBits();
if (128 % InBits == 0) {
// 128-bit and smaller inputs should avoid truncate altogether and
// just use a build_vector that will become a shuffle.
// TODO: Widen and use a shuffle directly?
MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
EVT EltVT = VT.getVectorElementType();
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
// Use the original element count so we don't do more scalar opts than
// necessary.
unsigned MinElts = VT.getVectorNumElements();
for (unsigned i=0; i < MinElts; ++i) {
SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
DAG.getIntPtrConstant(i, dl));
Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
}
Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
return;
}
// With AVX512 there are some cases that can use a target specific
// truncate node to go from 256/512 to less than 128 with zeros in the
// upper elements of the 128 bit result.
if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
// We can use VTRUNC directly for 256-bit inputs with VLX, or for any 512-bit input.
if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
return;
}
// There's one case we can widen to 512 bits and use VTRUNC.
if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
DAG.getUNDEF(MVT::v4i64));
Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
return;
}
}
return;
}
case ISD::SIGN_EXTEND_VECTOR_INREG: {
if (ExperimentalVectorWideningLegalization)
return;
EVT VT = N->getValueType(0);
SDValue In = N->getOperand(0);
EVT InVT = In.getValueType();
if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
(InVT == MVT::v16i16 || InVT == MVT::v32i8)) {
// Custom split this so we can extend i8/i16->i32 invec. This is better
// since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
// sra, and then an extend from i32 to i64 using pcmpgt. By custom splitting
// we allow the sra from the extend to i32 to be shared by the split.
EVT ExtractVT = EVT::getVectorVT(*DAG.getContext(),
InVT.getVectorElementType(),
InVT.getVectorNumElements() / 2);
MVT ExtendVT = MVT::getVectorVT(MVT::i32,
VT.getVectorNumElements());
In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ExtractVT,
In, DAG.getIntPtrConstant(0, dl));
In = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, MVT::v4i32, In);
// Fill a vector with sign bits for each element.
SDValue Zero = DAG.getConstant(0, dl, ExtendVT);
SDValue SignBits = DAG.getSetCC(dl, ExtendVT, Zero, In, ISD::SETGT);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// Create an unpackl and unpackh to interleave the sign bits then bitcast
// to vXi64.
SDValue Lo = getUnpackl(DAG, dl, ExtendVT, In, SignBits);
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
SDValue Hi = getUnpackh(DAG, dl, ExtendVT, In, SignBits);
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
Results.push_back(Res);
return;
}
return;
}
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND: {
if (!ExperimentalVectorWideningLegalization)
return;
EVT VT = N->getValueType(0);
SDValue In = N->getOperand(0);
EVT InVT = In.getValueType();
if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
(InVT == MVT::v4i16 || InVT == MVT::v4i8)) {
// Custom split this so we can extend i8/i16->i32 invec. This is better
// since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
// sra, and then an extend from i32 to i64 using pcmpgt. By custom splitting
// we allow the sra from the extend to i32 to be shared by the split.
In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
// Fill a vector with sign bits for each element.
SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
// Create an unpackl and unpackh to interleave the sign bits then bitcast
// to v2i64.
SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
{0, 4, 1, 5});
Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
{2, 6, 3, 7});
Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
Results.push_back(Res);
return;
}
if ((VT == MVT::v16i32 || VT == MVT::v8i64) && InVT.is128BitVector()) {
// Perform custom splitting instead of the two stage extend we would get
// by default.
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
assert(isTypeLegal(LoVT) && "Split VT not legal?");
bool IsSigned = N->getOpcode() == ISD::SIGN_EXTEND;
SDValue Lo = getExtendInVec(IsSigned, dl, LoVT, In, DAG);
// We need to shift the input over by half the number of elements.
unsigned NumElts = InVT.getVectorNumElements();
unsigned HalfNumElts = NumElts / 2;
SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
for (unsigned i = 0; i != HalfNumElts; ++i)
ShufMask[i] = i + HalfNumElts;
SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
Hi = getExtendInVec(IsSigned, dl, HiVT, Hi, DAG);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
Results.push_back(Res);
}
return;
}
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: {
bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
EVT VT = N->getValueType(0);
SDValue Src = N->getOperand(0);
EVT SrcVT = Src.getValueType();
// Promote these manually to avoid over promotion to v2i64. Type
// legalization will revisit the v2i32 operation for more cleanup.
if ((VT == MVT::v2i8 || VT == MVT::v2i16) &&
getTypeAction(*DAG.getContext(), VT) == TypePromoteInteger) {
// AVX512DQ provides instructions that produce a v2i64 result.
if (Subtarget.hasDQI())
return;
SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v2i32, Src);
Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
: ISD::AssertSext,
dl, MVT::v2i32, Res,
DAG.getValueType(VT.getVectorElementType()));
Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
Results.push_back(Res);
return;
}
if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
return;
// Try to create a 128 bit vector, but don't exceed a 32 bit element.
unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
VT.getVectorNumElements());
SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
// Preserve what we know about the size of the original result. Except
// when the result is v2i32 since we can't widen the assert.
if (PromoteVT != MVT::v2i32)
Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
: ISD::AssertSext,
dl, PromoteVT, Res,
DAG.getValueType(VT.getVectorElementType()));
// Truncate back to the original width.
Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
// Now widen to 128 bits.
unsigned NumConcats = 128 / VT.getSizeInBits();
MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
VT.getVectorNumElements() * NumConcats);
SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
ConcatOps[0] = Res;
Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
Results.push_back(Res);
return;
}
if (VT == MVT::v2i32) {
assert((IsSigned || Subtarget.hasAVX512()) &&
"Can only handle signed conversion without AVX512");
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
bool Widenv2i32 =
getTypeAction(*DAG.getContext(), MVT::v2i32) == TypeWidenVector;
if (Src.getValueType() == MVT::v2f64) {
unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
if (!IsSigned && !Subtarget.hasVLX()) {
// If v2i32 is widened, we can defer to the generic legalizer.
if (Widenv2i32)
return;
// Custom widen by doubling to a legal vector width. Isel will
// further widen to v8f64.
Opc = ISD::FP_TO_UINT;
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64,
Src, DAG.getUNDEF(MVT::v2f64));
}
SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
if (!Widenv2i32)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
return;
}
if (SrcVT == MVT::v2f32 &&
getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) {
SDValue Idx = DAG.getIntPtrConstant(0, dl);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
DAG.getUNDEF(MVT::v2f32));
Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT
: ISD::FP_TO_UINT, dl, MVT::v4i32, Res);
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, Idx);
Results.push_back(Res);
return;
}
// The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
// so early out here.
return;
}
if (Subtarget.hasDQI() && VT == MVT::i64 &&
(SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
assert(!Subtarget.is64Bit() && "i64 should be legal");
unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
// Using a 256-bit input here to guarantee 128-bit input for f32 case.
// TODO: Use 128-bit vectors for f64 case?
// TODO: Use 128-bit vectors for f32 by using CVTTP2SI/CVTTP2UI.
MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), NumElts);
SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
DAG.getConstantFP(0.0, dl, VecInVT), Src,
ZeroIdx);
Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
Results.push_back(Res);
return;
}
std::pair<SDValue,SDValue> Vals =
FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
SDValue FIST = Vals.first, StackSlot = Vals.second;
if (FIST.getNode()) {
// Return a load from the stack slot.
if (StackSlot.getNode())
Results.push_back(
DAG.getLoad(VT, dl, FIST, StackSlot, MachinePointerInfo()));
else
Results.push_back(FIST);
}
return;
}
case ISD::SINT_TO_FP: {
assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
SDValue Src = N->getOperand(0);
if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
return;
Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
return;
}
case ISD::UINT_TO_FP: {
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
EVT VT = N->getValueType(0);
if (VT != MVT::v2f32)
return;
SDValue Src = N->getOperand(0);
EVT SrcVT = Src.getValueType();
if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
return;
}
if (SrcVT != MVT::v2i32)
return;
SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
SDValue VBias =
DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
DAG.getBitcast(MVT::v2i64, VBias));
Or = DAG.getBitcast(MVT::v2f64, Or);
// TODO: Are there any fast-math-flags to propagate here?
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
return;
}
case ISD::FP_ROUND: {
if (!isTypeLegal(N->getOperand(0).getValueType()))
return;
SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
Results.push_back(V);
return;
}
case ISD::FP_EXTEND: {
// Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
// No other ValueType for FP_EXTEND should reach this point.
assert(N->getValueType(0) == MVT::v2f32 &&
"Do not know how to legalize this Node");
return;
}
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IntNo) {
default : llvm_unreachable("Do not know how to custom type "
"legalize this intrinsic operation!");
case Intrinsic::x86_rdtsc:
return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
Results);
case Intrinsic::x86_rdtscp:
return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
Results);
case Intrinsic::x86_rdpmc:
return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
case Intrinsic::x86_xgetbv:
return getExtendedControlRegister(N, dl, DAG, Subtarget, Results);
}
}
case ISD::INTRINSIC_WO_CHAIN: {
if (SDValue V = LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), DAG))
Results.push_back(V);
return;
}
case ISD::READCYCLECOUNTER: {
return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
Results);
}
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
EVT T = N->getValueType(0);
assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
bool Regs64bit = T == MVT::i128;
MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
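// CMPXCHG8B/CMPXCHG16B take the expected value in EDX:EAX (RDX:RAX) and the
// replacement value in ECX:EBX (RCX:RBX); the old value is returned in
// EDX:EAX (RDX:RAX) with ZF set on success.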
SDValue cpInL, cpInH;
cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
DAG.getConstant(0, dl, HalfT));
cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
DAG.getConstant(1, dl, HalfT));
cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
Regs64bit ? X86::RAX : X86::EAX,
cpInL, SDValue());
cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
Regs64bit ? X86::RDX : X86::EDX,
cpInH, cpInL.getValue(1));
SDValue swapInL, swapInH;
swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
DAG.getConstant(0, dl, HalfT));
swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
DAG.getConstant(1, dl, HalfT));
swapInH =
DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
swapInH, cpInH.getValue(1));
// If the current function needs the base pointer, RBX,
// we shouldn't use cmpxchg directly.
// Indeed, the lowering of that instruction will clobber
// that register, and since RBX will be a reserved register
// the register allocator will not make sure its value is
// properly saved and restored around this live-range.
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
SDValue Result;
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
unsigned BasePtr = TRI->getBaseRegister();
MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
(BasePtr == X86::RBX || BasePtr == X86::EBX)) {
// ISel prefers the LCMPXCHG64 variant.
// If that assert breaks, that means it is not the case anymore,
// and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
// not just EBX. This is a matter of accepting i64 input for that
// pseudo, and restoring into the register of the right width
// in the expand pseudo. Everything else should just work.
assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
"Saving only half of the RBX");
unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
: X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
Regs64bit ? X86::RBX : X86::EBX,
HalfT, swapInH.getValue(1));
SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
RBXSave,
/*Glue*/ RBXSave.getValue(2)};
Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
} else {
unsigned Opcode =
Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
Regs64bit ? X86::RBX : X86::EBX, swapInL,
swapInH.getValue(1));
SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
swapInL.getValue(1)};
Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
}
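// The cmpxchg pseudo leaves the result in EDX:EAX (RDX:RAX for the i128
// case) and sets ZF on success, so copy the two halves and EFLAGS back out,
// rebuild the wide value with BUILD_PAIR, and derive the success bit with a
// SETE.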
SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
Regs64bit ? X86::RAX : X86::EAX,
HalfT, Result.getValue(1));
SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
Regs64bit ? X86::RDX : X86::EDX,
HalfT, cpOutL.getValue(2));
SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
MVT::i32, cpOutH.getValue(2));
SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
Results.push_back(Success);
Results.push_back(EFLAGS.getValue(1));
return;
}
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_LOAD: {
// Delegate to generic TypeLegalization. Situations we can really handle
// should have already been dealt with by AtomicExpandPass.cpp.
break;
}
case ISD::BITCAST: {
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
EVT DstVT = N->getValueType(0);
EVT SrcVT = N->getOperand(0).getValueType();
// If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit target
// we can split it using the k-register rather than memory.
if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
Lo = DAG.getBitcast(MVT::i32, Lo);
Hi = DAG.getBitcast(MVT::i32, Hi);
SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
Results.push_back(Res);
return;
}
// Custom splitting for BWI types when AVX512F is available but BWI isn't.
if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
SrcVT.isVector() && isTypeLegal(SrcVT)) {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
Lo = DAG.getBitcast(CastVT, Lo);
Hi = DAG.getBitcast(CastVT, Hi);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
Results.push_back(Res);
return;
}
if (SrcVT != MVT::f64 ||
(DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8) ||
getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector)
return;
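// Remaining case: bitcast an f64 to a 64-bit vector (v2i32/v4i16/v8i8).
// Widen the scalar into a v2f64, bitcast that to the twice-as-wide integer
// vector, and extract the low half to get the requested type.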
unsigned NumElts = DstVT.getVectorNumElements();
EVT SVT = DstVT.getVectorElementType();
EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
SDValue Res;
Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, N->getOperand(0));
Res = DAG.getBitcast(WiderVT, Res);
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
return;
}
case ISD::MGATHER: {
EVT VT = N->getValueType(0);
if (VT == MVT::v2f32 && (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
auto *Gather = cast<MaskedGatherSDNode>(N);
SDValue Index = Gather->getIndex();
if (Index.getValueType() != MVT::v2i64)
return;
SDValue Mask = Gather->getMask();
assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
Gather->getPassThru(),
DAG.getUNDEF(MVT::v2f32));
if (!Subtarget.hasVLX()) {
// We need to widen the mask, but the instruction will only use 2
// of its elements. So we can use undef.
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
DAG.getUNDEF(MVT::v2i1));
Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
}
SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
Gather->getBasePtr(), Index, Gather->getScale() };
SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
DAG.getVTList(MVT::v4f32, Mask.getValueType(), MVT::Other), Ops, dl,
Gather->getMemoryVT(), Gather->getMemOperand());
Results.push_back(Res);
Results.push_back(Res.getValue(2));
return;
}
if (VT == MVT::v2i32) {
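// v2i32 gathers are handled by widening the result to v4i32. If the index
// is already v2i64 (and the subtarget profile allows it) we can build the
// X86 gather node directly below; otherwise we widen the index and mask as
// well and emit a generic masked gather.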
auto *Gather = cast<MaskedGatherSDNode>(N);
SDValue Index = Gather->getIndex();
SDValue Mask = Gather->getMask();
assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32,
Gather->getPassThru(),
DAG.getUNDEF(MVT::v2i32));
// If the index is v2i64 we can use it directly.
if (Index.getValueType() == MVT::v2i64 &&
(Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
if (!Subtarget.hasVLX()) {
// We need to widen the mask, but the instruction will only use 2
// of its elements. So we can use undef.
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
DAG.getUNDEF(MVT::v2i1));
Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
}
SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
Gather->getBasePtr(), Index, Gather->getScale() };
SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
DAG.getVTList(MVT::v4i32, Mask.getValueType(), MVT::Other), Ops, dl,
Gather->getMemoryVT(), Gather->getMemOperand());
SDValue Chain = Res.getValue(2);
if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
Results.push_back(Chain);
return;
}
if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) {
EVT IndexVT = Index.getValueType();
EVT NewIndexVT = EVT::getVectorVT(*DAG.getContext(),
IndexVT.getScalarType(), 4);
// Otherwise we need to custom widen everything to avoid promotion.
Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index,
DAG.getUNDEF(IndexVT));
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
DAG.getConstant(0, dl, MVT::v2i1));
SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
Gather->getBasePtr(), Index, Gather->getScale() };
SDValue Res = DAG.getMaskedGather(DAG.getVTList(MVT::v4i32, MVT::Other),
Gather->getMemoryVT(), dl, Ops,
Gather->getMemOperand());
SDValue Chain = Res.getValue(1);
if (getTypeAction(*DAG.getContext(), MVT::v2i32) != TypeWidenVector)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
Results.push_back(Chain);
return;
}
}
return;
}
case ISD::LOAD: {
// Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
// avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
// cast since type legalization will try to use an i64 load.
MVT VT = N->getSimpleValueType(0);
assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
return;
if (!ISD::isNON_EXTLoad(N))
return;
auto *Ld = cast<LoadSDNode>(N);
MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
Ld->getPointerInfo(),
Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
SDValue Chain = Res.getValue(1);
MVT WideVT = MVT::getVectorVT(LdVT, 2);
Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, WideVT, Res);
MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() * 2);
Res = DAG.getBitcast(CastVT, Res);
Results.push_back(Res);
Results.push_back(Chain);
return;
}
}
}
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
switch ((X86ISD::NodeType)Opcode) {
case X86ISD::FIRST_NUMBER: break;
case X86ISD::BSF: return "X86ISD::BSF";
case X86ISD::BSR: return "X86ISD::BSR";
case X86ISD::SHLD: return "X86ISD::SHLD";
case X86ISD::SHRD: return "X86ISD::SHRD";
case X86ISD::FAND: return "X86ISD::FAND";
case X86ISD::FANDN: return "X86ISD::FANDN";
case X86ISD::FOR: return "X86ISD::FOR";
case X86ISD::FXOR: return "X86ISD::FXOR";
case X86ISD::FILD: return "X86ISD::FILD";
case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
case X86ISD::FLD: return "X86ISD::FLD";
case X86ISD::FST: return "X86ISD::FST";
case X86ISD::CALL: return "X86ISD::CALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
case X86ISD::BT: return "X86ISD::BT";
case X86ISD::CMP: return "X86ISD::CMP";
case X86ISD::COMI: return "X86ISD::COMI";
case X86ISD::UCOMI: return "X86ISD::UCOMI";
case X86ISD::CMPM: return "X86ISD::CMPM";
case X86ISD::CMPM_RND: return "X86ISD::CMPM_RND";
case X86ISD::SETCC: return "X86ISD::SETCC";
case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
case X86ISD::FSETCC: return "X86ISD::FSETCC";
case X86ISD::FSETCCM: return "X86ISD::FSETCCM";
case X86ISD::FSETCCM_RND: return "X86ISD::FSETCCM_RND";
case X86ISD::CMOV: return "X86ISD::CMOV";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
case X86ISD::IRET: return "X86ISD::IRET";
case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
case X86ISD::Wrapper: return "X86ISD::Wrapper";
case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
case X86ISD::PINSRB: return "X86ISD::PINSRB";
case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
case X86ISD::ANDNP: return "X86ISD::ANDNP";
case X86ISD::BLENDI: return "X86ISD::BLENDI";
case X86ISD::BLENDV: return "X86ISD::BLENDV";
case X86ISD::HADD: return "X86ISD::HADD";
case X86ISD::HSUB: return "X86ISD::HSUB";
case X86ISD::FHADD: return "X86ISD::FHADD";
case X86ISD::FHSUB: return "X86ISD::FHSUB";
case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMAXS: return "X86ISD::FMAXS";
case X86ISD::FMAX_RND: return "X86ISD::FMAX_RND";
case X86ISD::FMAXS_RND: return "X86ISD::FMAXS_RND";
case X86ISD::FMIN: return "X86ISD::FMIN";
case X86ISD::FMINS: return "X86ISD::FMINS";
case X86ISD::FMIN_RND: return "X86ISD::FMIN_RND";
case X86ISD::FMINS_RND: return "X86ISD::FMINS_RND";
case X86ISD::FMAXC: return "X86ISD::FMAXC";
case X86ISD::FMINC: return "X86ISD::FMINC";
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
case X86ISD::EH_SJLJ_SETUP_DISPATCH:
return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
case X86ISD::LADD: return "X86ISD::LADD";
case X86ISD::LSUB: return "X86ISD::LSUB";
case X86ISD::LOR: return "X86ISD::LOR";
case X86ISD::LXOR: return "X86ISD::LXOR";
case X86ISD::LAND: return "X86ISD::LAND";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
case X86ISD::VMTRUNC: return "X86ISD::VMTRUNC";
case X86ISD::VMTRUNCS: return "X86ISD::VMTRUNCS";
case X86ISD::VMTRUNCUS: return "X86ISD::VMTRUNCUS";
case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES";
case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS";
case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES";
case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS";
case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
case X86ISD::VFPEXT_RND: return "X86ISD::VFPEXT_RND";
case X86ISD::VFPEXTS_RND: return "X86ISD::VFPEXTS_RND";
case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
case X86ISD::VMFPROUND: return "X86ISD::VMFPROUND";
case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND";
case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND";
case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
case X86ISD::VSHL: return "X86ISD::VSHL";
case X86ISD::VSRL: return "X86ISD::VSRL";
case X86ISD::VSRA: return "X86ISD::VSRA";
case X86ISD::VSHLI: return "X86ISD::VSHLI";
case X86ISD::VSRLI: return "X86ISD::VSRLI";
case X86ISD::VSRAI: return "X86ISD::VSRAI";
case X86ISD::VSHLV: return "X86ISD::VSHLV";
case X86ISD::VSRLV: return "X86ISD::VSRLV";
case X86ISD::VSRAV: return "X86ISD::VSRAV";
case X86ISD::VROTLI: return "X86ISD::VROTLI";
case X86ISD::VROTRI: return "X86ISD::VROTRI";
case X86ISD::VPPERM: return "X86ISD::VPPERM";
case X86ISD::CMPP: return "X86ISD::CMPP";
case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
case X86ISD::PHMINPOS: return "X86ISD::PHMINPOS";
case X86ISD::ADD: return "X86ISD::ADD";
case X86ISD::SUB: return "X86ISD::SUB";
case X86ISD::ADC: return "X86ISD::ADC";
case X86ISD::SBB: return "X86ISD::SBB";
case X86ISD::SMUL: return "X86ISD::SMUL";
case X86ISD::UMUL: return "X86ISD::UMUL";
case X86ISD::OR: return "X86ISD::OR";
case X86ISD::XOR: return "X86ISD::XOR";
case X86ISD::AND: return "X86ISD::AND";
case X86ISD::BEXTR: return "X86ISD::BEXTR";
case X86ISD::BZHI: return "X86ISD::BZHI";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::TESTP: return "X86ISD::TESTP";
case X86ISD::KORTEST: return "X86ISD::KORTEST";
case X86ISD::KTEST: return "X86ISD::KTEST";
case X86ISD::KADD: return "X86ISD::KADD";
case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL";
case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR";
case X86ISD::PACKSS: return "X86ISD::PACKSS";
case X86ISD::PACKUS: return "X86ISD::PACKUS";
case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
case X86ISD::VALIGN: return "X86ISD::VALIGN";
case X86ISD::VSHLD: return "X86ISD::VSHLD";
case X86ISD::VSHRD: return "X86ISD::VSHRD";
case X86ISD::VSHLDV: return "X86ISD::VSHLDV";
case X86ISD::VSHRDV: return "X86ISD::VSHRDV";
case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
case X86ISD::SHUFP: return "X86ISD::SHUFP";
case X86ISD::SHUF128: return "X86ISD::SHUF128";
case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
case X86ISD::MOVSD: return "X86ISD::MOVSD";
case X86ISD::MOVSS: return "X86ISD::MOVSS";
case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
case X86ISD::VPERMV: return "X86ISD::VPERMV";
case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
case X86ISD::VPERMI: return "X86ISD::VPERMI";
case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
case X86ISD::VRANGE: return "X86ISD::VRANGE";
case X86ISD::VRANGE_RND: return "X86ISD::VRANGE_RND";
case X86ISD::VRANGES: return "X86ISD::VRANGES";
case X86ISD::VRANGES_RND: return "X86ISD::VRANGES_RND";
case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
case X86ISD::PSADBW: return "X86ISD::PSADBW";
case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
case X86ISD::MFENCE: return "X86ISD::MFENCE";
case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
case X86ISD::SAHF: return "X86ISD::SAHF";
case X86ISD::RDRAND: return "X86ISD::RDRAND";
case X86ISD::RDSEED: return "X86ISD::RDSEED";
case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
case X86ISD::VPSHA: return "X86ISD::VPSHA";
case X86ISD::VPSHL: return "X86ISD::VPSHL";
case X86ISD::VPCOM: return "X86ISD::VPCOM";
case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
case X86ISD::FMSUB: return "X86ISD::FMSUB";
case X86ISD::FNMADD: return "X86ISD::FNMADD";
case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
case X86ISD::VRNDSCALE_RND: return "X86ISD::VRNDSCALE_RND";
case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES";
case X86ISD::VRNDSCALES_RND: return "X86ISD::VRNDSCALES_RND";
case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
case X86ISD::VREDUCE_RND: return "X86ISD::VREDUCE_RND";
case X86ISD::VREDUCES: return "X86ISD::VREDUCES";
case X86ISD::VREDUCES_RND: return "X86ISD::VREDUCES_RND";
case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
case X86ISD::VGETMANT_RND: return "X86ISD::VGETMANT_RND";
case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS";
case X86ISD::VGETMANTS_RND: return "X86ISD::VGETMANTS_RND";
case X86ISD::PCMPESTR: return "X86ISD::PCMPESTR";
case X86ISD::PCMPISTR: return "X86ISD::PCMPISTR";
case X86ISD::XTEST: return "X86ISD::XTEST";
case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
case X86ISD::EXPAND: return "X86ISD::EXPAND";
case X86ISD::SELECTS: return "X86ISD::SELECTS";
case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
case X86ISD::RCP14: return "X86ISD::RCP14";
case X86ISD::RCP14S: return "X86ISD::RCP14S";
case X86ISD::RCP28: return "X86ISD::RCP28";
case X86ISD::RCP28S: return "X86ISD::RCP28S";
case X86ISD::EXP2: return "X86ISD::EXP2";
case X86ISD::RSQRT14: return "X86ISD::RSQRT14";
case X86ISD::RSQRT14S: return "X86ISD::RSQRT14S";
case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S";
case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND";
case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND";
case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND";
case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND";
case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND";
case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND";
case X86ISD::FGETEXPS_RND: return "X86ISD::FGETEXPS_RND";
case X86ISD::SCALEF: return "X86ISD::SCALEF";
case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
case X86ISD::AVG: return "X86ISD::AVG";
case X86ISD::MULHRS: return "X86ISD::MULHRS";
case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI";
case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI";
case X86ISD::MCVTTP2SI: return "X86ISD::MCVTTP2SI";
case X86ISD::MCVTTP2UI: return "X86ISD::MCVTTP2UI";
case X86ISD::CVTTP2SI_RND: return "X86ISD::CVTTP2SI_RND";
case X86ISD::CVTTP2UI_RND: return "X86ISD::CVTTP2UI_RND";
case X86ISD::CVTTS2SI: return "X86ISD::CVTTS2SI";
case X86ISD::CVTTS2UI: return "X86ISD::CVTTS2UI";
case X86ISD::CVTTS2SI_RND: return "X86ISD::CVTTS2SI_RND";
case X86ISD::CVTTS2UI_RND: return "X86ISD::CVTTS2UI_RND";
case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P";
case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P";
case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH";
case X86ISD::MCVTPS2PH: return "X86ISD::MCVTPS2PH";
case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS";
case X86ISD::CVTPH2PS_RND: return "X86ISD::CVTPH2PS_RND";
case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI";
case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI";
case X86ISD::MCVTP2SI: return "X86ISD::MCVTP2SI";
case X86ISD::MCVTP2UI: return "X86ISD::MCVTP2UI";
case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND";
case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND";
case X86ISD::CVTS2SI: return "X86ISD::CVTS2SI";
case X86ISD::CVTS2UI: return "X86ISD::CVTS2UI";
case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND";
case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND";
case X86ISD::LWPINS: return "X86ISD::LWPINS";
case X86ISD::MGATHER: return "X86ISD::MGATHER";
case X86ISD::MSCATTER: return "X86ISD::MSCATTER";
case X86ISD::VPDPBUSD: return "X86ISD::VPDPBUSD";
case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS";
case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD";
case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS";
case X86ISD::VPSHUFBITQMB: return "X86ISD::VPSHUFBITQMB";
case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB";
case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB";
case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB";
case X86ISD::NT_CALL: return "X86ISD::NT_CALL";
case X86ISD::NT_BRIND: return "X86ISD::NT_BRIND";
case X86ISD::UMWAIT: return "X86ISD::UMWAIT";
case X86ISD::TPAUSE: return "X86ISD::TPAUSE";
}
return nullptr;
}
/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS,
Instruction *I) const {
// X86 supports extremely general addressing modes.
CodeModel::Model M = getTargetMachine().getCodeModel();
// X86 allows a sign-extended 32-bit immediate field as a displacement.
if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
return false;
if (AM.BaseGV) {
unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
// If a reference to this global requires an extra load, we can't fold it.
if (isGlobalStubReference(GVFlags))
return false;
// If BaseGV requires a register for the PIC base, we cannot also have a
// BaseReg specified.
if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
return false;
// If lower 4G is not available, then we must use rip-relative addressing.
if ((M != CodeModel::Small || isPositionIndependent()) &&
Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
return false;
}
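// The remaining constraint is the scale factor. The SIB byte encodes
// 1/2/4/8 directly; 3/5/9 can only be formed as base + 2/4/8 * index,
// which requires the base-register slot to still be free.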
switch (AM.Scale) {
case 0:
case 1:
case 2:
case 4:
case 8:
// These scales always work.
break;
case 3:
case 5:
case 9:
// These scales are formed with basereg+scalereg. Only accept if there is
// no basereg yet.
if (AM.HasBaseReg)
return false;
break;
default: // Other stuff never works.
return false;
}
return true;
}
bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
unsigned Bits = Ty->getScalarSizeInBits();
// 8-bit shifts are always expensive, but versions with a scalar amount aren't
// particularly cheaper than those without.
if (Bits == 8)
return false;
// XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
(Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
return false;
// AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
// shifts just as cheap as scalar ones.
if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
return false;
// AVX512BW has shifts such as vpsllvw.
if (Subtarget.hasBWI() && Bits == 16)
return false;
// Otherwise, it's significantly cheaper to shift by a scalar amount than by a
// fully general vector.
return true;
}
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
return NumBits1 > NumBits2;
}
bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
if (!isTypeLegal(EVT::getEVT(Ty1)))
return false;
assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
// Assuming the caller doesn't have a zeroext or signext return parameter,
// truncation all the way down to i1 is valid.
return true;
}
bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return isInt<32>(Imm);
}
bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
// Can also use sub to handle negated immediates.
return isInt<32>(Imm);
}
bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
return isInt<32>(Imm);
}
bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
if (!VT1.isInteger() || !VT2.isInteger())
return false;
unsigned NumBits1 = VT1.getSizeInBits();
unsigned NumBits2 = VT2.getSizeInBits();
return NumBits1 > NumBits2;
}
bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
}
bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
}
bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
EVT VT1 = Val.getValueType();
if (isZExtFree(VT1, VT2))
return true;
if (Val.getOpcode() != ISD::LOAD)
return false;
if (!VT1.isSimple() || !VT1.isInteger() ||
!VT2.isSimple() || !VT2.isInteger())
return false;
switch (VT1.getSimpleVT().SimpleTy) {
default: break;
case MVT::i8:
case MVT::i16:
case MVT::i32:
// X86 has 8, 16, and 32-bit zero-extending loads.
return true;
}
return false;
}
bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
EVT SrcVT = ExtVal.getOperand(0).getValueType();
// There is no extending load for vXi1.
if (SrcVT.getScalarType() == MVT::i1)
return false;
return true;
}
bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
if (!Subtarget.hasAnyFMA())
return false;
VT = VT.getScalarType();
if (!VT.isSimple())
return false;
switch (VT.getSimpleVT().SimpleTy) {
case MVT::f32:
case MVT::f64:
return true;
default:
break;
}
return false;
}
bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
// i16 instructions are longer (0x66 prefix) and potentially slower.
return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
/// Targets can use this to indicate that they only support *some*
/// VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
if (!VT.isSimple())
return false;
// Not for i1 vectors
if (VT.getSimpleVT().getScalarType() == MVT::i1)
return false;
// Very little shuffling can be done for 64-bit vectors right now.
if (VT.getSimpleVT().getSizeInBits() == 64)
return false;
// We only care that the types being shuffled are legal. The lowering can
// handle any possible shuffle mask that results.
return isTypeLegal(VT.getSimpleVT());
}
bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
EVT VT) const {
// Don't convert an 'and' into a shuffle that we don't directly support.
// vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
if (!Subtarget.hasAVX2())
if (VT == MVT::v32i8 || VT == MVT::v16i16)
return false;
// Just delegate to the generic legality, clear masks aren't special.
return isShuffleMaskLegal(Mask, VT);
}
bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
// If the subtarget is using retpolines, we must not generate jump tables.
if (Subtarget.useRetpolineIndirectBranches())
return false;
// Otherwise, fallback on the generic logic.
return TargetLowering::areJTsAllowed(Fn);
}
//===----------------------------------------------------------------------===//
// X86 Scheduler Hooks
//===----------------------------------------------------------------------===//
/// Utility function to emit xbegin specifying the start of an RTM region.
static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
const TargetInstrInfo *TII) {
DebugLoc DL = MI.getDebugLoc();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
// For the v = xbegin(), we generate
//
// thisMBB:
// xbegin fallMBB
//
// mainMBB:
// s0 = -1
//
// fallBB:
// eax = # XABORT_DEF
// s1 = eax
//
// sinkMBB:
// v = phi(s0/mainBB, s1/fallBB)
MachineBasicBlock *thisMBB = MBB;
MachineFunction *MF = MBB->getParent();
MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(I, mainMBB);
MF->insert(I, fallMBB);
MF->insert(I, sinkMBB);
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned DstReg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
unsigned mainDstReg = MRI.createVirtualRegister(RC);
unsigned fallDstReg = MRI.createVirtualRegister(RC);
// thisMBB:
// xbegin fallMBB
// # fallthrough to mainMBB
// # abort to fallMBB
BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(fallMBB);
// mainMBB:
// mainDstReg := -1
BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
mainMBB->addSuccessor(sinkMBB);
// fallMBB:
// ; pseudo instruction to model hardware's definition from XABORT
// EAX := XABORT_DEF
// fallDstReg := EAX
BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
.addReg(X86::EAX);
fallMBB->addSuccessor(sinkMBB);
// sinkMBB:
// DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
.addReg(mainDstReg).addMBB(mainMBB)
.addReg(fallDstReg).addMBB(fallMBB);
MI.eraseFromParent();
return sinkMBB;
}
static MachineBasicBlock *emitWRPKRU(MachineInstr &MI, MachineBasicBlock *BB,
const X86Subtarget &Subtarget) {
DebugLoc dl = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// insert input VAL into EAX
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
.addReg(MI.getOperand(0).getReg());
// insert zero to ECX
BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
// insert zero to EDX
BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::EDX);
// insert WRPKRU instruction
BuildMI(*BB, MI, dl, TII->get(X86::WRPKRUr));
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
static MachineBasicBlock *emitRDPKRU(MachineInstr &MI, MachineBasicBlock *BB,
const X86Subtarget &Subtarget) {
DebugLoc dl = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// insert zero to ECX
BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
// insert RDPKRU instruction
BuildMI(*BB, MI, dl, TII->get(X86::RDPKRUr));
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
.addReg(X86::EAX);
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB,
const X86Subtarget &Subtarget,
unsigned Opc) {
DebugLoc dl = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// Address into RAX/EAX, other two args into ECX, EDX.
unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
for (int i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI.getOperand(i));
unsigned ValOps = X86::AddrNumOperands;
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
.addReg(MI.getOperand(ValOps).getReg());
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
.addReg(MI.getOperand(ValOps + 1).getReg());
// The instruction doesn't actually take any operands though.
BuildMI(*BB, MI, dl, TII->get(Opc));
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
static MachineBasicBlock *emitClzero(MachineInstr *MI, MachineBasicBlock *BB,
const X86Subtarget &Subtarget) {
DebugLoc dl = MI->getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// Address into RAX/EAX
unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
for (int i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI->getOperand(i));
// The instruction doesn't actually take any operands though.
BuildMI(*BB, MI, dl, TII->get(X86::CLZEROr));
MI->eraseFromParent(); // The pseudo is gone now.
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const {
// Emit va_arg instruction on X86-64.
// Operands to this pseudo-instruction:
// 0 ) Output : destination address (reg)
// 1-5) Input : va_list address (addr, i64mem)
// 6 ) ArgSize : Size (in bytes) of vararg type
// 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
// 8 ) Align : Alignment of type
// 9 ) EFLAGS (implicit-def)
assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
static_assert(X86::AddrNumOperands == 5,
"VAARG_64 assumes 5 address operands");
unsigned DestReg = MI.getOperand(0).getReg();
MachineOperand &Base = MI.getOperand(1);
MachineOperand &Scale = MI.getOperand(2);
MachineOperand &Index = MI.getOperand(3);
MachineOperand &Disp = MI.getOperand(4);
MachineOperand &Segment = MI.getOperand(5);
unsigned ArgSize = MI.getOperand(6).getImm();
unsigned ArgMode = MI.getOperand(7).getImm();
unsigned Align = MI.getOperand(8).getImm();
// Memory Reference
assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
SmallVector<MachineMemOperand *, 1> MMOs(MI.memoperands_begin(),
MI.memoperands_end());
// Machine Information
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
DebugLoc DL = MI.getDebugLoc();
// struct va_list {
// i32 gp_offset
// i32 fp_offset
// i64 overflow_area (address)
// i64 reg_save_area (address)
// }
// sizeof(va_list) = 24
// alignment(va_list) = 8
unsigned TotalNumIntRegs = 6;
unsigned TotalNumXMMRegs = 8;
bool UseGPOffset = (ArgMode == 1);
bool UseFPOffset = (ArgMode == 2);
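// The register save area holds the 6 integer registers (8 bytes each)
// followed by the 8 XMM registers (16 bytes each); MaxOffset is the end of
// the region this argument class may still be fetched from.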
unsigned MaxOffset = TotalNumIntRegs * 8 +
(UseFPOffset ? TotalNumXMMRegs * 16 : 0);
/* Align ArgSize to a multiple of 8 */
unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
bool NeedsAlign = (Align > 8);
MachineBasicBlock *thisMBB = MBB;
MachineBasicBlock *overflowMBB;
MachineBasicBlock *offsetMBB;
MachineBasicBlock *endMBB;
unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
unsigned OffsetReg = 0;
if (!UseGPOffset && !UseFPOffset) {
// If we only pull from the overflow region, we don't create a branch.
// We don't need to alter control flow.
OffsetDestReg = 0; // unused
OverflowDestReg = DestReg;
offsetMBB = nullptr;
overflowMBB = thisMBB;
endMBB = thisMBB;
} else {
// First emit code to check if gp_offset (or fp_offset) is below the bound.
// If so, pull the argument from reg_save_area. (branch to offsetMBB)
// If not, pull from overflow_area. (branch to overflowMBB)
//
// thisMBB
//   |     .
//   |        .
//   offsetMBB   overflowMBB
//   |        .
//   |     .
//   endMBB
// Registers for the PHI in endMBB
OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
MachineFunction *MF = MBB->getParent();
overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator MBBIter = ++MBB->getIterator();
// Insert the new basic blocks
MF->insert(MBBIter, offsetMBB);
MF->insert(MBBIter, overflowMBB);
MF->insert(MBBIter, endMBB);
// Transfer the remainder of MBB and its successor edges to endMBB.
endMBB->splice(endMBB->begin(), thisMBB,
std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Make offsetMBB and overflowMBB successors of thisMBB
thisMBB->addSuccessor(offsetMBB);
thisMBB->addSuccessor(overflowMBB);
// endMBB is a successor of both offsetMBB and overflowMBB
offsetMBB->addSuccessor(endMBB);
overflowMBB->addSuccessor(endMBB);
// Load the offset value into a register
OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
.setMemRefs(MMOs);
// Check if there is enough room left to pull this argument.
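// (OffsetReg and ArgSizeA8 are kept 8-byte multiples by the ABI, so
// branching when OffsetReg >= MaxOffset + 8 - ArgSizeA8 is equivalent to
// checking OffsetReg + ArgSizeA8 > MaxOffset.)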
BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
.addReg(OffsetReg)
.addImm(MaxOffset + 8 - ArgSizeA8);
// Branch to "overflowMBB" if offset >= max
// Fall through to "offsetMBB" otherwise
BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
.addMBB(overflowMBB);
}
// In offsetMBB, emit code to use the reg_save_area.
if (offsetMBB) {
assert(OffsetReg != 0);
// Read the reg_save_area address.
unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, 16)
.add(Segment)
.setMemRefs(MMOs);
// Zero-extend the offset
unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
.addImm(0)
.addReg(OffsetReg)
.addImm(X86::sub_32bit);
// Add the offset to the reg_save_area to get the final address.
BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
.addReg(OffsetReg64)
.addReg(RegSaveReg);
// Compute the offset for the next argument
unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
.addReg(OffsetReg)
.addImm(UseFPOffset ? 16 : 8);
// Store it back into the va_list.
BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
.addReg(NextOffsetReg)
.setMemRefs(MMOs);
// Jump to endMBB
BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
.addMBB(endMBB);
}
//
// Emit code to use overflow area
//
// Load the overflow_area address into a register.
unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, 8)
.add(Segment)
.setMemRefs(MMOs);
// If we need to align it, do so. Otherwise, just copy the address
// to OverflowDestReg.
if (NeedsAlign) {
// Align the overflow address
assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
// aligned_addr = (addr + (align-1)) & ~(align-1)
BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
.addReg(OverflowAddrReg)
.addImm(Align-1);
BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
.addReg(TmpReg)
.addImm(~(uint64_t)(Align-1));
} else {
BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
.addReg(OverflowAddrReg);
}
// Compute the next overflow address after this argument.
// (the overflow address should be kept 8-byte aligned)
unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
.addReg(OverflowDestReg)
.addImm(ArgSizeA8);
// Store the new overflow address.
BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, 8)
.add(Segment)
.addReg(NextAddrReg)
.setMemRefs(MMOs);
// If we branched, emit the PHI to the front of endMBB.
if (offsetMBB) {
BuildMI(*endMBB, endMBB->begin(), DL,
TII->get(X86::PHI), DestReg)
.addReg(OffsetDestReg).addMBB(offsetMBB)
.addReg(OverflowDestReg).addMBB(overflowMBB);
}
// Erase the pseudo instruction
MI.eraseFromParent();
return endMBB;
}
MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *MBB) const {
// Emit code to save XMM registers to the stack. The ABI says that the
// number of registers to save is given in %al, so it's theoretically
// possible to do an indirect jump trick to avoid saving all of them;
// however, this code takes a simpler approach and just executes all
// of the stores if %al is non-zero. It's less code, and it's probably
// easier on the hardware branch predictor, and stores aren't all that
// expensive anyway.
// Create the new basic blocks. One block contains all the XMM stores,
// and one block is the final destination regardless of whether any
// stores were performed.
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
MachineFunction *F = MBB->getParent();
MachineFunction::iterator MBBIter = ++MBB->getIterator();
MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(MBBIter, XMMSaveMBB);
F->insert(MBBIter, EndMBB);
// Transfer the remainder of MBB and its successor edges to EndMBB.
EndMBB->splice(EndMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
// The original block will now fall through to the XMM save block.
MBB->addSuccessor(XMMSaveMBB);
// The XMMSaveMBB will fall through to the end block.
XMMSaveMBB->addSuccessor(EndMBB);
// Now add the instructions.
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
unsigned CountReg = MI.getOperand(0).getReg();
int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
// If %al is 0, branch around the XMM save block.
BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
MBB->addSuccessor(EndMBB);
}
// Make sure the last operand is EFLAGS, which gets clobbered by the branch
// that was just emitted, but clearly shouldn't be "saved".
assert((MI.getNumOperands() <= 3 ||
!MI.getOperand(MI.getNumOperands() - 1).isReg() ||
MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
"Expected last argument to be EFLAGS");
unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
// In the XMM save block, save all the XMM argument registers.
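// Operands 3 onwards are the live XMM argument registers; each one gets a
// 16-byte slot in the register save frame, starting at VarArgsFPOffset.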
for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
MachineMemOperand *MMO = F->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
MachineMemOperand::MOStore,
/*Size=*/16, /*Align=*/16);
BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
.addFrameIndex(RegSaveFrameIndex)
.addImm(/*Scale=*/1)
.addReg(/*IndexReg=*/0)
.addImm(/*Disp=*/Offset)
.addReg(/*Segment=*/0)
.addReg(MI.getOperand(i).getReg())
.addMemOperand(MMO);
}
MI.eraseFromParent(); // The pseudo instruction is gone now.
return EndMBB;
}
// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
MachineBasicBlock* BB,
const TargetRegisterInfo* TRI) {
// Scan forward through BB for a use/def of EFLAGS.
MachineBasicBlock::iterator miI(std::next(SelectItr));
for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
const MachineInstr& mi = *miI;
if (mi.readsRegister(X86::EFLAGS))
return false;
if (mi.definesRegister(X86::EFLAGS))
break; // Should have kill-flag - update below.
}
// If we hit the end of the block, check whether EFLAGS is live into a
// successor.
if (miI == BB->end()) {
for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
sEnd = BB->succ_end();
sItr != sEnd; ++sItr) {
MachineBasicBlock* succ = *sItr;
if (succ->isLiveIn(X86::EFLAGS))
return false;
}
}
// We found a def, or hit the end of the basic block and EFLAGS wasn't live
// out. SelectMI should have a kill flag on EFLAGS.
SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
return true;
}
// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
static bool isCMOVPseudo(MachineInstr &MI) {
switch (MI.getOpcode()) {
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_GR8:
case X86::CMOV_GR16:
case X86::CMOV_GR32:
case X86::CMOV_RFP32:
case X86::CMOV_RFP64:
case X86::CMOV_RFP80:
case X86::CMOV_VR128:
case X86::CMOV_VR128X:
case X86::CMOV_VR256:
case X86::CMOV_VR256X:
case X86::CMOV_VR512:
case X86::CMOV_VK2:
case X86::CMOV_VK4:
case X86::CMOV_VK8:
case X86::CMOV_VK16:
case X86::CMOV_VK32:
case X86::CMOV_VK64:
return true;
default:
return false;
}
}
// Helper function, which inserts PHI functions into SinkMBB:
// %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
// in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for
// the last PHI function inserted.
static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
MachineBasicBlock *SinkMBB) {
MachineFunction *MF = TrueMBB->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
DebugLoc DL = MIItBegin->getDebugLoc();
X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
// As we are creating the PHIs, we have to be careful if there is more than
// one. Later CMOVs may reference the results of earlier CMOVs, but later
// PHIs have to reference the individual true/false inputs from earlier PHIs.
// That also means that PHI construction must work forward from earlier to
// later, and that the code must maintain a mapping from the earlier PHIs'
// destination registers to the registers that went into those PHIs.
DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
MachineInstrBuilder MIB;
for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
unsigned DestReg = MIIt->getOperand(0).getReg();
unsigned Op1Reg = MIIt->getOperand(1).getReg();
unsigned Op2Reg = MIIt->getOperand(2).getReg();
// If this CMOV we are generating is the opposite condition from
// the jump we generated, then we have to swap the operands for the
// PHI that is going to be generated.
if (MIIt->getOperand(3).getImm() == OppCC)
std::swap(Op1Reg, Op2Reg);
if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
Op1Reg = RegRewriteTable[Op1Reg].first;
if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
Op2Reg = RegRewriteTable[Op2Reg].second;
MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
.addReg(Op1Reg)
.addMBB(FalseMBB)
.addReg(Op2Reg)
.addMBB(TrueMBB);
// Add this PHI to the rewrite table.
RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
}
return MIB;
}
// Lower cascaded selects of the form (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
MachineBasicBlock *
X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
MachineInstr &SecondCascadedCMOV,
MachineBasicBlock *ThisMBB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = FirstCMOV.getDebugLoc();
// We lower cascaded CMOVs such as
//
// (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
//
// to two successive branches.
//
// Without this, we would add a PHI between the two jumps, which ends up
// creating a few copies all around. For instance, for
//
// (sitofp (zext (fcmp une)))
//
// we would generate:
//
// ucomiss %xmm1, %xmm0
// movss <1.0f>, %xmm0
// movaps %xmm0, %xmm1
// jne .LBB5_2
// xorps %xmm1, %xmm1
// .LBB5_2:
// jp .LBB5_4
// movaps %xmm1, %xmm0
// .LBB5_4:
// retq
//
// because this custom-inserter would have generated:
//
//     A
//     | \
//     |  B
//     | /
//     C
//     | \
//     |  D
//     | /
//     E
//
// A: X = ...; Y = ...
// B: empty
// C: Z = PHI [X, A], [Y, B]
// D: empty
// E: PHI [X, C], [Z, D]
//
// If we lower both CMOVs in a single step, we can instead generate:
//
//     A
//     | \
//     |  C
//     | /|
//     |/ |
//     |  |
//     |  D
//     | /
//     E
//
// A: X = ...; Y = ...
// D: empty
// E: PHI [X, A], [X, C], [Y, D]
//
// Which, in our sitofp/fcmp example, gives us something like:
//
// ucomiss %xmm1, %xmm0
// movss <1.0f>, %xmm0
// jne .LBB5_4
// jp .LBB5_4
// xorps %xmm0, %xmm0
// .LBB5_4:
// retq
//
// We lower cascaded CMOV into two successive branches to the same block.
// EFLAGS is used by both, so mark it as live in the second.
const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
MachineFunction *F = ThisMBB->getParent();
MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++ThisMBB->getIterator();
F->insert(It, FirstInsertedMBB);
F->insert(It, SecondInsertedMBB);
F->insert(It, SinkMBB);
// For a cascaded CMOV, we lower it to two successive branches to
// the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
// the FirstInsertedMBB.
FirstInsertedMBB->addLiveIn(X86::EFLAGS);
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
!checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
SecondInsertedMBB->addLiveIn(X86::EFLAGS);
SinkMBB->addLiveIn(X86::EFLAGS);
}
// Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
SinkMBB->splice(SinkMBB->begin(), ThisMBB,
std::next(MachineBasicBlock::iterator(FirstCMOV)),
ThisMBB->end());
SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
// Fallthrough block for ThisMBB.
ThisMBB->addSuccessor(FirstInsertedMBB);
// The true block target of the first branch is always SinkMBB.
ThisMBB->addSuccessor(SinkMBB);
// Fallthrough block for FirstInsertedMBB.
FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
// The true block for the branch of FirstInsertedMBB.
FirstInsertedMBB->addSuccessor(SinkMBB);
// This is fallthrough.
SecondInsertedMBB->addSuccessor(SinkMBB);
// Create the conditional branch instructions.
X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
unsigned Opc = X86::GetCondBranchFromCond(FirstCC);
BuildMI(ThisMBB, DL, TII->get(Opc)).addMBB(SinkMBB);
X86::CondCode SecondCC =
X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
unsigned Opc2 = X86::GetCondBranchFromCond(SecondCC);
BuildMI(FirstInsertedMBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
// SinkMBB:
// %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
unsigned DestReg = FirstCMOV.getOperand(0).getReg();
unsigned Op1Reg = FirstCMOV.getOperand(1).getReg();
unsigned Op2Reg = FirstCMOV.getOperand(2).getReg();
MachineInstrBuilder MIB =
BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
.addReg(Op1Reg)
.addMBB(SecondInsertedMBB)
.addReg(Op2Reg)
.addMBB(ThisMBB);
// SecondInsertedMBB provides the same incoming value as FirstInsertedMBB
// (the True operand of the SELECT_CC/CMOV nodes).
MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
// Copy the PHI result to the register defined by the second CMOV.
BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
TII->get(TargetOpcode::COPY),
SecondCascadedCMOV.getOperand(0).getReg())
.addReg(FirstCMOV.getOperand(0).getReg());
// Now remove the CMOVs.
FirstCMOV.eraseFromParent();
SecondCascadedCMOV.eraseFromParent();
return SinkMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
MachineBasicBlock *ThisMBB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
// true/false values to select between and a branch opcode to use.
// ThisMBB:
// ...
// TrueVal = ...
// cmpTY ccX, r1, r2
// bCC copy1MBB
// fallthrough --> FalseMBB
// This code lowers all pseudo-CMOV instructions. Generally it lowers these
// as described above, by inserting a BB, and then making a PHI at the join
// point to select the true and false operands of the CMOV in the PHI.
//
// The code also handles two different cases of multiple CMOV opcodes
// in a row.
//
// Case 1:
// In this case, there are multiple CMOVs in a row, all of which are based on
// the same condition setting (or the exact opposite condition setting).
// In this case we can lower all the CMOVs using a single inserted BB, and
// then make a number of PHIs at the join point to model the CMOVs. The only
// trickiness here is that in a case like:
//
// t2 = CMOV cond1 t1, f1
// t3 = CMOV cond1 t2, f2
//
// when rewriting this into PHIs, we have to perform some renaming on the
// temps since you cannot have a PHI operand refer to a PHI result earlier
// in the same block. The "simple" but wrong lowering would be:
//
// t2 = PHI t1(BB1), f1(BB2)
// t3 = PHI t2(BB1), f2(BB2)
//
// but clearly t2 is not defined in BB1, so that is incorrect. The proper
// renaming is to note that on the path through BB1, t2 is really just a
// copy of t1, and do that renaming, properly generating:
//
// t2 = PHI t1(BB1), f1(BB2)
// t3 = PHI t1(BB1), f2(BB2)
//
// Case 2:
// CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
// function - EmitLoweredCascadedSelect.
X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
MachineInstr *LastCMOV = &MI;
MachineBasicBlock::iterator NextMIIt =
std::next(MachineBasicBlock::iterator(MI));
// Check for case 1, where there are multiple CMOVs with the same condition
// first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
// number of jumps the most.
if (isCMOVPseudo(MI)) {
// See if we have a string of CMOVS with the same condition.
while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
(NextMIIt->getOperand(3).getImm() == CC ||
NextMIIt->getOperand(3).getImm() == OppCC)) {
LastCMOV = &*NextMIIt;
++NextMIIt;
}
}
// This checks for case 2, but we only do this if we didn't already find
// case 1, as indicated by LastCMOV == &MI.
if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
NextMIIt->getOpcode() == MI.getOpcode() &&
NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
NextMIIt->getOperand(1).isKill()) {
return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
}
const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
MachineFunction *F = ThisMBB->getParent();
MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++ThisMBB->getIterator();
F->insert(It, FalseMBB);
F->insert(It, SinkMBB);
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
if (!LastCMOV->killsRegister(X86::EFLAGS) &&
!checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
FalseMBB->addLiveIn(X86::EFLAGS);
SinkMBB->addLiveIn(X86::EFLAGS);
}
// Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
SinkMBB->splice(SinkMBB->begin(), ThisMBB,
std::next(MachineBasicBlock::iterator(LastCMOV)),
ThisMBB->end());
SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
// Fallthrough block for ThisMBB.
ThisMBB->addSuccessor(FalseMBB);
// The true block target of the first (or only) branch is always SinkMBB.
ThisMBB->addSuccessor(SinkMBB);
// Fallthrough block for FalseMBB.
FalseMBB->addSuccessor(SinkMBB);
// Create the conditional branch instruction.
unsigned Opc = X86::GetCondBranchFromCond(CC);
BuildMI(ThisMBB, DL, TII->get(Opc)).addMBB(SinkMBB);
// SinkMBB:
// %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
// ...
MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
MachineBasicBlock::iterator MIItEnd =
std::next(MachineBasicBlock::iterator(LastCMOV));
createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
// Now remove the CMOV(s).
ThisMBB->erase(MIItBegin, MIItEnd);
return SinkMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI,
MachineBasicBlock *BB) const {
// Combine the following atomic floating-point modification pattern:
// a.store(reg OP a.load(acquire), release)
// Transform them into:
// OPss (%gpr), %xmm
// movss %xmm, (%gpr)
// Or sd equivalent for 64-bit operations.
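// For example (illustrative), X86::RELEASE_FADD32mr is expanded below into
// roughly:
//   addss (%mem), %xmmTmp   ; FOp with the load folded into it
//   movss %xmmTmp, (%mem)   ; MOp, the releasing store
// where %xmmTmp is a fresh virtual register in VSrc's register class.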
unsigned MOp, FOp;
switch (MI.getOpcode()) {
default: llvm_unreachable("unexpected instr type for EmitLoweredAtomicFP");
case X86::RELEASE_FADD32mr:
FOp = X86::ADDSSrm;
MOp = X86::MOVSSmr;
break;
case X86::RELEASE_FADD64mr:
FOp = X86::ADDSDrm;
MOp = X86::MOVSDmr;
break;
}
const X86InstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
unsigned ValOpIdx = X86::AddrNumOperands;
unsigned VSrc = MI.getOperand(ValOpIdx).getReg();
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(FOp),
MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
.addReg(VSrc);
for (int i = 0; i < X86::AddrNumOperands; ++i) {
MachineOperand &Operand = MI.getOperand(i);
// Clear any kill flags on register operands as we'll create a second
// instruction using the same address operands.
if (Operand.isReg())
Operand.setIsKill(false);
MIB.add(Operand);
}
MachineInstr *FOpMI = MIB;
MIB = BuildMI(*BB, MI, DL, TII->get(MOp));
for (int i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI.getOperand(i));
MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill);
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
assert(MF->shouldSplitStack());
const bool Is64Bit = Subtarget.is64Bit();
const bool IsLP64 = Subtarget.isTarget64BitLP64();
const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
// BB:
// ... [Till the alloca]
// If stacklet is not large enough, jump to mallocMBB
//
// bumpMBB:
// Allocate by subtracting from RSP
// Jump to continueMBB
//
// mallocMBB:
// Allocate by call to runtime
//
// continueMBB:
// ...
// [rest of original BB]
//
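// Illustrative sequence emitted into BB below for the LP64 case:
//   %tmpSP    = COPY %rsp
//   %SPLimit  = SUB64rr %tmpSP, %size
//   CMP64mr %fs:0x70, %SPLimit
//   JG mallocMBB            ; the current stacklet is too small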
MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineRegisterInfo &MRI = MF->getRegInfo();
const TargetRegisterClass *AddrRegClass =
getRegClassFor(getPointerTy(MF->getDataLayout()));
unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
sizeVReg = MI.getOperand(1).getReg(),
physSPReg =
IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
MachineFunction::iterator MBBIter = ++BB->getIterator();
MF->insert(MBBIter, bumpMBB);
MF->insert(MBBIter, mallocMBB);
MF->insert(MBBIter, continueMBB);
continueMBB->splice(continueMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
continueMBB->transferSuccessorsAndUpdatePHIs(BB);
// Add code to the main basic block to check if the stack limit has been hit,
// and if so, jump to mallocMBB otherwise to bumpMBB.
BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
.addReg(tmpSPVReg).addReg(sizeVReg);
BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
.addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
.addReg(SPLimitVReg);
BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
// bumpMBB simply decreases the stack pointer, since we know the current
// stacklet has enough space.
BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
.addReg(SPLimitVReg);
BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
.addReg(SPLimitVReg);
BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
// Calls into a routine in libgcc to allocate more space from the heap.
const uint32_t *RegMask =
Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
if (IsLP64) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
.addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
.addExternalSymbol("__morestack_allocate_stack_space")
.addRegMask(RegMask)
.addReg(X86::RDI, RegState::Implicit)
.addReg(X86::RAX, RegState::ImplicitDefine);
} else if (Is64Bit) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
.addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
.addExternalSymbol("__morestack_allocate_stack_space")
.addRegMask(RegMask)
.addReg(X86::EDI, RegState::Implicit)
.addReg(X86::EAX, RegState::ImplicitDefine);
} else {
BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
.addImm(12);
BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
.addExternalSymbol("__morestack_allocate_stack_space")
.addRegMask(RegMask)
.addReg(X86::EAX, RegState::ImplicitDefine);
}
if (!Is64Bit)
BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
.addImm(16);
BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
.addReg(IsLP64 ? X86::RAX : X86::EAX);
BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
// Set up the CFG correctly.
BB->addSuccessor(bumpMBB);
BB->addSuccessor(mallocMBB);
mallocMBB->addSuccessor(continueMBB);
bumpMBB->addSuccessor(continueMBB);
// Take care of the PHI nodes.
BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
MI.getOperand(0).getReg())
.addReg(mallocPtrVReg)
.addMBB(mallocMBB)
.addReg(bumpSPPtrVReg)
.addMBB(bumpMBB);
// Delete the original pseudo instruction.
MI.eraseFromParent();
// And we're done.
return continueMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
DebugLoc DL = MI.getDebugLoc();
assert(!isAsynchronousEHPersonality(
classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
"SEH does not use catchret!");
// Only 32-bit EH needs to worry about manually restoring stack pointers.
if (!Subtarget.is32Bit())
return BB;
// C++ EH creates a new target block to hold the restore code, and wires up
// the new block to the return destination with a normal JMP_4.
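// Sketch of the result: the CATCHRET is retargeted to RestoreMBB, which
// contains EH_RESTORE followed by JMP_4 <original target>.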
MachineBasicBlock *RestoreMBB =
MF->CreateMachineBasicBlock(BB->getBasicBlock());
assert(BB->succ_size() == 1);
MF->insert(std::next(BB->getIterator()), RestoreMBB);
RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(RestoreMBB);
MI.getOperand(0).setMBB(RestoreMBB);
auto RestoreMBBI = RestoreMBB->begin();
BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const Constant *PerFn = MF->getFunction().getPersonalityFn();
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
// Only 32-bit SEH requires special handling for catchpad.
if (IsSEH && Subtarget.is32Bit()) {
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
}
MI.eraseFromParent();
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
MachineBasicBlock *BB) const {
// So, here we replace TLSADDR with the sequence:
// adjust_stackdown -> TLSADDR -> adjust_stackup.
// We need this because TLSADDR is lowered into calls inside MC; without the
// two markers, shrink-wrapping may push the prologue/epilogue past them.
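// Illustrative result (sketch):
//   <call frame setup, 0>    ; CALLSEQ_START marker
//   TLSADDR ...              ; the original instruction, left in place
//   <call frame destroy, 0>  ; CALLSEQ_END marker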
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
MachineFunction &MF = *BB->getParent();
// Emit CALLSEQ_START right before the instruction.
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
MachineInstrBuilder CallseqStart =
BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
// Emit CALLSEQ_END right after the instruction.
// We don't call erase from parent because we want to keep the
// original instruction around.
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
MachineInstrBuilder CallseqEnd =
BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
MachineBasicBlock *BB) const {
// This is pretty easy. We're taking the value that we received from
// our load from the relocation, sticking it in either RDI (x86-64)
// or EAX and doing an indirect call. The return value will then
// be in the normal return register.
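// Illustrative 64-bit expansion (sketch):
//   movq sym(%rip), %rdi    ; load using the TLS target flag of the pseudo
//   callq *(%rdi)           ; indirect call; the result comes back in %rax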
MachineFunction *F = BB->getParent();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
assert(MI.getOperand(3).isGlobal() && "This should be a global");
// Get a register mask for the lowered call.
// FIXME: The 32-bit calls have non-standard calling conventions. Use a
// proper register mask.
const uint32_t *RegMask =
Subtarget.is64Bit() ?
Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
if (Subtarget.is64Bit()) {
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
MI.getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI);
MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
} else if (!isPositionIndependent()) {
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
.addReg(0)
.addImm(0)
.addReg(0)
.addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
MI.getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
} else {
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
.addReg(TII->getGlobalBaseReg(F))
.addImm(0)
.addReg(0)
.addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
MI.getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
}
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
switch (RPOpc) {
case X86::RETPOLINE_CALL32:
return X86::CALLpcrel32;
case X86::RETPOLINE_CALL64:
return X86::CALL64pcrel32;
case X86::RETPOLINE_TCRETURN32:
return X86::TCRETURNdi;
case X86::RETPOLINE_TCRETURN64:
return X86::TCRETURNdi64;
}
llvm_unreachable("not retpoline opcode");
}
static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
unsigned Reg) {
if (Subtarget.useRetpolineExternalThunk()) {
// When using an external thunk for retpolines, we pick names that match the
// names GCC happens to use as well. This helps simplify the implementation
// of the thunks for kernels where they have no easy ability to create
// aliases and are doing non-trivial configuration of the thunk's body. For
// example, the Linux kernel will do boot-time hot patching of the thunk
// bodies and cannot easily export aliases of these to loaded modules.
//
// Note that at any point in the future, we may need to change the semantics
// of how we implement retpolines and at that time will likely change the
// name of the called thunk. Essentially, there is no hard guarantee that
// LLVM will generate calls to specific thunks; we merely make a best-effort
// attempt to help out kernels and other systems where duplicating the
// thunks is costly.
switch (Reg) {
case X86::EAX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_eax";
case X86::ECX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_ecx";
case X86::EDX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_edx";
case X86::EDI:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_edi";
case X86::R11:
assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
return "__x86_indirect_thunk_r11";
}
llvm_unreachable("unexpected reg for retpoline");
}
// When targeting an internal COMDAT thunk use an LLVM-specific name.
switch (Reg) {
case X86::EAX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_eax";
case X86::ECX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_ecx";
case X86::EDX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_edx";
case X86::EDI:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_edi";
case X86::R11:
assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
return "__llvm_retpoline_r11";
}
llvm_unreachable("unexpected reg for retpoline");
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
MachineBasicBlock *BB) const {
// Copy the virtual register into the R11 physical register and
// call the retpoline thunk.
DebugLoc DL = MI.getDebugLoc();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
unsigned CalleeVReg = MI.getOperand(0).getReg();
unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());
// Find an available scratch register to hold the callee. On 64-bit, we can
// just use R11, but we scan for uses anyway to ensure we don't generate
// incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
// already a register use operand to the call to hold the callee. If none
// are available, use EDI instead. EDI is chosen because EBX is the PIC base
// register and ESI is the base pointer to realigned stack frames with VLAs.
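// Illustrative 64-bit result (sketch):
//   %r11 = COPY %CalleeVReg
//   callq __llvm_retpoline_r11   ; or __x86_indirect_thunk_r11 when an
//                                ; external thunk is requested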
SmallVector<unsigned, 3> AvailableRegs;
if (Subtarget.is64Bit())
AvailableRegs.push_back(X86::R11);
else
AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
// Zero out any registers that are already used.
for (const auto &MO : MI.operands()) {
if (MO.isReg() && MO.isUse())
for (unsigned &Reg : AvailableRegs)
if (Reg == MO.getReg())
Reg = 0;
}
// Choose the first remaining non-zero available register.
unsigned AvailableReg = 0;
for (unsigned MaybeReg : AvailableRegs) {
if (MaybeReg) {
AvailableReg = MaybeReg;
break;
}
}
if (!AvailableReg)
report_fatal_error("calling convention incompatible with retpoline, no "
"available registers");
const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
.addReg(CalleeVReg);
MI.getOperand(0).ChangeToES(Symbol);
MI.setDesc(TII->get(Opc));
MachineInstrBuilder(*BB->getParent(), &MI)
.addReg(AvailableReg, RegState::Implicit | RegState::Kill);
return BB;
}
/// SetJmp implies future control flow change upon calling the corresponding
/// LongJmp.
/// Instead of using the 'return' instruction, the long jump fixes the stack and
/// performs an indirect branch. To do so it uses the registers that were stored
/// in the jump buffer (when calling SetJmp).
/// In case the shadow stack is enabled we need to fix it as well, because some
/// return addresses will be skipped.
/// The function will save the SSP for future fixing in the function
/// emitLongJmpShadowStackFix.
/// \sa emitLongJmpShadowStackFix
/// \param [in] MI The temporary Machine Instruction for the builtin.
/// \param [in] MBB The Machine Basic Block that will be modified.
void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
MachineInstrBuilder MIB;
// Memory Reference.
SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
MI.memoperands_end());
// Initialize a register with zero.
MVT PVT = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
unsigned ZReg = MRI.createVirtualRegister(PtrRC);
unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
.addDef(ZReg)
.addReg(ZReg, RegState::Undef)
.addReg(ZReg, RegState::Undef);
// Read the current SSP Register value to the zeroed register.
unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
// Write the SSP register value to offset 3 in input memory buffer.
unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
const int64_t SSPOffset = 3 * PVT.getStoreSize();
const unsigned MemOpndSlot = 1;
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
else
MIB.add(MI.getOperand(MemOpndSlot + i));
}
MIB.addReg(SSPCopyReg);
MIB.setMemRefs(MMOs);
}
MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
// Memory Reference
SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
MI.memoperands_end());
unsigned DstReg;
unsigned MemOpndSlot = 0;
unsigned CurOp = 0;
DstReg = MI.getOperand(CurOp++).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
(void)TRI;
unsigned mainDstReg = MRI.createVirtualRegister(RC);
unsigned restoreDstReg = MRI.createVirtualRegister(RC);
MemOpndSlot = CurOp;
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
// For v = setjmp(buf), we generate
//
// thisMBB:
// buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
// SjLjSetup restoreMBB
//
// mainMBB:
// v_main = 0
//
// sinkMBB:
// v = phi(main, restore)
//
// restoreMBB:
// if base pointer being used, load it from frame
// v_restore = 1
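// Buffer layout assumed by this lowering and by emitEHSjLjLongJmp /
// emitSetJmpShadowStackFix, in pointer-sized slots:
//   slot 0: FP, slot 1: IP (address of restoreMBB), slot 2: SP,
//   slot 3: SSP (only when shadow stacks are enabled).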
MachineBasicBlock *thisMBB = MBB;
MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(I, mainMBB);
MF->insert(I, sinkMBB);
MF->push_back(restoreMBB);
restoreMBB->setHasAddressTaken();
MachineInstrBuilder MIB;
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// thisMBB:
unsigned PtrStoreOpc = 0;
unsigned LabelReg = 0;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
!isPositionIndependent();
// Prepare IP either in reg or imm.
if (!UseImmLabel) {
PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
LabelReg = MRI.createVirtualRegister(PtrRC);
if (Subtarget.is64Bit()) {
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addMBB(restoreMBB)
.addReg(0);
} else {
const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
.addReg(XII->getGlobalBaseReg(MF))
.addImm(0)
.addReg(0)
.addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
.addReg(0);
}
} else
PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
// Store IP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
else
MIB.add(MI.getOperand(MemOpndSlot + i));
}
if (!UseImmLabel)
MIB.addReg(LabelReg);
else
MIB.addMBB(restoreMBB);
MIB.setMemRefs(MMOs);
if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
emitSetJmpShadowStackFix(MI, thisMBB);
}
// Setup
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
.addMBB(restoreMBB);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
MIB.addRegMask(RegInfo->getNoPreservedMask());
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(restoreMBB);
// mainMBB:
// EAX = 0
BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
mainMBB->addSuccessor(sinkMBB);
// sinkMBB:
BuildMI(*sinkMBB, sinkMBB->begin(), DL,
TII->get(X86::PHI), DstReg)
.addReg(mainDstReg).addMBB(mainMBB)
.addReg(restoreDstReg).addMBB(restoreMBB);
// restoreMBB:
if (RegInfo->hasBasePointer(*MF)) {
const bool Uses64BitFramePtr =
Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
X86FI->setRestoreBasePointer(MF);
unsigned FramePtr = RegInfo->getFrameRegister(*MF);
unsigned BasePtr = RegInfo->getBaseRegister();
unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
FramePtr, true, X86FI->getRestoreBasePointerOffset())
.setMIFlag(MachineInstr::FrameSetup);
}
BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
restoreMBB->addSuccessor(sinkMBB);
MI.eraseFromParent();
return sinkMBB;
}
/// Fix the shadow stack using the previously saved SSP pointer.
/// \sa emitSetJmpShadowStackFix
/// \param [in] MI The temporary Machine Instruction for the builtin.
/// \param [in] MBB The Machine Basic Block that will be modified.
/// \return The sink MBB that will perform the future indirect branch.
MachineBasicBlock *
X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
MI.memoperands_end());
MVT PVT = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
// checkSspMBB:
// xor vreg1, vreg1
// rdssp vreg1
// test vreg1, vreg1
// je sinkMBB # Jump if Shadow Stack is not supported
// fallMBB:
// mov buf+24/12(%rip), vreg2
// sub vreg1, vreg2
// jbe sinkMBB # No need to fix the Shadow Stack
// fixShadowMBB:
// shr 3/2, vreg2
// incssp vreg2 # fix the SSP according to the lower 8 bits
// shr 8, vreg2
// je sinkMBB
// fixShadowLoopPrepareMBB:
// shl vreg2
// mov 128, vreg3
// fixShadowLoopMBB:
// incssp vreg3
// dec vreg2
// jne fixShadowLoopMBB # Iterate until you finish fixing
// # the Shadow Stack
// sinkMBB:
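// Worked example (illustrative, 64-bit): a delta of 0x1100 bytes is
// 0x1100 >> 3 = 0x220 slots. The first incssp consumes the low 8 bits
// (0x20 slots); the remaining 0x200 slots are handled by the loop, which
// runs (0x2 << 1) = 4 iterations of "incssp 128".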
MachineFunction::iterator I = ++MBB->getIterator();
const BasicBlock *BB = MBB->getBasicBlock();
MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(I, checkSspMBB);
MF->insert(I, fallMBB);
MF->insert(I, fixShadowMBB);
MF->insert(I, fixShadowLoopPrepareMBB);
MF->insert(I, fixShadowLoopMBB);
MF->insert(I, sinkMBB);
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
MBB->addSuccessor(checkSspMBB);
// Initialize a register with zero.
unsigned ZReg = MRI.createVirtualRegister(PtrRC);
unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
.addDef(ZReg)
.addReg(ZReg, RegState::Undef)
.addReg(ZReg, RegState::Undef);
// Read the current SSP Register value to the zeroed register.
unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
// Check whether the value read from the SSP register is zero and, if so,
// jump directly to the sink.
unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
.addReg(SSPCopyReg)
.addReg(SSPCopyReg);
BuildMI(checkSspMBB, DL, TII->get(X86::JE_1)).addMBB(sinkMBB);
checkSspMBB->addSuccessor(sinkMBB);
checkSspMBB->addSuccessor(fallMBB);
// Reload the previously saved SSP register value.
unsigned PrevSSPReg = MRI.createVirtualRegister(PtrRC);
unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
const int64_t SPPOffset = 3 * PVT.getStoreSize();
MachineInstrBuilder MIB =
BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (i == X86::AddrDisp)
MIB.addDisp(MO, SPPOffset);
else if (MO.isReg()) // Don't add the whole operand, we don't want to
// preserve kill flags.
MIB.addReg(MO.getReg());
else
MIB.add(MO);
}
MIB.setMemRefs(MMOs);
// Subtract the current SSP from the previous SSP.
unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
.addReg(PrevSSPReg)
.addReg(SSPCopyReg);
// Jump to sink in case PrevSSPReg <= SSPCopyReg.
BuildMI(fallMBB, DL, TII->get(X86::JBE_1)).addMBB(sinkMBB);
fallMBB->addSuccessor(sinkMBB);
fallMBB->addSuccessor(fixShadowMBB);
// Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
unsigned SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
.addReg(SspSubReg)
.addImm(Offset);
// Increase the SSP, using only the lower 8 bits of the delta.
unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
// Reset the lower 8 bits.
unsigned SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
.addReg(SspFirstShrReg)
.addImm(8);
// Jump if the result of the shift is zero.
BuildMI(fixShadowMBB, DL, TII->get(X86::JE_1)).addMBB(sinkMBB);
fixShadowMBB->addSuccessor(sinkMBB);
fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
// Do a single shift left.
unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
unsigned SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
.addReg(SspSecondShrReg);
// Save the value 128 to a register (will be used next with incssp).
unsigned Value128InReg = MRI.createVirtualRegister(PtrRC);
unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
.addImm(128);
fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
// Since incssp only looks at the lower 8 bits, we might need to do several
// iterations of incssp until we finish fixing the shadow stack.
unsigned DecReg = MRI.createVirtualRegister(PtrRC);
unsigned CounterReg = MRI.createVirtualRegister(PtrRC);
BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
.addReg(SspAfterShlReg)
.addMBB(fixShadowLoopPrepareMBB)
.addReg(DecReg)
.addMBB(fixShadowLoopMBB);
// Every iteration we increase the SSP by 128.
BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
// Every iteration we decrement the counter by 1.
unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
// Jump if the counter is not zero yet.
BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JNE_1)).addMBB(fixShadowLoopMBB);
fixShadowLoopMBB->addSuccessor(sinkMBB);
fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
return sinkMBB;
}
MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
MI.memoperands_end());
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
const TargetRegisterClass *RC =
(PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as GPR.
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
unsigned SP = RegInfo->getStackRegister();
MachineInstrBuilder MIB;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
const int64_t SPOffset = 2 * PVT.getStoreSize();
unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
MachineBasicBlock *thisMBB = MBB;
// When CET and the shadow stack are enabled, we need to fix the Shadow Stack.
if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
}
// Reload FP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (MO.isReg()) // Don't add the whole operand, we don't want to
// preserve kill flags.
MIB.addReg(MO.getReg());
else
MIB.add(MO);
}
MIB.setMemRefs(MMOs);
// Reload IP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (i == X86::AddrDisp)
MIB.addDisp(MO, LabelOffset);
else if (MO.isReg()) // Don't add the whole operand, we don't want to
// preserve kill flags.
MIB.addReg(MO.getReg());
else
MIB.add(MO);
}
MIB.setMemRefs(MMOs);
// Reload SP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
MIB.addDisp(MI.getOperand(i), SPOffset);
else
MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
// the last instruction of the expansion.
}
MIB.setMemRefs(MMOs);
// Jump
BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
MI.eraseFromParent();
return thisMBB;
}
void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
MachineBasicBlock *MBB,
MachineBasicBlock *DispatchBB,
int FI) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
unsigned Op = 0;
unsigned VR = 0;
bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
!isPositionIndependent();
if (UseImmLabel) {
Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
} else {
const TargetRegisterClass *TRC =
(PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
VR = MRI->createVirtualRegister(TRC);
Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
if (Subtarget.is64Bit())
BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
.addReg(X86::RIP)
.addImm(1)
.addReg(0)
.addMBB(DispatchBB)
.addReg(0);
else
BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
.addReg(0) /* TII->getGlobalBaseReg(MF) */
.addImm(1)
.addReg(0)
.addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
.addReg(0);
}
MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
if (UseImmLabel)
MIB.addMBB(DispatchBB);
else
MIB.addReg(VR);
}
MachineBasicBlock *
X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
MachineBasicBlock *BB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = BB->getParent();
MachineFrameInfo &MFI = MF->getFrameInfo();
MachineRegisterInfo *MRI = &MF->getRegInfo();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
int FI = MFI.getFunctionContextIndex();
// Get a mapping of the call site numbers to all of the landing pads they're
// associated with.
DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
unsigned MaxCSNum = 0;
for (auto &MBB : *MF) {
if (!MBB.isEHPad())
continue;
MCSymbol *Sym = nullptr;
for (const auto &MI : MBB) {
if (MI.isDebugInstr())
continue;
assert(MI.isEHLabel() && "expected EH_LABEL");
Sym = MI.getOperand(0).getMCSymbol();
break;
}
if (!MF->hasCallSiteLandingPad(Sym))
continue;
for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
CallSiteNumToLPad[CSI].push_back(&MBB);
MaxCSNum = std::max(MaxCSNum, CSI);
}
}
// Get an ordered list of the machine basic blocks for the jump table.
std::vector<MachineBasicBlock *> LPadList;
SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
LPadList.reserve(CallSiteNumToLPad.size());
for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
for (auto &LP : CallSiteNumToLPad[CSI]) {
LPadList.push_back(LP);
InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
}
}
assert(!LPadList.empty() &&
"No landing pad destinations for the dispatch jump table!");
// Create the MBBs for the dispatch code.
// Shove the dispatch's address into the return slot in the function context.
MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
DispatchBB->setIsEHPad(true);
MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
BuildMI(TrapBB, DL, TII->get(X86::TRAP));
DispatchBB->addSuccessor(TrapBB);
MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
DispatchBB->addSuccessor(DispContBB);
// Insert MBBs.
MF->push_back(DispatchBB);
MF->push_back(DispContBB);
MF->push_back(TrapBB);
// Insert code into the entry block that creates and registers the function
// context.
SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
// Create the jump table and associated information
unsigned JTE = getJumpTableEncoding();
MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
unsigned MJTI = JTI->createJumpTableIndex(LPadList);
const X86RegisterInfo &RI = TII->getRegisterInfo();
// Add a register mask with no preserved registers. This results in all
// registers being marked as clobbered.
if (RI.hasBasePointer(*MF)) {
const bool FPIs64Bit =
Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
MFI->setRestoreBasePointer(MF);
unsigned FP = RI.getFrameRegister(*MF);
unsigned BP = RI.getBaseRegister();
unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
MFI->getRestoreBasePointerOffset())
.addRegMask(RI.getNoPreservedMask());
} else {
BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
.addRegMask(RI.getNoPreservedMask());
}
// IReg is used as an index in a memory operand and therefore can't be SP
unsigned IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
Subtarget.is64Bit() ? 8 : 4);
BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
.addReg(IReg)
.addImm(LPadList.size());
BuildMI(DispatchBB, DL, TII->get(X86::JAE_1)).addMBB(TrapBB);
if (Subtarget.is64Bit()) {
unsigned BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
unsigned IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
// leaq .LJTI0_0(%rip), BReg
BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
.addReg(X86::RIP)
.addImm(1)
.addReg(0)
.addJumpTableIndex(MJTI)
.addReg(0);
// movzx IReg64, IReg
BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
.addImm(0)
.addReg(IReg)
.addImm(X86::sub_32bit);
switch (JTE) {
case MachineJumpTableInfo::EK_BlockAddress:
// jmpq *(BReg,IReg64,8)
BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
.addReg(BReg)
.addImm(8)
.addReg(IReg64)
.addImm(0)
.addReg(0);
break;
case MachineJumpTableInfo::EK_LabelDifference32: {
unsigned OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
unsigned OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
unsigned TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
// movl (BReg,IReg64,4), OReg
BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
.addReg(BReg)
.addImm(4)
.addReg(IReg64)
.addImm(0)
.addReg(0);
// movsx OReg64, OReg
BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
// addq BReg, OReg64, TReg
BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
.addReg(OReg64)
.addReg(BReg);
// jmpq *TReg
BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
break;
}
default:
llvm_unreachable("Unexpected jump table encoding");
}
} else {
// jmpl *.LJTI0_0(,IReg,4)
BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
.addReg(0)
.addImm(4)
.addReg(IReg)
.addJumpTableIndex(MJTI)
.addReg(0);
}
// Add the jump table entries as successors to the MBB.
SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
for (auto &LP : LPadList)
if (SeenMBBs.insert(LP).second)
DispContBB->addSuccessor(LP);
// N.B. the order the invoke BBs are processed in doesn't matter here.
SmallVector<MachineBasicBlock *, 64> MBBLPads;
const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
for (MachineBasicBlock *MBB : InvokeBBs) {
// Remove the landing pad successor from the invoke block and replace it
// with the new dispatch block.
// Keep a copy of Successors since it's modified inside the loop.
SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
MBB->succ_rend());
// FIXME: Avoid quadratic complexity.
for (auto MBBS : Successors) {
if (MBBS->isEHPad()) {
MBB->removeSuccessor(MBBS);
MBBLPads.push_back(MBBS);
}
}
MBB->addSuccessor(DispatchBB);
// Find the invoke call and mark all of the callee-saved registers as
// 'implicitly defined' so that they're spilled. This prevents instructions
// from being moved to before the EH block, where they would never be
// executed.
for (auto &II : reverse(*MBB)) {
if (!II.isCall())
continue;
DenseMap<unsigned, bool> DefRegs;
for (auto &MOp : II.operands())
if (MOp.isReg())
DefRegs[MOp.getReg()] = true;
MachineInstrBuilder MIB(*MF, &II);
for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
unsigned Reg = SavedRegs[RI];
if (!DefRegs[Reg])
MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
}
break;
}
}
// Mark all former landing pads as non-landing pads. The dispatch is the only
// landing pad now.
for (auto &LP : MBBLPads)
LP->setIsEHPad(false);
// The instruction is gone now.
MI.eraseFromParent();
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
switch (MI.getOpcode()) {
default: llvm_unreachable("Unexpected instr type to insert");
case X86::TLS_addr32:
case X86::TLS_addr64:
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
return EmitLoweredTLSAddr(MI, BB);
case X86::RETPOLINE_CALL32:
case X86::RETPOLINE_CALL64:
case X86::RETPOLINE_TCRETURN32:
case X86::RETPOLINE_TCRETURN64:
return EmitLoweredRetpoline(MI, BB);
case X86::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
case X86::CATCHPAD:
return EmitLoweredCatchPad(MI, BB);
case X86::SEG_ALLOCA_32:
case X86::SEG_ALLOCA_64:
return EmitLoweredSegAlloca(MI, BB);
case X86::TLSCall_32:
case X86::TLSCall_64:
return EmitLoweredTLSCall(MI, BB);
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_GR8:
case X86::CMOV_GR16:
case X86::CMOV_GR32:
case X86::CMOV_RFP32:
case X86::CMOV_RFP64:
case X86::CMOV_RFP80:
case X86::CMOV_VR128:
case X86::CMOV_VR128X:
case X86::CMOV_VR256:
case X86::CMOV_VR256X:
case X86::CMOV_VR512:
case X86::CMOV_VK2:
case X86::CMOV_VK4:
case X86::CMOV_VK8:
case X86::CMOV_VK16:
case X86::CMOV_VK32:
case X86::CMOV_VK64:
return EmitLoweredSelect(MI, BB);
case X86::RDFLAGS32:
case X86::RDFLAGS64: {
unsigned PushF =
MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
// Permit reads of the EFLAGS and DF registers without them being defined.
// This intrinsic exists to read external processor state in flags, such as
// the trap flag, interrupt flag, and direction flag, none of which are
// modeled by the backend.
assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
"Unexpected register in operand!");
Push->getOperand(2).setIsUndef();
assert(Push->getOperand(3).getReg() == X86::DF &&
"Unexpected register in operand!");
Push->getOperand(3).setIsUndef();
BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
case X86::WRFLAGS32:
case X86::WRFLAGS64: {
unsigned Push =
MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
unsigned PopF =
MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
BuildMI(*BB, MI, DL, TII->get(PopF));
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
case X86::RELEASE_FADD32mr:
case X86::RELEASE_FADD64mr:
return EmitLoweredAtomicFP(MI, BB);
case X86::FP32_TO_INT16_IN_MEM:
case X86::FP32_TO_INT32_IN_MEM:
case X86::FP32_TO_INT64_IN_MEM:
case X86::FP64_TO_INT16_IN_MEM:
case X86::FP64_TO_INT32_IN_MEM:
case X86::FP64_TO_INT64_IN_MEM:
case X86::FP80_TO_INT16_IN_MEM:
case X86::FP80_TO_INT32_IN_MEM:
case X86::FP80_TO_INT64_IN_MEM: {
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
int CWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FNSTCW16m)), CWFrameIdx);
// Load the old value of the high byte of the control word...
unsigned OldCW =
MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
CWFrameIdx);
// Set the high part to be round to zero...
addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
.addImm(0xC7F);
// Reload the modified control word now...
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FLDCW16m)), CWFrameIdx);
// Restore the memory image of control word to original value
addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
.addReg(OldCW);
// Get the X86 opcode to use.
unsigned Opc;
switch (MI.getOpcode()) {
default: llvm_unreachable("illegal opcode!");
case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
}
X86AddressMode AM = getAddressFromInstr(&MI, 0);
addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
.addReg(MI.getOperand(X86::AddrNumOperands).getReg());
// Reload the original control word now.
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FLDCW16m)), CWFrameIdx);
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
// Thread synchronization.
case X86::MONITOR:
return emitMonitor(MI, BB, Subtarget, X86::MONITORrrr);
case X86::MONITORX:
return emitMonitor(MI, BB, Subtarget, X86::MONITORXrrr);
// Cache line zero
case X86::CLZERO:
return emitClzero(&MI, BB, Subtarget);
// PKU feature
case X86::WRPKRU:
return emitWRPKRU(MI, BB, Subtarget);
case X86::RDPKRU:
return emitRDPKRU(MI, BB, Subtarget);
// xbegin
case X86::XBEGIN:
return emitXBegin(MI, BB, Subtarget.getInstrInfo());
case X86::VASTART_SAVE_XMM_REGS:
return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
case X86::VAARG_64:
return EmitVAARG64WithCustomInserter(MI, BB);
case X86::EH_SjLj_SetJmp32:
case X86::EH_SjLj_SetJmp64:
return emitEHSjLjSetJmp(MI, BB);
case X86::EH_SjLj_LongJmp32:
case X86::EH_SjLj_LongJmp64:
return emitEHSjLjLongJmp(MI, BB);
case X86::Int_eh_sjlj_setup_dispatch:
return EmitSjLjDispatchBlock(MI, BB);
case TargetOpcode::STATEPOINT:
// As an implementation detail, STATEPOINT shares the STACKMAP format at
// this point in the process. We diverge later.
return emitPatchPoint(MI, BB);
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, BB);
case TargetOpcode::PATCHABLE_EVENT_CALL:
return emitXRayCustomEvent(MI, BB);
case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
return emitXRayTypedEvent(MI, BB);
case X86::LCMPXCHG8B: {
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
// In addition to the four E[ABCD] registers implied by its encoding, CMPXCHG8B
// requires a memory operand. If the current architecture is i686 and the
// current function needs a base pointer - which is ESI on i686 - the register
// allocator would not be able to allocate registers for an address of the form
// X(%reg, %reg, Y): there would never be enough unreserved registers during
// regalloc (without the base pointer the only option would be X(%edi, %esi, Y)).
// We give the register allocator a hand by precomputing the address in a new
// vreg using LEA.
// If this is not i686 or there is no base pointer, there is nothing to do here.
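// Illustrative fix (sketch): an address of the form X(%reg, %reg, Y) is
// rewritten as
//   leal X(%reg, %reg, Y), %vreg
//   cmpxchg8b (%vreg)
// so the allocator only needs one register for the address.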
if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
return BB;
// Even though this code does not necessarily need the base pointer to be
// ESI, we check for it. The reason: if this assert fails, something has
// changed in the compiler's base pointer handling, which most probably has
// to be addressed here as well.
assert(TRI->getBaseRegister() == X86::ESI &&
"LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
"base pointer in mind");
MachineRegisterInfo &MRI = MF->getRegInfo();
MVT SPTy = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
unsigned computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
X86AddressMode AM = getAddressFromInstr(&MI, 0);
// Regalloc does not need any help when the memory operand of CMPXCHG8B
// does not use an index register.
if (AM.IndexReg == X86::NoRegister)
return BB;
// After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
// four operand definitions that are E[ABCD] registers. We skip them and
// then insert the LEA.
MachineBasicBlock::iterator MBBI(MI);
while (MBBI->definesRegister(X86::EAX) || MBBI->definesRegister(X86::EBX) ||
MBBI->definesRegister(X86::ECX) || MBBI->definesRegister(X86::EDX))
--MBBI;
addFullAddress(
BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
setDirectAddressInInstr(&MI, 0, computedAddrVReg);
return BB;
}
case X86::LCMPXCHG16B:
return BB;
case X86::LCMPXCHG8B_SAVE_EBX:
case X86::LCMPXCHG16B_SAVE_RBX: {
unsigned BasePtr =
MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
if (!BB->isLiveIn(BasePtr))
BB->addLiveIn(BasePtr);
return BB;
}
}
}
//===----------------------------------------------------------------------===//
// X86 Optimization Hooks
//===----------------------------------------------------------------------===//
bool
X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
const APInt &Demanded,
TargetLoweringOpt &TLO) const {
// Only optimize Ands to prevent shrinking a constant that could be
// matched by movzx.
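// For example (illustrative): if only the low 8 bits of the result are
// demanded, an AND mask of 0x1ff is replaced below with 0xff, which a
// movzbl can match.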
if (Op.getOpcode() != ISD::AND)
return false;
EVT VT = Op.getValueType();
// Ignore vectors.
if (VT.isVector())
return false;
unsigned Size = VT.getSizeInBits();
// Make sure the RHS really is a constant.
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
if (!C)
return false;
const APInt &Mask = C->getAPIntValue();
// Clear all non-demanded bits initially.
APInt ShrunkMask = Mask & Demanded;
// Find the width of the shrunk mask.
unsigned Width = ShrunkMask.getActiveBits();
// If the mask is all 0s there's nothing to do here.
if (Width == 0)
return false;
// Find the next power of 2 width, rounding up to a byte.
Width = PowerOf2Ceil(std::max(Width, 8U));
// Truncate the width to size to handle illegal types.
Width = std::min(Width, Size);
// Calculate a possible zero extend mask for this constant.
APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
// If we aren't changing the mask, just return true to keep it and prevent
// the caller from optimizing.
if (ZeroExtendMask == Mask)
return true;
// Make sure the new mask can be represented by a combination of mask bits
// and non-demanded bits.
if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
return false;
// Replace the constant with the zero extend mask.
SDLoc DL(Op);
SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
return TLO.CombineTo(Op, NewOp);
}
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
unsigned BitWidth = Known.getBitWidth();
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
assert((Opc >= ISD::BUILTIN_OP_END ||
Opc == ISD::INTRINSIC_WO_CHAIN ||
Opc == ISD::INTRINSIC_W_CHAIN ||
Opc == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
Known.resetAll();
switch (Opc) {
default: break;
case X86ISD::SETCC:
Known.Zero.setBitsFrom(1);
break;
case X86ISD::MOVMSK: {
unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
Known.Zero.setBitsFrom(NumLoBits);
break;
}
case X86ISD::PEXTRB:
case X86ISD::PEXTRW: {
SDValue Src = Op.getOperand(0);
EVT SrcVT = Src.getValueType();
APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
Op.getConstantOperandVal(1));
Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
Known = Known.zextOrTrunc(BitWidth);
Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
break;
}
case X86ISD::VSRAI:
case X86ISD::VSHLI:
case X86ISD::VSRLI: {
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
Known.setAllZero();
break;
}
Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
unsigned ShAmt = ShiftImm->getZExtValue();
if (Opc == X86ISD::VSHLI) {
Known.Zero <<= ShAmt;
Known.One <<= ShAmt;
// Low bits are known zero.
Known.Zero.setLowBits(ShAmt);
} else if (Opc == X86ISD::VSRLI) {
Known.Zero.lshrInPlace(ShAmt);
Known.One.lshrInPlace(ShAmt);
// High bits are known zero.
Known.Zero.setHighBits(ShAmt);
} else {
Known.Zero.ashrInPlace(ShAmt);
Known.One.ashrInPlace(ShAmt);
}
}
break;
}
case X86ISD::PACKUS: {
// PACKUS is just a truncation if the upper half is zero.
APInt DemandedLHS, DemandedRHS;
getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
Known.One = APInt::getAllOnesValue(BitWidth * 2);
Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
KnownBits Known2;
if (!!DemandedLHS) {
Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
}
if (!!DemandedRHS) {
Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
}
if (Known.countMinLeadingZeros() < BitWidth)
Known.resetAll();
Known = Known.trunc(BitWidth);
break;
}
case X86ISD::CMOV: {
Known = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
// If we don't know any bits, early out.
if (Known.isUnknown())
break;
KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
// Only known if known in both the LHS and RHS.
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
break;
}
}
// Handle target shuffles.
// TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
if (isTargetShuffle(Opc)) {
bool IsUnary;
SmallVector<int, 64> Mask;
SmallVector<SDValue, 2> Ops;
if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
IsUnary)) {
unsigned NumOps = Ops.size();
unsigned NumElts = VT.getVectorNumElements();
if (Mask.size() == NumElts) {
SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
Known.Zero.setAllBits(); Known.One.setAllBits();
for (unsigned i = 0; i != NumElts; ++i) {
if (!DemandedElts[i])
continue;
int M = Mask[i];
if (M == SM_SentinelUndef) {
// For UNDEF elements, we don't know anything about the common state
// of the shuffle result.
Known.resetAll();
break;
} else if (M == SM_SentinelZero) {
Known.One.clearAllBits();
continue;
}
assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
"Shuffle index out of range");
unsigned OpIdx = (unsigned)M / NumElts;
unsigned EltIdx = (unsigned)M % NumElts;
if (Ops[OpIdx].getValueType() != VT) {
// TODO - handle target shuffle ops with different value types.
Known.resetAll();
break;
}
DemandedOps[OpIdx].setBit(EltIdx);
}
// Known bits are the values that are shared by every demanded element.
for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
if (!DemandedOps[i])
continue;
KnownBits Known2 =
DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
}
}
}
}
}
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
unsigned Depth) const {
unsigned VTBits = Op.getScalarValueSizeInBits();
unsigned Opcode = Op.getOpcode();
switch (Opcode) {
case X86ISD::SETCC_CARRY:
// SETCC_CARRY sets the dest to ~0 for true or 0 for false.
return VTBits;
case X86ISD::VTRUNC: {
// TODO: Add DemandedElts support.
SDValue Src = Op.getOperand(0);
unsigned NumSrcBits = Src.getScalarValueSizeInBits();
assert(VTBits < NumSrcBits && "Illegal truncation input type");
unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
if (Tmp > (NumSrcBits - VTBits))
return Tmp - (NumSrcBits - VTBits);
return 1;
}
case X86ISD::PACKSS: {
// PACKSS is just a truncation if the sign bits extend to the packed size.
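// For example (illustrative): packing v4i32 sources into a v8i16 result has
// SrcBits = 32 and VTBits = 16, so if both sources are known to have at
// least 20 sign bits, the packed result has 20 - (32 - 16) = 4 sign bits.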
APInt DemandedLHS, DemandedRHS;
getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
DemandedRHS);
unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
if (!!DemandedLHS)
Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
if (!!DemandedRHS)
Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
unsigned Tmp = std::min(Tmp0, Tmp1);
if (Tmp > (SrcBits - VTBits))
return Tmp - (SrcBits - VTBits);
return 1;
}
case X86ISD::VSHLI: {
SDValue Src = Op.getOperand(0);
APInt ShiftVal = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue();
if (ShiftVal.uge(VTBits))
return VTBits; // Shifted all bits out --> zero.
unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
if (ShiftVal.uge(Tmp))
return 1; // Shifted all sign bits out --> unknown.
return Tmp - ShiftVal.getZExtValue();
}
case X86ISD::VSRAI: {
SDValue Src = Op.getOperand(0);
APInt ShiftVal = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue();
if (ShiftVal.uge(VTBits - 1))
return VTBits; // Sign splat.
unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
ShiftVal += Tmp;
return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
}
case X86ISD::PCMPGT:
case X86ISD::PCMPEQ:
case X86ISD::CMPP:
case X86ISD::VPCOM:
case X86ISD::VPCOMU:
// Vector compares return zero/all-bits result values.
return VTBits;
case X86ISD::CMOV: {
unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
if (Tmp0 == 1) return 1; // Early out.
unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
return std::min(Tmp0, Tmp1);
}
}
// Fallback case.
return 1;
}
SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
return N->getOperand(0);
return N;
}
// Attempt to match a combined shuffle mask against supported unary shuffle
// instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
bool AllowFloatDomain, bool AllowIntDomain,
SDValue &V1, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &SrcVT, MVT &DstVT) {
unsigned NumMaskElts = Mask.size();
unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
// Match against a VZEXT_MOVL vXi32 zero-extending instruction.
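// (VZEXT_MOVL keeps the low element and zeroes the remaining elements, so
// the combined mask must look like {0, Z/U, U, U, ...}.)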
if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
Shuffle = X86ISD::VZEXT_MOVL;
SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
return true;
}
// Match against a ZERO_EXTEND_VECTOR_INREG/VZEXT instruction.
// TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
(MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
unsigned MaxScale = 64 / MaskEltSize;
for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
bool Match = true;
unsigned NumDstElts = NumMaskElts / Scale;
for (unsigned i = 0; i != NumDstElts && Match; ++i) {
Match &= isUndefOrEqual(Mask[i * Scale], (int)i);
Match &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
}
if (Match) {
unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
MVT::getIntegerVT(MaskEltSize);
SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
if (SrcVT.getVectorNumElements() == NumDstElts)
Shuffle = unsigned(ISD::ZERO_EXTEND);
else
Shuffle = unsigned(ISD::ZERO_EXTEND_VECTOR_INREG);
DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
DstVT = MVT::getVectorVT(DstVT, NumDstElts);
return true;
}
}
}
// Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
isUndefOrEqual(Mask[0], 0) &&
isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
Shuffle = X86ISD::VZEXT_MOVL;
SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
return true;
}
// Check if we have SSE3, which will let us use MOVDDUP etc. The
// instructions are no slower than UNPCKLPD but have the option to
// fold the input operand into even an unaligned memory load.
if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
if (!Subtarget.hasAVX2() && isTargetShuffleEquivalent(Mask, {0, 0})) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v2f64;
return true;
}
if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v4f32;
return true;
}
}
if (MaskVT.is256BitVector() && AllowFloatDomain) {
assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v4f64;
return true;
}
if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v8f32;
return true;
}
if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v8f32;
return true;
}
}
if (MaskVT.is512BitVector() && AllowFloatDomain) {
assert(Subtarget.hasAVX512() &&
"AVX512 required for 512-bit vector shuffles");
if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v8f64;
return true;
}
if (isTargetShuffleEquivalent(
Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v16f32;
return true;
}
if (isTargetShuffleEquivalent(
Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v16f32;
return true;
}
}
// Attempt to match against broadcast-from-vector.
if (Subtarget.hasAVX2()) {
SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
SrcVT = DstVT = MaskVT;
Shuffle = X86ISD::VBROADCAST;
return true;
}
}
return false;
}
// Attempt to match a combined shuffle mask against supported unary immediate
// permute instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
const APInt &Zeroable,
bool AllowFloatDomain,
bool AllowIntDomain,
const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &ShuffleVT,
unsigned &PermuteImm) {
unsigned NumMaskElts = Mask.size();
unsigned InputSizeInBits = MaskVT.getSizeInBits();
unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
bool ContainsZeros =
llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
// Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
if (!ContainsZeros && MaskScalarSizeInBits == 64) {
// Check for lane crossing permutes.
if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
// PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
Shuffle = X86ISD::VPERMI;
ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
PermuteImm = getV4X86ShuffleImm(Mask);
return true;
}
if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
SmallVector<int, 4> RepeatedMask;
if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
Shuffle = X86ISD::VPERMI;
ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
PermuteImm = getV4X86ShuffleImm(RepeatedMask);
return true;
}
}
} else if (AllowFloatDomain && Subtarget.hasAVX()) {
// VPERMILPD can permute with a non-repeating shuffle.
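// For example (illustrative): a v4f64 mask of {1, 0, 3, 2} swaps the
// elements within each 128-bit lane and encodes as PermuteImm = 0b0101.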
Shuffle = X86ISD::VPERMILPI;
ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
PermuteImm = 0;
for (int i = 0, e = Mask.size(); i != e; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef)
continue;
assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
PermuteImm |= (M & 1) << i;
}
return true;
}
}
// Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
// AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
// had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
!ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
// Narrow the repeated mask to create 32-bit element permutes.
SmallVector<int, 4> WordMask = RepeatedMask;
if (MaskScalarSizeInBits == 64)
scaleShuffleMask<int>(2, RepeatedMask, WordMask);
Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
PermuteImm = getV4X86ShuffleImm(WordMask);
return true;
}
}
// Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
ArrayRef<int> LoMask(Mask.data() + 0, 4);
ArrayRef<int> HiMask(Mask.data() + 4, 4);
// PSHUFLW: permute lower 4 elements only.
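// e.g. reversing the low words with a mask of {3, 2, 1, 0, 4, 5, 6, 7}
// encodes as PermuteImm = 0x1B.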
if (isUndefOrInRange(LoMask, 0, 4) &&
isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
Shuffle = X86ISD::PSHUFLW;
ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
PermuteImm = getV4X86ShuffleImm(LoMask);
return true;
}
// PSHUFHW: permute upper 4 elements only.
if (isUndefOrInRange(HiMask, 4, 8) &&
isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
// Offset the HiMask so that we can create the shuffle immediate.
int OffsetHiMask[4];
for (int i = 0; i != 4; ++i)
OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
Shuffle = X86ISD::PSHUFHW;
ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
return true;
}
}
}
// Attempt to match against byte/bit shifts.
// FIXME: Add 512-bit support.
if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
int ShiftAmt = matchVectorShuffleAsShift(ShuffleVT, Shuffle,
MaskScalarSizeInBits, Mask,
0, Zeroable, Subtarget);
if (0 < ShiftAmt) {
PermuteImm = (unsigned)ShiftAmt;
return true;
}
}
return false;
}
// Attempt to match a combined unary shuffle mask against supported binary
// shuffle instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchBinaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
bool AllowFloatDomain, bool AllowIntDomain,
SDValue &V1, SDValue &V2, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
bool IsUnary) {
unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
if (MaskVT.is128BitVector()) {
if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
V2 = V1;
V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
V2 = V1;
Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
(AllowFloatDomain || !Subtarget.hasSSE41())) {
std::swap(V1, V2);
Shuffle = X86ISD::MOVSD;
SrcVT = DstVT = MVT::v2f64;
return true;
}
if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
(AllowFloatDomain || !Subtarget.hasSSE41())) {
Shuffle = X86ISD::MOVSS;
SrcVT = DstVT = MVT::v4f32;
return true;
}
}
// Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
Subtarget)) {
DstVT = MaskVT;
return true;
}
}
// Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
(MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
(MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
(MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
DAG, Subtarget)) {
SrcVT = DstVT = MaskVT;
if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
return true;
}
}
return false;
}
static bool matchBinaryPermuteVectorShuffle(
MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
unsigned NumMaskElts = Mask.size();
unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
// Attempt to match against PALIGNR byte rotate.
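// (PALIGNR concatenates the two sources within each 128-bit lane and
// extracts a byte-aligned window, so any byte rotation of a lane matches.)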
if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
int ByteRotation = matchVectorShuffleAsByteRotate(MaskVT, V1, V2, Mask);
if (0 < ByteRotation) {
Shuffle = X86ISD::PALIGNR;
ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
PermuteImm = ByteRotation;
return true;
}
}
// Attempt to combine to X86ISD::BLENDI.
if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
(Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
(MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
uint64_t BlendMask = 0;
bool ForceV1Zero = false, ForceV2Zero = false;
SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
if (matchVectorShuffleAsBlend(V1, V2, TargetMask, ForceV1Zero, ForceV2Zero,
BlendMask)) {
if (MaskVT == MVT::v16i16) {
// We can only use v16i16 PBLENDW if the lanes are repeated.
SmallVector<int, 8> RepeatedMask;
if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
RepeatedMask)) {
assert(RepeatedMask.size() == 8 &&
"Repeated mask size doesn't match!");
PermuteImm = 0;
for (int i = 0; i < 8; ++i)
if (RepeatedMask[i] >= 8)
PermuteImm |= 1 << i;
V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
Shuffle = X86ISD::BLENDI;
ShuffleVT = MaskVT;
return true;
}
} else {
// Determine a type compatible with X86ISD::BLENDI.
ShuffleVT = MaskVT;
if (Subtarget.hasAVX2()) {
if (ShuffleVT == MVT::v4i64)
ShuffleVT = MVT::v8i32;
else if (ShuffleVT == MVT::v2i64)
ShuffleVT = MVT::v4i32;
} else {
if (ShuffleVT == MVT::v2i64 || ShuffleVT == MVT::v4i32)
ShuffleVT = MVT::v8i16;
else if (ShuffleVT == MVT::v4i64)
ShuffleVT = MVT::v4f64;
else if (ShuffleVT == MVT::v8i32)
ShuffleVT = MVT::v8f32;
}
if (!ShuffleVT.isFloatingPoint()) {
int Scale = EltSizeInBits / ShuffleVT.getScalarSizeInBits();
BlendMask =
scaleVectorShuffleBlendMask(BlendMask, NumMaskElts, Scale);
ShuffleVT = MVT::getIntegerVT(EltSizeInBits / Scale);
ShuffleVT = MVT::getVectorVT(ShuffleVT, NumMaskElts * Scale);
}
V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
PermuteImm = (unsigned)BlendMask;
Shuffle = X86ISD::BLENDI;
return true;
}
}
}
// Attempt to combine to INSERTPS.
if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
MaskVT.is128BitVector()) {
if (Zeroable.getBoolValue() &&
matchVectorShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
Shuffle = X86ISD::INSERTPS;
ShuffleVT = MVT::v4f32;
return true;
}
}
// Attempt to combine to SHUFPD.
if (AllowFloatDomain && EltSizeInBits == 64 &&
((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
(MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
if (matchVectorShuffleWithSHUFPD(MaskVT, V1, V2, PermuteImm, Mask)) {
Shuffle = X86ISD::SHUFP;
ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
return true;
}
}
// Attempt to combine to SHUFPS.
if (AllowFloatDomain && EltSizeInBits == 32 &&
((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
(MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
SmallVector<int, 4> RepeatedMask;
if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
// Match each half of the repeated mask, to determine if it is just
// referencing one of the vectors, is zeroable, or is entirely undef.
auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
int M0 = RepeatedMask[Offset];
int M1 = RepeatedMask[Offset + 1];
if (isUndefInRange(RepeatedMask, Offset, 2)) {
return DAG.getUNDEF(MaskVT);
} else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
S0 = (SM_SentinelUndef == M0 ? -1 : 0);
S1 = (SM_SentinelUndef == M1 ? -1 : 1);
return getZeroVector(MaskVT, Subtarget, DAG, DL);
} else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
return V1;
} else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
return V2;
}
return SDValue();
};
int ShufMask[4] = {-1, -1, -1, -1};
SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
if (Lo && Hi) {
V1 = Lo;
V2 = Hi;
Shuffle = X86ISD::SHUFP;
ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
PermuteImm = getV4X86ShuffleImm(ShufMask);
return true;
}
}
}
return false;
}
/// Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
ArrayRef<int> BaseMask, int Depth,
bool HasVariableMask,
bool AllowVariableMask, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
assert((Inputs.size() == 1 || Inputs.size() == 2) &&
"Unexpected number of shuffle inputs!");
// Find the inputs that enter the chain. Note that multiple uses are OK
// here; we're not going to remove the operands we find.
bool UnaryShuffle = (Inputs.size() == 1);
SDValue V1 = peekThroughBitcasts(Inputs[0]);
SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
: peekThroughBitcasts(Inputs[1]));
MVT VT1 = V1.getSimpleValueType();
MVT VT2 = V2.getSimpleValueType();
MVT RootVT = Root.getSimpleValueType();
assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
VT2.getSizeInBits() == RootVT.getSizeInBits() &&
"Vector size mismatch");
SDLoc DL(Root);
SDValue Res;
unsigned NumBaseMaskElts = BaseMask.size();
if (NumBaseMaskElts == 1) {
assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
return DAG.getBitcast(RootVT, V1);
}
unsigned RootSizeInBits = RootVT.getSizeInBits();
unsigned NumRootElts = RootVT.getVectorNumElements();
unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
(RootVT.isFloatingPoint() && Depth >= 2) ||
(RootVT.is256BitVector() && !Subtarget.hasAVX2());
// Don't combine if we are an AVX512/EVEX target and the mask element size
// is different from the root element size - this would prevent writemasks
// from being reused.
// TODO - this currently prevents all lane shuffles from occurring.
// TODO - check for writemasks usage instead of always preventing combining.
// TODO - attempt to narrow Mask back to writemask size.
bool IsEVEXShuffle =
RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
// TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
// Handle 128-bit lane shuffles of 256-bit vectors.
// If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
// we need to use the zeroing feature.
// TODO - this should support binary shuffles.
if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
!(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
!isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128)
return SDValue(); // Nothing to do!
MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
unsigned PermMask = 0;
PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
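// e.g. (illustrative) a widened base mask of {1, SM_SentinelZero} selects
// the upper 128-bit lane of V1 into the low half and zeroes the upper half,
// giving PermMask = 0x81.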
Res = DAG.getBitcast(ShuffleVT, V1);
Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
DAG.getUNDEF(ShuffleVT),
DAG.getConstant(PermMask, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
// For masks that have been widened to 128-bit elements or more,
// narrow back down to 64-bit elements.
SmallVector<int, 64> Mask;
if (BaseMaskEltSizeInBits > 64) {
assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
int MaskScale = BaseMaskEltSizeInBits / 64;
scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
} else {
Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
}
unsigned NumMaskElts = Mask.size();
unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
// Determine the effective mask value type.
FloatDomain &= (32 <= MaskEltSizeInBits);
MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
: MVT::getIntegerVT(MaskEltSizeInBits);
MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
// Only allow legal mask types.
if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
return SDValue();
// Attempt to match the mask against known shuffle patterns.
MVT ShuffleSrcVT, ShuffleVT;
unsigned Shuffle, PermuteImm;
// Which shuffle domains are permitted?
// Permit domain crossing at higher combine depths.
bool AllowFloatDomain = FloatDomain || (Depth > 3);
bool AllowIntDomain = (!FloatDomain || (Depth > 3)) && Subtarget.hasSSE2() &&
(!MaskVT.is256BitVector() || Subtarget.hasAVX2());
// Determine zeroable mask elements.
APInt Zeroable(NumMaskElts, 0);
for (unsigned i = 0; i != NumMaskElts; ++i)
if (isUndefOrZero(Mask[i]))
Zeroable.setBit(i);
if (UnaryShuffle) {
// If we are shuffling an X86ISD::VZEXT_LOAD then we can use the load
// directly if we don't shuffle the lower element and we shuffle the upper
// (zero) elements within themselves.
if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
(V1.getScalarValueSizeInBits() % MaskEltSizeInBits) == 0) {
unsigned Scale = V1.getScalarValueSizeInBits() / MaskEltSizeInBits;
ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
return DAG.getBitcast(RootVT, V1);
}
}
SDValue NewV1 = V1; // Save operand in case early exit happens.
if (matchUnaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain,
NewV1, DL, DAG, Subtarget, Shuffle,
ShuffleSrcVT, ShuffleVT) &&
(!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
return SDValue(); // Nothing to do!
Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
return DAG.getBitcast(RootVT, Res);
}
if (matchUnaryPermuteVectorShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
AllowIntDomain, Subtarget, Shuffle,
ShuffleVT, PermuteImm) &&
(!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
return SDValue(); // Nothing to do!
Res = DAG.getBitcast(ShuffleVT, V1);
Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
DAG.getConstant(PermuteImm, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
}
SDValue NewV1 = V1; // Save operands in case early exit happens.
SDValue NewV2 = V2;
if (matchBinaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain,
NewV1, NewV2, DL, DAG, Subtarget, Shuffle,
ShuffleSrcVT, ShuffleVT, UnaryShuffle) &&
(!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
return SDValue(); // Nothing to do!
NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
return DAG.getBitcast(RootVT, Res);
}
NewV1 = V1; // Save operands in case early exit happens.
NewV2 = V2;
if (matchBinaryPermuteVectorShuffle(
MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
(!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
return SDValue(); // Nothing to do!
NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
DAG.getConstant(PermuteImm, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
// Typically from here on, we need an integer version of MaskVT.
MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
// Annoyingly, SSE4A instructions don't map into the above match helpers.
if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
uint64_t BitLen, BitIdx;
if (matchVectorShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
Zeroable)) {
if (Depth == 1 && Root.getOpcode() == X86ISD::EXTRQI)
return SDValue(); // Nothing to do!
V1 = DAG.getBitcast(IntMaskVT, V1);
Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
DAG.getConstant(BitLen, DL, MVT::i8),
DAG.getConstant(BitIdx, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
if (matchVectorShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTQI)
return SDValue(); // Nothing to do!
V1 = DAG.getBitcast(IntMaskVT, V1);
V2 = DAG.getBitcast(IntMaskVT, V2);
Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
DAG.getConstant(BitLen, DL, MVT::i8),
DAG.getConstant(BitIdx, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
}
// Don't try to re-form single instruction chains under any circumstances now
// that we've done encoding canonicalization for them.
if (Depth < 2)
return SDValue();
// Depth threshold above which we can efficiently use variable mask shuffles.
int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 2 : 3;
AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
bool MaskContainsZeros =
any_of(Mask, [](int M) { return M == SM_SentinelZero; });
if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
// If we have a single input lane-crossing shuffle then lower to VPERMV.
if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
((Subtarget.hasAVX2() &&
(MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
(Subtarget.hasAVX512() &&
(MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
(Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
(Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
(Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
(Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
Res = DAG.getBitcast(MaskVT, V1);
Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
return DAG.getBitcast(RootVT, Res);
}
// Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
// vector as the second source.
if (UnaryShuffle && AllowVariableMask &&
((Subtarget.hasAVX512() &&
(MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
(Subtarget.hasVLX() &&
(MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
(Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
(Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
(Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
(Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
// Adjust shuffle mask - replace SM_SentinelZero with second source index.
for (unsigned i = 0; i != NumMaskElts; ++i)
if (Mask[i] == SM_SentinelZero)
Mask[i] = NumMaskElts + i;
SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
Res = DAG.getBitcast(MaskVT, V1);
SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
return DAG.getBitcast(RootVT, Res);
}
// If we have a dual input lane-crossing shuffle then lower to VPERMV3.
if (AllowVariableMask && !MaskContainsZeros &&
((Subtarget.hasAVX512() &&
(MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
(Subtarget.hasVLX() &&
(MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
(Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
(Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
(Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
(Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
V1 = DAG.getBitcast(MaskVT, V1);
V2 = DAG.getBitcast(MaskVT, V2);
Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
return DAG.getBitcast(RootVT, Res);
}
return SDValue();
}
// See if we can combine a single input shuffle with zeros to a bit-mask,
// which is much simpler than any shuffle.
if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
APInt UndefElts(NumMaskElts, 0);
SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
for (unsigned i = 0; i != NumMaskElts; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef) {
UndefElts.setBit(i);
continue;
}
if (M == SM_SentinelZero)
continue;
EltBits[i] = AllOnes;
}
SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
Res = DAG.getBitcast(MaskVT, V1);
unsigned AndOpcode =
FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
return DAG.getBitcast(RootVT, Res);
}
// If we have a single input shuffle with different shuffle patterns in the
// 128-bit lanes, use the variable mask form of VPERMILPS (VPERMILPV).
// TODO: Combine other mask types at higher depths.
if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
(MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
SmallVector<SDValue, 16> VPermIdx;
for (int M : Mask) {
SDValue Idx =
M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
VPermIdx.push_back(Idx);
}
SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
Res = DAG.getBitcast(MaskVT, V1);
Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
return DAG.getBitcast(RootVT, Res);
}
// With XOP, binary shuffles of 128/256-bit floating point vectors can combine
// to VPERMIL2PD/VPERMIL2PS.
if (AllowVariableMask && Subtarget.hasXOP() &&
(MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
MaskVT == MVT::v8f32)) {
// VPERMIL2 Operation.
// Bits[3] - Match Bit.
// Bits[2:1] - (Per Lane) PD Shuffle Mask.
// Bits[2:0] - (Per Lane) PS Shuffle Mask.
unsigned NumLanes = MaskVT.getSizeInBits() / 128;
unsigned NumEltsPerLane = NumMaskElts / NumLanes;
SmallVector<int, 8> VPerm2Idx;
unsigned M2ZImm = 0;
for (int M : Mask) {
if (M == SM_SentinelUndef) {
VPerm2Idx.push_back(-1);
continue;
}
if (M == SM_SentinelZero) {
M2ZImm = 2;
VPerm2Idx.push_back(8);
continue;
}
int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
VPerm2Idx.push_back(Index);
}
V1 = DAG.getBitcast(MaskVT, V1);
V2 = DAG.getBitcast(MaskVT, V2);
SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
DAG.getConstant(M2ZImm, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
// If we have 3 or more shuffle instructions or a chain involving a variable
// mask, we can replace them with a single PSHUFB instruction profitably.
// Intel's manuals suggest only using PSHUFB if doing so replaces 5
// instructions, but in practice PSHUFB tends to be *very* fast so we're
// more aggressive.
if (UnaryShuffle && AllowVariableMask &&
((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
(RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
(RootVT.is512BitVector() && Subtarget.hasBWI()))) {
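// PSHUFB semantics: each mask byte selects a byte from within the same
// 128-bit lane of the source; if bit 7 of the mask byte is set (e.g. the
// constant 255 used below for zeroable elements) the destination byte is
// zeroed instead.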
SmallVector<SDValue, 16> PSHUFBMask;
int NumBytes = RootVT.getSizeInBits() / 8;
int Ratio = NumBytes / NumMaskElts;
for (int i = 0; i < NumBytes; ++i) {
int M = Mask[i / Ratio];
if (M == SM_SentinelUndef) {
PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}
if (M == SM_SentinelZero) {
PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
continue;
}
M = Ratio * M + i % Ratio;
assert((M / 16) == (i / 16) && "Lane crossing detected");
PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
Res = DAG.getBitcast(ByteVT, V1);
SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
return DAG.getBitcast(RootVT, Res);
}
// With XOP, if we have a 128-bit binary input shuffle we can always combine
// to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
// slower than PSHUFB on targets that support both.
if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
// VPPERM Mask Operation
// Bits[4:0] - Byte Index (0 - 31)
// Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
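// e.g. the constant 128 used below sets Bits[7:5] = 4, zeroing the
// destination byte regardless of the index in Bits[4:0].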
SmallVector<SDValue, 16> VPPERMMask;
int NumBytes = 16;
int Ratio = NumBytes / NumMaskElts;
for (int i = 0; i < NumBytes; ++i) {
int M = Mask[i / Ratio];
if (M == SM_SentinelUndef) {
VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}
if (M == SM_SentinelZero) {
VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
continue;
}
M = Ratio * M + i % Ratio;
VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
MVT ByteVT = MVT::v16i8;
V1 = DAG.getBitcast(ByteVT, V1);
V2 = DAG.getBitcast(ByteVT, V2);
SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
return DAG.getBitcast(RootVT, Res);
}
// Failed to find any combines.
return SDValue();
}
// Attempt to constant fold all of the constant source ops.
// Returns the folded constant node if the entire shuffle folds to a constant.
// TODO: Extend this to merge multiple constant Ops and update the mask.
static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
ArrayRef<int> Mask, SDValue Root,
bool HasVariableMask,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Root.getSimpleValueType();
unsigned SizeInBits = VT.getSizeInBits();
unsigned NumMaskElts = Mask.size();
unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
unsigned NumOps = Ops.size();
// Extract constant bits from each source op.
bool OneUseConstantOp = false;
SmallVector<APInt, 16> UndefEltsOps(NumOps);
SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
for (unsigned i = 0; i != NumOps; ++i) {
SDValue SrcOp = Ops[i];
OneUseConstantOp |= SrcOp.hasOneUse();
if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
RawBitsOps[i]))
return SDValue();
}
// Only fold if at least one of the constants is only used once or
// the combined shuffle has included a variable mask shuffle; this
// is to avoid constant pool bloat.
if (!OneUseConstantOp && !HasVariableMask)
return SDValue();
// Shuffle the constant bits according to the mask.
APInt UndefElts(NumMaskElts, 0);
APInt ZeroElts(NumMaskElts, 0);
APInt ConstantElts(NumMaskElts, 0);
SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
APInt::getNullValue(MaskSizeInBits));
for (unsigned i = 0; i != NumMaskElts; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef) {
UndefElts.setBit(i);
continue;
} else if (M == SM_SentinelZero) {
ZeroElts.setBit(i);
continue;
}
assert(0 <= M && M < (int)(NumMaskElts * NumOps));
unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
if (SrcUndefElts[SrcMaskIdx]) {
UndefElts.setBit(i);
continue;
}
auto &SrcEltBits = RawBitsOps[SrcOpIdx];
APInt &Bits = SrcEltBits[SrcMaskIdx];
if (!Bits) {
ZeroElts.setBit(i);
continue;
}
ConstantElts.setBit(i);
ConstantBitData[i] = Bits;
}
assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
// Create the constant data.
MVT MaskSVT;
if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
else
MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
SDLoc DL(Root);
SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
return DAG.getBitcast(VT, CstOp);
}
/// Fully generic combining of x86 shuffle instructions.
///
/// This should be the last combine run over the x86 shuffle instructions. Once
/// they have been fully optimized, this will recursively consider all chains
/// of single-use shuffle instructions, build a generic model of the cumulative
/// shuffle operation, and check for simpler instructions which implement this
/// operation. We use this primarily for two purposes:
///
/// 1) Collapse generic shuffles to specialized single instructions when
/// equivalent. In most cases, this is just an encoding size win, but
/// sometimes we will collapse multiple generic shuffles into a single
/// special-purpose shuffle.
/// 2) Look for sequences of shuffle instructions with 3 or more total
/// instructions, and replace them with the slightly more expensive SSSE3
/// PSHUFB instruction if available. We do this as the last combining step
/// to ensure we avoid using PSHUFB if we can implement the shuffle with
/// a suitable short sequence of other instructions. The PSHUFB will either
/// use a register or have to read from memory and so is slightly (but only
/// slightly) more expensive than the other shuffle instructions.
///
/// Because this is inherently a quadratic operation (for each shuffle in
/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
/// This should never be an issue in practice as the shuffle lowering doesn't
/// produce sequences of more than 8 instructions.
///
/// FIXME: We will currently miss some cases where the redundant shuffling
/// would simplify under the threshold for PSHUFB formation because of
/// combine-ordering. To fix this, we should do the redundant instruction
/// combining in this recursive walk.
static SDValue combineX86ShufflesRecursively(
ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Bound the depth of our recursive combine because this is ultimately
// quadratic in nature.
const unsigned MaxRecursionDepth = 8;
if (Depth > MaxRecursionDepth)
return SDValue();
// Directly rip through bitcasts to find the underlying operand.
SDValue Op = SrcOps[SrcOpIndex];
Op = peekThroughOneUseBitcasts(Op);
MVT VT = Op.getSimpleValueType();
if (!VT.isVector())
return SDValue(); // Bail if we hit a non-vector.
assert(Root.getSimpleValueType().isVector() &&
"Shuffles operate on vector types!");
assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
"Can only combine shuffles of the same vector register size.");
// Extract target shuffle mask and resolve sentinels and inputs.
SmallVector<int, 64> OpMask;
SmallVector<SDValue, 2> OpInputs;
if (!resolveTargetShuffleInputs(Op, OpInputs, OpMask, DAG))
return SDValue();
// TODO - Add support for more than 2 inputs.
if (2 < OpInputs.size())
return SDValue();
SDValue Input0 = (OpInputs.size() > 0 ? OpInputs[0] : SDValue());
SDValue Input1 = (OpInputs.size() > 1 ? OpInputs[1] : SDValue());
// Add the inputs to the Ops list, avoiding duplicates.
SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end());
auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
if (!Input)
return -1;
// Attempt to find an existing match.
SDValue InputBC = peekThroughBitcasts(Input);
for (int i = 0, e = Ops.size(); i < e; ++i)
if (InputBC == peekThroughBitcasts(Ops[i]))
return i;
// Match failed - should we replace an existing Op?
if (InsertionPoint >= 0) {
Ops[InsertionPoint] = Input;
return InsertionPoint;
}
// Add to the end of the Ops list.
Ops.push_back(Input);
return Ops.size() - 1;
};
int InputIdx0 = AddOp(Input0, SrcOpIndex);
int InputIdx1 = AddOp(Input1, -1);
assert(((RootMask.size() > OpMask.size() &&
RootMask.size() % OpMask.size() == 0) ||
(OpMask.size() > RootMask.size() &&
OpMask.size() % RootMask.size() == 0) ||
OpMask.size() == RootMask.size()) &&
"The smaller number of elements must divide the larger.");
// This function can be performance-critical, so we rely on the power-of-2
// knowledge that we have about the mask sizes to replace div/rem ops with
// bit-masks and shifts.
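// (For a power-of-2 Size, x % Size == x & (Size - 1) and
// x / Size == x >> log2(Size), which is why the mask sizes are asserted
// to be powers of two below.)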
assert(isPowerOf2_32(RootMask.size()) && "Non-power-of-2 shuffle mask sizes");
assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
assert((RootRatio == 1 || OpRatio == 1) &&
"Must not have a ratio for both incoming and op masks!");
assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
SmallVector<int, 64> Mask(MaskWidth, SM_SentinelUndef);
// Merge this shuffle operation's mask into our accumulated mask. Note that
// this shuffle's mask will be the first applied to the input, followed by the
// root mask to get us all the way to the root value arrangement. The reason
// for this order is that we are recursing up the operation chain.
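// For example (single input, equal mask widths): composing a root mask of
// {2, 3, 0, 1} on top of an op mask of {1, 0, 3, 2} yields {3, 2, 1, 0},
// i.e. Mask[i] = OpMask[RootMask[i]].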
for (unsigned i = 0; i < MaskWidth; ++i) {
unsigned RootIdx = i >> RootRatioLog2;
if (RootMask[RootIdx] < 0) {
// This is a zero or undef lane, we're done.
Mask[i] = RootMask[RootIdx];
continue;
}
unsigned RootMaskedIdx =
RootRatio == 1
? RootMask[RootIdx]
: (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
// Just insert the scaled root mask value if it references an input other
// than the SrcOp we're currently inserting.
if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
(((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
Mask[i] = RootMaskedIdx;
continue;
}
RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
if (OpMask[OpIdx] < 0) {
// The incoming lanes are zero or undef, it doesn't matter which ones we
// are using.
Mask[i] = OpMask[OpIdx];
continue;
}
// Ok, we have non-zero lanes, map them through to one of the Op's inputs.
unsigned OpMaskedIdx =
OpRatio == 1
? OpMask[OpIdx]
: (OpMask[OpIdx] << OpRatioLog2) + (RootMaskedIdx & (OpRatio - 1));
OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
if (OpMask[OpIdx] < (int)OpMask.size()) {
assert(0 <= InputIdx0 && "Unknown target shuffle input");
OpMaskedIdx += InputIdx0 * MaskWidth;
} else {
assert(0 <= InputIdx1 && "Unknown target shuffle input");
OpMaskedIdx += InputIdx1 * MaskWidth;
}
Mask[i] = OpMaskedIdx;
}
// Handle the all undef/zero cases early.
if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
return DAG.getUNDEF(Root.getValueType());
// TODO - should we handle the mixed zero/undef case as well? Just returning
// a zero mask will lose information on undef elements, possibly reducing
// future combine possibilities.
if (all_of(Mask, [](int Idx) { return Idx < 0; }))
return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
SDLoc(Root));
// Remove unused shuffle source ops.
resolveTargetShuffleInputsAndMask(Ops, Mask);
assert(!Ops.empty() && "Shuffle with no inputs detected");
HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
// Update the list of shuffle nodes that have been combined so far.
SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
SrcNodes.end());
CombinedNodes.push_back(Op.getNode());
// See if we can recurse into each shuffle source op (if it's a target
// shuffle). The source op should only be generally combined if it either has
// a single use (i.e. current Op) or all its users have already been combined;
// if not, then we can still combine but should prevent generation of variable
// shuffles to avoid constant pool bloat.
// Don't recurse if we already have more source ops than we can combine in
// the remaining recursion depth.
if (Ops.size() < (MaxRecursionDepth - Depth)) {
for (int i = 0, e = Ops.size(); i < e; ++i) {
bool AllowVar = false;
if (Ops[i].getNode()->hasOneUse() ||
SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
AllowVar = AllowVariableMask;
if (SDValue Res = combineX86ShufflesRecursively(
Ops, i, Root, Mask, CombinedNodes, Depth + 1, HasVariableMask,
AllowVar, DAG, Subtarget))
return Res;
}
}
// Attempt to constant fold all of the constant source ops.
if (SDValue Cst = combineX86ShufflesConstants(
Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
return Cst;
// We can only combine unary and binary shuffle mask cases.
if (Ops.size() > 2)
return SDValue();
// Minor canonicalization of the accumulated shuffle mask to make it easier
// to match below. All this does is detect masks with sequential pairs of
// elements, and shrink them to the half-width mask. It does this in a loop
// so it will reduce the size of the mask to the minimal width mask which
// performs an equivalent shuffle.
SmallVector<int, 64> WidenedMask;
while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
Mask = std::move(WidenedMask);
}
// Canonicalization of binary shuffle masks to improve pattern matching by
// commuting the inputs.
if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
ShuffleVectorSDNode::commuteMask(Mask);
std::swap(Ops[0], Ops[1]);
}
// Finally, try to combine into a single shuffle instruction.
return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
AllowVariableMask, DAG, Subtarget);
}
/// Get the PSHUF-style mask from PSHUF node.
///
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
/// PSHUF-style masks that can be reused with such instructions.
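/// For PSHUFHW, for example, only the mask for the high four words is
/// returned, rebased into the 0..3 range.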
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
MVT VT = N.getSimpleValueType();
SmallVector<int, 4> Mask;
SmallVector<SDValue, 2> Ops;
bool IsUnary;
bool HaveMask =
getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
(void)HaveMask;
assert(HaveMask);
// If we have more than 128 bits, only the low 128 bits of the shuffle mask
// matter. Check that the upper masks are repeats and remove them.
if (VT.getSizeInBits() > 128) {
int LaneElts = 128 / VT.getScalarSizeInBits();
#ifndef NDEBUG
for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
for (int j = 0; j < LaneElts; ++j)
assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
"Mask doesn't repeat in high 128-bit lanes!");
#endif
Mask.resize(LaneElts);
}
switch (N.getOpcode()) {
case X86ISD::PSHUFD:
return Mask;
case X86ISD::PSHUFLW:
Mask.resize(4);
return Mask;
case X86ISD::PSHUFHW:
Mask.erase(Mask.begin(), Mask.begin() + 4);
for (int &M : Mask)
M -= 4;
return Mask;
default:
llvm_unreachable("No valid shuffle instruction found!");
}
}
/// Search for a combinable shuffle across a chain ending in pshufd.
///
/// We walk up the chain and look for a combinable shuffle, skipping over
/// shuffles that we could hoist this shuffle's transformation past without
/// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(N.getOpcode() == X86ISD::PSHUFD &&
"Called with something other than an x86 128-bit half shuffle!");
SDLoc DL(N);
// Walk up a single-use chain looking for a combinable shuffle. Keep a stack
// of the shuffles in the chain so that we can form a fresh chain to replace
// this one.
SmallVector<SDValue, 8> Chain;
SDValue V = N.getOperand(0);
for (; V.hasOneUse(); V = V.getOperand(0)) {
switch (V.getOpcode()) {
default:
return SDValue(); // Nothing combined!
case ISD::BITCAST:
// Skip bitcasts as we always know the type for the target specific
// instructions.
continue;
case X86ISD::PSHUFD:
// Found another dword shuffle.
break;
case X86ISD::PSHUFLW:
// Check that the low words (being shuffled) are the identity in the
// dword shuffle, and the high words are self-contained.
if (Mask[0] != 0 || Mask[1] != 1 ||
!(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
return SDValue();
Chain.push_back(V);
continue;
case X86ISD::PSHUFHW:
// Check that the high words (being shuffled) are the identity in the
// dword shuffle, and the low words are self-contained.
if (Mask[2] != 2 || Mask[3] != 3 ||
!(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
return SDValue();
Chain.push_back(V);
continue;
case X86ISD::UNPCKL:
case X86ISD::UNPCKH:
// For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
// shuffle into a preceding word shuffle.
if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
V.getSimpleValueType().getVectorElementType() != MVT::i16)
return SDValue();
// Search for a half-shuffle which we can combine with.
unsigned CombineOp =
V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
if (V.getOperand(0) != V.getOperand(1) ||
!V->isOnlyUserOf(V.getOperand(0).getNode()))
return SDValue();
Chain.push_back(V);
V = V.getOperand(0);
do {
switch (V.getOpcode()) {
default:
return SDValue(); // Nothing to combine.
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
if (V.getOpcode() == CombineOp)
break;
Chain.push_back(V);
LLVM_FALLTHROUGH;
case ISD::BITCAST:
V = V.getOperand(0);
continue;
}
break;
} while (V.hasOneUse());
break;
}
// Break out of the loop if we break out of the switch.
break;
}
if (!V.hasOneUse())
// We fell out of the loop without finding a viable combining instruction.
return SDValue();
// Merge this node's mask and our incoming mask.
SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
for (int &M : Mask)
M = VMask[M];
V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Rebuild the chain around this new shuffle.
while (!Chain.empty()) {
SDValue W = Chain.pop_back_val();
if (V.getValueType() != W.getOperand(0).getValueType())
V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
switch (W.getOpcode()) {
default:
llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
case X86ISD::UNPCKL:
case X86ISD::UNPCKH:
V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
break;
case X86ISD::PSHUFD:
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
break;
}
}
if (V.getValueType() != N.getValueType())
V = DAG.getBitcast(N.getValueType(), V);
// Return the new chain to replace N.
return V;
}
/// Try to combine x86 target specific shuffles.
static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
MVT VT = N.getSimpleValueType();
SmallVector<int, 4> Mask;
unsigned Opcode = N.getOpcode();
// Combine a binary shuffle of 2 similar 'Horizontal' instructions into a
// single instruction.
if (VT.getScalarSizeInBits() == 64 &&
(Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
Opcode == X86ISD::UNPCKL)) {
auto BC0 = peekThroughBitcasts(N.getOperand(0));
auto BC1 = peekThroughBitcasts(N.getOperand(1));
EVT VT0 = BC0.getValueType();
EVT VT1 = BC1.getValueType();
unsigned Opcode0 = BC0.getOpcode();
unsigned Opcode1 = BC1.getOpcode();
if (Opcode0 == Opcode1 && VT0 == VT1 &&
(Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
SDValue Lo, Hi;
if (Opcode == X86ISD::MOVSD) {
Lo = BC1.getOperand(0);
Hi = BC0.getOperand(1);
} else {
Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
}
SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
return DAG.getBitcast(VT, Horiz);
}
}
switch (Opcode) {
case X86ISD::VBROADCAST: {
// If broadcasting from another shuffle, attempt to simplify it.
// TODO - we really need a general SimplifyDemandedVectorElts mechanism.
SDValue Src = N.getOperand(0);
SDValue BC = peekThroughBitcasts(Src);
EVT SrcVT = Src.getValueType();
EVT BCVT = BC.getValueType();
if (isTargetShuffle(BC.getOpcode()) &&
VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
SM_SentinelUndef);
for (unsigned i = 0; i != Scale; ++i)
DemandedMask[i] = i;
if (SDValue Res = combineX86ShufflesRecursively(
{BC}, 0, BC, DemandedMask, {}, /*Depth*/ 1,
/*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
DAG.getBitcast(SrcVT, Res));
}
return SDValue();
}
case X86ISD::PSHUFD:
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
Mask = getPSHUFShuffleMask(N);
assert(Mask.size() == 4);
break;
case X86ISD::MOVSD:
case X86ISD::MOVSS: {
SDValue N0 = N.getOperand(0);
SDValue N1 = N.getOperand(1);
// Canonicalize scalar FPOps:
// MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
// If commutable, allow OP(N1[0], N0[0]).
unsigned Opcode1 = N1.getOpcode();
if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
Opcode1 == ISD::FDIV) {
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
if (N10 == N0 ||
(N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
if (N10 != N0)
std::swap(N10, N11);
MVT SVT = VT.getVectorElementType();
SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
return DAG.getNode(Opcode, DL, VT, N0, SclVec);
}
}
return SDValue();
}
case X86ISD::INSERTPS: {
assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
SDValue Op0 = N.getOperand(0);
SDValue Op1 = N.getOperand(1);
SDValue Op2 = N.getOperand(2);
unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
unsigned ZeroMask = InsertPSMask & 0xF;
// If we zero out all elements from Op0 then we don't need to reference it.
if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
// If we zero out the element from Op1 then we don't need to reference it.
if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
DAG.getConstant(InsertPSMask, DL, MVT::i8));
// Attempt to merge insertps Op1 with an inner target shuffle node.
SmallVector<int, 8> TargetMask1;
SmallVector<SDValue, 2> Ops1;
if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) {
int M = TargetMask1[SrcIdx];
if (isUndefOrZero(M)) {
// Zero/UNDEF insertion - zero out element and remove dependency.
InsertPSMask |= (1u << DstIdx);
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
// Update insertps mask srcidx and reference the source input directly.
assert(0 <= M && M < 8 && "Shuffle index out of range");
InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
Op1 = Ops1[M < 4 ? 0 : 1];
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
// Attempt to merge insertps Op0 with an inner target shuffle node.
SmallVector<int, 8> TargetMask0;
SmallVector<SDValue, 2> Ops0;
if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0))
return SDValue();
bool Updated = false;
bool UseInput00 = false;
bool UseInput01 = false;
for (int i = 0; i != 4; ++i) {
int M = TargetMask0[i];
if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
// No change if element is already zero or the inserted element.
continue;
} else if (isUndefOrZero(M)) {
// If the target mask is undef/zero then we must zero the element.
InsertPSMask |= (1u << i);
Updated = true;
continue;
}
// The input vector element must be inline.
if (M != i && M != (i + 4))
return SDValue();
// Determine which inputs of the target shuffle we're using.
UseInput00 |= (0 <= M && M < 4);
UseInput01 |= (4 <= M);
}
// If we're not using both inputs of the target shuffle then use the
// referenced input directly.
if (UseInput00 && !UseInput01) {
Updated = true;
Op0 = Ops0[0];
} else if (!UseInput00 && UseInput01) {
Updated = true;
Op0 = Ops0[1];
}
if (Updated)
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
return SDValue();
}
default:
return SDValue();
}
// Nuke no-op shuffles that show up after combining.
if (isNoopShuffleMask(Mask))
return N.getOperand(0);
// Look for simplifications involving one or two shuffle instructions.
SDValue V = N.getOperand(0);
switch (N.getOpcode()) {
default:
break;
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
// See if this reduces to a PSHUFD which is no more expensive and can
// combine with more operations. Note that it has to at least flip the
// dwords as otherwise it would have been removed as a no-op.
if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
int DMask[] = {0, 1, 2, 3};
int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
DMask[DOffset + 0] = DOffset + 1;
DMask[DOffset + 1] = DOffset + 0;
MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
V = DAG.getBitcast(DVT, V);
V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
return DAG.getBitcast(VT, V);
}
// Look for shuffle patterns which can be implemented as a single unpack.
// FIXME: This doesn't handle the location of the PSHUFD generically, and
// only works when we have a PSHUFD followed by two half-shuffles.
if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
(V.getOpcode() == X86ISD::PSHUFLW ||
V.getOpcode() == X86ISD::PSHUFHW) &&
V.getOpcode() != N.getOpcode() &&
V.hasOneUse()) {
SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
int WordMask[8];
for (int i = 0; i < 4; ++i) {
WordMask[i + NOffset] = Mask[i] + NOffset;
WordMask[i + VOffset] = VMask[i] + VOffset;
}
// Map the word mask through the DWord mask.
int MappedMask[8];
for (int i = 0; i < 8; ++i)
MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
// We can replace all three shuffles with an unpack.
V = DAG.getBitcast(VT, D.getOperand(0));
return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
: X86ISD::UNPCKH,
DL, VT, V, V);
}
}
}
break;
case X86ISD::PSHUFD:
if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
return NewN;
break;
}
return SDValue();
}
/// Checks if the shuffle mask takes subsequent elements
/// alternately from two vectors.
/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
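/// For instance, with two v4 inputs and the mask <0, 5, 2, 7>, the even lanes
/// come from input 0 (elements 0 and 2) and the odd lanes from input 1
/// (elements 1 and 3), so ParitySrc becomes {0, 1} and Op0Even is set to true.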
static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
int ParitySrc[2] = {-1, -1};
unsigned Size = Mask.size();
for (unsigned i = 0; i != Size; ++i) {
int M = Mask[i];
if (M < 0)
continue;
// Make sure we are using the matching element from the input.
if ((M % Size) != i)
return false;
// Make sure we use the same input for all elements of the same parity.
int Src = M / Size;
if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
return false;
ParitySrc[i % 2] = Src;
}
// Make sure each input is used.
if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
return false;
Op0Even = ParitySrc[0] == 0;
return true;
}
/// Returns true iff the shuffle node \p N can be replaced with an
/// ADDSUB(SUBADD) operation. If true is returned then the operands of the
/// ADDSUB(SUBADD) operation are written to the parameters \p Opnd0 and \p Opnd1.
///
/// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector
/// shuffle nodes so it is easier to match generically. We also insert dummy
/// vector shuffle nodes for the operands which explicitly discard the lanes
/// which are unused by this operation, so that the rest of the combiner can
/// take advantage of the fact that they're unused.
static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
bool &IsSubAdd) {
EVT VT = N->getValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
!VT.getSimpleVT().isFloatingPoint())
return false;
// We only handle target-independent shuffles.
// FIXME: It would be easy and harmless to use the target shuffle mask
// extraction tool to support more.
if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
return false;
SDValue V1 = N->getOperand(0);
SDValue V2 = N->getOperand(1);
// Make sure we have an FADD and an FSUB.
if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
(V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
V1.getOpcode() == V2.getOpcode())
return false;
// If there are other uses of these operations we can't fold them.
if (!V1->hasOneUse() || !V2->hasOneUse())
return false;
// Ensure that both operations have the same operands. Note that we can
// commute the FADD operands.
SDValue LHS, RHS;
if (V1.getOpcode() == ISD::FSUB) {
LHS = V1->getOperand(0); RHS = V1->getOperand(1);
if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
(V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
return false;
} else {
assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
LHS = V2->getOperand(0); RHS = V2->getOperand(1);
if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
(V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
return false;
}
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
bool Op0Even;
if (!isAddSubOrSubAddMask(Mask, Op0Even))
return false;
// It's a subadd if the vector in the even parity is an FADD.
IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
: V2->getOpcode() == ISD::FADD;
Opnd0 = LHS;
Opnd1 = RHS;
return true;
}
/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
static SDValue combineShuffleToFMAddSub(SDNode *N,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// We only handle target-independent shuffles.
// FIXME: It would be easy and harmless to use the target shuffle mask
// extraction tool to support more.
if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
return SDValue();
MVT VT = N->getSimpleValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
return SDValue();
// We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue FMAdd = Op0, FMSub = Op1;
if (FMSub.getOpcode() != X86ISD::FMSUB)
std::swap(FMAdd, FMSub);
if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
FMAdd.getOperand(2) != FMSub.getOperand(2))
return SDValue();
// Check for correct shuffle mask.
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
bool Op0Even;
if (!isAddSubOrSubAddMask(Mask, Op0Even))
return SDValue();
// FMAddSub takes its zeroth operand from the FMSub node.
SDLoc DL(N);
bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
FMAdd.getOperand(2));
}
/// Try to combine a shuffle into a target-specific add-sub or
/// mul-add-sub node.
static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
return V;
SDValue Opnd0, Opnd1;
bool IsSubAdd;
if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
return SDValue();
MVT VT = N->getSimpleValueType(0);
SDLoc DL(N);
// Try to generate X86ISD::FMADDSUB node here.
SDValue Opnd2;
if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
}
if (IsSubAdd)
return SDValue();
// Do not generate X86ISD::ADDSUB node for 512-bit types even though
// the ADDSUB idiom has been successfully recognized. There are no known
// X86 targets with 512-bit ADDSUB instructions!
if (VT.is512BitVector())
return SDValue();
return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}
// We are looking for a shuffle where both sources are concatenated with undef
// and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
// if we can express this as a single-source shuffle, that's preferable.
static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
return SDValue();
EVT VT = N->getValueType(0);
// We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
if (!VT.is128BitVector() && !VT.is256BitVector())
return SDValue();
if (VT.getVectorElementType() != MVT::i32 &&
VT.getVectorElementType() != MVT::i64 &&
VT.getVectorElementType() != MVT::f32 &&
VT.getVectorElementType() != MVT::f64)
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// Check that both sources are concats with undef.
if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
!N1.getOperand(1).isUndef())
return SDValue();
// Construct the new shuffle mask. Elements from the first source retain their
// index, but elements from the second source no longer need to skip an undef.
SmallVector<int, 8> Mask;
int NumElts = VT.getVectorNumElements();
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
for (int Elt : SVOp->getMask())
Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
SDLoc DL(N);
SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
N1.getOperand(0));
return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
}
/// Eliminate a redundant shuffle of a horizontal math op.
static SDValue foldShuffleOfHorizOp(SDNode *N) {
if (N->getOpcode() != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
return SDValue();
SDValue HOp = N->getOperand(0);
if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
return SDValue();
// 128-bit horizontal math instructions are defined to operate on adjacent
// lanes of each operand as:
// v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
// ...similarly for v2f64 and v8i16.
// TODO: Handle UNDEF operands.
if (HOp.getOperand(0) != HOp.getOperand(1))
return SDValue();
// When the operands of a horizontal math op are identical, the low half of
// the result is the same as the high half. If the shuffle is also replicating
// low and high halves, we don't need the shuffle.
// shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
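// e.g. for v4f32, hadd X, X produces <X0+X1, X2+X3, X0+X1, X2+X3>, so a
// <0,1,0,1> shuffle of that result is already the hadd itself.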
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
// TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
// but this should be tied to whatever horizontal op matching and shuffle
// canonicalization are producing.
if (HOp.getValueSizeInBits() == 128 &&
(isTargetShuffleEquivalent(Mask, {0, 0}) ||
isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
return HOp;
if (HOp.getValueSizeInBits() == 256 &&
(isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
isTargetShuffleEquivalent(
Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
return HOp;
return SDValue();
}
static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// If we have legalized the vector types, look for blends of FADD and FSUB
// nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
if (TLI.isTypeLegal(VT)) {
if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
return AddSub;
if (SDValue HAddSub = foldShuffleOfHorizOp(N))
return HAddSub;
}
// During Type Legalization, when promoting illegal vector types,
// the backend might introduce new shuffle dag nodes and bitcasts.
//
// This code performs the following transformation:
// fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
// (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
//
// We do this only if both the bitcast and the BINOP dag nodes have
// one use. Also, perform this transformation only if the new binary
// operation is legal. This is to avoid introducing dag nodes that
// potentially need to be further expanded (or custom lowered) into a
// less optimal sequence of dag nodes.
if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
N->getOpcode() == ISD::VECTOR_SHUFFLE &&
N->getOperand(0).getOpcode() == ISD::BITCAST &&
N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue BC0 = N0.getOperand(0);
EVT SVT = BC0.getValueType();
unsigned Opcode = BC0.getOpcode();
unsigned NumElts = VT.getVectorNumElements();
if (BC0.hasOneUse() && SVT.isVector() &&
SVT.getVectorNumElements() * 2 == NumElts &&
TLI.isOperationLegal(Opcode, VT)) {
bool CanFold = false;
switch (Opcode) {
default : break;
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
// isOperationLegal lies for integer ops on floating point types.
CanFold = VT.isInteger();
break;
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
// isOperationLegal lies for floating point ops on integer types.
CanFold = VT.isFloatingPoint();
break;
}
unsigned SVTNumElts = SVT.getVectorNumElements();
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
CanFold = SVOp->getMaskElt(i) < 0;
if (CanFold) {
SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask());
}
}
}
// Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
// load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
// consecutive, non-overlapping, and in the right order.
SmallVector<SDValue, 16> Elts;
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
Elts.push_back(Elt);
continue;
}
Elts.clear();
break;
}
if (Elts.size() == VT.getVectorNumElements())
if (SDValue LD =
EltsFromConsecutiveLoads(VT, Elts, dl, DAG, Subtarget, true))
return LD;
// For AVX2, we sometimes want to combine
// (vector_shuffle <mask> (concat_vectors t1, undef)
// (concat_vectors t2, undef))
// Into:
// (vector_shuffle <mask> (concat_vectors t1, t2), undef)
// Since the latter can be efficiently lowered with VPERMD/VPERMQ
if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
return ShufConcat;
if (isTargetShuffle(N->getOpcode())) {
SDValue Op(N, 0);
if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
return Shuffle;
// Try recursively combining arbitrary sequences of x86 shuffle
// instructions into higher-order shuffles. We do this after combining
// specific PSHUF instruction sequences into their minimal form so that we
// can evaluate how many specialized shuffle instructions are involved in
// a particular chain.
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
return Res;
// Simplify source operands based on shuffle mask.
// TODO - merge this into combineX86ShufflesRecursively.
APInt KnownUndef, KnownZero;
APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
return SDValue(N, 0);
}
// Look for a truncating shuffle to v2i32 of a PMULUDQ where one of the
// operands is an extend from v2i32 to v2i64. Turn it into a pmulld.
// FIXME: This can probably go away once we default to widening legalization.
if (Subtarget.hasSSE41() && VT == MVT::v4i32 &&
N->getOpcode() == ISD::VECTOR_SHUFFLE &&
N->getOperand(0).getOpcode() == ISD::BITCAST &&
N->getOperand(0).getOperand(0).getOpcode() == X86ISD::PMULUDQ) {
SDValue BC = N->getOperand(0);
SDValue MULUDQ = BC.getOperand(0);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
ArrayRef<int> Mask = SVOp->getMask();
if (BC.hasOneUse() && MULUDQ.hasOneUse() &&
Mask[0] == 0 && Mask[1] == 2 && Mask[2] == -1 && Mask[3] == -1) {
SDValue Op0 = MULUDQ.getOperand(0);
SDValue Op1 = MULUDQ.getOperand(1);
if (Op0.getOpcode() == ISD::BITCAST &&
Op0.getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
Op0.getOperand(0).getValueType() == MVT::v4i32) {
ShuffleVectorSDNode *SVOp0 =
cast<ShuffleVectorSDNode>(Op0.getOperand(0));
ArrayRef<int> Mask2 = SVOp0->getMask();
if (Mask2[0] == 0 && Mask2[1] == -1 &&
Mask2[2] == 1 && Mask2[3] == -1) {
Op0 = SVOp0->getOperand(0);
Op1 = DAG.getBitcast(MVT::v4i32, Op1);
Op1 = DAG.getVectorShuffle(MVT::v4i32, dl, Op1, Op1, Mask);
return DAG.getNode(ISD::MUL, dl, MVT::v4i32, Op0, Op1);
}
}
if (Op1.getOpcode() == ISD::BITCAST &&
Op1.getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
Op1.getOperand(0).getValueType() == MVT::v4i32) {
ShuffleVectorSDNode *SVOp1 =
cast<ShuffleVectorSDNode>(Op1.getOperand(0));
ArrayRef<int> Mask2 = SVOp1->getMask();
if (Mask2[0] == 0 && Mask2[1] == -1 &&
Mask2[2] == 1 && Mask2[3] == -1) {
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
Op0 = DAG.getVectorShuffle(MVT::v4i32, dl, Op0, Op0, Mask);
Op1 = SVOp1->getOperand(0);
return DAG.getNode(ISD::MUL, dl, MVT::v4i32, Op0, Op1);
}
}
}
}
return SDValue();
}
bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
TargetLoweringOpt &TLO, unsigned Depth) const {
int NumElts = DemandedElts.getBitWidth();
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
// Handle special case opcodes.
switch (Opc) {
case X86ISD::VSHL:
case X86ISD::VSRL:
case X86ISD::VSRA: {
// We only need the bottom 64-bits of the (128-bit) shift amount.
SDValue Amt = Op.getOperand(1);
MVT AmtVT = Amt.getSimpleValueType();
assert(AmtVT.is128BitVector() && "Unexpected value type");
APInt AmtUndef, AmtZero;
unsigned NumAmtElts = AmtVT.getVectorNumElements();
APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
Depth + 1))
return true;
LLVM_FALLTHROUGH;
}
case X86ISD::VSHLI:
case X86ISD::VSRLI:
case X86ISD::VSRAI: {
SDValue Src = Op.getOperand(0);
APInt SrcUndef;
if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
Depth + 1))
return true;
// TODO convert SrcUndef to KnownUndef.
break;
}
case X86ISD::CVTSI2P:
case X86ISD::CVTUI2P: {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
APInt SrcUndef, SrcZero;
APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
Depth + 1))
return true;
break;
}
case X86ISD::PACKSS:
case X86ISD::PACKUS: {
APInt DemandedLHS, DemandedRHS;
getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
APInt SrcUndef, SrcZero;
if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, SrcUndef,
SrcZero, TLO, Depth + 1))
return true;
if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, SrcUndef,
SrcZero, TLO, Depth + 1))
return true;
break;
}
case X86ISD::VBROADCAST: {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
if (!SrcVT.isVector())
return false;
// Don't bother broadcasting if we just need the 0th element.
if (DemandedElts == 1) {
if (Src.getValueType() != VT)
Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
SDLoc(Op));
return TLO.CombineTo(Op, Src);
}
APInt SrcUndef, SrcZero;
APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
Depth + 1))
return true;
break;
}
case X86ISD::PSHUFB: {
// TODO - simplify other variable shuffle masks.
SDValue Mask = Op.getOperand(1);
APInt MaskUndef, MaskZero;
if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
Depth + 1))
return true;
break;
}
}
// Simplify target shuffles.
if (!isTargetShuffle(Opc) || !VT.isSimple())
return false;
// Get target shuffle mask.
bool IsUnary;
SmallVector<int, 64> OpMask;
SmallVector<SDValue, 2> OpInputs;
if (!getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, OpInputs,
OpMask, IsUnary))
return false;
// Shuffle inputs must be the same type as the result.
if (llvm::any_of(OpInputs,
[VT](SDValue V) { return VT != V.getValueType(); }))
return false;
// Clear known elts that might have been set above.
KnownZero.clearAllBits();
KnownUndef.clearAllBits();
// Check if shuffle mask can be simplified to undef/zero/identity.
int NumSrcs = OpInputs.size();
for (int i = 0; i != NumElts; ++i) {
int &M = OpMask[i];
if (!DemandedElts[i])
M = SM_SentinelUndef;
else if (0 <= M && OpInputs[M / NumElts].isUndef())
M = SM_SentinelUndef;
}
if (isUndefInRange(OpMask, 0, NumElts)) {
KnownUndef.setAllBits();
return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
}
if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
KnownZero.setAllBits();
return TLO.CombineTo(
Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
}
for (int Src = 0; Src != NumSrcs; ++Src)
if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
return TLO.CombineTo(Op, OpInputs[Src]);
// Attempt to simplify inputs.
for (int Src = 0; Src != NumSrcs; ++Src) {
int Lo = Src * NumElts;
APInt SrcElts = APInt::getNullValue(NumElts);
for (int i = 0; i != NumElts; ++i)
if (DemandedElts[i]) {
int M = OpMask[i] - Lo;
if (0 <= M && M < NumElts)
SrcElts.setBit(M);
}
APInt SrcUndef, SrcZero;
if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
TLO, Depth + 1))
return true;
}
// Extract known zero/undef elements.
// TODO - Propagate input undef/zero elts.
for (int i = 0; i != NumElts; ++i) {
if (OpMask[i] == SM_SentinelUndef)
KnownUndef.setBit(i);
if (OpMask[i] == SM_SentinelZero)
KnownZero.setBit(i);
}
return false;
}
bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
SDValue Op, const APInt &OriginalDemandedBits,
const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
unsigned Depth) const {
EVT VT = Op.getValueType();
unsigned BitWidth = OriginalDemandedBits.getBitWidth();
unsigned Opc = Op.getOpcode();
switch(Opc) {
case X86ISD::PMULDQ:
case X86ISD::PMULUDQ: {
// PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
KnownBits KnownOp;
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
// FIXME: Can we bound this better?
APInt DemandedMask = APInt::getLowBitsSet(64, 32);
if (SimplifyDemandedBits(LHS, DemandedMask, KnownOp, TLO, Depth + 1))
return true;
if (SimplifyDemandedBits(RHS, DemandedMask, KnownOp, TLO, Depth + 1))
return true;
break;
}
case X86ISD::VSHLI: {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
if (ShiftImm->getAPIntValue().uge(BitWidth))
break;
unsigned ShAmt = ShiftImm->getZExtValue();
APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
// If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
// single shift. We can do this if the bottom bits (which are shifted
// out) are never demanded.
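// e.g. when the low 5 bits are never demanded, ((X >>u 3) << 5) can be
// rewritten as (X << 2); the two forms only differ in bits that are not
// demanded.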
if (Op0.getOpcode() == X86ISD::VSRLI &&
OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
if (auto *Shift2Imm = dyn_cast<ConstantSDNode>(Op0.getOperand(1))) {
if (Shift2Imm->getAPIntValue().ult(BitWidth)) {
int Diff = ShAmt - Shift2Imm->getZExtValue();
if (Diff == 0)
return TLO.CombineTo(Op, Op0.getOperand(0));
unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
SDValue NewShift = TLO.DAG.getNode(
NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
TLO.DAG.getConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
return TLO.CombineTo(Op, NewShift);
}
}
}
if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
TLO, Depth + 1))
return true;
assert(!Known.hasConflict() && "Bits known to be one AND zero?");
Known.Zero <<= ShAmt;
Known.One <<= ShAmt;
// Low bits known zero.
Known.Zero.setLowBits(ShAmt);
}
break;
}
case X86ISD::VSRLI: {
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (ShiftImm->getAPIntValue().uge(BitWidth))
break;
unsigned ShAmt = ShiftImm->getZExtValue();
APInt DemandedMask = OriginalDemandedBits << ShAmt;
if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
OriginalDemandedElts, Known, TLO, Depth + 1))
return true;
assert(!Known.hasConflict() && "Bits known to be one AND zero?");
Known.Zero.lshrInPlace(ShAmt);
Known.One.lshrInPlace(ShAmt);
// High bits known zero.
Known.Zero.setHighBits(ShAmt);
}
break;
}
case X86ISD::VSRAI: {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
if (ShiftImm->getAPIntValue().uge(BitWidth))
break;
unsigned ShAmt = ShiftImm->getZExtValue();
APInt DemandedMask = OriginalDemandedBits << ShAmt;
// If we just want the sign bit then we don't need to shift it.
if (OriginalDemandedBits.isSignMask())
return TLO.CombineTo(Op, Op0);
// fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
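// (Shifting left by C1 and then arithmetic-shifting right by C1 just
// sign-extends from bit (BitWidth - C1 - 1); if X already has more than C1
// sign bits, that sign-extension is a no-op.)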
if (Op0.getOpcode() == X86ISD::VSHLI && Op1 == Op0.getOperand(1)) {
SDValue Op00 = Op0.getOperand(0);
unsigned NumSignBits =
TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
if (ShAmt < NumSignBits)
return TLO.CombineTo(Op, Op00);
}
// If any of the demanded bits are produced by the sign extension, we also
// demand the input sign bit.
if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
DemandedMask.setSignBit();
if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
TLO, Depth + 1))
return true;
assert(!Known.hasConflict() && "Bits known to be one AND zero?");
Known.Zero.lshrInPlace(ShAmt);
Known.One.lshrInPlace(ShAmt);
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
if (Known.Zero[BitWidth - ShAmt - 1] ||
OriginalDemandedBits.countLeadingZeros() >= ShAmt)
return TLO.CombineTo(
Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
// High bits are known one.
if (Known.One[BitWidth - ShAmt - 1])
Known.One.setHighBits(ShAmt);
}
break;
}
case X86ISD::MOVMSK: {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
unsigned SrcBits = SrcVT.getScalarSizeInBits();
unsigned NumElts = SrcVT.getVectorNumElements();
// If we don't need the sign bits at all just return zero.
if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
// Only demand the vector elements of the sign bits we need.
APInt KnownUndef, KnownZero;
APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
TLO, Depth + 1))
return true;
Known.Zero = KnownZero.zextOrSelf(BitWidth);
Known.Zero.setHighBits(BitWidth - NumElts);
// MOVMSK only uses the MSB from each vector element.
KnownBits KnownSrc;
if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
KnownSrc, TLO, Depth + 1))
return true;
if (KnownSrc.One[SrcBits - 1])
Known.One.setLowBits(NumElts);
else if (KnownSrc.Zero[SrcBits - 1])
Known.Zero.setLowBits(NumElts);
return false;
}
}
return TargetLowering::SimplifyDemandedBitsForTargetNode(
Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}
/// Check if a vector extract from a target-specific shuffle of a load can be
/// folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
SDValue InVec = N->getOperand(0);
SDValue EltNo = N->getOperand(1);
EVT EltVT = N->getValueType(0);
if (!isa<ConstantSDNode>(EltNo))
return SDValue();
EVT OriginalVT = InVec.getValueType();
// Peek through bitcasts, don't duplicate a load with other uses.
InVec = peekThroughOneUseBitcasts(InVec);
EVT CurrentVT = InVec.getValueType();
if (!CurrentVT.isVector() ||
CurrentVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
return SDValue();
if (!isTargetShuffle(InVec.getOpcode()))
return SDValue();
// Don't duplicate a load with other uses.
if (!InVec.hasOneUse())
return SDValue();
SmallVector<int, 16> ShuffleMask;
SmallVector<SDValue, 2> ShuffleOps;
bool UnaryShuffle;
if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
ShuffleOps, ShuffleMask, UnaryShuffle))
return SDValue();
// Select the input vector, guarding against an out-of-range extract index.
unsigned NumElems = CurrentVT.getVectorNumElements();
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
int Idx = (Elt > (int)NumElems) ? SM_SentinelUndef : ShuffleMask[Elt];
if (Idx == SM_SentinelZero)
return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
: DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
if (Idx == SM_SentinelUndef)
return DAG.getUNDEF(EltVT);
// Bail if any mask element is SM_SentinelZero - getVectorShuffle below
// won't handle it.
if (llvm::any_of(ShuffleMask, [](int M) { return M == SM_SentinelZero; }))
return SDValue();
assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range");
SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0] : ShuffleOps[1];
// If the inputs to the shuffle are the same for both ops, then allow 2 uses.
unsigned AllowedUses =
(ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
if (LdNode.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
return SDValue();
AllowedUses = 1; // only allow 1 load use if we have a bitcast
LdNode = LdNode.getOperand(0);
}
if (!ISD::isNormalLoad(LdNode.getNode()))
return SDValue();
LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
return SDValue();
// If there's a bitcast before the shuffle, check if the load type and
// alignment are valid.
unsigned Align = LN0->getAlignment();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
EltVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
return SDValue();
// All checks match, so transform back to vector_shuffle so that the DAG
// combiner can finish the job.
SDLoc dl(N);
// Create a shuffle node, taking into account the case that it's a unary shuffle.
SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1];
Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle,
ShuffleMask);
Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
EltNo);
}
// Try to match patterns such as
// (i16 bitcast (v16i1 x))
// ->
// (i16 movmsk (v16i8 sext (v16i1 x)))
// before the illegal vector is scalarized on subtargets that don't have legal
// vxi1 types.
static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
const X86Subtarget &Subtarget) {
EVT VT = BitCast.getValueType();
SDValue N0 = BitCast.getOperand(0);
EVT VecVT = N0->getValueType(0);
if (!VT.isScalarInteger() || !VecVT.isSimple())
return SDValue();
// If the input is a truncate from v16i8 or v32i8 go ahead and use a
// movmskb even with avx512. This will be better than truncating to vXi1 and
// using a kmov. This can especially help KNL if the input is a v16i8/v32i8
// vpcmpeqb/vpcmpgtb.
bool IsTruncated = N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
(N0.getOperand(0).getValueType() == MVT::v16i8 ||
N0.getOperand(0).getValueType() == MVT::v32i8 ||
N0.getOperand(0).getValueType() == MVT::v64i8);
// With AVX512 vxi1 types are legal and we prefer using k-regs.
// MOVMSK is supported in SSE2 or later.
if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
return SDValue();
// There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
// v8f64. So all legal 128-bit and 256-bit vectors are covered except for
// v8i16 and v16i16.
// For these two cases, we can shuffle the upper element bytes to a
// consecutive sequence at the start of the vector and treat the results as
// v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
// for v16i16 this is not the case, because the shuffle is expensive, so we
// avoid sign-extending to this type entirely.
// For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
// (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
MVT SExtVT;
switch (VecVT.getSimpleVT().SimpleTy) {
default:
return SDValue();
case MVT::v2i1:
SExtVT = MVT::v2i64;
break;
case MVT::v4i1:
SExtVT = MVT::v4i32;
// For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
// sign-extend to a 256-bit operation to avoid truncation.
if (N0->getOpcode() == ISD::SETCC && Subtarget.hasAVX() &&
N0->getOperand(0).getValueType().is256BitVector()) {
SExtVT = MVT::v4i64;
}
break;
case MVT::v8i1:
SExtVT = MVT::v8i16;
// For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
// sign-extend to a 256-bit operation to match the compare.
// If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
// 256-bit because the shuffle is cheaper than sign extending the result of
// the compare.
if (N0->getOpcode() == ISD::SETCC && Subtarget.hasAVX() &&
(N0->getOperand(0).getValueType().is256BitVector() ||
N0->getOperand(0).getValueType().is512BitVector())) {
SExtVT = MVT::v8i32;
}
break;
case MVT::v16i1:
SExtVT = MVT::v16i8;
// For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
// it is not profitable to sign-extend to 256-bit because this will
// require an extra cross-lane shuffle which is more expensive than
// truncating the result of the compare to 128-bits.
break;
case MVT::v32i1:
SExtVT = MVT::v32i8;
break;
case MVT::v64i1:
// If we have AVX512F but not AVX512BW, and the input is a truncate from
// v64i8 (checked earlier), then split the input and make two pmovmskbs.
if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
SExtVT = MVT::v64i8;
break;
}
return SDValue();
}
SDLoc DL(BitCast);
SDValue V = DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, N0);
if (SExtVT == MVT::v64i8) {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
DAG.getConstant(32, DL, MVT::i8));
V = DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
} else if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8) {
V = getPMOVMSKB(DL, V, DAG, Subtarget);
} else {
if (SExtVT == MVT::v8i16)
V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
DAG.getUNDEF(MVT::v8i16));
V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
}
return DAG.getZExtOrTrunc(V, DL, VT);
}
// Convert a vXi1 constant build vector to the same width scalar integer.
static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
EVT SrcVT = Op.getValueType();
assert(SrcVT.getVectorElementType() == MVT::i1 &&
"Expected a vXi1 vector");
assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
"Expected a constant build vector");
APInt Imm(SrcVT.getVectorNumElements(), 0);
for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Op.getOperand(Idx);
if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
Imm.setBit(Idx);
}
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
return DAG.getConstant(Imm, SDLoc(Op), IntVT);
}
static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
if (!DCI.isBeforeLegalizeOps())
return SDValue();
// Only do this if we have k-registers.
if (!Subtarget.hasAVX512())
return SDValue();
EVT DstVT = N->getValueType(0);
SDValue Op = N->getOperand(0);
EVT SrcVT = Op.getValueType();
if (!Op.hasOneUse())
return SDValue();
// Look for logic ops.
if (Op.getOpcode() != ISD::AND &&
Op.getOpcode() != ISD::OR &&
Op.getOpcode() != ISD::XOR)
return SDValue();
// Make sure we have a bitcast between mask registers and a scalar type.
if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
DstVT.isScalarInteger()) &&
!(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
SrcVT.isScalarInteger()))
return SDValue();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
LHS.getOperand(0).getValueType() == DstVT)
return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
DAG.getBitcast(DstVT, RHS));
if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
RHS.getOperand(0).getValueType() == DstVT)
return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
// If the RHS is a vXi1 build vector, this is a good reason to flip too.
// Most of these have to move a constant from the scalar domain anyway.
if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
RHS = combinevXi1ConstantToInteger(RHS, DAG);
return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
DAG.getBitcast(DstVT, LHS), RHS);
}
return SDValue();
}
static SDValue createMMXBuildVector(SDValue N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
unsigned NumElts = N.getNumOperands();
auto *BV = cast<BuildVectorSDNode>(N);
SDValue Splat = BV->getSplatValue();
// Build MMX element from integer GPR or SSE float values.
auto CreateMMXElement = [&](SDValue V) {
if (V.isUndef())
return DAG.getUNDEF(MVT::x86mmx);
if (V.getValueType().isFloatingPoint()) {
if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
V = DAG.getBitcast(MVT::v2i64, V);
return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
}
V = DAG.getBitcast(MVT::i32, V);
} else {
V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
}
return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
};
// Convert build vector ops to MMX data in the bottom elements.
SmallVector<SDValue, 8> Ops;
// Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
if (Splat) {
if (Splat.isUndef())
return DAG.getUNDEF(MVT::x86mmx);
Splat = CreateMMXElement(Splat);
if (Subtarget.hasSSE1()) {
// Unpack v8i8 to splat i8 elements to lowest 16-bits.
if (NumElts == 8)
Splat = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
Splat);
// Use PSHUFW to repeat 16-bit elements.
unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
return DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
DAG.getConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32), Splat,
DAG.getConstant(ShufMask, DL, MVT::i8));
}
Ops.append(NumElts, Splat);
} else {
for (unsigned i = 0; i != NumElts; ++i)
Ops.push_back(CreateMMXElement(N.getOperand(i)));
}
// Use tree of PUNPCKLs to build up general MMX vector.
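// Each pass interleaves adjacent elements and halves the element count:
// punpcklbw for 8 byte elements, punpcklwd for 4 words, punpckldq for the
// final pair of dwords.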
while (Ops.size() > 1) {
unsigned NumOps = Ops.size();
unsigned IntrinOp =
(NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
: (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
: Intrinsic::x86_mmx_punpcklbw));
SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
for (unsigned i = 0; i != NumOps; i += 2)
Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
Ops[i], Ops[i + 1]);
Ops.resize(NumOps / 2);
}
return Ops[0];
}
static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SrcVT = N0.getValueType();
// Try to match patterns such as
// (i16 bitcast (v16i1 x))
// ->
// (i16 movmsk (v16i8 sext (v16i1 x)))
// before the setcc result is scalarized on subtargets that don't have legal
// vxi1 types.
if (DCI.isBeforeLegalize()) {
if (SDValue V = combineBitcastvxi1(DAG, SDValue(N, 0), Subtarget))
return V;
// If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
// type, widen both sides to avoid a trip through memory.
if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
Subtarget.hasAVX512()) {
SDLoc dl(N);
N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
N0 = DAG.getBitcast(MVT::v8i1, N0);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
DAG.getIntPtrConstant(0, dl));
}
// If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
// type, widen both sides to avoid a trip through memory.
if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
Subtarget.hasAVX512()) {
SDLoc dl(N);
unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
Ops[0] = N0;
N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
N0 = DAG.getBitcast(MVT::i8, N0);
return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
}
}
// Since MMX types are special and don't usually play with other vector types,
// it's better to handle them early to be sure we emit efficient code by
// avoiding store-load conversions.
if (VT == MVT::x86mmx) {
// Detect MMX constant vectors.
APInt UndefElts;
SmallVector<APInt, 1> EltBits;
if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
SDLoc DL(N0);
// Handle zero-extension of i32 with MOVD.
if (EltBits[0].countLeadingZeros() >= 32)
return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
// Else, bitcast to a double.
// TODO - investigate supporting sext 32-bit immediates on x86_64.
APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
}
// Detect bitcasts to x86mmx low word.
if (N0.getOpcode() == ISD::BUILD_VECTOR &&
(SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
bool LowUndef = true, AllUndefOrZero = true;
for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
SDValue Op = N0.getOperand(i);
LowUndef &= Op.isUndef() || (i >= e/2);
AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
}
if (AllUndefOrZero) {
SDValue N00 = N0.getOperand(0);
SDLoc dl(N00);
N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
: DAG.getZExtOrTrunc(N00, dl, MVT::i32);
return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
}
}
// Detect bitcasts of 64-bit build vectors and convert to a
// MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
// lowest element.
if (N0.getOpcode() == ISD::BUILD_VECTOR &&
(SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
SrcVT == MVT::v8i8))
return createMMXBuildVector(N0, DAG, Subtarget);
// Detect bitcasts between element or subvector extraction to x86mmx.
if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
isNullConstant(N0.getOperand(1))) {
SDValue N00 = N0.getOperand(0);
if (N00.getValueType().is128BitVector())
return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
DAG.getBitcast(MVT::v2i64, N00));
}
// Detect bitcasts from FP_TO_SINT to x86mmx.
if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
SDLoc DL(N0);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
DAG.getUNDEF(MVT::v2i32));
return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
DAG.getBitcast(MVT::v2i64, Res));
}
}
// Try to remove a bitcast of constant vXi1 vector. We have to legalize
// most of these to scalar anyway.
if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
return combinevXi1ConstantToInteger(N0, DAG);
}
if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
isa<ConstantSDNode>(N0)) {
auto *C = cast<ConstantSDNode>(N0);
if (C->isAllOnesValue())
return DAG.getConstant(1, SDLoc(N0), VT);
if (C->isNullValue())
return DAG.getConstant(0, SDLoc(N0), VT);
}
// Try to remove bitcasts from input and output of mask arithmetic to
// remove GPR<->K-register crossings.
if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
return V;
// Convert a bitcasted integer logic operation that has one bitcasted
// floating-point operand into a floating-point logic operation. This may
// create a load of a constant, but that is cheaper than materializing the
// constant in an integer register and transferring it to an SSE register or
// transferring the SSE operand to integer register and back.
unsigned FPOpcode;
switch (N0.getOpcode()) {
case ISD::AND: FPOpcode = X86ISD::FAND; break;
case ISD::OR: FPOpcode = X86ISD::FOR; break;
case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
default: return SDValue();
}
if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
(Subtarget.hasSSE2() && VT == MVT::f64)))
return SDValue();
SDValue LogicOp0 = N0.getOperand(0);
SDValue LogicOp1 = N0.getOperand(1);
SDLoc DL0(N0);
// bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
!isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
}
// bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
!isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
}
return SDValue();
}
// Given a select, detect the following pattern:
// 1: %2 = zext <N x i8> %0 to <N x i32>
// 2: %3 = zext <N x i8> %1 to <N x i32>
// 3: %4 = sub nsw <N x i32> %2, %3
// 4: %5 = icmp sgt <N x i32> %4, [0 x N] or [-1 x N]
// 5: %6 = sub nsw <N x i32> zeroinitializer, %4
// 6: %7 = select <N x i1> %5, <N x i32> %4, <N x i32> %6
// This is useful as it is the input into a SAD pattern.
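// In other words, the select computes abs(%2 - %3), the element-wise absolute
// difference of the two zero-extended i8 vectors, which is what PSADBW sums.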
static bool detectZextAbsDiff(const SDValue &Select, SDValue &Op0,
SDValue &Op1) {
// Check that the condition of the select instruction is greater-than.
SDValue SetCC = Select->getOperand(0);
if (SetCC.getOpcode() != ISD::SETCC)
return false;
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
if (CC != ISD::SETGT && CC != ISD::SETLT)
return false;
SDValue SelectOp1 = Select->getOperand(1);
SDValue SelectOp2 = Select->getOperand(2);
// The following instructions assume SelectOp1 is the subtraction operand
// and SelectOp2 is the negation operand.
// In the case of SETLT this is the other way around.
if (CC == ISD::SETLT)
std::swap(SelectOp1, SelectOp2);
// The second operand of the select should be the negation of the first
// operand, which is implemented as 0 - SelectOp1.
if (!(SelectOp2.getOpcode() == ISD::SUB &&
ISD::isBuildVectorAllZeros(SelectOp2.getOperand(0).getNode()) &&
SelectOp2.getOperand(1) == SelectOp1))
return false;
// The first operand of SetCC is the first operand of the select, which is the
// difference between the two input vectors.
if (SetCC.getOperand(0) != SelectOp1)
return false;
// In the SETLT case, the second operand of the comparison can be either 1 or 0.
APInt SplatVal;
if ((CC == ISD::SETLT) &&
!((ISD::isConstantSplatVector(SetCC.getOperand(1).getNode(), SplatVal) &&
SplatVal.isOneValue()) ||
(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()))))
return false;
// In the SETGT case, the second operand of the comparison can be either -1 or 0.
if ((CC == ISD::SETGT) &&
!(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()) ||
ISD::isBuildVectorAllOnes(SetCC.getOperand(1).getNode())))
return false;
// The first operand of the select is the difference between the two input
// vectors.
if (SelectOp1.getOpcode() != ISD::SUB)
return false;
Op0 = SelectOp1.getOperand(0);
Op1 = SelectOp1.getOperand(1);
// Check if the operands of the sub are zero-extended from vectors of i8.
if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
Op1.getOpcode() != ISD::ZERO_EXTEND ||
Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
return false;
return true;
}
// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
// to these zexts.
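// PSADBW treats each 64-bit lane as 8 unsigned bytes and produces the sum of
// absolute differences of the corresponding bytes of its two operands,
// zero-extended into that lane.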
static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
const SDValue &Zext1, const SDLoc &DL,
const X86Subtarget &Subtarget) {
// Find the appropriate width for the PSADBW.
EVT InVT = Zext0.getOperand(0).getValueType();
unsigned RegSize = std::max(128u, InVT.getSizeInBits());
// "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
// fill in the missing vector elements with 0.
unsigned NumConcat = RegSize / InVT.getSizeInBits();
SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
Ops[0] = Zext0.getOperand(0);
MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
Ops[0] = Zext1.getOperand(0);
SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
// Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
};
MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
PSADBWBuilder);
}
// Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
// PHMINPOSUW.
static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Bail without SSE41.
if (!Subtarget.hasSSE41())
return SDValue();
EVT ExtractVT = Extract->getValueType(0);
if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
return SDValue();
// Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
ISD::NodeType BinOp;
SDValue Src = DAG.matchBinOpReduction(
Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN});
if (!Src)
return SDValue();
EVT SrcVT = Src.getValueType();
EVT SrcSVT = SrcVT.getScalarType();
if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
return SDValue();
SDLoc DL(Extract);
SDValue MinPos = Src;
// First, reduce the source down to 128-bit, applying BinOp to lo/hi.
while (SrcVT.getSizeInBits() > 128) {
unsigned NumElts = SrcVT.getVectorNumElements();
unsigned NumSubElts = NumElts / 2;
SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
unsigned SubSizeInBits = SrcVT.getSizeInBits();
SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
}
assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
(SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
"Unexpected value type");
// PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
// to flip the value accordingly.
SDValue Mask;
unsigned MaskEltsBits = ExtractVT.getSizeInBits();
if (BinOp == ISD::SMAX)
Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
else if (BinOp == ISD::SMIN)
Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
else if (BinOp == ISD::UMAX)
Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
if (Mask)
MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
// For v16i8 cases we need to perform UMIN on pairs of byte elements,
// shuffling each upper element down and inserting zeros. This means that the
// v16i8 UMIN will leave the upper element of each pair as zero, performing
// the zero-extension ready for the PHMINPOS.
if (ExtractVT == MVT::i8) {
SDValue Upper = DAG.getVectorShuffle(
SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
{1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
}
// Perform the PHMINPOS on a v8i16 vector.
MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
MinPos = DAG.getBitcast(SrcVT, MinPos);
if (Mask)
MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
DAG.getIntPtrConstant(0, DL));
}
// Attempt to replace an all_of/any_of style horizontal reduction with a MOVMSK.
static SDValue combineHorizontalPredicateResult(SDNode *Extract,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Bail without SSE2 or with AVX512VL (which uses predicate registers).
if (!Subtarget.hasSSE2() || Subtarget.hasVLX())
return SDValue();
EVT ExtractVT = Extract->getValueType(0);
unsigned BitWidth = ExtractVT.getSizeInBits();
if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
ExtractVT != MVT::i8)
return SDValue();
// Check for OR(any_of) and AND(all_of) horizontal reduction patterns.
ISD::NodeType BinOp;
SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
if (!Match)
return SDValue();
// EXTRACT_VECTOR_ELT can require implicit extension of the vector element
// which we can't support here for now.
if (Match.getScalarValueSizeInBits() != BitWidth)
return SDValue();
// We require AVX2 for PMOVMSKB for v16i16/v32i8.
unsigned MatchSizeInBits = Match.getValueSizeInBits();
if (!(MatchSizeInBits == 128 ||
(MatchSizeInBits == 256 &&
((Subtarget.hasAVX() && BitWidth >= 32) || Subtarget.hasAVX2()))))
return SDValue();
// Don't bother performing this for 2-element vectors.
if (Match.getValueType().getVectorNumElements() <= 2)
return SDValue();
// Check that we are extracting a reduction of all sign bits.
if (DAG.ComputeNumSignBits(Match) != BitWidth)
return SDValue();
// For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
MVT MaskVT;
if (64 == BitWidth || 32 == BitWidth)
MaskVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
MatchSizeInBits / BitWidth);
else
MaskVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
APInt CompareBits;
ISD::CondCode CondCode;
if (BinOp == ISD::OR) {
// any_of -> MOVMSK != 0
CompareBits = APInt::getNullValue(32);
CondCode = ISD::CondCode::SETNE;
} else {
// all_of -> MOVMSK == ((1 << NumElts) - 1)
CompareBits = APInt::getLowBitsSet(32, MaskVT.getVectorNumElements());
CondCode = ISD::CondCode::SETEQ;
}
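// For example, an all_of reduction of v4i32 uses MOVMSKPS, which produces a
// 4-bit mask, so we test MOVMSK == 0xF; the corresponding any_of reduction
// tests MOVMSK != 0.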
// Perform the select as i32/i64 and then truncate to avoid partial register
// stalls.
unsigned ResWidth = std::max(BitWidth, 32u);
EVT ResVT = EVT::getIntegerVT(*DAG.getContext(), ResWidth);
SDLoc DL(Extract);
SDValue Zero = DAG.getConstant(0, DL, ResVT);
SDValue Ones = DAG.getAllOnesConstant(DL, ResVT);
SDValue Res = DAG.getBitcast(MaskVT, Match);
Res = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Res);
Res = DAG.getSelectCC(DL, Res, DAG.getConstant(CompareBits, DL, MVT::i32),
Ones, Zero, CondCode);
return DAG.getSExtOrTrunc(Res, DL, ExtractVT);
}
static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// PSADBW is only supported on SSE2 and up.
if (!Subtarget.hasSSE2())
return SDValue();
// Verify that the type we're extracting from is a vector with integer
// elements wider than i16.
EVT VT = Extract->getOperand(0).getValueType();
if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
return SDValue();
unsigned RegSize = 128;
if (Subtarget.useBWIRegs())
RegSize = 512;
else if (Subtarget.hasAVX())
RegSize = 256;
// We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
// TODO: We should be able to handle larger vectors by splitting them before
// feeding them into several SADs, and then reducing over those.
if (RegSize / VT.getVectorNumElements() < 8)
return SDValue();
// Match shuffle + add pyramid.
ISD::NodeType BinOp;
SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
// The operand is expected to be zero extended from i8
// (verified in detectZextAbsDiff).
// In order to convert to i64 and above, additional any/zero/sign
// extend is expected.
// The zero extend from 32 bit has no mathematical effect on the result.
// Also the sign extend is basically zero extend
// (extends the sign bit which is zero).
// So it is correct to skip the sign/zero extend instruction.
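// (The extended elements are absolute differences of zero-extended i8 values,
// i.e. at most 255, so their sign bit is zero and sign- and zero-extension
// agree.)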
if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
Root.getOpcode() == ISD::ZERO_EXTEND ||
Root.getOpcode() == ISD::ANY_EXTEND))
Root = Root.getOperand(0);
// If there was a match, we want Root to be a select that is the root of an
// abs-diff pattern.
if (!Root || (Root.getOpcode() != ISD::VSELECT))
return SDValue();
// Check whether we have an abs-diff pattern feeding into the select.
SDValue Zext0, Zext1;
if (!detectZextAbsDiff(Root, Zext0, Zext1))
return SDValue();
// Create the SAD instruction.
SDLoc DL(Extract);
SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
// If the original vector was wider than 8 elements, sum over the results
// in the SAD vector.
unsigned Stages = Log2_32(VT.getVectorNumElements());
MVT SadVT = SAD.getSimpleValueType();
if (Stages > 3) {
unsigned SadElems = SadVT.getVectorNumElements();
for(unsigned i = Stages - 3; i > 0; --i) {
SmallVector<int, 16> Mask(SadElems, -1);
for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
Mask[j] = MaskEnd + j;
SDValue Shuffle =
DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
}
}
MVT Type = Extract->getSimpleValueType(0);
unsigned TypeSizeInBits = Type.getSizeInBits();
// Return the lowest TypeSizeInBits bits.
MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
SAD = DAG.getBitcast(ResVT, SAD);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
Extract->getOperand(1));
}
// Attempt to peek through a target shuffle and extract the scalar from the
// source.
static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
SDValue Src = N->getOperand(0);
SDValue Idx = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT SrcVT = Src.getValueType();
EVT SrcSVT = SrcVT.getVectorElementType();
unsigned NumSrcElts = SrcVT.getVectorNumElements();
// Don't attempt this for boolean mask vectors or unknown extraction indices.
if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
return SDValue();
// Handle extract(broadcast(scalar_value)); it doesn't matter what the index is.
if (X86ISD::VBROADCAST == Src.getOpcode() &&
Src.getOperand(0).getValueType() == VT)
return Src.getOperand(0);
// Resolve the target shuffle inputs and mask.
SmallVector<int, 16> Mask;
SmallVector<SDValue, 2> Ops;
if (!resolveTargetShuffleInputs(peekThroughBitcasts(Src), Ops, Mask, DAG))
return SDValue();
// Attempt to narrow/widen the shuffle mask to the correct size.
if (Mask.size() != NumSrcElts) {
if ((NumSrcElts % Mask.size()) == 0) {
SmallVector<int, 16> ScaledMask;
int Scale = NumSrcElts / Mask.size();
scaleShuffleMask<int>(Scale, Mask, ScaledMask);
Mask = std::move(ScaledMask);
} else if ((Mask.size() % NumSrcElts) == 0) {
// Simplify Mask based on demanded element.
int ExtractIdx = (int)N->getConstantOperandVal(1);
int Scale = Mask.size() / NumSrcElts;
int Lo = Scale * ExtractIdx;
int Hi = Scale * (ExtractIdx + 1);
for (int i = 0, e = (int)Mask.size(); i != e; ++i)
if (i < Lo || Hi <= i)
Mask[i] = SM_SentinelUndef;
SmallVector<int, 16> WidenedMask;
while (Mask.size() > NumSrcElts &&
canWidenShuffleElements(Mask, WidenedMask))
Mask = std::move(WidenedMask);
// TODO - investigate support for wider shuffle masks with known upper
// undef/zero elements for implicit zero-extension.
}
}
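// For example, when extracting element 1 of a v4i32 source whose shuffle mask
// resolved to 8 x i16 lanes, Scale == 2, so only Mask[2..3] are kept (the rest
// become undef) and the mask is then widened back to 4 elements.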
// Check if narrowing/widening failed.
if (Mask.size() != NumSrcElts)
return SDValue();
int SrcIdx = Mask[N->getConstantOperandVal(1)];
SDLoc dl(N);
// If the shuffle source element is undef/zero then we can just accept it.
if (SrcIdx == SM_SentinelUndef)
return DAG.getUNDEF(VT);
if (SrcIdx == SM_SentinelZero)
return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
: DAG.getConstant(0, dl, VT);
SDValue SrcOp = Ops[SrcIdx / Mask.size()];
SrcOp = DAG.getBitcast(SrcVT, SrcOp);
SrcIdx = SrcIdx % Mask.size();
// We can only extract other elements from 128-bit vectors and in certain
// circumstances, depending on SSE-level.
// TODO: Investigate using extract_subvector for larger vectors.
// TODO: Investigate float/double extraction if it will be just stored.
if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
assert(SrcSVT == VT && "Unexpected extraction type");
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
DAG.getIntPtrConstant(SrcIdx, dl));
}
if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
(SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
"Unexpected extraction type");
unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
DAG.getIntPtrConstant(SrcIdx, dl));
return DAG.getZExtOrTrunc(ExtOp, dl, VT);
}
return SDValue();
}
/// Detect vector gather/scatter index generation and convert it from being a
/// bunch of shuffles and extracts into a somewhat faster sequence.
/// For i686, the best sequence is apparently storing the value and loading
/// scalars back, while for x64 we should use 64-bit extracts and shifts.
static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
return NewOp;
// TODO - Remove this once we can handle the implicit zero-extension of
// X86ISD::PEXTRW/X86ISD::PEXTRB in:
// XFormVExtractWithShuffleIntoLoad, combineHorizontalPredicateResult and
// combineBasicSADPattern.
if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
return NewOp;
SDValue InputVector = N->getOperand(0);
SDValue EltIdx = N->getOperand(1);
EVT SrcVT = InputVector.getValueType();
EVT VT = N->getValueType(0);
SDLoc dl(InputVector);
// Detect mmx extraction of all bits as an i64. It works better as a bitcast.
if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
SDValue MMXSrc = InputVector.getOperand(0);
// The bitcast source is a direct mmx result.
if (MMXSrc.getValueType() == MVT::x86mmx)
return DAG.getBitcast(VT, InputVector);
}
// Detect mmx to i32 conversion through a v2i32 elt extract.
if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
SDValue MMXSrc = InputVector.getOperand(0);
// The bitcast source is a direct mmx result.
if (MMXSrc.getValueType() == MVT::x86mmx)
return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
}
if (VT == MVT::i1 && InputVector.getOpcode() == ISD::BITCAST &&
isa<ConstantSDNode>(EltIdx) &&
isa<ConstantSDNode>(InputVector.getOperand(0))) {
uint64_t ExtractedElt = N->getConstantOperandVal(1);
auto *InputC = cast<ConstantSDNode>(InputVector.getOperand(0));
const APInt &InputValue = InputC->getAPIntValue();
uint64_t Res = InputValue[ExtractedElt];
return DAG.getConstant(Res, dl, MVT::i1);
}
// Check whether this extract is the root of a sum of absolute differences
// pattern. This has to be done here because we really want it to happen
// pre-legalization.
if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
return SAD;
// Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
return Cmp;
// Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
return MinMax;
return SDValue();
}
/// If a vector select has an operand that is -1 or 0, try to simplify the
/// select to a bitwise logic operation.
/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
static SDValue
combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue Cond = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
EVT VT = LHS.getValueType();
EVT CondVT = Cond.getValueType();
SDLoc DL(N);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (N->getOpcode() != ISD::VSELECT)
return SDValue();
assert(CondVT.isVector() && "Vector select expects a vector selector!");
bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
// Check if the first operand is all zeros and the Cond type is vXi1.
// This situation only applies to AVX512.
if (TValIsAllZeros && Subtarget.hasAVX512() && Cond.hasOneUse() &&
CondVT.getVectorElementType() == MVT::i1) {
// Invert the cond to not(cond) : xor(op,allones)=not(op)
SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
// Vselect cond, op1, op2 = Vselect not(cond), op2, op1
return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
}
// To use the condition operand as a bitwise mask, it must have elements that
// are the same size as the select elements. I.e., the condition operand must
// have already been promoted from the IR select condition type <N x i1>.
// Don't check if the types themselves are equal because that excludes
// vector floating-point selects.
if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
return SDValue();
bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
// Try to invert the condition if true value is not all 1s and false value is
// not all 0s.
if (!TValIsAllOnes && !FValIsAllZeros &&
// Check if the selector will be produced by CMPP*/PCMP*.
Cond.getOpcode() == ISD::SETCC &&
// Check if SETCC has already been promoted.
TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
CondVT) {
bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
if (TValIsAllZeros || FValIsAllOnes) {
SDValue CC = Cond.getOperand(2);
ISD::CondCode NewCC =
ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
Cond.getOperand(0).getValueType().isInteger());
Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
NewCC);
std::swap(LHS, RHS);
TValIsAllOnes = FValIsAllOnes;
FValIsAllZeros = TValIsAllZeros;
}
}
// Cond value must be 'sign splat' to be converted to a logical op.
if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
return SDValue();
// vselect Cond, 111..., 000... -> Cond
if (TValIsAllOnes && FValIsAllZeros)
return DAG.getBitcast(VT, Cond);
if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
return SDValue();
// vselect Cond, 111..., X -> or Cond, X
if (TValIsAllOnes) {
SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
return DAG.getBitcast(VT, Or);
}
// vselect Cond, X, 000... -> and Cond, X
if (FValIsAllZeros) {
SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
return DAG.getBitcast(VT, And);
}
// vselect Cond, 000..., X -> andn Cond, X
if (TValIsAllZeros) {
MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
return DAG.getBitcast(VT, AndN);
}
return SDValue();
}
static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
SDValue Cond = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
SDLoc DL(N);
auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
if (!TrueC || !FalseC)
return SDValue();
// Don't do this for crazy integer types.
EVT VT = N->getValueType(0);
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
// We're going to use the condition bit in math or logic ops. We could allow
// this with a wider condition value (post-legalization it becomes an i8),
// but if nothing is creating selects that late, it doesn't matter.
if (Cond.getValueType() != MVT::i1)
return SDValue();
// A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
// 3, 5, or 9 with i32/i64, so those get transformed too.
// TODO: For constants that overflow or do not differ by power-of-2 or small
// multiplier, convert to 'and' + 'add'.
const APInt &TrueVal = TrueC->getAPIntValue();
const APInt &FalseVal = FalseC->getAPIntValue();
bool OV;
APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
if (OV)
return SDValue();
APInt AbsDiff = Diff.abs();
if (AbsDiff.isPowerOf2() ||
((VT == MVT::i32 || VT == MVT::i64) &&
(AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
// We need a positive multiplier constant for shift/LEA codegen. The 'not'
// of the condition can usually be folded into a compare predicate, but even
// without that, the sequence should be cheaper than a CMOV alternative.
if (TrueVal.slt(FalseVal)) {
Cond = DAG.getNOT(DL, Cond, MVT::i1);
std::swap(TrueC, FalseC);
}
// select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
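// For example, select Cond, 11, 3 becomes (zext(Cond) * 8) + 3, and the
// multiply by 8 is later lowered as a shift.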
SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
// Multiply condition by the difference if non-one.
if (!AbsDiff.isOneValue())
R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
// Add the base if non-zero.
if (!FalseC->isNullValue())
R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
return R;
}
return SDValue();
}
/// If this is a *dynamic* select (non-constant condition) and we can match
/// this node with one of the variable blend instructions, restructure the
/// condition so that blends can use the high (sign) bit of each element.
/// This function will also call SimplifyDemandedBits on already-created
/// BLENDV nodes to perform additional simplifications.
static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue Cond = N->getOperand(0);
if ((N->getOpcode() != ISD::VSELECT &&
N->getOpcode() != X86ISD::BLENDV) ||
ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return SDValue();
// Don't optimize before the condition has been transformed to a legal type
// and don't ever optimize vector selects that map to AVX512 mask-registers.
unsigned BitWidth = Cond.getScalarValueSizeInBits();
if (BitWidth < 8 || BitWidth > 64)
return SDValue();
// We can only handle the cases where VSELECT is directly legal on the
// subtarget. We custom lower VSELECT nodes with constant conditions and
// this makes it hard to see whether a dynamic VSELECT will correctly
// lower, so we both check the operation's status and explicitly handle the
// cases where a *dynamic* blend will fail even though a constant-condition
// blend could be custom lowered.
// FIXME: We should find a better way to handle this class of problems.
// Potentially, we should combine constant-condition vselect nodes
// pre-legalization into shuffles and not mark as many types as custom
// lowered.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = N->getValueType(0);
if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
return SDValue();
// FIXME: We don't support i16-element blends currently. We could and
// should support them by making *all* the bits in the condition be set
// rather than just the high bit and using an i8-element blend.
if (VT.getVectorElementType() == MVT::i16)
return SDValue();
// Dynamic blending was only available from SSE4.1 onward.
if (VT.is128BitVector() && !Subtarget.hasSSE41())
return SDValue();
// Byte blends are only available in AVX2.
if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
return SDValue();
// There are no 512-bit blend instructions that use sign bits.
if (VT.is512BitVector())
return SDValue();
// TODO: Add other opcodes eventually lowered into BLEND.
for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
UI != UE; ++UI)
if ((UI->getOpcode() != ISD::VSELECT &&
UI->getOpcode() != X86ISD::BLENDV) ||
UI.getOperandNo() != 0)
return SDValue();
APInt DemandedMask(APInt::getSignMask(BitWidth));
KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
return SDValue();
// If we changed the computation somewhere in the DAG, this change will
// affect all users of Cond. Update all the nodes so that we do not use
// the generic VSELECT anymore. Otherwise, we may perform wrong
// optimizations as we messed with the actual expectation for the vector
// boolean values.
for (SDNode *U : Cond->uses()) {
if (U->getOpcode() == X86ISD::BLENDV)
continue;
SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
Cond, U->getOperand(1), U->getOperand(2));
DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
DCI.AddToWorklist(U);
}
DCI.CommitTargetLoweringOpt(TLO);
return SDValue(N, 0);
}
/// Do target-specific dag combines on SELECT and VSELECT nodes.
static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
SDValue Cond = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
// Try simplification again because we use this function to optimize
// BLENDV nodes that are not handled by the generic combiner.
if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
return V;
EVT VT = LHS.getValueType();
EVT CondVT = Cond.getValueType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Convert vselects with constant condition into shuffles.
if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
DCI.isBeforeLegalizeOps()) {
SmallVector<int, 64> Mask;
if (createShuffleMaskFromVSELECT(Mask, Cond))
return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
}
// If we have SSE[12] support, try to form min/max nodes. SSE min/max
// instructions match the semantics of the common C idiom x<y?x:y but not
// x<=y?x:y, because of how they handle negative zero (which can be
// ignored in unsafe-math mode).
// We also try to create v2f32 min/max nodes, which we later widen to v4f32.
if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
VT != MVT::f80 && VT != MVT::f128 &&
(TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
(Subtarget.hasSSE2() ||
(Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
unsigned Opcode = 0;
// Check for x CC y ? x : y.
if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
DAG.isEqualTo(RHS, Cond.getOperand(1))) {
switch (CC) {
default: break;
case ISD::SETULT:
// Converting this to a min would handle NaNs incorrectly, and swapping
// the operands would cause it to handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZeroFloat(LHS) ||
DAG.isKnownNeverZeroFloat(RHS)))
break;
std::swap(LHS, RHS);
}
Opcode = X86ISD::FMIN;
break;
case ISD::SETOLE:
// Converting this to a min would handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
break;
Opcode = X86ISD::FMIN;
break;
case ISD::SETULE:
// Converting this to a min would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETOLT:
case ISD::SETLT:
case ISD::SETLE:
Opcode = X86ISD::FMIN;
break;
case ISD::SETOGE:
// Converting this to a max would handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
break;
Opcode = X86ISD::FMAX;
break;
case ISD::SETUGT:
// Converting this to a max would handle NaNs incorrectly, and swapping
// the operands would cause it to handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZeroFloat(LHS) ||
DAG.isKnownNeverZeroFloat(RHS)))
break;
std::swap(LHS, RHS);
}
Opcode = X86ISD::FMAX;
break;
case ISD::SETUGE:
// Converting this to a max would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETOGT:
case ISD::SETGT:
case ISD::SETGE:
Opcode = X86ISD::FMAX;
break;
}
// Check for x CC y ? y : x -- a min/max with reversed arms.
} else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
DAG.isEqualTo(RHS, Cond.getOperand(0))) {
switch (CC) {
default: break;
case ISD::SETOGE:
// Converting this to a min would handle comparisons between positive
// and negative zero incorrectly, and swapping the operands would
// cause it to handle NaNs incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZeroFloat(LHS) ||
DAG.isKnownNeverZeroFloat(RHS))) {
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
std::swap(LHS, RHS);
}
Opcode = X86ISD::FMIN;
break;
case ISD::SETUGT:
// Converting this to a min would handle NaNs incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
(!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
break;
Opcode = X86ISD::FMIN;
break;
case ISD::SETUGE:
// Converting this to a min would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETOGT:
case ISD::SETGT:
case ISD::SETGE:
Opcode = X86ISD::FMIN;
break;
case ISD::SETULT:
// Converting this to a max would handle NaNs incorrectly.
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
Opcode = X86ISD::FMAX;
break;
case ISD::SETOLE:
// Converting this to a max would handle comparisons between positive
// and negative zero incorrectly, and swapping the operands would
// cause it to handle NaNs incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZeroFloat(LHS) &&
!DAG.isKnownNeverZeroFloat(RHS)) {
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
std::swap(LHS, RHS);
}
Opcode = X86ISD::FMAX;
break;
case ISD::SETULE:
// Converting this to a max would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETOLT:
case ISD::SETLT:
case ISD::SETLE:
Opcode = X86ISD::FMAX;
break;
}
}
if (Opcode)
return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
}
// Some mask scalar intrinsics rely on checking if only one bit is set
// and implement it in C code like this:
// A[0] = (U & 1) ? A[0] : W[0];
// This creates some redundant instructions that break pattern matching.
// fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
SDValue AndNode = Cond.getOperand(0);
if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
isNullConstant(Cond.getOperand(1)) &&
isOneConstant(AndNode.getOperand(1))) {
// LHS and RHS swapped due to
// setcc outputting 1 when AND resulted in 0 and vice versa.
AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
}
}
// v16i8 (select v16i1, v16i8, v16i8) does not have a proper
// lowering on KNL. In this case we convert it to
// v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
// The same applies to all vectors of i8 and i16 elements when BWI is
// unavailable.
// Make sure we extend these even before type legalization gets a chance to
// split wide vectors.
// Since SKX these selects have a proper lowering.
if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
CondVT.getVectorElementType() == MVT::i1 &&
(ExperimentalVectorWideningLegalization ||
VT.getVectorNumElements() > 4) &&
(VT.getVectorElementType() == MVT::i8 ||
VT.getVectorElementType() == MVT::i16)) {
Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
}
if (SDValue V = combineSelectOfTwoConstants(N, DAG))
return V;
// Canonicalize max and min:
// (x > y) ? x : y -> (x >= y) ? x : y
// (x < y) ? x : y -> (x <= y) ? x : y
// This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
// the need for an extra compare
// against zero. e.g.
// (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
// subl %esi, %edi
// testl %edi, %edi
// movl $0, %eax
// cmovgl %edi, %eax
// =>
// xorl %eax, %eax
// subl %esi, %edi
// cmovsl %eax, %edi
if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
DAG.isEqualTo(RHS, Cond.getOperand(1))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
switch (CC) {
default: break;
case ISD::SETLT:
case ISD::SETGT: {
ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
Cond.getOperand(0), Cond.getOperand(1), NewCC);
return DAG.getSelect(DL, VT, Cond, LHS, RHS);
}
}
}
// Match VSELECTs into subs with unsigned saturation.
if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
// psubus is available in SSE2 for i8 and i16 vectors.
Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
isPowerOf2_32(VT.getVectorNumElements()) &&
(VT.getVectorElementType() == MVT::i8 ||
VT.getVectorElementType() == MVT::i16)) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
// Check if one of the arms of the VSELECT is a zero vector. If it's on the
// left side invert the predicate to simplify logic below.
SDValue Other;
if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
Other = RHS;
CC = ISD::getSetCCInverse(CC, true);
} else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
Other = LHS;
}
if (Other.getNode() && Other->getNumOperands() == 2 &&
Other->getOperand(0) == Cond.getOperand(0)) {
SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
SDValue CondRHS = Cond->getOperand(1);
// Look for a general sub with unsigned saturation first.
// x >= y ? x-y : 0 --> subus x, y
// x > y ? x-y : 0 --> subus x, y
if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
if (isa<BuildVectorSDNode>(CondRHS)) {
// If the RHS is a constant we have to reverse the const
// canonicalization.
// x > C-1 ? x+-C : 0 --> subus x, C
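// e.g. with C == 5: x > 4 ? x + (-5) : 0 --> subus x, 5
// (CondRHS == C-1 == 4 and OpRHS == -C == -5 satisfy the predicate below).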
// TODO: Handle build_vectors with undef elements.
auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
return Cond->getAPIntValue() == (-Op->getAPIntValue() - 1);
};
if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT)) {
OpRHS = DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT), OpRHS);
return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
}
// Another special case: If C was a sign bit, the sub has been
// canonicalized into a xor.
// FIXME: Would it be better to use computeKnownBits to determine
// whether it's safe to decanonicalize the xor?
// x s< 0 ? x^C : 0 --> subus x, C
if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
OpRHSConst->getAPIntValue().isSignMask()) {
// Note that we have to rebuild the RHS constant here to ensure we
// don't rely on particular values of undef lanes.
OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
}
}
}
}
}
}
// Match VSELECTs into add with unsigned saturation.
if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
// paddus is available in SSE2 for i8 and i16 vectors.
Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
isPowerOf2_32(VT.getVectorNumElements()) &&
(VT.getVectorElementType() == MVT::i8 ||
VT.getVectorElementType() == MVT::i16)) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
SDValue CondLHS = Cond->getOperand(0);
SDValue CondRHS = Cond->getOperand(1);
// Check if one of the arms of the VSELECT is a vector with all bits set.
// If it's on the left side invert the predicate to simplify logic below.
SDValue Other;
if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
Other = RHS;
CC = ISD::getSetCCInverse(CC, true);
} else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
Other = LHS;
}
if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
// Canonicalize condition operands.
if (CC == ISD::SETUGE) {
std::swap(CondLHS, CondRHS);
CC = ISD::SETULE;
}
// We can test against either of the addition operands.
// x <= x+y ? x+y : ~0 --> addus x, y
// x+y >= x ? x+y : ~0 --> addus x, y
if (CC == ISD::SETULE && Other == CondRHS &&
(OpLHS == CondLHS || OpRHS == CondLHS))
return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
CondLHS == OpLHS) {
// If the RHS is a constant we have to reverse the const
// canonicalization.
// x > ~C ? x+C : ~0 --> addus x, C
auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
return Cond->getAPIntValue() == ~Op->getAPIntValue();
};
if (CC == ISD::SETULE &&
ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
}
}
}
// Early exit check
if (!TLI.isTypeLegal(VT))
return SDValue();
if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
return V;
if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
return V;
// Custom action for SELECT MMX
if (VT == MVT::x86mmx) {
LHS = DAG.getBitcast(MVT::i64, LHS);
RHS = DAG.getBitcast(MVT::i64, RHS);
SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
return DAG.getBitcast(VT, newSelect);
}
return SDValue();
}
/// Combine:
/// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
/// to:
/// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
/// Note that this is only legal for some op/cc combinations.
static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// This combine only operates on CMP-like nodes.
if (!(Cmp.getOpcode() == X86ISD::CMP ||
(Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
return SDValue();
// Can't replace the cmp if it has more uses than the one we're looking at.
// FIXME: We would like to be able to handle this, but would need to make sure
// all uses were updated.
if (!Cmp.hasOneUse())
return SDValue();
// This only applies to variations of the common case:
// (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
// (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
// (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
// (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
// With the proper condition codes (see below), overflow is accounted for.
// FIXME: We can generalize both constraints:
// - XOR/OR/AND (if they were made to survive AtomicExpand)
// - LHS != 1
// if the result is compared.
SDValue CmpLHS = Cmp.getOperand(0);
SDValue CmpRHS = Cmp.getOperand(1);
if (!CmpLHS.hasOneUse())
return SDValue();
unsigned Opc = CmpLHS.getOpcode();
if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
return SDValue();
SDValue OpRHS = CmpLHS.getOperand(2);
auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
if (!OpRHSC)
return SDValue();
APInt Addend = OpRHSC->getAPIntValue();
if (Opc == ISD::ATOMIC_LOAD_SUB)
Addend = -Addend;
auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
if (!CmpRHSC)
return SDValue();
APInt Comparison = CmpRHSC->getAPIntValue();
// If the addend is the negation of the comparison value, then we can do
// a full comparison by emitting the atomic arithmetic as a locked sub.
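// For example, (cmp (atomic_load_add x, -5), 5) becomes a locked sub of 5:
// the flags of old-value - 5 are exactly the flags the original CMP would
// have produced.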
if (Comparison == -Addend) {
// The CC is fine, but we need to rewrite the LHS of the comparison as an
// atomic sub.
auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
auto AtomicSub = DAG.getAtomic(
ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
/*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
/*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
AN->getMemOperand());
auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
DAG.getUNDEF(CmpLHS.getValueType()));
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
return LockOp;
}
// We can handle comparisons with zero in a number of cases by manipulating
// the CC used.
if (!Comparison.isNullValue())
return SDValue();
if (CC == X86::COND_S && Addend == 1)
CC = X86::COND_LE;
else if (CC == X86::COND_NS && Addend == 1)
CC = X86::COND_G;
else if (CC == X86::COND_G && Addend == -1)
CC = X86::COND_GE;
else if (CC == X86::COND_LE && Addend == -1)
CC = X86::COND_L;
else
return SDValue();
SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
DAG.getUNDEF(CmpLHS.getValueType()));
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
return LockOp;
}
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
//
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
// This combine only operates on CMP-like nodes.
if (!(Cmp.getOpcode() == X86ISD::CMP ||
(Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
return SDValue();
// Quit if not used as a boolean value.
if (CC != X86::COND_E && CC != X86::COND_NE)
return SDValue();
// Check CMP operands. One of them should be 0 or 1 and the other should be
// a SetCC or a value extended from it.
SDValue Op1 = Cmp.getOperand(0);
SDValue Op2 = Cmp.getOperand(1);
SDValue SetCC;
const ConstantSDNode* C = nullptr;
bool needOppositeCond = (CC == X86::COND_E);
bool checkAgainstTrue = false; // Is it a comparison against 1?
if ((C = dyn_cast<ConstantSDNode>(Op1)))
SetCC = Op2;
else if ((C = dyn_cast<ConstantSDNode>(Op2)))
SetCC = Op1;
else // Quit if neither operand is a constant.
return SDValue();
if (C->getZExtValue() == 1) {
needOppositeCond = !needOppositeCond;
checkAgainstTrue = true;
} else if (C->getZExtValue() != 0)
// Quit if the constant is neither 0 nor 1.
return SDValue();
bool truncatedToBoolWithAnd = false;
// Skip (zext $x), (trunc $x), or (and $x, 1) node.
while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
SetCC.getOpcode() == ISD::TRUNCATE ||
SetCC.getOpcode() == ISD::AND) {
if (SetCC.getOpcode() == ISD::AND) {
int OpIdx = -1;
if (isOneConstant(SetCC.getOperand(0)))
OpIdx = 1;
if (isOneConstant(SetCC.getOperand(1)))
OpIdx = 0;
if (OpIdx < 0)
break;
SetCC = SetCC.getOperand(OpIdx);
truncatedToBoolWithAnd = true;
} else
SetCC = SetCC.getOperand(0);
}
switch (SetCC.getOpcode()) {
case X86ISD::SETCC_CARRY:
// Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
// simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
// i.e. it's a comparison against true but the result of SETCC_CARRY is not
// truncated to i1 using 'and'.
if (checkAgainstTrue && !truncatedToBoolWithAnd)
break;
assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
"Invalid use of SETCC_CARRY!");
LLVM_FALLTHROUGH;
case X86ISD::SETCC:
// Set the condition code or opposite one if necessary.
CC = X86::CondCode(SetCC.getConstantOperandVal(0));
if (needOppositeCond)
CC = X86::GetOppositeBranchCondition(CC);
return SetCC.getOperand(1);
case X86ISD::CMOV: {
// Check whether the false/true values are canonical, i.e. 0 or 1.
ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
// Quit if true value is not a constant.
if (!TVal)
return SDValue();
// Quit if false value is not a constant.
if (!FVal) {
SDValue Op = SetCC.getOperand(0);
// Skip 'zext' or 'trunc' node.
if (Op.getOpcode() == ISD::ZERO_EXTEND ||
Op.getOpcode() == ISD::TRUNCATE)
Op = Op.getOperand(0);
// A special case for rdrand/rdseed, where 0 is set if false cond is
// found.
if ((Op.getOpcode() != X86ISD::RDRAND &&
Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
return SDValue();
}
// Quit if false value is not the constant 0 or 1.
bool FValIsFalse = true;
if (FVal && FVal->getZExtValue() != 0) {
if (FVal->getZExtValue() != 1)
return SDValue();
// If FVal is 1, opposite cond is needed.
needOppositeCond = !needOppositeCond;
FValIsFalse = false;
}
// Quit if TVal is not the constant opposite of FVal.
if (FValIsFalse && TVal->getZExtValue() != 1)
return SDValue();
if (!FValIsFalse && TVal->getZExtValue() != 0)
return SDValue();
CC = X86::CondCode(SetCC.getConstantOperandVal(2));
if (needOppositeCond)
CC = X86::GetOppositeBranchCondition(CC);
return SetCC.getOperand(3);
}
}
return SDValue();
}
/// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
/// Match:
/// (X86or (X86setcc) (X86setcc))
/// (X86cmp (and (X86setcc) (X86setcc)), 0)
static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
X86::CondCode &CC1, SDValue &Flags,
bool &isAnd) {
if (Cond->getOpcode() == X86ISD::CMP) {
if (!isNullConstant(Cond->getOperand(1)))
return false;
Cond = Cond->getOperand(0);
}
isAnd = false;
SDValue SetCC0, SetCC1;
switch (Cond->getOpcode()) {
default: return false;
case ISD::AND:
case X86ISD::AND:
isAnd = true;
LLVM_FALLTHROUGH;
case ISD::OR:
case X86ISD::OR:
SetCC0 = Cond->getOperand(0);
SetCC1 = Cond->getOperand(1);
break;
};
// Make sure we have SETCC nodes, using the same flags value.
if (SetCC0.getOpcode() != X86ISD::SETCC ||
SetCC1.getOpcode() != X86ISD::SETCC ||
SetCC0->getOperand(1) != SetCC1->getOperand(1))
return false;
CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
Flags = SetCC0->getOperand(1);
return true;
}
// When legalizing carry, we create carries via add X, -1.
// If that comes from an actual carry produced by a setcc, we use the
// carry directly.
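// (add X, -1 sets CF exactly when X != 0, so if X is a setcc of COND_B
// producing 0/1, the add reproduces the original carry flag.)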
static SDValue combineCarryThroughADD(SDValue EFLAGS) {
if (EFLAGS.getOpcode() == X86ISD::ADD) {
if (isAllOnesConstant(EFLAGS.getOperand(1))) {
SDValue Carry = EFLAGS.getOperand(0);
while (Carry.getOpcode() == ISD::TRUNCATE ||
Carry.getOpcode() == ISD::ZERO_EXTEND ||
Carry.getOpcode() == ISD::SIGN_EXTEND ||
Carry.getOpcode() == ISD::ANY_EXTEND ||
(Carry.getOpcode() == ISD::AND &&
isOneConstant(Carry.getOperand(1))))
Carry = Carry.getOperand(0);
if (Carry.getOpcode() == X86ISD::SETCC ||
Carry.getOpcode() == X86ISD::SETCC_CARRY) {
if (Carry.getConstantOperandVal(0) == X86::COND_B)
return Carry.getOperand(1);
}
}
}
return SDValue();
}
/// Optimize an EFLAGS definition used according to the condition code \p CC
/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
/// uses of chain values.
static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (CC == X86::COND_B)
if (SDValue Flags = combineCarryThroughADD(EFLAGS))
return Flags;
if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
return R;
return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
}
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
SDValue FalseOp = N->getOperand(0);
SDValue TrueOp = N->getOperand(1);
X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
SDValue Cond = N->getOperand(3);
// Try to simplify the EFLAGS and condition code operands.
// We can't always do this, as FCMOV only supports a subset of the X86
// condition codes.
if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
Flags};
return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
}
}
// If this is a select between two integer constants, try to do some
// optimizations. Note that the operands are ordered the opposite of SELECT
// operands.
if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
// Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
// larger than FalseC (the false value).
if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
CC = X86::GetOppositeBranchCondition(CC);
std::swap(TrueC, FalseC);
std::swap(TrueOp, FalseOp);
}
// Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
// This is efficient for any integer data type (including i8/i16) and
// shift amount.
if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
Cond = getSETCC(CC, Cond, DL, DAG);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
unsigned ShAmt = TrueC->getAPIntValue().logBase2();
Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
DAG.getConstant(ShAmt, DL, MVT::i8));
return Cond;
}
// Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
// for any integer data type, including i8/i16.
if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
Cond = getSETCC(CC, Cond, DL, DAG);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
FalseC->getValueType(0), Cond);
Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
SDValue(FalseC, 0));
return Cond;
}
// Optimize cases that will turn into an LEA instruction. This requires
// an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
bool isFastMultiplier = false;
if (Diff < 10) {
switch ((unsigned char)Diff) {
default: break;
case 1: // result = add base, cond
case 2: // result = lea base( , cond*2)
case 3: // result = lea base(cond, cond*2)
case 4: // result = lea base( , cond*4)
case 5: // result = lea base(cond, cond*4)
case 8: // result = lea base( , cond*8)
case 9: // result = lea base(cond, cond*8)
isFastMultiplier = true;
break;
}
}
if (isFastMultiplier) {
APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
Cond = getSETCC(CC, Cond, DL ,DAG);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
Cond);
// Scale the condition by the difference.
if (Diff != 1)
Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
DAG.getConstant(Diff, DL, Cond.getValueType()));
// Add the base if non-zero.
if (FalseC->getAPIntValue() != 0)
Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
SDValue(FalseC, 0));
return Cond;
}
}
}
}
// Handle these cases:
// (select (x != c), e, c) -> (select (x != c), e, x),
// (select (x == c), c, e) -> (select (x == c), x, e)
// where the c is an integer constant, and the "select" is the combination
// of CMOV and CMP.
//
// The rationale for this change is that a conditional move from a constant
// needs two instructions, whereas a conditional move from a register needs
// only one instruction.
//
// CAVEAT: By replacing a constant with a symbolic value, it may obscure
// some instruction-combining opportunities. This opt needs to be
// postponed as late as possible.
//
if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
// the DCI.xxxx conditions are provided to postpone the optimization as
// late as possible.
ConstantSDNode *CmpAgainst = nullptr;
if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
(CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
!isa<ConstantSDNode>(Cond.getOperand(0))) {
if (CC == X86::COND_NE &&
CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
CC = X86::GetOppositeBranchCondition(CC);
std::swap(TrueOp, FalseOp);
}
if (CC == X86::COND_E &&
CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
SDValue Ops[] = { FalseOp, Cond.getOperand(0),
DAG.getConstant(CC, DL, MVT::i8), Cond };
return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
}
}
}
// Fold and/or of setcc's to double CMOV:
// (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
// (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
//
// This combine lets us generate:
// cmovcc1 (jcc1 if we don't have CMOV)
// cmovcc2 (same)
// instead of:
// setcc1
// setcc2
// and/or
// cmovne (jne if we don't have CMOV)
// When we can't use the CMOV instruction, it might increase branch
// mispredicts.
// When we can use CMOV, or when there is no mispredict, this improves
// throughput and reduces register pressure.
//
if (CC == X86::COND_NE) {
SDValue Flags;
X86::CondCode CC0, CC1;
bool isAndSetCC;
if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
if (isAndSetCC) {
std::swap(FalseOp, TrueOp);
CC0 = X86::GetOppositeBranchCondition(CC0);
CC1 = X86::GetOppositeBranchCondition(CC1);
}
SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
Flags};
SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
return CMOV;
}
}
// Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
// (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
// Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
// (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
if ((CC == X86::COND_NE || CC == X86::COND_E) &&
Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
SDValue Add = TrueOp;
SDValue Const = FalseOp;
// Canonicalize the condition code for easier matching and output.
if (CC == X86::COND_E)
std::swap(Add, Const);
// We might have replaced the constant in the cmov with the LHS of the
// compare. If so change it to the RHS of the compare.
if (Const == Cond.getOperand(0))
Const = Cond.getOperand(1);
// Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
(Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
EVT VT = N->getValueType(0);
// This should constant fold.
SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
DAG.getConstant(X86::COND_NE, DL, MVT::i8),
Cond);
return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
}
}
return SDValue();
}
/// Different mul shrinking modes.
enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
EVT VT = N->getOperand(0).getValueType();
if (VT.getScalarSizeInBits() != 32)
return false;
assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
unsigned SignBits[2] = {1, 1};
bool IsPositive[2] = {false, false};
for (unsigned i = 0; i < 2; i++) {
SDValue Opd = N->getOperand(i);
SignBits[i] = DAG.ComputeNumSignBits(Opd);
IsPositive[i] = DAG.SignBitIsZero(Opd);
}
bool AllPositive = IsPositive[0] && IsPositive[1];
unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
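// For example, an i32 element known to lie in [-128, 127] has at least 25
// sign bits (bits 31..7 all copy the sign), and a non-negative value below
// 256 has its top 24 bits clear, which is what the thresholds below test.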
// When ranges are from -128 ~ 127, use MULS8 mode.
if (MinSignBits >= 25)
Mode = MULS8;
// When ranges are from 0 ~ 255, use MULU8 mode.
else if (AllPositive && MinSignBits >= 24)
Mode = MULU8;
// When ranges are from -32768 ~ 32767, use MULS16 mode.
else if (MinSignBits >= 17)
Mode = MULS16;
// When ranges are from 0 ~ 65535, use MULU16 mode.
else if (AllPositive && MinSignBits >= 16)
Mode = MULU16;
else
return false;
return true;
}
/// When the operands of vector mul are extended from smaller size values,
/// like i8 and i16, the type of mul may be shrunk to generate more
/// efficient code. Two typical patterns are handled:
/// Pattern1:
/// %2 = sext/zext <N x i8> %1 to <N x i32>
/// %4 = sext/zext <N x i8> %3 to <N x i32>
/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
/// %5 = mul <N x i32> %2, %4
///
/// Pattern2:
/// %2 = zext/sext <N x i16> %1 to <N x i32>
/// %4 = zext/sext <N x i16> %3 to <N x i32>
/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
/// %5 = mul <N x i32> %2, %4
///
/// There are four mul shrinking modes:
/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
/// generate pmullw+sext32 for it (MULS8 mode).
/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
/// generate pmullw+zext32 for it (MULU8 mode).
/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
/// generate pmullw+pmulhw for it (MULS16 mode).
/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
/// generate pmullw+pmulhuw for it (MULU16 mode).
static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Check for legality:
// pmullw/pmulhw require SSE2.
if (!Subtarget.hasSSE2())
return SDValue();
// Check for profitability
// pmulld is supported since SSE41. It is better to use pmulld
// instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
// the expansion.
bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
return SDValue();
ShrinkMode Mode;
if (!canReduceVMulWidth(N, DAG, Mode))
return SDValue();
SDLoc DL(N);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getOperand(0).getValueType();
unsigned NumElts = VT.getVectorNumElements();
if ((NumElts % 2) != 0)
return SDValue();
unsigned RegSize = 128;
MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
// Shrink the operands of mul.
SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
if (ExperimentalVectorWideningLegalization ||
NumElts >= OpsVT.getVectorNumElements()) {
// Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
// lower part is needed.
SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
if (Mode == MULU8 || Mode == MULS8)
return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
DL, VT, MulLo);
MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
// Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
// the higher part is also needed.
SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
ReducedVT, NewN0, NewN1);
// Repack the lower part and higher part result of mul into a wider
// result.
// Generate shuffle functioning as punpcklwd.
SmallVector<int, 16> ShuffleMask(NumElts);
for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
ShuffleMask[2 * i] = i;
ShuffleMask[2 * i + 1] = i + NumElts;
}
SDValue ResLo =
DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
ResLo = DAG.getBitcast(ResVT, ResLo);
// Generate shuffle functioning as punpckhwd.
for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
ShuffleMask[2 * i] = i + NumElts / 2;
ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
}
SDValue ResHi =
DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
ResHi = DAG.getBitcast(ResVT, ResHi);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
}
// When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
// to legalize the mul explicitly because implicit legalization for type
// <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
// instructions which will not exist when we explicitly legalize it by
// extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
// <4 x i16> undef).
//
// Legalize the operands of mul.
// FIXME: We may be able to handle non-concatenated vectors by insertion.
unsigned ReducedSizeInBits = ReducedVT.getSizeInBits();
if ((RegSize % ReducedSizeInBits) != 0)
return SDValue();
SmallVector<SDValue, 16> Ops(RegSize / ReducedSizeInBits,
DAG.getUNDEF(ReducedVT));
Ops[0] = NewN0;
NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
Ops[0] = NewN1;
NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
if (Mode == MULU8 || Mode == MULS8) {
// Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower
// part is needed.
SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
// convert the type of mul result to VT.
MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
: ISD::SIGN_EXTEND_VECTOR_INREG,
DL, ResVT, Mul);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
DAG.getIntPtrConstant(0, DL));
}
// Generate the lower and higher part of mul: pmulhw/pmulhuw. For
// MULU16/MULS16, both parts are needed.
SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
OpsVT, NewN0, NewN1);
// Repack the lower part and higher part result of mul into a wider
// result. Make sure the type of mul result is VT.
MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
SDValue Res = getUnpackl(DAG, DL, OpsVT, MulLo, MulHi);
Res = DAG.getBitcast(ResVT, Res);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
DAG.getIntPtrConstant(0, DL));
}
static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
EVT VT, const SDLoc &DL) {
auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
DAG.getConstant(Mult, DL, VT));
Result = DAG.getNode(ISD::SHL, DL, VT, Result,
DAG.getConstant(Shift, DL, MVT::i8));
Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
N->getOperand(0));
return Result;
};
auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
DAG.getConstant(Mul1, DL, VT));
Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
DAG.getConstant(Mul2, DL, VT));
Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
N->getOperand(0));
return Result;
};
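// (Illustration: combineMulShlAddOrSub(5, 1, /*isAdd*/ true) builds
// ((x * 5) << 1) + x = 10x + x = 11x, and combineMulMulAddOrSub(9, 3, true)
// builds ((x * 9) * 3) + x = 27x + x = 28x; the switch below picks the
// decomposition for each supported constant.)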
switch (MulAmt) {
default:
break;
case 11:
// mul x, 11 => add ((shl (mul x, 5), 1), x)
return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
case 21:
// mul x, 21 => add ((shl (mul x, 5), 2), x)
return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
case 41:
// mul x, 41 => add ((shl (mul x, 5), 3), x)
return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
case 22:
// mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
case 19:
// mul x, 19 => add ((shl (mul x, 9), 1), x)
return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
case 37:
// mul x, 37 => add ((shl (mul x, 9), 2), x)
return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
case 73:
// mul x, 73 => add ((shl (mul x, 9), 3), x)
return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
case 13:
// mul x, 13 => add ((shl (mul x, 3), 2), x)
return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
case 23:
// mul x, 23 => sub ((shl (mul x, 3), 3), x)
return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
case 26:
// mul x, 26 => add ((mul (mul x, 5), 5), x)
return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
case 28:
// mul x, 28 => add ((mul (mul x, 9), 3), x)
return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
case 29:
// mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
}
// Another trick. If this is a power of 2 plus 2/4/8, we can use a shift
// followed by a single LEA.
// First check if this is a sum of two powers of 2 because that's easy. Then
// count how many trailing zeros there are up to the first set bit.
// TODO: We can do this even without LEA at a cost of two shifts and an add.
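// (Illustration: MulAmt == 36 is 32 + 4; 36 & 35 == 32 is a power of 2 and
// countTrailingZeros(36) == 2, so Shift1 = x << 5 and Shift2 = x << 2, whose
// sum is 36 * x. The shift-by-2/4/8 term is what the comment above expects
// to fold into an LEA scale factor.)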
if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
unsigned ScaleShift = countTrailingZeros(MulAmt);
if (ScaleShift >= 1 && ScaleShift < 4) {
unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(ShiftAmt, DL, MVT::i8));
SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(ScaleShift, DL, MVT::i8));
return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
}
}
return SDValue();
}
// If the upper 17 bits of each element are zero then we can use PMADDWD,
// which is always at least as quick as PMULLD, except on KNL.
static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
if (Subtarget.isPMADDWDSlow())
return SDValue();
EVT VT = N->getValueType(0);
// Only support vXi32 vectors.
if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
return SDValue();
// Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
// Also allow v2i32 if it will be widened.
MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
if (!((ExperimentalVectorWideningLegalization && VT == MVT::v2i32) ||
DAG.getTargetLoweringInfo().isTypeLegal(WVT)))
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// If we are zero extending two steps without SSE4.1, it's better to reduce
// the vmul width instead.
if (!Subtarget.hasSSE41() &&
(N0.getOpcode() == ISD::ZERO_EXTEND &&
N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
(N1.getOpcode() == ISD::ZERO_EXTEND &&
N1.getOperand(0).getScalarValueSizeInBits() <= 8))
return SDValue();
APInt Mask17 = APInt::getHighBitsSet(32, 17);
if (!DAG.MaskedValueIsZero(N1, Mask17) ||
!DAG.MaskedValueIsZero(N0, Mask17))
return SDValue();
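// With the upper 17 bits of each i32 element known zero, each value fits in
// the low 15 bits, so when the vector is viewed as vXi16 every odd (high)
// 16-bit lane is zero and every even (low) lane is a non-negative i16.
// PMADDWD then computes lo0*lo1 + 0*0 per pair, i.e. the exact product,
// which cannot overflow 32 bits.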
// Use SplitOpsAndApply to handle AVX splitting.
auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
return DAG.getNode(X86ISD::VPMADDWD, DL, VT, Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
{ DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
PMADDWDBuilder);
}
static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
EVT VT = N->getValueType(0);
// Only support vXi64 vectors.
if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
VT.getVectorNumElements() < 2 ||
!isPowerOf2_32(VT.getVectorNumElements()))
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// PMULDQ returns the 64-bit result of sign-extending and multiplying the
// lower 32 bits. We can lower with this if the sign bits stretch that far.
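// (If ComputeNumSignBits(X) > 32, the value is a sign-extension of its low
// 32 bits, so the full 64-bit product equals the PMULDQ of the low halves.)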
if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
DAG.ComputeNumSignBits(N1) > 32) {
auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
PMULDQBuilder, /*CheckBWI*/false);
}
// If the upper bits are zero we can use a single pmuludq.
APInt Mask = APInt::getHighBitsSet(64, 32);
if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
PMULUDQBuilder, /*CheckBWI*/false);
}
return SDValue();
}
/// Optimize a single multiply with constant into two operations in order to
/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
return V;
if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
return V;
if (DCI.isBeforeLegalize() && VT.isVector())
return reduceVMULWidth(N, DAG, Subtarget);
if (!MulConstantOptimization)
return SDValue();
// An imul is usually smaller than the alternative sequence.
if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
if (VT != MVT::i64 && VT != MVT::i32)
return SDValue();
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!C)
return SDValue();
if (isPowerOf2_64(C->getZExtValue()))
return SDValue();
int64_t SignMulAmt = C->getSExtValue();
assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
SDLoc DL(N);
if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
DAG.getConstant(AbsMulAmt, DL, VT));
if (SignMulAmt < 0)
NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
NewMul);
return NewMul;
}
uint64_t MulAmt1 = 0;
uint64_t MulAmt2 = 0;
if ((AbsMulAmt % 9) == 0) {
MulAmt1 = 9;
MulAmt2 = AbsMulAmt / 9;
} else if ((AbsMulAmt % 5) == 0) {
MulAmt1 = 5;
MulAmt2 = AbsMulAmt / 5;
} else if ((AbsMulAmt % 3) == 0) {
MulAmt1 = 3;
MulAmt2 = AbsMulAmt / 3;
}
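// (Illustration: AbsMulAmt == 45 decomposes as 9 * 5, giving two LEAs;
// AbsMulAmt == 40 decomposes as 5 * 8, giving one LEA plus one shift.)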
SDValue NewMul;
// For negative multiply amounts, only allow MulAmt2 to be a power of 2.
if (MulAmt2 &&
(isPowerOf2_64(MulAmt2) ||
(SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
if (isPowerOf2_64(MulAmt2) &&
!(SignMulAmt >= 0 && N->hasOneUse() &&
N->use_begin()->getOpcode() == ISD::ADD))
// If the second multiplier is a power of 2, issue it first. We want the
// multiply by
// 3, 5, or 9 to be folded into the addressing mode unless the lone use
// is an add. Only do this for positive multiply amounts since the
// negate would prevent it from being used as an address mode anyway.
std::swap(MulAmt1, MulAmt2);
if (isPowerOf2_64(MulAmt1))
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
else
NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
DAG.getConstant(MulAmt1, DL, VT));
if (isPowerOf2_64(MulAmt2))
NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
else
NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
DAG.getConstant(MulAmt2, DL, VT));
// Negate the result.
if (SignMulAmt < 0)
NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
NewMul);
} else if (!Subtarget.slowLEA())
NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
if (!NewMul) {
assert(C->getZExtValue() != 0 &&
C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
"Both cases that could cause potential overflows should have "
"already been handled.");
if (isPowerOf2_64(AbsMulAmt - 1)) {
// (mul x, 2^N + 1) => (add (shl x, N), x)
NewMul = DAG.getNode(
ISD::ADD, DL, VT, N->getOperand(0),
DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
MVT::i8)));
// To negate, subtract the number from zero
if (SignMulAmt < 0)
NewMul = DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT), NewMul);
} else if (isPowerOf2_64(AbsMulAmt + 1)) {
// (mul x, 2^N - 1) => (sub (shl x, N), x)
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(AbsMulAmt + 1),
DL, MVT::i8));
// To negate, reverse the operands of the subtract.
if (SignMulAmt < 0)
NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
else
NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
} else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
// (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(AbsMulAmt - 2),
DL, MVT::i8));
NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
} else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
// (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(AbsMulAmt + 2),
DL, MVT::i8));
NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
}
}
return NewMul;
}
static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
EVT VT = N0.getValueType();
// fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
// since the result of setcc_c is all zero's or all ones.
if (VT.isInteger() && !VT.isVector() &&
N1C && N0.getOpcode() == ISD::AND &&
N0.getOperand(1).getOpcode() == ISD::Constant) {
SDValue N00 = N0.getOperand(0);
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask <<= N1C->getAPIntValue();
bool MaskOK = false;
// We can handle cases concerning bit-widening nodes containing setcc_c if
// we carefully interrogate the mask to make sure we are semantics
// preserving.
// The transform is not safe if the result of C1 << C2 exceeds the bitwidth
// of the underlying setcc_c operation if the setcc_c was zero extended.
// Consider the following example:
// zext(setcc_c) -> i32 0x0000FFFF
// c1 -> i32 0x0000FFFF
// c2 -> i32 0x00000001
// (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
// (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
MaskOK = true;
} else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
MaskOK = true;
} else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
N00.getOpcode() == ISD::ANY_EXTEND) &&
N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
}
if (MaskOK && Mask != 0) {
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
}
}
// Hardware support for vector shifts is sparse, which makes us scalarize the
// vector operations in many cases. Also, on Sandy Bridge ADD is faster than
// shl.
// (shl V, 1) -> add V,V
if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
assert(N0.getValueType().isVector() && "Invalid vector shift type");
// We shift all of the values by one. In many cases we do not have
// hardware support for this operation. This is better expressed as an ADD
// of two values.
if (N1SplatC->getAPIntValue() == 1)
return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
}
return SDValue();
}
static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
unsigned Size = VT.getSizeInBits();
// fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
// into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
// into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
// depending on sign of (SarConst - [56,48,32,24,16])
// sexts on X86 are MOVs. The MOVs have the same code size
// as the above SHIFTs (only a SHIFT by 1 has a smaller encoding).
// However, the MOVs have 2 advantages over a SHIFT:
// 1. MOVs can write to a register that differs from source
// 2. MOVs accept memory operands
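// (Illustration, i32: (ashr (shl x, 24), 26) becomes
// (ashr (sext_inreg x, i8), 2), and (ashr (shl x, 24), 22) becomes
// (shl (sext_inreg x, i8), 2).)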
if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
N0.getOperand(1).getOpcode() != ISD::Constant)
return SDValue();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
EVT CVT = N1.getValueType();
if (SarConst.isNegative())
return SDValue();
for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
unsigned ShiftSize = SVT.getSizeInBits();
// Skip types without a corresponding sext/zext and ShlConst values that are
// not one of [56,48,32,24,16].
if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
continue;
SDLoc DL(N);
SDValue NN =
DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
SarConst = SarConst - (Size - ShiftSize);
if (SarConst == 0)
return NN;
else if (SarConst.isNegative())
return DAG.getNode(ISD::SHL, DL, VT, NN,
DAG.getConstant(-SarConst, DL, CVT));
else
return DAG.getNode(ISD::SRA, DL, VT, NN,
DAG.getConstant(SarConst, DL, CVT));
}
return SDValue();
}
static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// Only do this on the last DAG combine as it can interfere with other
// combines.
if (!DCI.isAfterLegalizeDAG())
return SDValue();
// Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
// TODO: This is a generic DAG combine that became an x86-only combine to
// avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
// and-not ('andn').
if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
return SDValue();
auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!ShiftC || !AndC)
return SDValue();
// If we can shrink the constant mask below 8-bits or 32-bits, then this
// transform should reduce code size. It may also enable secondary transforms
// from improved known-bits analysis or instruction selection.
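// (Illustration: (srl (and X, 0xFE0), 5) becomes (and (srl X, 5), 0x7F);
// the new mask fits a sign-extended 8-bit immediate, shrinking the encoding.)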
APInt MaskVal = AndC->getAPIntValue();
// If this can be matched by a zero extend, don't optimize.
if (MaskVal.isMask()) {
unsigned TO = MaskVal.countTrailingOnes();
if (TO >= 8 && isPowerOf2_32(TO))
return SDValue();
}
APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
unsigned OldMaskSize = MaskVal.getMinSignedBits();
unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
(OldMaskSize > 32 && NewMaskSize <= 32)) {
// srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
SDLoc DL(N);
SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
}
return SDValue();
}
static SDValue combineShift(SDNode* N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (N->getOpcode() == ISD::SHL)
if (SDValue V = combineShiftLeft(N, DAG))
return V;
if (N->getOpcode() == ISD::SRA)
if (SDValue V = combineShiftRightArithmetic(N, DAG))
return V;
if (N->getOpcode() == ISD::SRL)
if (SDValue V = combineShiftRightLogical(N, DAG, DCI))
return V;
return SDValue();
}
static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned Opcode = N->getOpcode();
assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
"Unexpected shift opcode");
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
unsigned DstBitsPerElt = VT.getScalarSizeInBits();
unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
"Unexpected PACKSS/PACKUS input type");
bool IsSigned = (X86ISD::PACKSS == Opcode);
// Constant Folding.
APInt UndefElts0, UndefElts1;
SmallVector<APInt, 32> EltBits0, EltBits1;
if ((N0->isUndef() || N->isOnlyUserOf(N0.getNode())) &&
(N1->isUndef() || N->isOnlyUserOf(N1.getNode())) &&
getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumDstElts = VT.getVectorNumElements();
unsigned NumSrcElts = NumDstElts / 2;
unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
APInt Undefs(NumDstElts, 0);
SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
if (UndefElts[SrcIdx]) {
Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
continue;
}
APInt &Val = EltBits[SrcIdx];
if (IsSigned) {
// PACKSS: Truncate signed value with signed saturation.
// Source values less than dst minint are saturated to minint.
// Source values greater than dst maxint are saturated to maxint.
if (Val.isSignedIntN(DstBitsPerElt))
Val = Val.trunc(DstBitsPerElt);
else if (Val.isNegative())
Val = APInt::getSignedMinValue(DstBitsPerElt);
else
Val = APInt::getSignedMaxValue(DstBitsPerElt);
} else {
// PACKUS: Truncate signed value with unsigned saturation.
// Source values less than zero are saturated to zero.
// Source values greater than dst maxuint are saturated to maxuint.
if (Val.isIntN(DstBitsPerElt))
Val = Val.trunc(DstBitsPerElt);
else if (Val.isNegative())
Val = APInt::getNullValue(DstBitsPerElt);
else
Val = APInt::getAllOnesValue(DstBitsPerElt);
}
Bits[Lane * NumDstEltsPerLane + Elt] = Val;
}
}
return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
}
// Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
// truncate to create a larger truncate.
if (Subtarget.hasAVX512() &&
N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
N0.getOperand(0).getValueType() == MVT::v8i32) {
if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
(!IsSigned &&
DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
if (Subtarget.hasVLX())
return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
// Widen input to v16i32 so we can truncate that.
SDLoc dl(N);
SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
}
}
// Attempt to combine as shuffle.
SDValue Op(N, 0);
if (SDValue Res =
combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false,
/*AllowVarMask*/ true, DAG, Subtarget))
return Res;
return SDValue();
}
static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
X86ISD::VSRL == N->getOpcode()) &&
"Unexpected shift opcode");
EVT VT = N->getValueType(0);
// Shift zero -> zero.
if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
return DAG.getConstant(0, SDLoc(N), VT);
APInt KnownUndef, KnownZero;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
KnownZero, DCI))
return SDValue(N, 0);
return SDValue();
}
static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned Opcode = N->getOpcode();
assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
X86ISD::VSRLI == Opcode) &&
"Unexpected shift opcode");
bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
unsigned NumBitsPerElt = VT.getScalarSizeInBits();
assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
"Unexpected value type");
assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
// Out of range logical bit shifts are guaranteed to be zero.
// Out of range arithmetic bit shifts splat the sign bit.
unsigned ShiftVal = cast<ConstantSDNode>(N1)->getZExtValue();
if (ShiftVal >= NumBitsPerElt) {
if (LogicalShift)
return DAG.getConstant(0, SDLoc(N), VT);
else
ShiftVal = NumBitsPerElt - 1;
}
// Shift N0 by zero -> N0.
if (!ShiftVal)
return N0;
// Shift zero -> zero.
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return DAG.getConstant(0, SDLoc(N), VT);
// Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
// clamped to (NumBitsPerElt - 1).
if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
unsigned NewShiftVal = ShiftVal + ShiftVal2;
if (NewShiftVal >= NumBitsPerElt)
NewShiftVal = NumBitsPerElt - 1;
return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
DAG.getConstant(NewShiftVal, SDLoc(N), MVT::i8));
}
// We can decode 'whole byte' logical bit shifts as shuffles.
if (LogicalShift && (ShiftVal % 8) == 0) {
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
return Res;
}
// Constant Folding.
APInt UndefElts;
SmallVector<APInt, 32> EltBits;
if (N->isOnlyUserOf(N0.getNode()) &&
getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
assert(EltBits.size() == VT.getVectorNumElements() &&
"Unexpected shift value type");
for (APInt &Elt : EltBits) {
if (X86ISD::VSHLI == Opcode)
Elt <<= ShiftVal;
else if (X86ISD::VSRAI == Opcode)
Elt.ashrInPlace(ShiftVal);
else
Elt.lshrInPlace(ShiftVal);
}
return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
}
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.SimplifyDemandedBits(SDValue(N, 0),
APInt::getAllOnesValue(NumBitsPerElt), DCI))
return SDValue(N, 0);
return SDValue();
}
static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
assert(
((N->getOpcode() == X86ISD::PINSRB && N->getValueType(0) == MVT::v16i8) ||
(N->getOpcode() == X86ISD::PINSRW &&
N->getValueType(0) == MVT::v8i16)) &&
"Unexpected vector insertion");
// Attempt to combine PINSRB/PINSRW patterns to a shuffle.
SDValue Op(N, 0);
if (SDValue Res =
combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false,
/*AllowVarMask*/ true, DAG, Subtarget))
return Res;
return SDValue();
}
/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
/// OR -> CMPNEQSS.
static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned opcode;
// SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
// we're requiring SSE2 for both.
if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CMP0 = N0->getOperand(1);
SDValue CMP1 = N1->getOperand(1);
SDLoc DL(N);
// The SETCCs should both refer to the same CMP.
if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
return SDValue();
SDValue CMP00 = CMP0->getOperand(0);
SDValue CMP01 = CMP0->getOperand(1);
EVT VT = CMP00.getValueType();
if (VT == MVT::f32 || VT == MVT::f64) {
bool ExpectingFlags = false;
// Check for any users that want flags:
for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
!ExpectingFlags && UI != UE; ++UI)
switch (UI->getOpcode()) {
default:
case ISD::BR_CC:
case ISD::BRCOND:
case ISD::SELECT:
ExpectingFlags = true;
break;
case ISD::CopyToReg:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
break;
}
if (!ExpectingFlags) {
enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
X86::CondCode tmp = cc0;
cc0 = cc1;
cc1 = tmp;
}
if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
(cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
// FIXME: need symbolic constants for these magic numbers.
// See X86ATTInstPrinter.cpp:printSSECC().
unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
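// (In the SSE/AVX compare-predicate immediate encoding, 0 selects CMPEQ and
// 4 selects CMPNEQ, matching the CMPEQSS/CMPNEQSS forms mentioned above.)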
if (Subtarget.hasAVX512()) {
SDValue FSetCC =
DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
DAG.getConstant(x86cc, DL, MVT::i8));
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
DAG.getConstant(0, DL, MVT::v16i1),
FSetCC, DAG.getIntPtrConstant(0, DL));
return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
N->getSimpleValueType(0));
}
SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
CMP00.getValueType(), CMP00, CMP01,
DAG.getConstant(x86cc, DL,
MVT::i8));
bool is64BitFP = (CMP00.getValueType() == MVT::f64);
MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
if (is64BitFP && !Subtarget.is64Bit()) {
// On a 32-bit target, we cannot bitcast the 64-bit float to a
// 64-bit integer, since that's not a legal type. Since
// OnesOrZeroesF is all ones or all zeroes, we don't need all the
// bits, but can do this little dance to extract the lowest 32 bits
// and work with those going forward.
SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
OnesOrZeroesF);
SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
Vector32, DAG.getIntPtrConstant(0, DL));
IntVT = MVT::i32;
}
SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
DAG.getConstant(1, DL, IntVT));
SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
ANDed);
return OneBitOfTruth;
}
}
}
}
return SDValue();
}
/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == ISD::AND);
MVT VT = N->getSimpleValueType(0);
if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
return SDValue();
SDValue X, Y;
SDValue N0 = peekThroughBitcasts(N->getOperand(0));
SDValue N1 = peekThroughBitcasts(N->getOperand(1));
if (N0.getOpcode() == ISD::XOR &&
ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) {
X = N0.getOperand(0);
Y = N1;
} else if (N1.getOpcode() == ISD::XOR &&
ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) {
X = N1.getOperand(0);
Y = N0;
} else
return SDValue();
X = DAG.getBitcast(VT, X);
Y = DAG.getBitcast(VT, Y);
return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
}
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
// Even with AVX-512 this is still useful for removing casts around logical
// operations on vXi1 mask types.
static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
assert(VT.isVector() && "Expected vector type");
assert((N->getOpcode() == ISD::ANY_EXTEND ||
N->getOpcode() == ISD::ZERO_EXTEND ||
N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
SDValue Narrow = N->getOperand(0);
EVT NarrowVT = Narrow.getValueType();
if (Narrow->getOpcode() != ISD::XOR &&
Narrow->getOpcode() != ISD::AND &&
Narrow->getOpcode() != ISD::OR)
return SDValue();
SDValue N0 = Narrow->getOperand(0);
SDValue N1 = Narrow->getOperand(1);
SDLoc DL(Narrow);
// The left side has to be a trunc.
if (N0.getOpcode() != ISD::TRUNCATE)
return SDValue();
// The type of the truncated inputs.
if (N0->getOperand(0).getValueType() != VT)
return SDValue();
// The right side has to be a 'trunc' or a constant vector.
bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getValueType() == VT;
if (!RHSTrunc &&
!ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
return SDValue();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
return SDValue();
// Set N0 and N1 to hold the inputs to the new wide operation.
N0 = N0->getOperand(0);
if (RHSTrunc)
N1 = N1->getOperand(0);
else
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
// Generate the wide operation.
SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
unsigned Opcode = N->getOpcode();
switch (Opcode) {
default: llvm_unreachable("Unexpected opcode");
case ISD::ANY_EXTEND:
return Op;
case ISD::ZERO_EXTEND:
return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
case ISD::SIGN_EXTEND:
return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
Op, DAG.getValueType(NarrowVT));
}
}
/// If both input operands of a logic op are being cast from floating point
/// types, try to convert this into a floating point logic node to avoid
/// unnecessary moves from SSE to integer registers.
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
unsigned FPOpcode = ISD::DELETED_NODE;
if (N->getOpcode() == ISD::AND)
FPOpcode = X86ISD::FAND;
else if (N->getOpcode() == ISD::OR)
FPOpcode = X86ISD::FOR;
else if (N->getOpcode() == ISD::XOR)
FPOpcode = X86ISD::FXOR;
assert(FPOpcode != ISD::DELETED_NODE &&
"Unexpected input node for FP logic conversion");
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDLoc DL(N);
if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
((Subtarget.hasSSE1() && VT == MVT::i32) ||
(Subtarget.hasSSE2() && VT == MVT::i64))) {
SDValue N00 = N0.getOperand(0);
SDValue N10 = N1.getOperand(0);
EVT N00Type = N00.getValueType();
EVT N10Type = N10.getValueType();
if (N00Type.isFloatingPoint() && N10Type.isFloatingPoint()) {
SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
return DAG.getBitcast(VT, FPLogic);
}
}
return SDValue();
}
/// If this is a zero/all-bits result that is bitwise-anded with a low bits
/// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
/// with a shift-right to eliminate loading the vector constant mask value.
static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
EVT VT0 = Op0.getValueType();
EVT VT1 = Op1.getValueType();
if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
return SDValue();
APInt SplatVal;
if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
!SplatVal.isMask())
return SDValue();
// Don't prevent creation of ANDN.
if (isBitwiseNot(Op0))
return SDValue();
if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
return SDValue();
unsigned EltBitWidth = VT0.getScalarSizeInBits();
if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
return SDValue();
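// Op0 is known to be all-zeros or all-ones per element, so ANDing with a
// low-bits mask of k ones is the same as logically shifting each element
// right by EltBitWidth - k (e.g. for v4i32 and a splatted mask of 0x1, a
// VSRLI by 31).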
SDLoc DL(N);
unsigned ShiftVal = SplatVal.countTrailingOnes();
SDValue ShAmt = DAG.getConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
return DAG.getBitcast(N->getValueType(0), Shift);
}
// Get the index node from the lowered DAG of a GEP IR instruction with one
// indexing dimension.
static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
if (Ld->isIndexed())
return SDValue();
SDValue Base = Ld->getBasePtr();
if (Base.getOpcode() != ISD::ADD)
return SDValue();
SDValue ShiftedIndex = Base.getOperand(0);
if (ShiftedIndex.getOpcode() != ISD::SHL)
return SDValue();
return ShiftedIndex.getOperand(0);
}
static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
switch (VT.getSizeInBits()) {
default: return false;
case 64: return Subtarget.is64Bit() ? true : false;
case 32: return true;
}
}
return false;
}
// This function recognizes cases where the X86 BZHI instruction can replace
// an 'and-load' sequence.
// In the case of loading an integer value from an array of constants defined
// as follows:
//
// int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
//
// and then applying a bitwise AND of the loaded value with another input,
// the result is equivalent to performing BZHI (zero high bits) on that input
// with the same index as the load.
static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Node->getSimpleValueType(0);
SDLoc dl(Node);
// Check if subtarget has BZHI instruction for the node's type
if (!hasBZHI(Subtarget, VT))
return SDValue();
// Try matching the pattern for both operands.
for (unsigned i = 0; i < 2; i++) {
SDValue N = Node->getOperand(i);
LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
// Bail out if the operand is not a load instruction.
if (!Ld)
return SDValue();
const Value *MemOp = Ld->getMemOperand()->getValue();
if (!MemOp)
return SDValue();
if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
Constant *Init = GV->getInitializer();
Type *Ty = Init->getType();
if (!isa<ConstantDataArray>(Init) ||
!Ty->getArrayElementType()->isIntegerTy() ||
Ty->getArrayElementType()->getScalarSizeInBits() !=
VT.getSizeInBits() ||
Ty->getArrayNumElements() >
Ty->getArrayElementType()->getScalarSizeInBits())
continue;
// Check if the array's constant elements are suitable to our case.
uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
bool ConstantsMatch = true;
for (uint64_t j = 0; j < ArrayElementCount; j++) {
ConstantInt *Elem =
dyn_cast<ConstantInt>(Init->getAggregateElement(j));
if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
ConstantsMatch = false;
break;
}
}
if (!ConstantsMatch)
continue;
// Do the transformation (For 32-bit type):
// -> (and (load arr[idx]), inp)
// <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
// that will be replaced with one bzhi instruction.
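// (e.g. arr[5] == 0x1F == 0xFFFFFFFF >> 27, so ANDing the loaded mask with
// inp zeroes the bits of inp from position idx upward, which ISel can then
// match as a single BZHI.)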
SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
// Get the Node which indexes into the array.
SDValue Index = getIndexFromUnindexedLoad(Ld);
if (!Index)
return SDValue();
Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
}
}
}
}
return SDValue();
}
// Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
// Turn it into a series of XORs and a setnp.
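// XOR of two halves preserves the parity of the whole value, and the x86 PF
// flag reflects the parity of the low 8 bits of a result, so the popcount
// can be folded down to one byte and read with SETNP.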
static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
// We only support 64-bit and 32-bit. 64-bit requires special handling
// unless the 64-bit popcnt instruction is legal.
if (VT != MVT::i32 && VT != MVT::i64)
return SDValue();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// LHS needs to be a single use CTPOP.
if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
return SDValue();
// RHS needs to be 1.
if (!isOneConstant(N1))
return SDValue();
SDLoc DL(N);
SDValue X = N0.getOperand(0);
// If this is 64-bit, it's always best to xor the two 32-bit pieces together
// even if we have popcnt.
if (VT == MVT::i64) {
SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
DAG.getNode(ISD::SRL, DL, VT, X,
DAG.getConstant(32, DL, MVT::i8)));
SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
// Generate a 32-bit parity idiom. This will bring us back here if we need
// to expand it too.
SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
DAG.getConstant(1, DL, MVT::i32));
return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
}
assert(VT == MVT::i32 && "Unexpected VT!");
// Xor the high and low 16-bits together using a 32-bit operation.
SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
DAG.getConstant(16, DL, MVT::i8));
X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
// Finally xor the low 2 bytes together and use an 8-bit flag-setting xor.
// This should allow an h-reg to be used to save a shift.
// FIXME: We only get an h-reg in 32-bit mode.
SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
DAG.getNode(ISD::SRL, DL, VT, X,
DAG.getConstant(8, DL, MVT::i8)));
SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
// Copy the inverse of the parity flag into a register with setcc.
SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
// Zero extend to original type.
return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
}
static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
// If this is SSE1 only convert to FAND to avoid scalarization.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
return DAG.getBitcast(
MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
}
// Use a 32-bit and+zext if upper bits known zero.
if (VT == MVT::i64 && Subtarget.is64Bit() &&
!isa<ConstantSDNode>(N->getOperand(1))) {
APInt HiMask = APInt::getHighBitsSet(64, 32);
if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
SDLoc dl(N);
SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
}
}
// This must be done before legalization has expanded the ctpop.
if (SDValue V = combineParity(N, DAG, Subtarget))
return V;
if (DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
return R;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
return R;
if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
return ShiftRight;
if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
return R;
// Attempt to recursively combine a bitmask AND with shuffles.
if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
return Res;
}
// Attempt to combine a scalar bitmask AND with an extracted shuffle.
if ((VT.getScalarSizeInBits() % 8) == 0 &&
N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
SDValue BitMask = N->getOperand(1);
SDValue SrcVec = N->getOperand(0).getOperand(0);
EVT SrcVecVT = SrcVec.getValueType();
// Check that the constant bitmask masks whole bytes.
APInt UndefElts;
SmallVector<APInt, 64> EltBits;
if (VT == SrcVecVT.getScalarType() &&
N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
llvm::all_of(EltBits, [](APInt M) {
return M.isNullValue() || M.isAllOnesValue();
})) {
unsigned NumElts = SrcVecVT.getVectorNumElements();
unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
// Create a root shuffle mask from the byte mask and the extracted index.
SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
for (unsigned i = 0; i != Scale; ++i) {
if (UndefElts[i])
continue;
int VecIdx = Scale * Idx + i;
ShuffleMask[VecIdx] =
EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
}
if (SDValue Shuffle = combineX86ShufflesRecursively(
{SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 2,
/*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
N->getOperand(0).getOperand(1));
}
}
return SDValue();
}
// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
if (N->getOpcode() != ISD::OR)
return false;
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// Canonicalize AND to LHS.
if (N1.getOpcode() == ISD::AND)
std::swap(N0, N1);
// Attempt to match OR(AND(M,Y),ANDNP(M,X)).
if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
return false;
Mask = N1.getOperand(0);
X = N1.getOperand(1);
// Check to see if the mask appeared in both the AND and ANDNP.
if (N0.getOperand(0) == Mask)
Y = N0.getOperand(1);
else if (N0.getOperand(1) == Mask)
Y = N0.getOperand(0);
else
return false;
// TODO: Attempt to match against AND(XOR(-1,M),Y) as well, since waiting for
// the ANDNP combine allows other combines to happen that prevent matching.
return true;
}
// Try to fold:
// (or (and (m, y), (pandn m, x)))
// into:
// (vselect m, x, y)
// As a special case, try to fold:
// (or (and (m, (sub 0, x)), (pandn m, x)))
// into:
// (sub (xor X, M), M)
static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
EVT VT = N->getValueType(0);
if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
(VT.is256BitVector() && Subtarget.hasInt256())))
return SDValue();
SDValue X, Y, Mask;
if (!matchLogicBlend(N, X, Y, Mask))
return SDValue();
// Validate that X, Y, and Mask are bitcasts, and see through them.
Mask = peekThroughBitcasts(Mask);
X = peekThroughBitcasts(X);
Y = peekThroughBitcasts(Y);
EVT MaskVT = Mask.getValueType();
unsigned EltBits = MaskVT.getScalarSizeInBits();
// TODO: Attempt to handle floating point cases as well?
if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
return SDValue();
SDLoc DL(N);
// Try to match:
// (or (and (M, (sub 0, X)), (pandn M, X)))
// which is a special case of vselect:
// (vselect M, (sub 0, X), X)
// Per:
// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
// We know that, if fNegate is 0 or 1:
// (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
//
// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
// ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
// ( M ? -X : X) == ((X ^ M ) + (M & 1))
// This lets us transform our vselect to:
// (add (xor X, M), (and M, 1))
// And further to:
// (sub (xor X, M), M)
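// (Check: when M is all-ones, (X ^ M) - M == ~X + 1 == -X; when M is zero,
// (X ^ 0) - 0 == X, matching (vselect M, (sub 0, X), X).)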
if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT &&
DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT)) {
auto IsNegV = [](SDNode *N, SDValue V) {
return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
};
SDValue V;
if (IsNegV(Y.getNode(), X))
V = X;
else if (IsNegV(X.getNode(), Y))
V = Y;
if (V) {
SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
SDValue SubOp2 = Mask;
// If the negate was on the false side of the select, then
// the operands of the SUB need to be swapped. PR 27251.
// This is because the pattern being matched above is
// (vselect M, (sub (0, X), X) -> (sub (xor X, M), M)
// but if the pattern matched was
// (vselect M, X, (sub (0, X))), that is really negation of the pattern
// above, -(vselect M, (sub 0, X), X), and therefore the replacement
// pattern also needs to be a negation of the replacement pattern above.
// And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
// sub accomplishes the negation of the replacement pattern.
if (V == Y)
std::swap(SubOp1, SubOp2);
SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
return DAG.getBitcast(VT, Res);
}
}
// PBLENDVB is only available on SSE 4.1.
if (!Subtarget.hasSSE41())
return SDValue();
MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
X = DAG.getBitcast(BlendVT, X);
Y = DAG.getBitcast(BlendVT, Y);
Mask = DAG.getBitcast(BlendVT, Mask);
Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
return DAG.getBitcast(VT, Mask);
}
// Helper function for combineOrCmpEqZeroToCtlzSrl
// Transforms:
// seteq(cmp x, 0)
// into:
// srl(ctlz x), log2(bitsize(x))
// Input pattern is checked by caller.
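// (With LZCNT defined at zero, ctlz(x) == bitwidth iff x == 0, so
// ctlz(x) >> log2(bitwidth) is exactly the seteq-with-zero result: e.g. for
// i32, ctlz(0) == 32 and 32 >> 5 == 1, while any nonzero x gives a value
// below 32 that shifts to 0.)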
static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
SelectionDAG &DAG) {
SDValue Cmp = Op.getOperand(1);
EVT VT = Cmp.getOperand(0).getValueType();
unsigned Log2b = Log2_32(VT.getSizeInBits());
SDLoc dl(Op);
SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
// The result of the shift is true or false, and on X86, the 32-bit
// encoding of shr and lzcnt is more desirable.
SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
DAG.getConstant(Log2b, dl, MVT::i8));
return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
}
// Try to transform:
// zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
// into:
// srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
// Will also attempt to match more generic cases, eg:
// zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
// Only applies if the target supports the FastLZCNT feature.
static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
return SDValue();
auto isORCandidate = [](SDValue N) {
return (N->getOpcode() == ISD::OR && N->hasOneUse());
};
// Check the zero extend is extending to 32-bit or more. The code generated by
// srl(ctlz) for 16-bit or less variants of the pattern would require extra
// instructions to clear the upper bits.
if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
!isORCandidate(N->getOperand(0)))
return SDValue();
// Check the node matches: setcc(eq, cmp 0)
auto isSetCCCandidate = [](SDValue N) {
return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
N->getOperand(1).getOpcode() == X86ISD::CMP &&
isNullConstant(N->getOperand(1).getOperand(1)) &&
N->getOperand(1).getValueType().bitsGE(MVT::i32);
};
SDNode *OR = N->getOperand(0).getNode();
SDValue LHS = OR->getOperand(0);
SDValue RHS = OR->getOperand(1);
// Save nodes matching or(or, setcc(eq, cmp 0)).
SmallVector<SDNode *, 2> ORNodes;
while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
(isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
ORNodes.push_back(OR);
OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
LHS = OR->getOperand(0);
RHS = OR->getOperand(1);
}
// The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
!isORCandidate(SDValue(OR, 0)))
return SDValue();
// We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower it
// to
// or(srl(ctlz),srl(ctlz)).
// The dag combiner can then fold it into:
// srl(or(ctlz, ctlz)).
EVT VT = OR->getValueType(0);
SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
SDValue Ret, NewRHS;
if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
if (!Ret)
return SDValue();
// Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
while (ORNodes.size() > 0) {
OR = ORNodes.pop_back_val();
LHS = OR->getOperand(0);
RHS = OR->getOperand(1);
// Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
if (RHS->getOpcode() == ISD::OR)
std::swap(LHS, RHS);
EVT VT = OR->getValueType(0);
SDValue NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
if (!NewRHS)
return SDValue();
Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
}
if (Ret)
Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
return Ret;
}
static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// If this is SSE1 only convert to FOR to avoid scalarization.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
return DAG.getBitcast(MVT::v4i32,
DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
DAG.getBitcast(MVT::v4f32, N0),
DAG.getBitcast(MVT::v4f32, N1)));
}
if (DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
return R;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
return R;
// Attempt to recursively combine an OR of shuffles.
if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
return Res;
}
if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
return SDValue();
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
unsigned Bits = VT.getScalarSizeInBits();
// SHLD/SHRD instructions have lower register pressure, but on some
// platforms they have higher latency than the equivalent
// series of shifts/or that would otherwise be generated.
// Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
// have higher latencies and we are not optimizing for size.
if (!OptForSize && Subtarget.isSHLDSlow())
return SDValue();
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
std::swap(N0, N1);
if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
return SDValue();
if (!N0.hasOneUse() || !N1.hasOneUse())
return SDValue();
SDValue ShAmt0 = N0.getOperand(1);
if (ShAmt0.getValueType() != MVT::i8)
return SDValue();
SDValue ShAmt1 = N1.getOperand(1);
if (ShAmt1.getValueType() != MVT::i8)
return SDValue();
// Peek through any modulo shift masks.
SDValue ShMsk0;
if (ShAmt0.getOpcode() == ISD::AND &&
isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
ShAmt0.getConstantOperandVal(1) == (Bits - 1)) {
ShMsk0 = ShAmt0;
ShAmt0 = ShAmt0.getOperand(0);
}
SDValue ShMsk1;
if (ShAmt1.getOpcode() == ISD::AND &&
isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
ShAmt1.getConstantOperandVal(1) == (Bits - 1)) {
ShMsk1 = ShAmt1;
ShAmt1 = ShAmt1.getOperand(0);
}
if (ShAmt0.getOpcode() == ISD::TRUNCATE)
ShAmt0 = ShAmt0.getOperand(0);
if (ShAmt1.getOpcode() == ISD::TRUNCATE)
ShAmt1 = ShAmt1.getOperand(0);
SDLoc DL(N);
unsigned Opc = X86ISD::SHLD;
SDValue Op0 = N0.getOperand(0);
SDValue Op1 = N1.getOperand(0);
if (ShAmt0.getOpcode() == ISD::SUB ||
ShAmt0.getOpcode() == ISD::XOR) {
Opc = X86ISD::SHRD;
std::swap(Op0, Op1);
std::swap(ShAmt0, ShAmt1);
std::swap(ShMsk0, ShMsk1);
}
// OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> SHLD( X, Y, C )
// OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> SHRD( X, Y, C )
// OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> SHLD( X, Y, C )
// OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> SHRD( X, Y, C )
// OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> SHLD( X, Y, C )
// OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> SHRD( X, Y, C )
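// (Illustration for Bits == 32, C == 8: SHLD(X, Y, 8) shifts X left by 8 and
// fills the vacated low bits from the high bits of Y, which is exactly
// (X << 8) | (Y >> 24).)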
if (ShAmt1.getOpcode() == ISD::SUB) {
SDValue Sum = ShAmt1.getOperand(0);
if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
ShAmt1Op1 = ShAmt1Op1.getOperand(0);
if ((SumC->getAPIntValue() == Bits ||
(SumC->getAPIntValue() == 0 && ShMsk1)) &&
ShAmt1Op1 == ShAmt0)
return DAG.getNode(Opc, DL, VT, Op0, Op1,
DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
}
} else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
return DAG.getNode(Opc, DL, VT,
N0.getOperand(0), N1.getOperand(0),
DAG.getNode(ISD::TRUNCATE, DL,
MVT::i8, ShAmt0));
} else if (ShAmt1.getOpcode() == ISD::XOR) {
SDValue Mask = ShAmt1.getOperand(1);
if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
unsigned InnerShift = (X86ISD::SHLD == Opc ? ISD::SRL : ISD::SHL);
SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
ShAmt1Op0 = ShAmt1Op0.getOperand(0);
if (MaskC->getSExtValue() == (Bits - 1) &&
(ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
if (Op1.getOpcode() == InnerShift &&
isa<ConstantSDNode>(Op1.getOperand(1)) &&
Op1.getConstantOperandVal(1) == 1) {
return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0),
DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
}
// Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
Op1.getOperand(0) == Op1.getOperand(1)) {
return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0),
DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
}
}
}
}
return SDValue();
}
/// Try to turn tests against the signbit in the form of:
/// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
/// into:
/// SETGT(X, -1)
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
// This is only worth doing if the output type is i8 or i1.
EVT ResultType = N->getValueType(0);
if (ResultType != MVT::i8 && ResultType != MVT::i1)
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// We should be performing an xor against a truncated shift.
if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
return SDValue();
// Make sure we are performing an xor against one.
if (!isOneConstant(N1))
return SDValue();
// SetCC on x86 zero extends so only act on this if it's a logical shift.
SDValue Shift = N0.getOperand(0);
if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
return SDValue();
// Make sure we are truncating from one of i16, i32 or i64.
EVT ShiftTy = Shift.getValueType();
if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
return SDValue();
// Make sure the shift amount extracts the sign bit.
if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
Shift.getConstantOperandVal(1) != ShiftTy.getSizeInBits() - 1)
return SDValue();
// Create a greater-than comparison against -1.
// N.B. Using SETGE against 0 works but we want a canonical looking
// comparison; using SETGT matches up with what TranslateX86CC does.
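// (For i32 X: (X >> 31) is the sign bit, so xor-ing the truncated bit with 1
// yields 1 exactly when X is non-negative, i.e. when X > -1.)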
SDLoc DL(N);
SDValue ShiftOp = Shift.getOperand(0);
EVT ShiftOpTy = ShiftOp.getValueType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), ResultType);
SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
if (SetCCResultType != ResultType)
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
return Cond;
}
/// Turn vector tests of the signbit in the form of:
/// xor (sra X, elt_size(X)-1), -1
/// into:
/// pcmpgt X, -1
///
/// This should be called before type legalization because the pattern may not
/// persist after that.
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (!VT.isSimple())
return SDValue();
switch (VT.getSimpleVT().SimpleTy) {
default: return SDValue();
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
}
// There must be a shift right algebraic before the xor, and the xor must be a
// 'not' operation.
SDValue Shift = N->getOperand(0);
SDValue Ones = N->getOperand(1);
if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
!ISD::isBuildVectorAllOnes(Ones.getNode()))
return SDValue();
// The shift should be smearing the sign bit across each vector element.
auto *ShiftBV = dyn_cast<BuildVectorSDNode>(Shift.getOperand(1));
if (!ShiftBV)
return SDValue();
EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
auto *ShiftAmt = ShiftBV->getConstantSplatNode();
if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
return SDValue();
// Create a greater-than comparison against -1. We don't use the more obvious
// greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
}
/// Check if truncation with saturation form type \p SrcVT to \p DstVT
/// is valid for the given \p Subtarget.
static bool isSATValidOnAVX512Subtarget(EVT SrcVT, EVT DstVT,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasAVX512())
return false;
// FIXME: Scalar type may be supported if we move it to vector register.
if (!SrcVT.isVector())
return false;
EVT SrcElVT = SrcVT.getScalarType();
EVT DstElVT = DstVT.getScalarType();
if (DstElVT != MVT::i8 && DstElVT != MVT::i16 && DstElVT != MVT::i32)
return false;
if (SrcVT.is512BitVector() || Subtarget.hasVLX())
return SrcElVT.getSizeInBits() >= 32 || Subtarget.hasBWI();
return false;
}
/// Detect patterns of truncation with unsigned saturation:
///
/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
/// Return the source value x to be truncated or SDValue() if the pattern was
/// not matched.
///
/// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
/// where C1 >= 0 and C2 is unsigned max of destination type.
///
/// (truncate (smax (smin (x, C2), C1)) to dest_type)
/// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
///
/// These two patterns are equivalent to:
/// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
/// So return the smax(x, C1) value to be truncated or SDValue() if the
/// pattern was not matched.
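///
/// For example, when truncating vector elements from i32 to i8, pattern 1 is
/// (truncate (umin (x, 255))) and pattern 2 covers clamps such as
/// (truncate (smin (smax (x, 0), 255))).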
static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
const SDLoc &DL) {
EVT InVT = In.getValueType();
// Saturation with truncation. We truncate from InVT to VT.
assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
"Unexpected types for truncate operation");
// Match min/max and return limit value as a parameter.
auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
if (V.getOpcode() == Opcode &&
ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
return V.getOperand(0);
return SDValue();
};
APInt C1, C2;
if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
// C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
// the element size of the destination type.
if (C2.isMask(VT.getScalarSizeInBits()))
return UMin;
if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
if (MatchMinMax(SMin, ISD::SMAX, C1))
if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
return SMin;
if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
C2.uge(C1)) {
return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
}
return SDValue();
}
/// Detect patterns of truncation with signed saturation:
/// (truncate (smin ((smax (x, signed_min_of_dest_type)),
/// signed_max_of_dest_type)) to dest_type)
/// or:
/// (truncate (smax ((smin (x, signed_max_of_dest_type)),
/// signed_min_of_dest_type)) to dest_type).
/// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
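/// For example, for an i32 -> i8 element truncation this matches
/// smin(smax(x, -128), 127) or smax(smin(x, 127), -128); with MatchPackUS the
/// clamp range is [0, 255] instead.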
static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
unsigned NumDstBits = VT.getScalarSizeInBits();
unsigned NumSrcBits = In.getScalarValueSizeInBits();
assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
auto MatchMinMax = [](SDValue V, unsigned Opcode,
const APInt &Limit) -> SDValue {
APInt C;
if (V.getOpcode() == Opcode &&
ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
return V.getOperand(0);
return SDValue();
};
APInt SignedMax, SignedMin;
if (MatchPackUS) {
SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
SignedMin = APInt(NumSrcBits, 0);
} else {
SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
}
if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
return SMax;
if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
return SMin;
return SDValue();
}
/// Detect a pattern of truncation with signed saturation.
/// The types should allow use of the VPMOVSS* instructions on AVX512.
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
static SDValue detectAVX512SSatPattern(SDValue In, EVT VT,
const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
if (!TLI.isTypeLegal(In.getValueType()))
return SDValue();
if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
return SDValue();
return detectSSatPattern(In, VT);
}
/// Detect a pattern of truncation with saturation:
/// (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
/// The types should allow use of the VPMOVUS* instructions on AVX512.
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
static SDValue detectAVX512USatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
const SDLoc &DL,
const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
if (!TLI.isTypeLegal(In.getValueType()))
return SDValue();
if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
return SDValue();
return detectUSatPattern(In, VT, DAG, DL);
}
static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT SVT = VT.getScalarType();
EVT InVT = In.getValueType();
EVT InSVT = InVT.getScalarType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.isTypeLegal(InVT) && TLI.isTypeLegal(VT) &&
isSATValidOnAVX512Subtarget(InVT, VT, Subtarget)) {
if (auto SSatVal = detectSSatPattern(In, VT))
return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
}
if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements()) &&
!Subtarget.hasAVX512() &&
(SVT == MVT::i8 || SVT == MVT::i16) &&
(InSVT == MVT::i16 || InSVT == MVT::i32)) {
if (auto USatVal = detectSSatPattern(In, VT, true)) {
// vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
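// e.g. a v16i32 -> v16i8 truncation first packs to v16i16 with PACKSS and
// then to v16i8 with PACKUS.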
if (SVT == MVT::i8 && InSVT == MVT::i32) {
EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
VT.getVectorNumElements());
SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
DAG, Subtarget);
if (Mid)
return truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
Subtarget);
} else if (SVT == MVT::i8 || Subtarget.hasSSE41())
return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
Subtarget);
}
if (auto SSatVal = detectSSatPattern(In, VT))
return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
Subtarget);
}
return SDValue();
}
/// This function detects the AVG pattern between vectors of unsigned i8/i16,
/// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
/// X86ISD::AVG instruction.
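/// For example, for <16 x i8> operands the pattern lowers to a single PAVGB.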
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
if (!VT.isVector())
return SDValue();
EVT InVT = In.getValueType();
unsigned NumElems = VT.getVectorNumElements();
EVT ScalarVT = VT.getVectorElementType();
if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
NumElems >= 2 && isPowerOf2_32(NumElems)))
return SDValue();
// InScalarVT is the intermediate type in the AVG pattern and it should be
// wider than the original input type (i8/i16).
EVT InScalarVT = InVT.getVectorElementType();
if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
return SDValue();
if (!Subtarget.hasSSE2())
return SDValue();
// Detect the following pattern:
//
// %1 = zext <N x i8> %a to <N x i32>
// %2 = zext <N x i8> %b to <N x i32>
// %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
// %4 = add nuw nsw <N x i32> %3, %2
// %5 = lshr <N x i32> %4, <i32 1 x N>
// %6 = trunc <N x i32> %5 to <N x i8>
//
// In AVX512, the last instruction can also be a trunc store.
if (In.getOpcode() != ISD::SRL)
return SDValue();
// A lambda checking whether the given SDValue is a constant vector and each
// element is in the range [Min, Max].
auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
if (!BV || !BV->isConstant())
return false;
for (SDValue Op : V->ops()) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
if (!C)
return false;
const APInt &Val = C->getAPIntValue();
if (Val.ult(Min) || Val.ugt(Max))
return false;
}
return true;
};
// Check if each element of the vector is right-shifted by one.
auto LHS = In.getOperand(0);
auto RHS = In.getOperand(1);
if (!IsConstVectorInRange(RHS, 1, 1))
return SDValue();
if (LHS.getOpcode() != ISD::ADD)
return SDValue();
// Detect a pattern of a + b + 1 where the order doesn't matter.
SDValue Operands[3];
Operands[0] = LHS.getOperand(0);
Operands[1] = LHS.getOperand(1);
auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
};
// Take care of the case when one of the operands is a constant vector whose
// element is in the range [1, 256] (i8) or [1, 65536] (i16).
if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
Operands[0].getOperand(0).getValueType() == VT) {
// The pattern is detected. Subtract one from the constant vector, then
// demote it and emit X86ISD::AVG instruction.
SDValue VecOnes = DAG.getConstant(1, DL, InVT);
Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
return SplitOpsAndApply(DAG, Subtarget, DL, VT,
{ Operands[0].getOperand(0), Operands[1] },
AVGBuilder);
}
if (Operands[0].getOpcode() == ISD::ADD)
std::swap(Operands[0], Operands[1]);
else if (Operands[1].getOpcode() != ISD::ADD)
return SDValue();
Operands[2] = Operands[1].getOperand(0);
Operands[1] = Operands[1].getOperand(1);
// Now we have three operands of two additions. Check that one of them is a
// constant vector with ones, and the other two are promoted from i8/i16.
for (int i = 0; i < 3; ++i) {
if (!IsConstVectorInRange(Operands[i], 1, 1))
continue;
std::swap(Operands[i], Operands[2]);
// Check if Operands[0] and Operands[1] are results of type promotion.
for (int j = 0; j < 2; ++j)
if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
Operands[j].getOperand(0).getValueType() != VT)
return SDValue();
// The pattern is detected, emit X86ISD::AVG instruction(s).
return SplitOpsAndApply(DAG, Subtarget, DL, VT,
{ Operands[0].getOperand(0),
Operands[1].getOperand(0) }, AVGBuilder);
}
return SDValue();
}
static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
LoadSDNode *Ld = cast<LoadSDNode>(N);
EVT RegVT = Ld->getValueType(0);
EVT MemVT = Ld->getMemoryVT();
SDLoc dl(Ld);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// For chips with slow 32-byte unaligned loads, break the 32-byte operation
// into two 16-byte operations. Also split non-temporal aligned loads on
// pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
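// e.g. on such targets an unaligned 32-byte v8f32 load becomes two 16-byte
// v4f32 loads that are concatenated back into the original v8f32 value.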
ISD::LoadExtType Ext = Ld->getExtensionType();
bool Fast;
unsigned AddressSpace = Ld->getAddressSpace();
unsigned Alignment = Ld->getAlignment();
if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
Ext == ISD::NON_EXTLOAD &&
((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
(TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
AddressSpace, Alignment, &Fast) && !Fast))) {
unsigned NumElems = RegVT.getVectorNumElements();
if (NumElems < 2)
return SDValue();
SDValue Ptr = Ld->getBasePtr();
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
NumElems/2);
SDValue Load1 =
DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
Alignment, Ld->getMemOperand()->getFlags());
Ptr = DAG.getMemBasePlusOffset(Ptr, 16, dl);
SDValue Load2 =
DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
Ld->getPointerInfo().getWithOffset(16),
MinAlign(Alignment, 16U), Ld->getMemOperand()->getFlags());
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Load1.getValue(1),
Load2.getValue(1));
SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
return DCI.CombineTo(N, NewVec, TF, true);
}
return SDValue();
}
/// If V is a build vector of boolean constants and exactly one of those
/// constants is true, return the operand index of that true element.
/// Otherwise, return -1.
static int getOneTrueElt(SDValue V) {
// This needs to be a build vector of booleans.
// TODO: Checking for the i1 type matches the IR definition for the mask,
// but the mask check could be loosened to i8 or other types. That might
// also require checking more than 'allOnesValue'; e.g., the x86 HW
// instructions only require that the MSB is set for each mask element.
// The ISD::MSTORE comments/definition do not specify how the mask operand
// is formatted.
auto *BV = dyn_cast<BuildVectorSDNode>(V);
if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
return -1;
int TrueIndex = -1;
unsigned NumElts = BV->getValueType(0).getVectorNumElements();
for (unsigned i = 0; i < NumElts; ++i) {
const SDValue &Op = BV->getOperand(i);
if (Op.isUndef())
continue;
auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
if (!ConstNode)
return -1;
if (ConstNode->getAPIntValue().isAllOnesValue()) {
// If we already found a one, this is too many.
if (TrueIndex >= 0)
return -1;
TrueIndex = i;
}
}
return TrueIndex;
}
/// Given a masked memory load/store operation, return true if it has one mask
/// bit set. If it has one mask bit set, then also return the memory address of
/// the scalar element to load/store, the vector index to insert/extract that
/// scalar element, and the alignment for the scalar memory access.
static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
SelectionDAG &DAG, SDValue &Addr,
SDValue &Index, unsigned &Alignment) {
int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
if (TrueMaskElt < 0)
return false;
// Get the address of the one scalar element that is specified by the mask
// using the appropriate offset from the base pointer.
EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
Addr = MaskedOp->getBasePtr();
if (TrueMaskElt != 0) {
unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
}
Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
return true;
}
/// If exactly one element of the mask is set for a non-extending masked load,
/// it can be replaced by a scalar load and a vector insert.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
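///
/// For example, a v4i32 masked load whose mask is <0,0,1,0> becomes an i32
/// load from base+8 that is inserted into element 2 of the pass-through
/// vector.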
static SDValue
reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
// TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
// However, some target hooks may need to be added to know when the transform
// is profitable. Endianness would also have to be considered.
SDValue Addr, VecIndex;
unsigned Alignment;
if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
return SDValue();
// Load the one scalar element that is specified by the mask using the
// appropriate offset from the base pointer.
SDLoc DL(ML);
EVT VT = ML->getValueType(0);
EVT EltVT = VT.getVectorElementType();
SDValue Load =
DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
Alignment, ML->getMemOperand()->getFlags());
// Insert the loaded element into the appropriate place in the vector.
SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
ML->getPassThru(), Load, VecIndex);
return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
}
static SDValue
combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
return SDValue();
SDLoc DL(ML);
EVT VT = ML->getValueType(0);
// If we are loading the first and last elements of a vector, it is safe and
// always faster to load the whole vector. Replace the masked load with a
// vector load and select.
unsigned NumElts = VT.getVectorNumElements();
BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
if (LoadFirstElt && LoadLastElt) {
SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
ML->getMemOperand());
SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
ML->getPassThru());
return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
}
// Convert a masked load with a constant mask into a masked load and a select.
// This allows the select operation to use a faster kind of select instruction
// (for example, vblendvps -> vblendps).
// Don't try this if the pass-through operand is already undefined. That would
// cause an infinite loop because that's what we're about to create.
if (ML->getPassThru().isUndef())
return SDValue();
// The new masked load has an undef pass-through operand. The select uses the
// original pass-through operand.
SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
ML->getMask(), DAG.getUNDEF(VT),
ML->getMemoryVT(), ML->getMemOperand(),
ML->getExtensionType());
SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
ML->getPassThru());
return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
}
static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
// TODO: Expanding load with constant mask may be optimized as well.
if (Mld->isExpandingLoad())
return SDValue();
if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
return ScalarLoad;
// TODO: Do some AVX512 subsets benefit from this transform?
if (!Subtarget.hasAVX512())
if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
return Blend;
}
if (Mld->getExtensionType() != ISD::SEXTLOAD)
return SDValue();
// Resolve extending loads.
EVT VT = Mld->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
EVT LdVT = Mld->getMemoryVT();
SDLoc dl(Mld);
assert(LdVT != VT && "Cannot extend to the same type");
unsigned ToSz = VT.getScalarSizeInBits();
unsigned FromSz = LdVT.getScalarSizeInBits();
// From/To sizes and ElemCount must be powers of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for extending masked load");
unsigned SizeRatio = ToSz / FromSz;
assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
// Create a type on which we perform the shuffle.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
LdVT.getScalarType(), NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
// Convert PassThru value.
SDValue WidePassThru = DAG.getBitcast(WideVecVT, Mld->getPassThru());
if (!Mld->getPassThru().isUndef()) {
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
// Can't shuffle using an illegal type.
assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
"WideVecVT should be legal");
WidePassThru = DAG.getVectorShuffle(WideVecVT, dl, WidePassThru,
DAG.getUNDEF(WideVecVT), ShuffleVec);
}
// Prepare the new mask.
SDValue NewMask;
SDValue Mask = Mld->getMask();
if (Mask.getValueType() == VT) {
// Mask and original value have the same type.
NewMask = DAG.getBitcast(WideVecVT, Mask);
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i)
ShuffleVec[i] = NumElems * SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
DAG.getConstant(0, dl, WideVecVT),
ShuffleVec);
} else {
assert(Mask.getValueType().getVectorElementType() == MVT::i1);
unsigned WidenNumElts = NumElems*SizeRatio;
unsigned MaskNumElts = VT.getVectorNumElements();
EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
WidenNumElts);
unsigned NumConcat = WidenNumElts / MaskNumElts;
SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
Ops[0] = Mask;
NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
}
SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
Mld->getBasePtr(), NewMask, WidePassThru,
Mld->getMemoryVT(), Mld->getMemOperand(),
ISD::NON_EXTLOAD);
SDValue NewVec = getExtendInVec(/*Signed*/true, dl, VT, WideLd, DAG);
return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}
/// If exactly one element of the mask is set for a non-truncating masked store,
/// it can be replaced by a vector extract and a scalar store.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
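///
/// For example, a v4i32 masked store whose mask is <0,1,0,0> becomes an
/// extract of element 1 followed by an i32 store at base+4.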
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
SelectionDAG &DAG) {
// TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
// However, some target hooks may need to be added to know when the transform
// is profitable. Endianness would also have to be considered.
SDValue Addr, VecIndex;
unsigned Alignment;
if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
return SDValue();
// Extract the one scalar element that is actually being stored.
SDLoc DL(MS);
EVT VT = MS->getValue().getValueType();
EVT EltVT = VT.getVectorElementType();
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
MS->getValue(), VecIndex);
// Store that element at the appropriate offset from the base pointer.
return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
Alignment, MS->getMemOperand()->getFlags());
}
static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
if (Mst->isCompressingStore())
return SDValue();
EVT VT = Mst->getValue().getValueType();
if (!Mst->isTruncatingStore()) {
if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
return ScalarStore;
// If the mask value has been legalized to a non-boolean vector, try to
// simplify ops leading up to it. We only demand the MSB of each lane.
SDValue Mask = Mst->getMask();
if (Mask.getScalarValueSizeInBits() != 1) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
return SDValue(N, 0);
}
// TODO: AVX512 targets should also be able to simplify something like the
// pattern above, but that pattern will be different. It will either need to
// match setcc more generally or match PCMPGTM later (in tablegen?).
return SDValue();
}
// Resolve truncating stores.
unsigned NumElems = VT.getVectorNumElements();
EVT StVT = Mst->getMemoryVT();
SDLoc dl(Mst);
assert(StVT != VT && "Cannot truncate to the same type");
unsigned FromSz = VT.getScalarSizeInBits();
unsigned ToSz = StVT.getScalarSizeInBits();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// The truncating store is legal in some cases. For example
// vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
// are designated for truncating stores.
// In this case we don't need any further transformations.
if (TLI.isTruncStoreLegal(VT, StVT))
return SDValue();
// From/To sizes and ElemCount must be powers of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for truncating masked store");
// We are going to use the original vector elt for storing.
// Accumulated smaller vector elements must be a multiple of the store size.
assert (((NumElems * FromSz) % ToSz) == 0 &&
"Unexpected ratio for truncating masked store");
unsigned SizeRatio = FromSz / ToSz;
assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
// Create a type on which we perform the shuffle.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
StVT.getScalarType(), NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue WideVec = DAG.getBitcast(WideVecVT, Mst->getValue());
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
// Can't shuffle using an illegal type.
assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
"WideVecVT should be legal");
SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
DAG.getUNDEF(WideVecVT),
ShuffleVec);
SDValue NewMask;
SDValue Mask = Mst->getMask();
if (Mask.getValueType() == VT) {
// Mask and original value have the same type.
NewMask = DAG.getBitcast(WideVecVT, Mask);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
ShuffleVec[i] = NumElems*SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
DAG.getConstant(0, dl, WideVecVT),
ShuffleVec);
} else {
assert(Mask.getValueType().getVectorElementType() == MVT::i1);
unsigned WidenNumElts = NumElems*SizeRatio;
unsigned MaskNumElts = VT.getVectorNumElements();
EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
WidenNumElts);
unsigned NumConcat = WidenNumElts / MaskNumElts;
SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
Ops[0] = Mask;
NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
}
return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
Mst->getBasePtr(), NewMask, StVT,
Mst->getMemOperand(), false);
}
static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
StoreSDNode *St = cast<StoreSDNode>(N);
EVT VT = St->getValue().getValueType();
EVT StVT = St->getMemoryVT();
SDLoc dl(St);
SDValue StoredVal = St->getOperand(1);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Convert a store of vXi1 into a store of iX and a bitcast.
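// e.g. a v8i1 store becomes an i8 store of the bitcast mask value.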
if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
VT.getVectorElementType() == MVT::i1) {
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
StoredVal = DAG.getBitcast(NewVT, StoredVal);
return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
// If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
// This will avoid a copy to k-register.
if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
StoredVal.getOperand(0).getValueType() == MVT::i8) {
return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
St->getBasePtr(), St->getPointerInfo(),
St->getAlignment(), St->getMemOperand()->getFlags());
}
// Widen v2i1/v4i1 stores to v8i1.
if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
Subtarget.hasAVX512()) {
unsigned NumConcats = 8 / VT.getVectorNumElements();
SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
Ops[0] = StoredVal;
StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
// Turn vXi1 stores of constants into a scalar store.
if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
// If it's a v64i1 store without 64-bit support, we need two stores.
if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
StoredVal->ops().slice(0, 32));
Lo = combinevXi1ConstantToInteger(Lo, DAG);
SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
StoredVal->ops().slice(32, 32));
Hi = combinevXi1ConstantToInteger(Hi, DAG);
unsigned Alignment = St->getAlignment();
SDValue Ptr0 = St->getBasePtr();
SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
SDValue Ch0 =
DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
Alignment, St->getMemOperand()->getFlags());
SDValue Ch1 =
DAG.getStore(St->getChain(), dl, Hi, Ptr1,
St->getPointerInfo().getWithOffset(4),
MinAlign(Alignment, 4U),
St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
}
StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
// If we are saving a concatenation of two XMM registers and 32-byte stores
// are slow, such as on Sandy Bridge, perform two 16-byte stores.
bool Fast;
unsigned AddressSpace = St->getAddressSpace();
unsigned Alignment = St->getAlignment();
if (VT.is256BitVector() && StVT == VT &&
TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
AddressSpace, Alignment, &Fast) &&
!Fast) {
unsigned NumElems = VT.getVectorNumElements();
if (NumElems < 2)
return SDValue();
SDValue Value0 = extract128BitVector(StoredVal, 0, DAG, dl);
SDValue Value1 = extract128BitVector(StoredVal, NumElems / 2, DAG, dl);
SDValue Ptr0 = St->getBasePtr();
SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 16, dl);
SDValue Ch0 =
DAG.getStore(St->getChain(), dl, Value0, Ptr0, St->getPointerInfo(),
Alignment, St->getMemOperand()->getFlags());
SDValue Ch1 =
DAG.getStore(St->getChain(), dl, Value1, Ptr1,
St->getPointerInfo().getWithOffset(16),
MinAlign(Alignment, 16U), St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
}
// Optimize trunc store (of multiple scalars) to shuffle and store.
// First, pack all of the elements in one place. Next, store to memory
// in fewer chunks.
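// e.g. a v8i32 -> v8i16 truncating store is shuffled so the narrowed values
// land at the bottom of a wide register and is then written out with one or
// more wide scalar stores.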
if (St->isTruncatingStore() && VT.isVector()) {
// Check if we can detect an AVG pattern from the truncation. If yes,
// replace the trunc store by a normal store with the result of X86ISD::AVG
// instruction.
if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
Subtarget, dl))
return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (SDValue Val =
detectAVX512SSatPattern(St->getValue(), St->getMemoryVT(), Subtarget,
TLI))
return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
dl, Val, St->getBasePtr(),
St->getMemoryVT(), St->getMemOperand(), DAG);
if (SDValue Val = detectAVX512USatPattern(St->getValue(), St->getMemoryVT(),
DAG, dl, Subtarget, TLI))
return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
dl, Val, St->getBasePtr(),
St->getMemoryVT(), St->getMemOperand(), DAG);
unsigned NumElems = VT.getVectorNumElements();
assert(StVT != VT && "Cannot truncate to the same type");
unsigned FromSz = VT.getScalarSizeInBits();
unsigned ToSz = StVT.getScalarSizeInBits();
// The truncating store is legal in some cases. For example
// vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
// are designated for truncating stores.
// In this case we don't need any further transformations.
if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
return SDValue();
// From/To sizes and ElemCount must be powers of two.
if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
// We are going to use the original vector elt for storing.
// Accumulated smaller vector elements must be a multiple of the store size.
if (0 != (NumElems * FromSz) % ToSz) return SDValue();
unsigned SizeRatio = FromSz / ToSz;
assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
// Create a type on which we perform the shuffle
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
StVT.getScalarType(), NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue WideVec = DAG.getBitcast(WideVecVT, St->getValue());
SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
// Can't shuffle using an illegal type.
if (!TLI.isTypeLegal(WideVecVT))
return SDValue();
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
DAG.getUNDEF(WideVecVT),
ShuffleVec);
// At this point all of the data is stored at the bottom of the
// register. We now need to save it to memory.
// Find the largest legal store unit.
MVT StoreType = MVT::i8;
for (MVT Tp : MVT::integer_valuetypes()) {
if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
StoreType = Tp;
}
// On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
(64 <= NumElems * ToSz))
StoreType = MVT::f64;
// Bitcast the original vector into a vector of store-size units
EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
SmallVector<SDValue, 8> Chains;
SDValue Ptr = St->getBasePtr();
// Perform one or more big stores into memory.
for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
StoreType, ShuffWide,
DAG.getIntPtrConstant(i, dl));
SDValue Ch =
DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
St->getAlignment(), St->getMemOperand()->getFlags());
Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
Chains.push_back(Ch);
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}
// Turn load->store of MMX types into GPR load/stores. This avoids clobbering
// the FP state in cases where an emms may be missing.
// A preferable solution to the general problem is to figure out the right
// places to insert EMMS. This qualifies as a quick hack.
// Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
if (VT.getSizeInBits() != 64)
return SDValue();
const Function &F = DAG.getMachineFunction().getFunction();
bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal =
!Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
if ((VT.isVector() ||
(VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
isa<LoadSDNode>(St->getValue()) &&
!cast<LoadSDNode>(St->getValue())->isVolatile() &&
St->getChain().hasOneUse() && !St->isVolatile()) {
LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
SmallVector<SDValue, 8> Ops;
if (!ISD::isNormalLoad(Ld))
return SDValue();
// If this is not the MMX case, i.e. we are just turning i64 load/store
// into f64 load/store, avoid the transformation if there are multiple
// uses of the loaded value.
if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
return SDValue();
SDLoc LdDL(Ld);
SDLoc StDL(N);
// If we are a 64-bit capable x86, lower to a single movq load/store pair.
// Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
// pair instead.
if (Subtarget.is64Bit() || F64IsLegal) {
MVT LdVT = (Subtarget.is64Bit() &&
(!VT.isFloatingPoint() || !F64IsLegal)) ? MVT::i64 : MVT::f64;
SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
Ld->getMemOperand());
// Make sure new load is placed in same chain order.
DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
St->getMemOperand());
}
// Otherwise, lower to two pairs of 32-bit loads / stores.
SDValue LoAddr = Ld->getBasePtr();
SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
Ld->getPointerInfo(), Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
Ld->getPointerInfo().getWithOffset(4),
MinAlign(Ld->getAlignment(), 4),
Ld->getMemOperand()->getFlags());
// Make sure new loads are placed in same chain order.
DAG.makeEquivalentMemoryOrdering(Ld, LoLd);
DAG.makeEquivalentMemoryOrdering(Ld, HiLd);
LoAddr = St->getBasePtr();
HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
SDValue LoSt =
DAG.getStore(St->getChain(), StDL, LoLd, LoAddr, St->getPointerInfo(),
St->getAlignment(), St->getMemOperand()->getFlags());
SDValue HiSt = DAG.getStore(St->getChain(), StDL, HiLd, HiAddr,
St->getPointerInfo().getWithOffset(4),
MinAlign(St->getAlignment(), 4),
St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
}
// This is similar to the above case, but here we handle a scalar 64-bit
// integer store that is extracted from a vector on a 32-bit target.
// If we have SSE2, then we can treat it like a floating-point double
// to get past legalization. The execution dependencies fixup pass will
// choose the optimal machine instruction for the store if this really is
// an integer or v2f32 rather than an f64.
if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
SDValue OldExtract = St->getOperand(1);
SDValue ExtOp0 = OldExtract.getOperand(0);
unsigned VecSize = ExtOp0.getValueSizeInBits();
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
BitCast, OldExtract.getOperand(1));
return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
return SDValue();
}
/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS. A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector. For example, if
/// A = < float a0, float a1, float a2, float a3 >
/// and
/// B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
// If either operand is undef, bail out. The binop should be simplified.
if (LHS.isUndef() || RHS.isUndef())
return false;
// Look for the following pattern:
// A = < float a0, float a1, float a2, float a3 >
// B = < float b0, float b1, float b2, float b3 >
// and
// LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
// RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
// then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
// which is A horizontal-op B.
// At least one of the operands should be a vector shuffle.
if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
return false;
MVT VT = LHS.getSimpleValueType();
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Unsupported vector type for horizontal add/sub");
// View LHS in the form
// LHS = VECTOR_SHUFFLE A, B, LMask
// If LHS is not a shuffle, then pretend it is the identity shuffle:
// LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
// NOTE: A default initialized SDValue represents an UNDEF of type VT.
unsigned NumElts = VT.getVectorNumElements();
SDValue A, B;
SmallVector<int, 16> LMask(NumElts);
if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
if (!LHS.getOperand(0).isUndef())
A = LHS.getOperand(0);
if (!LHS.getOperand(1).isUndef())
B = LHS.getOperand(1);
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
llvm::copy(Mask, LMask.begin());
} else {
A = LHS;
for (unsigned i = 0; i != NumElts; ++i)
LMask[i] = i;
}
// Likewise, view RHS in the form
// RHS = VECTOR_SHUFFLE C, D, RMask
SDValue C, D;
SmallVector<int, 16> RMask(NumElts);
if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
if (!RHS.getOperand(0).isUndef())
C = RHS.getOperand(0);
if (!RHS.getOperand(1).isUndef())
D = RHS.getOperand(1);
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
llvm::copy(Mask, RMask.begin());
} else {
C = RHS;
for (unsigned i = 0; i != NumElts; ++i)
RMask[i] = i;
}
// If A and B occur in reverse order in RHS, then canonicalize by commuting
// RHS operands and shuffle mask.
if (A != C) {
std::swap(C, D);
ShuffleVectorSDNode::commuteMask(RMask);
}
// Check that the shuffles are both shuffling the same vectors.
if (!(A == C && B == D))
return false;
// LHS and RHS are now:
// LHS = shuffle A, B, LMask
// RHS = shuffle A, B, RMask
// Check that the masks correspond to performing a horizontal operation.
// AVX defines horizontal add/sub to operate independently on 128-bit lanes,
// so we just repeat the inner loop if this is a 256-bit op.
unsigned Num128BitChunks = VT.getSizeInBits() / 128;
unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
assert((NumEltsPer128BitChunk % 2 == 0) &&
"Vector type should have an even number of elements in each lane");
for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
// Ignore undefined components.
int LIdx = LMask[i + j], RIdx = RMask[i + j];
if (LIdx < 0 || RIdx < 0 ||
(!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
(!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
continue;
// The low half of the 128-bit result must choose from A.
// The high half of the 128-bit result must choose from B,
// unless B is undef. In that case, we are always choosing from A.
unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;
// Check that successive elements are being operated on. If not, this is
// not a horizontal operation.
int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
if (!(LIdx == Index && RIdx == Index + 1) &&
!(IsCommutative && LIdx == Index + 1 && RIdx == Index))
return false;
}
}
LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
return true;
}
/// Do target-specific dag combines on floating-point adds/subs.
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
bool IsFadd = N->getOpcode() == ISD::FADD;
auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
// Try to synthesize horizontal add/sub from adds/subs of shuffles.
if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
(Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
isHorizontalBinOp(LHS, RHS, IsFadd) &&
shouldUseHorizontalOp(LHS == RHS, DAG, Subtarget))
return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
return SDValue();
}
/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
/// the codegen.
/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
/// anything that is guaranteed to be transformed by DAGCombiner.
static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
SDValue Src = N->getOperand(0);
unsigned Opcode = Src.getOpcode();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = N->getValueType(0);
EVT SrcVT = Src.getValueType();
auto IsFreeTruncation = [VT](SDValue Op) {
unsigned TruncSizeInBits = VT.getScalarSizeInBits();
// See if this has been extended from a smaller/equal size to
// the truncation size, allowing a truncation to combine with the extend.
unsigned Opcode = Op.getOpcode();
if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
Opcode == ISD::ZERO_EXTEND) &&
Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
return true;
// See if this is a single use constant which can be constant folded.
SDValue BC = peekThroughOneUseBitcasts(Op);
return ISD::isBuildVectorOfConstantSDNodes(BC.getNode());
};
auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
return DAG.getNode(Opcode, DL, VT, Trunc0, Trunc1);
};
// Don't combine if the operation has other uses.
if (!Src.hasOneUse())
return SDValue();
// Only support vector truncation for now.
// TODO: i64 scalar math would benefit as well.
if (!VT.isVector())
return SDValue();
// In most cases it's only worth pre-truncating if we're only facing the cost
// of one truncation.
// i.e. if one of the inputs will constant fold or the input is repeated.
switch (Opcode) {
case ISD::AND:
case ISD::XOR:
case ISD::OR: {
SDValue Op0 = Src.getOperand(0);
SDValue Op1 = Src.getOperand(1);
if (TLI.isOperationLegalOrPromote(Opcode, VT) &&
(Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
return TruncateArithmetic(Op0, Op1);
break;
}
case ISD::MUL:
// X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
// better to truncate if we have the chance.
if (SrcVT.getScalarType() == MVT::i64 && TLI.isOperationLegal(Opcode, VT) &&
!TLI.isOperationLegal(Opcode, SrcVT))
return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
LLVM_FALLTHROUGH;
case ISD::ADD: {
SDValue Op0 = Src.getOperand(0);
SDValue Op1 = Src.getOperand(1);
if (TLI.isOperationLegal(Opcode, VT) &&
(Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
return TruncateArithmetic(Op0, Op1);
break;
}
case ISD::SUB: {
// TODO: For ISD::SUB we are conservative and require both sides to be freely
// truncatable to avoid interfering with combineSubToSubus.
SDValue Op0 = Src.getOperand(0);
SDValue Op1 = Src.getOperand(1);
if (TLI.isOperationLegal(Opcode, VT) &&
(Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
return TruncateArithmetic(Op0, Op1);
break;
}
}
return SDValue();
}
/// Truncate using ISD::AND mask and X86ISD::PACKUS.
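///
/// For example, for a v8i32 -> v8i16 truncation each 128-bit subvector is
/// ANDed with a splat of 0xFFFF and the pieces are then packed down with
/// PACKUS.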
static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = N->getOperand(0);
EVT InVT = In.getValueType();
EVT InSVT = InVT.getVectorElementType();
EVT OutVT = N->getValueType(0);
EVT OutSVT = OutVT.getVectorElementType();
// Split a long vector into vectors of legal type and mask to unset all bits
// that won't appear in the result to prevent saturation.
// TODO - we should be doing this at the maximum legal size but this is
// causing regressions where we're concatenating back to max width just to
// perform the AND and then extracting back again.
unsigned NumSubRegs = InVT.getSizeInBits() / 128;
unsigned NumSubRegElts = 128 / InSVT.getSizeInBits();
EVT SubRegVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubRegElts);
SmallVector<SDValue, 8> SubVecs(NumSubRegs);
APInt Mask =
APInt::getLowBitsSet(InSVT.getSizeInBits(), OutSVT.getSizeInBits());
SDValue MaskVal = DAG.getConstant(Mask, DL, SubRegVT);
for (unsigned i = 0; i < NumSubRegs; i++) {
SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubRegVT, In,
DAG.getIntPtrConstant(i * NumSubRegElts, DL));
SubVecs[i] = DAG.getNode(ISD::AND, DL, SubRegVT, Sub, MaskVal);
}
In = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, SubVecs);
return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
}
/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = N->getOperand(0);
EVT InVT = In.getValueType();
EVT OutVT = N->getValueType(0);
In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
DAG.getValueType(OutVT));
return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
}
/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
/// legalization the truncation will be translated into a BUILD_VECTOR with each
/// element that is extracted from a vector and then truncated, and it is
/// difficult to do this optimization based on that form.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT OutVT = N->getValueType(0);
if (!OutVT.isVector())
return SDValue();
SDValue In = N->getOperand(0);
if (!In.getValueType().isSimple())
return SDValue();
EVT InVT = In.getValueType();
unsigned NumElems = OutVT.getVectorNumElements();
// TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
// SSE2, and we need to take care of it specially.
// AVX512 provides vpmovdb.
if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
return SDValue();
EVT OutSVT = OutVT.getVectorElementType();
EVT InSVT = InVT.getVectorElementType();
if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
(OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
NumElems >= 8))
return SDValue();
// SSSE3's pshufb results in fewer instructions in the cases below.
if (Subtarget.hasSSSE3() && NumElems == 8 &&
((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
(InSVT == MVT::i32 && OutSVT == MVT::i16)))
return SDValue();
SDLoc DL(N);
// SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
// for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
// truncate 2 x v4i32 to v8i16.
if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
if (InSVT == MVT::i32)
return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
return SDValue();
}
/// This function transforms vector truncation of 'extended sign-bits' or
/// 'extended zero-bits' values (vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32) into
/// X86ISD::PACKSS/PACKUS operations.
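///
/// For example, truncating a vXi16 comparison result (every bit is a copy of
/// the sign bit) to vXi8 can use a PACKSS-based truncation directly.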
static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Requires SSE2 but AVX512 has fast truncate.
if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
return SDValue();
if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
return SDValue();
SDValue In = N->getOperand(0);
if (!In.getValueType().isSimple())
return SDValue();
MVT VT = N->getValueType(0).getSimpleVT();
MVT SVT = VT.getScalarType();
MVT InVT = In.getValueType().getSimpleVT();
MVT InSVT = InVT.getScalarType();
// Check we have a truncation suited for PACKSS/PACKUS.
if (!VT.is128BitVector() && !VT.is256BitVector())
return SDValue();
if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
return SDValue();
if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
return SDValue();
unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
// Use PACKUS if the input has zero-bits that extend all the way to the
// packed/truncated value. e.g. masks, zext_in_reg, etc.
KnownBits Known = DAG.computeKnownBits(In);
unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
// Use PACKSS if the input has sign-bits that extend all the way to the
// packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
unsigned NumSignBits = DAG.ComputeNumSignBits(In);
if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
return SDValue();
}
// Try to form a MULHU or MULHS node by looking for
// (trunc (srl (mul ext, ext), 16))
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
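// e.g. (trunc (srl (mul (sext X), (sext Y)), 16)) with vXi16 X and Y becomes
// (mulhs X, Y), which lowers to PMULHW; the zero-extended form becomes
// (mulhu X, Y) / PMULHUW.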
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
SelectionDAG &DAG, const X86Subtarget &Subtarget) {
// First instruction should be a right shift of a multiply.
if (Src.getOpcode() != ISD::SRL ||
Src.getOperand(0).getOpcode() != ISD::MUL)
return SDValue();
if (!Subtarget.hasSSE2())
return SDValue();
// Only handle vXi16 types that are at least 128-bits unless they will be
// widened.
if (!VT.isVector() || VT.getVectorElementType() != MVT::i16 ||
(!ExperimentalVectorWideningLegalization &&
VT.getVectorNumElements() < 8))
return SDValue();
// Input type should be vXi32.
EVT InVT = Src.getValueType();
if (InVT.getVectorElementType() != MVT::i32)
return SDValue();
// Need a shift by 16.
APInt ShiftAmt;
if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
ShiftAmt != 16)
return SDValue();
SDValue LHS = Src.getOperand(0).getOperand(0);
SDValue RHS = Src.getOperand(0).getOperand(1);
unsigned ExtOpc = LHS.getOpcode();
if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
RHS.getOpcode() != ExtOpc)
return SDValue();
// Peek through the extends.
LHS = LHS.getOperand(0);
RHS = RHS.getOperand(0);
// Ensure the input types match.
if (LHS.getValueType() != VT || RHS.getValueType() != VT)
return SDValue();
unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
return DAG.getNode(Opc, DL, VT, LHS, RHS);
}
// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
// from one vector with signed bytes from another vector, adds together
// adjacent pairs of 16-bit products, and saturates the result before
// truncating to 16 bits.
//
// Which looks something like this:
// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
// (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
if (!VT.isVector() || !Subtarget.hasSSSE3())
return SDValue();
unsigned NumElems = VT.getVectorNumElements();
EVT ScalarVT = VT.getVectorElementType();
if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
return SDValue();
SDValue SSatVal = detectSSatPattern(In, VT);
if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
return SDValue();
// Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
// of multiplies from even/odd elements.
SDValue N0 = SSatVal.getOperand(0);
SDValue N1 = SSatVal.getOperand(1);
if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
return SDValue();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
// TODO: Handle constant vectors and use knownbits/computenumsignbits?
// Canonicalize zero_extend to LHS.
if (N01.getOpcode() == ISD::ZERO_EXTEND)
std::swap(N00, N01);
if (N11.getOpcode() == ISD::ZERO_EXTEND)
std::swap(N10, N11);
// Ensure we have a zero_extend and a sign_extend.
if (N00.getOpcode() != ISD::ZERO_EXTEND ||
N01.getOpcode() != ISD::SIGN_EXTEND ||
N10.getOpcode() != ISD::ZERO_EXTEND ||
N11.getOpcode() != ISD::SIGN_EXTEND)
return SDValue();
// Peek through the extends.
N00 = N00.getOperand(0);
N01 = N01.getOperand(0);
N10 = N10.getOperand(0);
N11 = N11.getOperand(0);
// Ensure the extend is from vXi8.
if (N00.getValueType().getVectorElementType() != MVT::i8 ||
N01.getValueType().getVectorElementType() != MVT::i8 ||
N10.getValueType().getVectorElementType() != MVT::i8 ||
N11.getValueType().getVectorElementType() != MVT::i8)
return SDValue();
// All inputs should be build_vectors.
if (N00.getOpcode() != ISD::BUILD_VECTOR ||
N01.getOpcode() != ISD::BUILD_VECTOR ||
N10.getOpcode() != ISD::BUILD_VECTOR ||
N11.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
// N00/N10 are zero extended. N01/N11 are sign extended.
// For each element, we need to ensure we have an odd element from one vector
// multiplied by the odd element of another vector and the even element from
// one of the same vectors being multiplied by the even element from the
// other vector. So we need to make sure for each element i, this operation
// is being performed:
// A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
SDValue ZExtIn, SExtIn;
for (unsigned i = 0; i != NumElems; ++i) {
SDValue N00Elt = N00.getOperand(i);
SDValue N01Elt = N01.getOperand(i);
SDValue N10Elt = N10.getOperand(i);
SDValue N11Elt = N11.getOperand(i);
// TODO: Be more tolerant to undefs.
if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
return SDValue();
unsigned IdxN00 = ConstN00Elt->getZExtValue();
unsigned IdxN01 = ConstN01Elt->getZExtValue();
unsigned IdxN10 = ConstN10Elt->getZExtValue();
unsigned IdxN11 = ConstN11Elt->getZExtValue();
// Add is commutative so indices can be reordered.
if (IdxN00 > IdxN10) {
std::swap(IdxN00, IdxN10);
std::swap(IdxN01, IdxN11);
}
// N0 indices must be the even element. N1 indices must be the next odd element.
if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
return SDValue();
SDValue N00In = N00Elt.getOperand(0);
SDValue N01In = N01Elt.getOperand(0);
SDValue N10In = N10Elt.getOperand(0);
SDValue N11In = N11Elt.getOperand(0);
// The first time we find an input, capture it.
if (!ZExtIn) {
ZExtIn = N00In;
SExtIn = N01In;
}
if (ZExtIn != N00In || SExtIn != N01In ||
ZExtIn != N10In || SExtIn != N11In)
return SDValue();
}
auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
// Shrink by adding truncate nodes and let DAGCombine fold with the
// sources.
EVT InVT = Ops[0].getValueType();
assert(InVT.getScalarType() == MVT::i8 &&
"Unexpected scalar element type");
assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
InVT.getVectorNumElements() / 2);
return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
};
return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
PMADDBuilder);
}
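// Roughly, the pattern above corresponds to source code of the form
//   r[i] = sat_i16(zext(a[2*i]) * sext(b[2*i]) + zext(a[2*i+1]) * sext(b[2*i+1]))
// with unsigned bytes in 'a' and signed bytes in 'b', which is exactly what
// PMADDUBSW computes for each 16-bit result lane.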
static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
SDValue Src = N->getOperand(0);
SDLoc DL(N);
// Attempt to pre-truncate inputs to arithmetic ops instead.
if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
return V;
// Try to detect AVG pattern first.
if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
return Avg;
// Try to detect PMADD
if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
return PMAdd;
// Try to combine truncation with signed/unsigned saturation.
if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
return Val;
// Try to combine PMULHUW/PMULHW for vXi16.
if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
return V;
// The bitcast source is a direct mmx result.
// Detect bitcasts from x86mmx to i32.
if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
SDValue BCSrc = Src.getOperand(0);
if (BCSrc.getValueType() == MVT::x86mmx)
return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
}
// Try to truncate extended sign/zero bits with PACKSS/PACKUS.
if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
return V;
return combineVectorTruncation(N, DAG, Subtarget);
}
/// Returns the negated value if the node \p N flips sign of FP value.
///
/// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
/// or FSUB(0, x)
/// AVX512F does not have FXOR, so FNEG is lowered as
/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
/// In this case we go through all bitcasts.
/// This also recognizes splat of a negated value and returns the splat of that
/// value.
static SDValue isFNEG(SelectionDAG &DAG, SDNode *N) {
if (N->getOpcode() == ISD::FNEG)
return N->getOperand(0);
SDValue Op = peekThroughBitcasts(SDValue(N, 0));
auto VT = Op->getValueType(0);
if (auto SVOp = dyn_cast<ShuffleVectorSDNode>(Op.getNode())) {
// For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
// of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
if (!SVOp->getOperand(1).isUndef())
return SDValue();
if (SDValue NegOp0 = isFNEG(DAG, SVOp->getOperand(0).getNode()))
return DAG.getVectorShuffle(VT, SDLoc(SVOp), NegOp0, DAG.getUNDEF(VT),
SVOp->getMask());
return SDValue();
}
unsigned Opc = Op.getOpcode();
if (Opc == ISD::INSERT_VECTOR_ELT) {
// Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
// -V, INDEX).
SDValue InsVector = Op.getOperand(0);
SDValue InsVal = Op.getOperand(1);
if (!InsVector.isUndef())
return SDValue();
if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode()))
return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
NegInsVal, Op.getOperand(2));
return SDValue();
}
if (Opc != X86ISD::FXOR && Opc != ISD::XOR && Opc != ISD::FSUB)
return SDValue();
SDValue Op1 = peekThroughBitcasts(Op.getOperand(1));
if (!Op1.getValueType().isFloatingPoint())
return SDValue();
SDValue Op0 = peekThroughBitcasts(Op.getOperand(0));
// For XOR and FXOR, we want to check if constant bits of Op1 are sign bit
// masks. For FSUB, we have to check if constant bits of Op0 are sign bit
// masks and hence we swap the operands.
if (Opc == ISD::FSUB)
std::swap(Op0, Op1);
APInt UndefElts;
SmallVector<APInt, 16> EltBits;
// Extract constant bits and see if they are all sign bit masks. Ignore the
// undef elements.
if (getTargetConstantBitsFromNode(Op1, Op1.getScalarValueSizeInBits(),
UndefElts, EltBits,
/* AllowWholeUndefs */ true,
/* AllowPartialUndefs */ false)) {
for (unsigned I = 0, E = EltBits.size(); I < E; I++)
if (!UndefElts[I] && !EltBits[I].isSignMask())
return SDValue();
return peekThroughBitcasts(Op0);
}
return SDValue();
}
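// For example, (v4f32 (bitcast (xor (bitcast x), splat 0x80000000))) is
// recognized here as -x, since xor-ing in the sign-bit mask only flips the
// sign of each element.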
/// Do target-specific dag combines on floating point negations.
static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT OrigVT = N->getValueType(0);
SDValue Arg = isFNEG(DAG, N);
if (!Arg)
return SDValue();
EVT VT = Arg.getValueType();
EVT SVT = VT.getScalarType();
SDLoc DL(N);
// Let legalize expand this if it isn't a legal type yet.
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
// If we're negating a FMUL node on a target with FMA, then we can avoid the
// use of a constant by performing (-0 - A*B) instead.
// FIXME: Check rounding control flags as well once it becomes available.
if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
Arg.getOperand(1), Zero);
return DAG.getBitcast(OrigVT, NewNode);
}
// If we're negating an FMA node, then we can adjust the
// instruction to include the extra negation.
unsigned NewOpcode = 0;
if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
switch (Arg.getOpcode()) {
case ISD::FMA: NewOpcode = X86ISD::FNMSUB; break;
case X86ISD::FMSUB: NewOpcode = X86ISD::FNMADD; break;
case X86ISD::FNMADD: NewOpcode = X86ISD::FMSUB; break;
case X86ISD::FNMSUB: NewOpcode = ISD::FMA; break;
case X86ISD::FMADD_RND: NewOpcode = X86ISD::FNMSUB_RND; break;
case X86ISD::FMSUB_RND: NewOpcode = X86ISD::FNMADD_RND; break;
case X86ISD::FNMADD_RND: NewOpcode = X86ISD::FMSUB_RND; break;
case X86ISD::FNMSUB_RND: NewOpcode = X86ISD::FMADD_RND; break;
// We can't handle scalar intrinsic node here because it would only
// invert one element and not the whole vector. But we could try to handle
// a negation of the lower element only.
}
}
if (NewOpcode)
return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT,
Arg.getNode()->ops()));
return SDValue();
}
static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = N->getSimpleValueType(0);
// If we have integer vector types available, use the integer opcodes.
if (!VT.isVector() || !Subtarget.hasSSE2())
return SDValue();
SDLoc dl(N);
unsigned IntBits = VT.getScalarSizeInBits();
MVT IntSVT = MVT::getIntegerVT(IntBits);
MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
unsigned IntOpcode;
switch (N->getOpcode()) {
default: llvm_unreachable("Unexpected FP logic op");
case X86ISD::FOR: IntOpcode = ISD::OR; break;
case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
case X86ISD::FAND: IntOpcode = ISD::AND; break;
case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
}
SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
return DAG.getBitcast(VT, IntOp);
}
/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
if (N->getOpcode() != ISD::XOR)
return SDValue();
SDValue LHS = N->getOperand(0);
auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
return SDValue();
X86::CondCode NewCC = X86::GetOppositeBranchCondition(
X86::CondCode(LHS->getConstantOperandVal(0)));
SDLoc DL(N);
return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
}
static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
// If this is SSE1 only convert to FXOR to avoid scalarization.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
N->getValueType(0) == MVT::v4i32) {
return DAG.getBitcast(
MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
}
if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
return Cmp;
if (DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue SetCC = foldXor1SetCC(N, DAG))
return SetCC;
if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
return RV;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
return combineFneg(N, DAG, Subtarget);
}
static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
EVT VT = N->getValueType(0);
unsigned NumBits = VT.getSizeInBits();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// TODO - Constant Folding.
if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
// Reduce Cst1 to the bottom 16-bits.
// NOTE: SimplifyDemandedBits won't do this for constants.
const APInt &Val1 = Cst1->getAPIntValue();
APInt MaskedVal1 = Val1 & 0xFFFF;
if (MaskedVal1 != Val1)
return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
DAG.getConstant(MaskedVal1, SDLoc(N), VT));
}
// Only bottom 16-bits of the control bits are required.
APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
return SDValue(N, 0);
return SDValue();
}
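// For example, (X86ISD::BEXTR x, 0x00FF0408) simplifies to
// (X86ISD::BEXTR x, 0x0408), since only the start/length byte pair in the low
// 16 bits of the control operand is used.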
static bool isNullFPScalarOrVectorConst(SDValue V) {
return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
}
/// If a value is a scalar FP zero or a vector FP zero (potentially including
/// undefined elements), return a zero constant that may be used to fold away
/// that value. In the case of a vector, the returned constant will not contain
/// undefined elements even if the input parameter does. This makes it suitable
/// to be used as a replacement operand with operations (eg, bitwise-and) where
/// an undef should not propagate.
static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!isNullFPScalarOrVectorConst(V))
return SDValue();
if (V.getValueType().isVector())
return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
return V;
}
static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
// Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
(VT == MVT::f64 && Subtarget.hasSSE2()) ||
(VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
return SDValue();
auto isAllOnesConstantFP = [](SDValue V) {
if (V.getSimpleValueType().isVector())
return ISD::isBuildVectorAllOnes(V.getNode());
auto *C = dyn_cast<ConstantFPSDNode>(V);
return C && C->getConstantFPValue()->isAllOnesValue();
};
// fand (fxor X, -1), Y --> fandn X, Y
if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
// fand X, (fxor Y, -1) --> fandn Y, X
if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
return SDValue();
}
/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// FAND(0.0, x) -> 0.0
if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
return V;
// FAND(x, 0.0) -> 0.0
if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
return V;
if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
return V;
return lowerX86FPLogicOp(N, DAG, Subtarget);
}
/// Do target-specific dag combines on X86ISD::FANDN nodes.
static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// FANDN(0.0, x) -> x
if (isNullFPScalarOrVectorConst(N->getOperand(0)))
return N->getOperand(1);
// FANDN(x, 0.0) -> 0.0
if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
return V;
return lowerX86FPLogicOp(N, DAG, Subtarget);
}
/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
// F[X]OR(0.0, x) -> x
if (isNullFPScalarOrVectorConst(N->getOperand(0)))
return N->getOperand(1);
// F[X]OR(x, 0.0) -> x
if (isNullFPScalarOrVectorConst(N->getOperand(1)))
return N->getOperand(0);
if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
return NewVal;
return lowerX86FPLogicOp(N, DAG, Subtarget);
}
/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
// Only perform optimizations if UnsafeMath is used.
if (!DAG.getTarget().Options.UnsafeFPMath)
return SDValue();
// If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
// into FMAXC and FMINC, which are commutative operations.
unsigned NewOp = 0;
switch (N->getOpcode()) {
default: llvm_unreachable("unknown opcode");
case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
}
return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
N->getOperand(0), N->getOperand(1));
}
static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (Subtarget.useSoftFloat())
return SDValue();
// TODO: If an operand is already known to be a NaN or not a NaN, this
// should be an optional swap and FMAX/FMIN.
EVT VT = N->getValueType(0);
if (!((Subtarget.hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
(Subtarget.hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
(Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
return SDValue();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDLoc DL(N);
auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
// If we don't have to respect NaN inputs, this is a direct translation to x86
// min/max instructions.
if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
// If we have to respect NaN inputs, this takes at least 3 instructions.
// Favor a library call when operating on a scalar and minimizing code size.
if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
EVT SetCCType = DAG.getTargetLoweringInfo().getSetCCResultType(
DAG.getDataLayout(), *DAG.getContext(), VT);
// There are 4 possibilities involving NaN inputs, and these are the required
// outputs:
// Op1
// Num NaN
// ----------------
// Num | Max | Op0 |
// Op0 ----------------
// NaN | Op1 | NaN |
// ----------------
//
// The SSE FP max/min instructions were not designed for this case, but rather
// to implement:
// Min = Op1 < Op0 ? Op1 : Op0
// Max = Op1 > Op0 ? Op1 : Op0
//
// So they always return Op0 if either input is a NaN. However, we can still
// use those instructions for fmaxnum by selecting away a NaN input.
// If either operand is NaN, the 2nd source operand (Op0) is passed through.
SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
// If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
// are NaN, the NaN value of Op1 is the result.
return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
}
static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
EVT VT = N->getValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt KnownUndef, KnownZero;
APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
KnownZero, DCI))
return SDValue(N, 0);
return SDValue();
}
/// Do target-specific dag combines on X86ISD::ANDNP nodes.
static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
MVT VT = N->getSimpleValueType(0);
// ANDNP(0, x) -> x
if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
return N->getOperand(1);
// ANDNP(x, 0) -> 0
if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
return DAG.getConstant(0, SDLoc(N), VT);
// Turn ANDNP back to AND if input is inverted.
if (VT.isVector() && N->getOperand(0).getOpcode() == ISD::XOR &&
ISD::isBuildVectorAllOnes(N->getOperand(0).getOperand(1).getNode())) {
return DAG.getNode(ISD::AND, SDLoc(N), VT,
N->getOperand(0).getOperand(0), N->getOperand(1));
}
// Attempt to recursively combine a bitmask ANDNP with shuffles.
if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
return Res;
}
return SDValue();
}
static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// BT ignores high bits in the bit index operand.
unsigned BitWidth = N1.getValueSizeInBits();
APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);
return SDValue();
}
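// For example, with a 64-bit value only the low 6 bits of the bit index are
// demanded, so (bt x, (and i, 63)) can drop the 'and' and use i directly.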
// Try to combine sext_in_reg of a cmov of constants by extending the constants.
static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
if (ExtraVT != MVT::i16)
return SDValue();
// Look through single use any_extends.
if (N0.getOpcode() == ISD::ANY_EXTEND && N0.hasOneUse())
N0 = N0.getOperand(0);
// See if we have a single use cmov.
if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
return SDValue();
SDValue CMovOp0 = N0.getOperand(0);
SDValue CMovOp1 = N0.getOperand(1);
// Make sure both operands are constants.
if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
!isa<ConstantSDNode>(CMovOp1.getNode()))
return SDValue();
SDLoc DL(N);
// If we looked through an any_extend above, apply the same extend to the constants.
if (N0.getValueType() != VT) {
CMovOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, VT, CMovOp0);
CMovOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, VT, CMovOp1);
}
CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, CMovOp0, N1);
CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, CMovOp1, N1);
return DAG.getNode(X86ISD::CMOV, DL, VT, CMovOp0, CMovOp1,
N0.getOperand(2), N0.getOperand(3));
}
static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (SDValue V = combineSextInRegCmov(N, DAG))
return V;
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
SDLoc dl(N);
// SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and AVX2
// since there is no sign-extended shift right operation on a vector with
// 64-bit elements.
// (sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
// (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND)) {
SDValue N00 = N0.getOperand(0);
// EXTLOAD has a better solution on AVX2: it may be replaced with an
// X86ISD::VSEXT node.
if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
if (!ISD::isNormalLoad(N00.getNode()))
return SDValue();
if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
N00, N1);
return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
}
}
return SDValue();
}
/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
/// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
/// opportunities to combine math ops, use an LEA, or use a complex addressing
/// mode. This can eliminate extend, add, and shift instructions.
static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
Ext->getOpcode() != ISD::ZERO_EXTEND)
return SDValue();
// TODO: This should be valid for other integer types.
EVT VT = Ext->getValueType(0);
if (VT != MVT::i64)
return SDValue();
SDValue Add = Ext->getOperand(0);
if (Add.getOpcode() != ISD::ADD)
return SDValue();
bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
bool NSW = Add->getFlags().hasNoSignedWrap();
bool NUW = Add->getFlags().hasNoUnsignedWrap();
// We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
// into the 'zext'
if ((Sext && !NSW) || (!Sext && !NUW))
return SDValue();
// Having a constant operand to the 'add' ensures that we are not increasing
// the instruction count because the constant is extended for free below.
// A constant operand can also become the displacement field of an LEA.
auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
if (!AddOp1)
return SDValue();
// Don't make the 'add' bigger if there's no hope of combining it with some
// other 'add' or 'shl' instruction.
// TODO: It may be profitable to generate simpler LEA instructions in place
// of single 'add' instructions, but the cost model for selecting an LEA
// currently has a high threshold.
bool HasLEAPotential = false;
for (auto *User : Ext->uses()) {
if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
HasLEAPotential = true;
break;
}
}
if (!HasLEAPotential)
return SDValue();
// Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
SDValue AddOp0 = Add.getOperand(0);
SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
// The wider add is guaranteed not to wrap because both operands are
// sign- or zero-extended to match the original add's nsw/nuw flag.
SDNodeFlags Flags;
Flags.setNoSignedWrap(NSW);
Flags.setNoUnsignedWrap(NUW);
return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
}
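// For example, (i64 sext (add nsw i32 x, 42)), where the result feeds another
// add or shl, becomes (add nsw (i64 sext x), 42), which can then fold into an
// LEA addressing mode.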
// If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
// operands and the result of CMOV is not used anywhere else - promote CMOV
// itself instead of promoting its result. This could be beneficial, because:
// 1) X86TargetLowering::EmitLoweredSelect later can do merging of two
// (or more) pseudo-CMOVs only when they go one-after-another and
// getting rid of result extension code after CMOV will help that.
// 2) Promotion of constant CMOV arguments is free, hence the
// {ANY,SIGN,ZERO}_EXTEND will just be deleted.
// 3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this
// promotion is also good in terms of code-size.
// (64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
// promotion.)
static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
SDValue CMovN = Extend->getOperand(0);
if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
return SDValue();
EVT TargetVT = Extend->getValueType(0);
unsigned ExtendOpcode = Extend->getOpcode();
SDLoc DL(Extend);
EVT VT = CMovN.getValueType();
SDValue CMovOp0 = CMovN.getOperand(0);
SDValue CMovOp1 = CMovN.getOperand(1);
if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
!isa<ConstantSDNode>(CMovOp1.getNode()))
return SDValue();
// Only extend to i32 or i64.
if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
return SDValue();
// Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
// are free.
if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
return SDValue();
// If this is a zero extend to i64, we should only extend to i32 and use a free
// zero extend to finish.
EVT ExtendVT = TargetVT;
if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
ExtendVT = MVT::i32;
CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
CMovN.getOperand(2), CMovN.getOperand(3));
// Finish extending if needed.
if (ExtendVT != TargetVT)
Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
return Res;
}
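// For example, (i32 zext (i16 X86ISD::CMOV 5, 7, cond, eflags)) becomes
// (i32 X86ISD::CMOV 5, 7, cond, eflags): the constants are extended for free
// and the separate zero-extension of the CMOV result disappears.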
// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
// This is more or less the reverse of combineBitcastvxi1.
static SDValue
combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned Opcode = N->getOpcode();
if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
Opcode != ISD::ANY_EXTEND)
return SDValue();
if (!DCI.isBeforeLegalizeOps())
return SDValue();
if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
return SDValue();
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
EVT InSVT = N0.getValueType().getScalarType();
unsigned EltSizeInBits = SVT.getSizeInBits();
// Input type must be extending a bool vector (bit-casted from a scalar
// integer) to legal integer types.
if (!VT.isVector())
return SDValue();
if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
return SDValue();
if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
return SDValue();
SDValue N00 = N0.getOperand(0);
EVT SclVT = N0.getOperand(0).getValueType();
if (!SclVT.isScalarInteger())
return SDValue();
SDLoc DL(N);
SDValue Vec;
SmallVector<int, 32> ShuffleMask;
unsigned NumElts = VT.getVectorNumElements();
assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
// Broadcast the scalar integer to the vector elements.
if (NumElts > EltSizeInBits) {
// If the scalar integer is greater than the vector element size, then we
// must split it down into sub-sections for broadcasting. For example:
// i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
// i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
unsigned Scale = NumElts / EltSizeInBits;
EVT BroadcastVT =
EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
Vec = DAG.getBitcast(VT, Vec);
for (unsigned i = 0; i != Scale; ++i)
ShuffleMask.append(EltSizeInBits, i);
} else {
// For a smaller scalar integer, we can simply any-extend it to the vector
// element size (we don't care about the upper bits) and broadcast it to all
// elements.
SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
ShuffleMask.append(NumElts, 0);
}
Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
// Now, mask the relevant bit in each element.
SmallVector<SDValue, 32> Bits;
for (unsigned i = 0; i != NumElts; ++i) {
int BitIdx = (i % EltSizeInBits);
APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
Bits.push_back(DAG.getConstant(Bit, DL, SVT));
}
SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
// Compare against the bitmask and extend the result.
EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
// For SEXT, this is now done, otherwise shift the result down for
// zero-extension.
if (Opcode == ISD::SIGN_EXTEND)
return Vec;
return DAG.getNode(ISD::SRL, DL, VT, Vec,
DAG.getConstant(EltSizeInBits - 1, DL, VT));
}
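// For example, (v8i16 sext (v8i1 bitcast (i8 x))) becomes: broadcast x into
// all eight i16 lanes, AND lane i with (1 << i), and SETEQ the result against
// that same mask, producing all-ones in exactly the lanes whose bit was set.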
/// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
/// ZERO_EXTEND_VECTOR_INREG, this requires the splitting (or concatenating
/// with UNDEFs) of the input to vectors of the same size as the target type
/// which then extends the lowest elements.
static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (ExperimentalVectorWideningLegalization)
return SDValue();
unsigned Opcode = N->getOpcode();
if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
return SDValue();
if (!DCI.isBeforeLegalizeOps())
return SDValue();
if (!Subtarget.hasSSE2())
return SDValue();
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
EVT InVT = N0.getValueType();
EVT InSVT = InVT.getScalarType();
// FIXME: Generic DAGCombiner previously had a bug that would cause a
// sign_extend of setcc to sometimes return the original node and trick it
// into thinking CombineTo was used which prevented the target combines from
// running.
// Early out here to avoid regressions like this:
// (v4i32 (sext (v4i1 (setcc (v4i16)))))
// Becomes
// (v4i32 (sext_invec (v8i16 (concat (v4i16 (setcc (v4i16))), undef))))
// Type legalized to
// (v4i32 (sext_invec (v8i16 (trunc_invec (v4i32 (setcc (v4i32)))))))
// Leading to a packssdw+pmovsxwd
// We could write a DAG combine to fix this, but really we shouldn't be
// creating sext_invec that's forcing v8i16 into the DAG.
if (N0.getOpcode() == ISD::SETCC)
return SDValue();
// Input type must be a vector and we must be extending legal integer types.
if (!VT.isVector() || VT.getVectorNumElements() < 2)
return SDValue();
if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
return SDValue();
if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
return SDValue();
// If the input/output types are both legal then we have at least AVX1 and
// we will be able to use SIGN_EXTEND/ZERO_EXTEND directly.
if (DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
DAG.getTargetLoweringInfo().isTypeLegal(InVT))
return SDValue();
SDLoc DL(N);
auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) {
EVT InVT = N.getValueType();
EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
Size / InVT.getScalarSizeInBits());
SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
DAG.getUNDEF(InVT));
Opnds[0] = N;
return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
};
// If the target size is less than 128 bits, extend to a type that would
// extend to 128 bits, extend that and extract the original target vector.
if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) {
unsigned Scale = 128 / VT.getSizeInBits();
EVT ExVT =
EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
DAG.getIntPtrConstant(0, DL));
}
// If the target size is 128 bits (or 256 bits on an AVX target), then convert
// to ISD::*_EXTEND_VECTOR_INREG, which ensures lowering to X86ISD::V*EXT.
// Also use this if we don't have SSE41, to allow the legalizer to do its job.
if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
(VT.is256BitVector() && Subtarget.hasAVX()) ||
(VT.is512BitVector() && Subtarget.useAVX512Regs())) {
SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
Opcode = Opcode == ISD::SIGN_EXTEND ? ISD::SIGN_EXTEND_VECTOR_INREG
: ISD::ZERO_EXTEND_VECTOR_INREG;
return DAG.getNode(Opcode, DL, VT, ExOp);
}
auto SplitAndExtendInReg = [&](unsigned SplitSize) {
unsigned NumVecs = VT.getSizeInBits() / SplitSize;
unsigned NumSubElts = SplitSize / SVT.getSizeInBits();
EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
unsigned IROpc = Opcode == ISD::SIGN_EXTEND ? ISD::SIGN_EXTEND_VECTOR_INREG
: ISD::ZERO_EXTEND_VECTOR_INREG;
SmallVector<SDValue, 8> Opnds;
for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) {
SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
DAG.getIntPtrConstant(Offset, DL));
SrcVec = ExtendVecSize(DL, SrcVec, SplitSize);
SrcVec = DAG.getNode(IROpc, DL, SubVT, SrcVec);
Opnds.push_back(SrcVec);
}
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
};
// On pre-AVX targets, split into 128-bit nodes of
// ISD::*_EXTEND_VECTOR_INREG.
if (!Subtarget.hasAVX() && !(VT.getSizeInBits() % 128))
return SplitAndExtendInReg(128);
// On pre-AVX512 targets, split into 256-bit nodes of
// ISD::*_EXTEND_VECTOR_INREG.
if (!Subtarget.useAVX512Regs() && !(VT.getSizeInBits() % 256))
return SplitAndExtendInReg(256);
return SDValue();
}
// Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
// result type.
static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
SDLoc dl(N);
// Only do this combine with AVX512 for vector extends.
if (!Subtarget.hasAVX512() || !VT.isVector() || N0->getOpcode() != ISD::SETCC)
return SDValue();
// Only combine legal element types.
EVT SVT = VT.getVectorElementType();
if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
return SDValue();
// We can only do this if the vector size is 256 bits or less.
unsigned Size = VT.getSizeInBits();
if (Size > 256)
return SDValue();
// Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
// those are the only integer compares we have.
ISD::CondCode CC = cast<CondCodeSDNode>(N0->getOperand(2))->get();
if (ISD::isUnsignedIntSetCC(CC))
return SDValue();
// Only do this combine if the extension will be fully consumed by the setcc.
EVT N00VT = N0.getOperand(0).getValueType();
EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
if (Size != MatchingVecType.getSizeInBits())
return SDValue();
SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
if (N->getOpcode() == ISD::ZERO_EXTEND)
Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());
return Res;
}
static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT InVT = N0.getValueType();
SDLoc DL(N);
if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
return NewCMov;
if (!DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
return V;
if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
// Inverting and sign-extending a boolean is the same as zero-extending and
// subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract is efficiently
// lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
// sext (xor Bool, -1) --> sub (zext Bool), 1
SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
}
if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
return V;
if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
return V;
if (VT.isVector())
if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
return R;
if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
return NewAdd;
return SDValue();
}
static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {
if (NegMul) {
switch (Opcode) {
default: llvm_unreachable("Unexpected opcode");
case ISD::FMA: Opcode = X86ISD::FNMADD; break;
case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
case X86ISD::FNMADD: Opcode = ISD::FMA; break;
case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
}
}
if (NegAcc) {
switch (Opcode) {
default: llvm_unreachable("Unexpected opcode");
case ISD::FMA: Opcode = X86ISD::FMSUB; break;
case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
case X86ISD::FMSUB: Opcode = ISD::FMA; break;
case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
}
}
return Opcode;
}
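// For example, negateFMAOpcode(ISD::FMA, /*NegMul=*/true, /*NegAcc=*/true)
// returns X86ISD::FNMSUB, since -(a*b) - c is the full negation of a*b + c.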
static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
// Let legalize expand this if it isn't a legal type yet.
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
EVT ScalarVT = VT.getScalarType();
if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
return SDValue();
SDValue A = N->getOperand(0);
SDValue B = N->getOperand(1);
SDValue C = N->getOperand(2);
auto invertIfNegative = [&DAG](SDValue &V) {
if (SDValue NegVal = isFNEG(DAG, V.getNode())) {
V = DAG.getBitcast(V.getValueType(), NegVal);
return true;
}
// Look through extract_vector_elts. If it comes from an FNEG, create a
// new extract from the FNEG input.
if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
isNullConstant(V.getOperand(1))) {
if (SDValue NegVal = isFNEG(DAG, V.getOperand(0).getNode())) {
NegVal = DAG.getBitcast(V.getOperand(0).getValueType(), NegVal);
V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
NegVal, V.getOperand(1));
return true;
}
}
return false;
};
// Do not convert the passthru input of scalar intrinsics.
// FIXME: We could allow negations of the lower element only.
bool NegA = invertIfNegative(A);
bool NegB = invertIfNegative(B);
bool NegC = invertIfNegative(C);
if (!NegA && !NegB && !NegC)
return SDValue();
unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC);
if (N->getNumOperands() == 4)
return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
return DAG.getNode(NewOpcode, dl, VT, A, B, C);
}
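// For example, (fma (fneg a), b, c) becomes (X86ISD::FNMADD a, b, c), and
// (fma (fneg a), b, (fneg c)) becomes (X86ISD::FNMSUB a, b, c), folding the
// negations into the FMA opcode instead of emitting separate sign flips.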
// Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
SDValue NegVal = isFNEG(DAG, N->getOperand(2).getNode());
if (!NegVal)
return SDValue();
unsigned NewOpcode;
switch (N->getOpcode()) {
default: llvm_unreachable("Unexpected opcode!");
case X86ISD::FMADDSUB: NewOpcode = X86ISD::FMSUBADD; break;
case X86ISD::FMADDSUB_RND: NewOpcode = X86ISD::FMSUBADD_RND; break;
case X86ISD::FMSUBADD: NewOpcode = X86ISD::FMADDSUB; break;
case X86ISD::FMSUBADD_RND: NewOpcode = X86ISD::FMADDSUB_RND; break;
}
if (N->getNumOperands() == 4)
return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
NegVal, N->getOperand(3));
return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
NegVal);
}
static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
// (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
// (and (i32 x86isd::setcc_carry), 1)
// This eliminates the zext. This transformation is necessary because
// ISD::SETCC is always legalized to i8.
SDLoc dl(N);
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.getOpcode() == ISD::AND &&
N0.hasOneUse() &&
N0.getOperand(0).hasOneUse()) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
if (!isOneConstant(N0.getOperand(1)))
return SDValue();
return DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
N00.getOperand(0), N00.getOperand(1)),
DAG.getConstant(1, dl, VT));
}
}
if (N0.getOpcode() == ISD::TRUNCATE &&
N0.hasOneUse() &&
N0.getOperand(0).hasOneUse()) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
return DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
N00.getOperand(0), N00.getOperand(1)),
DAG.getConstant(1, dl, VT));
}
}
if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
return NewCMov;
if (DCI.isBeforeLegalizeOps())
if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
return V;
if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
return V;
if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
return V;
if (VT.isVector())
if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
return R;
if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
return NewAdd;
if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
return R;
return SDValue();
}
/// Try to map a 128-bit or larger integer comparison to vector instructions
/// before type legalization splits it up into chunks.
static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
// We're looking for an oversized integer equality comparison.
SDValue X = SetCC->getOperand(0);
SDValue Y = SetCC->getOperand(1);
EVT OpVT = X.getValueType();
unsigned OpSize = OpVT.getSizeInBits();
if (!OpVT.isScalarInteger() || OpSize < 128)
return SDValue();
// Ignore a comparison with zero because that gets special treatment in
// EmitTest(). But make an exception for the special case of a pair of
// logically-combined vector-sized operands compared to zero. This pattern may
// be generated by the memcmp expansion pass with oversized integer compares
// (see PR33325).
bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
X.getOperand(0).getOpcode() == ISD::XOR &&
X.getOperand(1).getOpcode() == ISD::XOR;
if (isNullConstant(Y) && !IsOrXorXorCCZero)
return SDValue();
// Bail out if we know that this is not really just an oversized integer.
if (peekThroughBitcasts(X).getValueType() == MVT::f128 ||
peekThroughBitcasts(Y).getValueType() == MVT::f128)
return SDValue();
// TODO: Use PXOR + PTEST for SSE4.1 or later?
EVT VT = SetCC->getValueType(0);
SDLoc DL(SetCC);
if ((OpSize == 128 && Subtarget.hasSSE2()) ||
(OpSize == 256 && Subtarget.hasAVX2()) ||
(OpSize == 512 && Subtarget.useAVX512Regs())) {
EVT VecVT = OpSize == 512 ? MVT::v16i32 :
OpSize == 256 ? MVT::v32i8 :
MVT::v16i8;
EVT CmpVT = OpSize == 512 ? MVT::v16i1 : VecVT;
SDValue Cmp;
if (IsOrXorXorCCZero) {
// This is a bitwise-combined equality comparison of 2 pairs of vectors:
// setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
// Use 2 vector equality compares and 'and' the results before doing a
// MOVMSK.
SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0));
SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1));
SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0));
SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1));
SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ);
Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
} else {
SDValue VecX = DAG.getBitcast(VecVT, X);
SDValue VecY = DAG.getBitcast(VecVT, Y);
Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
}
// For 512-bits we want to emit a setcc that will lower to kortest.
if (OpSize == 512)
return DAG.getSetCC(DL, VT, DAG.getBitcast(MVT::i16, Cmp),
DAG.getConstant(0xFFFF, DL, MVT::i16), CC);
// If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
// setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
// setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
// setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
// setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
MVT::i32);
return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
}
return SDValue();
}
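// For example, an i128 equality produced by memcmp expansion,
//   (setcc (i128 (load p)), (i128 (load q)), eq),
// becomes a 16-byte PCMPEQB, a PMOVMSKB of the result, and a scalar compare of
// the mask against 0xFFFF, instead of splitting into multiple 64-bit compares.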
static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT OpVT = LHS.getValueType();
SDLoc DL(N);
if (CC == ISD::SETNE || CC == ISD::SETEQ) {
// 0-x == y --> x+y == 0
// 0-x != y --> x+y != 0
if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
LHS.hasOneUse()) {
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
}
// x == 0-y --> x+y == 0
// x != 0-y --> x+y != 0
if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
RHS.hasOneUse()) {
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
}
if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
return V;
}
if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
(CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
// Put build_vectors on the right.
if (LHS.getOpcode() == ISD::BUILD_VECTOR) {
std::swap(LHS, RHS);
CC = ISD::getSetCCSwappedOperands(CC);
}
bool IsSEXT0 =
(LHS.getOpcode() == ISD::SIGN_EXTEND) &&
(LHS.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
if (IsSEXT0 && IsVZero1) {
assert(VT == LHS.getOperand(0).getValueType() &&
"Unexpected operand type");
if (CC == ISD::SETGT)
return DAG.getConstant(0, DL, VT);
if (CC == ISD::SETLE)
return DAG.getConstant(1, DL, VT);
if (CC == ISD::SETEQ || CC == ISD::SETGE)
return DAG.getNOT(DL, LHS.getOperand(0), VT);
assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
"Unexpected condition code!");
return LHS.getOperand(0);
}
}
// If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
// pre-promote its result type since vXi1 vectors don't get promoted
// during type legalization.
// NOTE: The element count check is to ignore operand types that need to
// go through type promotion to a 128-bit vector.
if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
VT.getVectorElementType() == MVT::i1 &&
(ExperimentalVectorWideningLegalization ||
VT.getVectorNumElements() > 4) &&
(OpVT.getVectorElementType() == MVT::i8 ||
OpVT.getVectorElementType() == MVT::i16)) {
SDValue Setcc = DAG.getNode(ISD::SETCC, DL, OpVT, LHS, RHS,
N->getOperand(2));
return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
}
// For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
// to avoid scalarization via legalization because v4i32 is not a legal type.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
LHS.getValueType() == MVT::v4f32)
return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
return SDValue();
}
static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue Src = N->getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
MVT VT = N->getSimpleValueType(0);
// Perform constant folding.
if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
assert(VT == MVT::i32 && "Unexpected result type");
APInt Imm(32, 0);
for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Src.getOperand(Idx);
if (!In.isUndef() &&
cast<ConstantSDNode>(In)->getAPIntValue().isNegative())
Imm.setBit(Idx);
}
return DAG.getConstant(Imm, SDLoc(N), VT);
}
// Look through int->fp bitcasts that don't change the element width.
if (Src.getOpcode() == ISD::BITCAST && Src.hasOneUse() &&
SrcVT.isFloatingPoint() &&
Src.getOperand(0).getValueType() ==
EVT(SrcVT).changeVectorElementTypeToInteger())
Src = Src.getOperand(0);
// Simplify the inputs.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedMask(APInt::getAllOnesValue(VT.getScalarSizeInBits()));
if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
return SDValue(N, 0);
// Combine (movmsk (setne (and X, (1 << C)), 0)) -> (movmsk (X << C)).
// Only do this when the setcc input and output types are the same and the
// setcc and the 'and' node have a single use.
// FIXME: Support 256-bits with AVX1. The movmsk is split, but the and isn't.
APInt SplatVal;
if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
Src.getOperand(0).getValueType() == Src.getValueType() &&
cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETNE &&
ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
Src.getOperand(0).getOpcode() == ISD::AND) {
SDValue And = Src.getOperand(0);
if (And.hasOneUse() &&
ISD::isConstantSplatVector(And.getOperand(1).getNode(), SplatVal) &&
SplatVal.isPowerOf2()) {
MVT VT = Src.getSimpleValueType();
unsigned BitWidth = VT.getScalarSizeInBits();
unsigned ShAmt = BitWidth - SplatVal.logBase2() - 1;
SDLoc DL(And);
SDValue X = And.getOperand(0);
// If the element type is i8, we need to bitcast to i16 to use a legal
// shift. If we wait until lowering, we end up with an extra 'and' to keep
// bits from crossing the 8-bit elements, but we don't care about that here.
if (VT.getVectorElementType() == MVT::i8) {
VT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
X = DAG.getBitcast(VT, X);
}
SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, X,
DAG.getConstant(ShAmt, DL, VT));
SDValue Cast = DAG.getBitcast(SrcVT, Shl);
return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), N->getValueType(0), Cast);
}
}
return SDValue();
}
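// For example, (movmsk (setne (and X, splat 0x10), 0)) on v16i8 becomes, in
// effect, a left shift by 3 that moves bit 4 of each byte into the sign bit,
// followed by a plain movmsk of the shifted vector.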
static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
if (DCI.isBeforeLegalizeOps()) {
SDValue Index = N->getOperand(4);
// Remove any sign extends from 32 or smaller to larger than 32.
// Only do this before LegalizeOps in case we need the sign extend for
// legalization.
if (Index.getOpcode() == ISD::SIGN_EXTEND) {
if (Index.getScalarValueSizeInBits() > 32 &&
Index.getOperand(0).getScalarValueSizeInBits() <= 32) {
SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
NewOps[4] = Index.getOperand(0);
SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
if (Res == N) {
// The original sign extend now has fewer users; add it back to the
// worklist in case it can be removed.
DCI.AddToWorklist(Index.getNode());
DCI.AddToWorklist(N);
}
return SDValue(Res, 0);
}
}
// Make sure the index is either i32 or i64
unsigned ScalarSize = Index.getScalarValueSizeInBits();
if (ScalarSize != 32 && ScalarSize != 64) {
MVT EltVT = ScalarSize > 32 ? MVT::i64 : MVT::i32;
EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
Index.getValueType().getVectorNumElements());
Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
NewOps[4] = Index;
SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
if (Res == N)
DCI.AddToWorklist(N);
return SDValue(Res, 0);
}
// Try to remove zero extends from 32->64 if we know the sign bit of
// the input is zero.
if (Index.getOpcode() == ISD::ZERO_EXTEND &&
Index.getScalarValueSizeInBits() == 64 &&
Index.getOperand(0).getScalarValueSizeInBits() == 32) {
if (DAG.SignBitIsZero(Index.getOperand(0))) {
SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
NewOps[4] = Index.getOperand(0);
SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
if (Res == N) {
// The original zero extend now has fewer users; add it back to the
// worklist in case it can be removed.
DCI.AddToWorklist(Index.getNode());
DCI.AddToWorklist(N);
}
return SDValue(Res, 0);
}
}
}
// With AVX2 we only demand the upper bit of the mask.
if (!Subtarget.hasAVX512()) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Mask = N->getOperand(2);
APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
return SDValue(N, 0);
}
return SDValue();
}
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
SDValue EFLAGS = N->getOperand(1);
// Try to simplify the EFLAGS and condition code operands.
if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
return getSETCC(CC, Flags, DL, DAG);
return SDValue();
}
/// Optimize branch condition evaluation.
static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
SDValue EFLAGS = N->getOperand(3);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
// Try to simplify the EFLAGS and condition code operands.
// Make sure to not keep references to operands, as combineSetCCEFLAGS can
// RAUW them under us.
if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
N->getOperand(1), Cond, Flags);
}
return SDValue();
}
static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
SelectionDAG &DAG) {
// Take advantage of vector comparisons producing 0 or -1 in each lane to
// optimize away operation when it's from a constant.
//
// The general transformation is:
// UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
// AND(VECTOR_CMP(x,y), constant2)
// constant2 = UNARYOP(constant)
// Early exit if this isn't a vector operation, the operand of the
// unary operation isn't a bitwise AND, or if the sizes of the operations
// aren't the same.
EVT VT = N->getValueType(0);
if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
return SDValue();
// Now check that the other operand of the AND is a constant. We could
// make the transformation for non-constant splats as well, but it's unclear
// that would be a benefit as it would not eliminate any operations, just
// perform one more step in scalar code before moving to the vector unit.
if (BuildVectorSDNode *BV =
dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
// Bail out if the vector isn't a constant.
if (!BV->isConstant())
return SDValue();
// Everything checks out. Build up the new and improved node.
SDLoc DL(N);
EVT IntVT = BV->getValueType(0);
// Create a new constant of the appropriate type for the transformed
// DAG.
SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
// The AND node needs bitcasts to/from an integer vector type around it.
SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
N->getOperand(0)->getOperand(0), MaskConst);
SDValue Res = DAG.getBitcast(VT, NewAnd);
return Res;
}
return SDValue();
}
static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT InVT = Op0.getValueType();
// UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
// UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
// UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
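// (Illustrative note: zero extending the narrow input leaves the i32 sign
// bit clear, so a signed conversion of the widened value produces the same
// result as the original unsigned conversion; SINT_TO_FP is preferred since,
// as noted below, UINT_TO_FP isn't legal without AVX512.)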
if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
SDLoc dl(N);
EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
InVT.getVectorNumElements());
SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
// UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
}
// Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
// optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
// the optimization here.
if (DAG.SignBitIsZero(Op0))
return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
return SDValue();
}
static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// First try to optimize away the conversion entirely when it's
// conditionally from a constant. Vectors only.
if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
return Res;
// Now move on to more general possibilities.
SDValue Op0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT InVT = Op0.getValueType();
// SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
// SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
// SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
SDLoc dl(N);
EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
InVT.getVectorNumElements());
SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
}
// Without AVX512DQ we only support i64 to float scalar conversion. For both
// vectors and scalars, see if we know that the upper bits are all the sign
// bit, in which case we can truncate the input to i32 and convert from that.
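// (Illustrative reasoning: if at least BitWidth - 31 sign bits are known,
// the value already fits in a signed 32-bit integer, so truncating to i32
// and converting from i32 cannot change the result.)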
if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
unsigned BitWidth = InVT.getScalarSizeInBits();
unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
if (NumSignBits >= (BitWidth - 31)) {
EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), 32);
if (InVT.isVector())
TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
InVT.getVectorNumElements());
SDLoc dl(N);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
}
}
// Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
// a 32-bit target where SSE doesn't support i64->FP operations.
if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
Op0.getOpcode() == ISD::LOAD) {
LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
EVT LdVT = Ld->getValueType(0);
// This transformation is not supported if the result type is f16 or f128.
if (VT == MVT::f16 || VT == MVT::f128)
return SDValue();
// If we have AVX512DQ we can use packed conversion instructions unless
// the VT is f80.
if (Subtarget.hasDQI() && VT != MVT::f80)
return SDValue();
if (!Ld->isVolatile() && !VT.isVector() &&
ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
!Subtarget.is64Bit() && LdVT == MVT::i64) {
SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
return FILDChain;
}
}
return SDValue();
}
static bool needCarryOrOverflowFlag(SDValue Flags) {
assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
UI != UE; ++UI) {
SDNode *User = *UI;
X86::CondCode CC;
switch (User->getOpcode()) {
default:
// Be conservative.
return true;
case X86ISD::SETCC:
case X86ISD::SETCC_CARRY:
CC = (X86::CondCode)User->getConstantOperandVal(0);
break;
case X86ISD::BRCOND:
CC = (X86::CondCode)User->getConstantOperandVal(2);
break;
case X86ISD::CMOV:
CC = (X86::CondCode)User->getConstantOperandVal(2);
break;
}
switch (CC) {
default: break;
case X86::COND_A: case X86::COND_AE:
case X86::COND_B: case X86::COND_BE:
case X86::COND_O: case X86::COND_NO:
case X86::COND_G: case X86::COND_GE:
case X86::COND_L: case X86::COND_LE:
return true;
}
}
return false;
}
static bool onlyZeroFlagUsed(SDValue Flags) {
assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
UI != UE; ++UI) {
SDNode *User = *UI;
unsigned CCOpNo;
switch (User->getOpcode()) {
default:
// Be conservative.
return false;
case X86ISD::SETCC: CCOpNo = 0; break;
case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
case X86ISD::BRCOND: CCOpNo = 2; break;
case X86ISD::CMOV: CCOpNo = 2; break;
}
X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
if (CC != X86::COND_E && CC != X86::COND_NE)
return false;
}
return true;
}
static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
// Only handle test patterns.
if (!isNullConstant(N->getOperand(1)))
return SDValue();
// If we have a CMP of a truncated binop, see if we can make a smaller binop
// and use its flags directly.
// TODO: Maybe we should try promoting compares that only use the zero flag
// first if we can prove the upper bits with computeKnownBits?
SDLoc dl(N);
SDValue Op = N->getOperand(0);
EVT VT = Op.getValueType();
// If we have a constant logical shift that's only used in a comparison
// against zero turn it into an equivalent AND. This allows turning it into
// a TEST instruction later.
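// For example (illustrative only): an i32 (srl X, 3) compared against zero
// is zero exactly when the upper 29 bits of X are zero, so it can be
// rewritten as (and X, 0xFFFFFFF8) compared against zero, which later
// matches a TEST.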
if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
onlyZeroFlagUsed(SDValue(N, 0))) {
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
unsigned ShAmt = Op.getConstantOperandVal(1);
if (ShAmt < BitWidth) { // Avoid undefined shifts.
APInt Mask = Op.getOpcode() == ISD::SRL
? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
: APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
if (Mask.isSignedIntN(32)) {
Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
DAG.getConstant(Mask, dl, VT));
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, VT));
}
}
}
// Look for a truncate with a single use.
if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
return SDValue();
Op = Op.getOperand(0);
// Arithmetic op can only have one use.
if (!Op.hasOneUse())
return SDValue();
unsigned NewOpc;
switch (Op.getOpcode()) {
default: return SDValue();
case ISD::AND:
// Skip AND with a constant. We have special handling for AND with an
// immediate during isel to generate TEST instructions.
if (isa<ConstantSDNode>(Op.getOperand(1)))
return SDValue();
NewOpc = X86ISD::AND;
break;
case ISD::OR: NewOpc = X86ISD::OR; break;
case ISD::XOR: NewOpc = X86ISD::XOR; break;
case ISD::ADD:
// If the carry or overflow flag is used, we can't truncate.
if (needCarryOrOverflowFlag(SDValue(N, 0)))
return SDValue();
NewOpc = X86ISD::ADD;
break;
case ISD::SUB:
// If the carry or overflow flag is used, we can't truncate.
if (needCarryOrOverflowFlag(SDValue(N, 0)))
return SDValue();
NewOpc = X86ISD::SUB;
break;
}
// We found an op we can narrow. Truncate its inputs.
SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
// Use a X86 specific opcode to avoid DAG combine messing with it.
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
// For AND, keep a CMP so that we can match the test pattern.
if (NewOpc == X86ISD::AND)
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, VT));
// Return the flags.
return Op.getValue(1);
}
static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
MVT VT = N->getSimpleValueType(0);
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
N->getOperand(0), N->getOperand(1),
Flags);
}
return SDValue();
}
// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
// If the LHS and RHS of the ADC node are zero, then it can't overflow and
// the result is either zero or one (depending on the input carry bit).
// Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
if (X86::isZeroNode(N->getOperand(0)) &&
X86::isZeroNode(N->getOperand(1)) &&
// We don't have a good way to replace an EFLAGS use, so only do this when
// the flags result is dead right now.
SDValue(N, 1).use_empty()) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL,
MVT::i8),
N->getOperand(2)),
DAG.getConstant(1, DL, VT));
return DCI.CombineTo(N, Res1, CarryOut);
}
if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
MVT VT = N->getSimpleValueType(0);
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
N->getOperand(0), N->getOperand(1),
Flags);
}
return SDValue();
}
/// If this is an add or subtract where one operand is produced by a cmp+setcc,
/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
/// with CMP+{ADC, SBB}.
static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
bool IsSub = N->getOpcode() == ISD::SUB;
SDValue X = N->getOperand(0);
SDValue Y = N->getOperand(1);
// If this is an add, canonicalize a zext operand to the RHS.
// TODO: Incomplete? What if both sides are zexts?
if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
Y.getOpcode() != ISD::ZERO_EXTEND)
std::swap(X, Y);
// Look through a one-use zext.
bool PeekedThroughZext = false;
if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
Y = Y.getOperand(0);
PeekedThroughZext = true;
}
// If this is an add, canonicalize a setcc operand to the RHS.
// TODO: Incomplete? What if both sides are setcc?
// TODO: Should we allow peeking through a zext of the other operand?
if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
Y.getOpcode() != X86ISD::SETCC)
std::swap(X, Y);
if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
return SDValue();
SDLoc DL(N);
EVT VT = N->getValueType(0);
X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
// If X is -1 or 0, then we have an opportunity to avoid constants required in
// the general case below.
auto *ConstantX = dyn_cast<ConstantSDNode>(X);
if (ConstantX) {
if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
(IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
// This is a complicated way to get -1 or 0 from the carry flag:
// -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
// 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
Y.getOperand(1));
}
if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
(IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
SDValue EFLAGS = Y->getOperand(1);
if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
EFLAGS.getValueType().isInteger() &&
!isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
// Swap the operands of a SUB, and we have the same pattern as above.
// -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
// 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
SDValue NewSub = DAG.getNode(
X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
EFLAGS.getOperand(1), EFLAGS.getOperand(0));
SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
NewEFLAGS);
}
}
}
if (CC == X86::COND_B) {
// X + SETB Z --> adc X, 0
// X - SETB Z --> sbb X, 0
return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
DAG.getVTList(VT, MVT::i32), X,
DAG.getConstant(0, DL, VT), Y.getOperand(1));
}
if (CC == X86::COND_A) {
SDValue EFLAGS = Y->getOperand(1);
// Try to convert COND_A into COND_B in an attempt to facilitate
// materializing "setb reg".
//
// Do not flip "e > c", where "c" is a constant, because Cmp instruction
// cannot take an immediate as its first operand.
//
if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
EFLAGS.getValueType().isInteger() &&
!isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
EFLAGS.getNode()->getVTList(),
EFLAGS.getOperand(1), EFLAGS.getOperand(0));
SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
DAG.getVTList(VT, MVT::i32), X,
DAG.getConstant(0, DL, VT), NewEFLAGS);
}
}
if (CC != X86::COND_E && CC != X86::COND_NE)
return SDValue();
SDValue Cmp = Y.getOperand(1);
if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
!X86::isZeroNode(Cmp.getOperand(1)) ||
!Cmp.getOperand(0).getValueType().isInteger())
return SDValue();
SDValue Z = Cmp.getOperand(0);
EVT ZVT = Z.getValueType();
// If X is -1 or 0, then we have an opportunity to avoid constants required in
// the general case below.
if (ConstantX) {
// 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
// fake operands:
// 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
// -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
(!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
SDValue Zero = DAG.getConstant(0, DL, ZVT);
SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
SDValue(Neg.getNode(), 1));
}
// cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
// with fake operands:
// 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
// -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
(!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
SDValue One = DAG.getConstant(1, DL, ZVT);
SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp1);
}
}
// (cmp Z, 1) sets the carry flag if Z is 0.
SDValue One = DAG.getConstant(1, DL, ZVT);
SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
// Add the flags type for ADC/SBB nodes.
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
// X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
// X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
if (CC == X86::COND_NE)
return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
DAG.getConstant(-1ULL, DL, VT), Cmp1);
// X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
// X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
DAG.getConstant(0, DL, VT), Cmp1);
}
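// Attempt to turn a vector-reduction ADD whose multiply operands fit in 16
// signed bits into VPMADDWD. As a sketch of the kind of source this is
// aimed at (illustrative C, not taken from any test):
//   for (i = 0; i != n; ++i)
//     sum += (int)a[i] * (int)b[i];   // a, b hold int16_t values
// PMADDWD multiplies pairs of signed i16 elements and adds adjacent
// products into i32 lanes, which the reduction add then accumulates.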
static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// If the vector size is less than 128, or greater than the supported RegSize,
// do not use PMADD.
if (!VT.isVector() || VT.getVectorNumElements() < 8)
return SDValue();
if (Op0.getOpcode() != ISD::MUL)
std::swap(Op0, Op1);
if (Op0.getOpcode() != ISD::MUL)
return SDValue();
ShrinkMode Mode;
if (!canReduceVMulWidth(Op0.getNode(), DAG, Mode) || Mode == MULU16)
return SDValue();
SDLoc DL(N);
EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
VT.getVectorNumElements());
EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
VT.getVectorNumElements() / 2);
// Madd vector size is half of the original vector size
auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
return DAG.getNode(X86ISD::VPMADDWD, DL, VT, Ops);
};
auto BuildPMADDWD = [&](SDValue Mul) {
// Shrink the operands of mul.
SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, Mul.getOperand(0));
SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, Mul.getOperand(1));
SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
PMADDWDBuilder);
// Fill the rest of the output with 0
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd,
DAG.getConstant(0, DL, MAddVT));
};
Op0 = BuildPMADDWD(Op0);
// It's possible that Op1 is also a mul we can reduce.
if (Op1.getOpcode() == ISD::MUL &&
canReduceVMulWidth(Op1.getNode(), DAG, Mode) && Mode != MULU16) {
Op1 = BuildPMADDWD(Op1);
}
return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
}
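// Attempt to turn a vector-reduction ADD of absolute differences of
// zero-extended byte values into PSADBW. Roughly, this is aimed at sources
// like (illustrative C only):
//   for (i = 0; i != n; ++i)
//     sum += abs((int)a[i] - (int)b[i]);   // a, b hold uint8_t values
// PSADBW computes the sum of absolute byte differences directly, producing
// one partial sum per 64-bit lane.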
static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
// TODO: There's nothing special about i32, any integer type above i16 should
// work just as well.
if (!VT.isVector() || !VT.isSimple() ||
!(VT.getVectorElementType() == MVT::i32))
return SDValue();
unsigned RegSize = 128;
if (Subtarget.useBWIRegs())
RegSize = 512;
else if (Subtarget.hasAVX())
RegSize = 256;
// We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
// TODO: We should be able to handle larger vectors by splitting them before
// feeding them into several SADs, and then reducing over those.
if (VT.getSizeInBits() / 4 > RegSize)
return SDValue();
// We know N is a reduction add, which means one of its operands is a phi.
// To match SAD, we need the other operand to be a vector select.
if (Op0.getOpcode() != ISD::VSELECT)
std::swap(Op0, Op1);
if (Op0.getOpcode() != ISD::VSELECT)
return SDValue();
auto BuildPSADBW = [&](SDValue Op0, SDValue Op1) {
// SAD pattern detected. Now build a SAD instruction and an addition for
// reduction. Note that the number of elements of the result of SAD is less
// than the number of elements of its input. Therefore, we can only update
// part of the elements in the reduction vector.
SDValue Sad = createPSADBW(DAG, Op0, Op1, DL, Subtarget);
// The output of PSADBW is a vector of i64.
// We need to turn the vector of i64 into a vector of i32.
// If the reduction vector is at least as wide as the psadbw result, just
// bitcast. If it's narrower, truncate - the high i32 of each i64 is zero
// anyway.
MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
if (VT.getSizeInBits() >= ResVT.getSizeInBits())
Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
else
Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad);
if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
// Fill the upper elements with zero to match the add width.
SDValue Zero = DAG.getConstant(0, DL, VT);
Sad = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Zero, Sad,
DAG.getIntPtrConstant(0, DL));
}
return Sad;
};
// Check whether we have an abs-diff pattern feeding into the select.
SDValue SadOp0, SadOp1;
if (!detectZextAbsDiff(Op0, SadOp0, SadOp1))
return SDValue();
Op0 = BuildPSADBW(SadOp0, SadOp1);
// It's possible we have a sad on the other side too.
if (Op1.getOpcode() == ISD::VSELECT &&
detectZextAbsDiff(Op1, SadOp0, SadOp1)) {
Op1 = BuildPSADBW(SadOp0, SadOp1);
}
return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
}
/// Convert vector increment or decrement to sub/add with an all-ones constant:
/// add X, <1, 1...> --> sub X, <-1, -1...>
/// sub X, <1, 1...> --> add X, <-1, -1...>
/// The all-ones vector constant can be materialized using a pcmpeq instruction
/// that is commonly recognized as an idiom (has no register dependency), so
/// that's better/smaller than loading a splat 1 constant.
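/// (Roughly, on x86 the all-ones vector comes from "pcmpeqd %xmm1, %xmm1",
/// so an increment can be emitted as "pcmpeqd %xmm1, %xmm1; psubd
/// %xmm1, %xmm0" instead of loading a <1, 1...> constant from memory.)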
static SDValue combineIncDecVector(SDNode *N, SelectionDAG &DAG) {
assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
"Unexpected opcode for increment/decrement transform");
// Pseudo-legality check: getOnesVector() expects one of these types, so bail
// out and wait for legalization if we have an unsupported vector length.
EVT VT = N->getValueType(0);
if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
return SDValue();
APInt SplatVal;
if (!isConstantSplat(N->getOperand(1), SplatVal) || !SplatVal.isOneValue())
return SDValue();
SDValue AllOnesVec = getOnesVector(VT, DAG, SDLoc(N));
unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
return DAG.getNode(NewOpcode, SDLoc(N), VT, N->getOperand(0), AllOnesVec);
}
static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
const SDLoc &DL, EVT VT,
const X86Subtarget &Subtarget) {
// Example of pattern we try to detect:
// t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
//(add (build_vector (extract_elt t, 0),
// (extract_elt t, 2),
// (extract_elt t, 4),
// (extract_elt t, 6)),
// (build_vector (extract_elt t, 1),
// (extract_elt t, 3),
// (extract_elt t, 5),
// (extract_elt t, 7)))
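// (For reference: VPMADDWD computes, per i32 result lane,
//    r[i] = a[2*i] * b[2*i] + a[2*i+1] * b[2*i+1]
//  on signed i16 inputs, which is exactly the interleaved extract/add
//  structure being matched here.)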
if (!Subtarget.hasSSE2())
return SDValue();
if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
Op1.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
VT.getVectorNumElements() < 4 ||
!isPowerOf2_32(VT.getVectorNumElements()))
return SDValue();
// Check if one of Op0,Op1 is of the form:
// (build_vector (extract_elt Mul, 0),
// (extract_elt Mul, 2),
// (extract_elt Mul, 4),
// ...
// the other is of the form:
// (build_vector (extract_elt Mul, 1),
// (extract_elt Mul, 3),
// (extract_elt Mul, 5),
// ...
// and identify Mul.
SDValue Mul;
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
// TODO: Be more tolerant to undefs.
if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
if (!Const0L || !Const1L || !Const0H || !Const1H)
return SDValue();
unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
// Commutativity of mul allows factors of a product to reorder.
if (Idx0L > Idx1L)
std::swap(Idx0L, Idx1L);
if (Idx0H > Idx1H)
std::swap(Idx0H, Idx1H);
// Commutativity of add allows pairs of factors to reorder.
if (Idx0L > Idx0H) {
std::swap(Idx0L, Idx0H);
std::swap(Idx1L, Idx1H);
}
if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
Idx1H != 2 * i + 3)
return SDValue();
if (!Mul) {
// First time an extract_elt's source vector is visited. It must be a MUL
// with twice as many vector elements as the BUILD_VECTOR.
// Both extracts must be from the same MUL.
Mul = Op0L->getOperand(0);
if (Mul->getOpcode() != ISD::MUL ||
Mul.getValueType().getVectorNumElements() != 2 * e)
return SDValue();
}
// Check that the extract is from the same MUL previously seen.
if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
return SDValue();
}
// Check if the Mul source can be safely shrunk.
ShrinkMode Mode;
if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16)
return SDValue();
auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
// Shrink by adding truncate nodes and let DAGCombine fold with the
// sources.
EVT InVT = Ops[0].getValueType();
assert(InVT.getScalarType() == MVT::i32 &&
"Unexpected scalar element type");
assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
InVT.getVectorNumElements() / 2);
EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
InVT.getVectorNumElements());
return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
};
return SplitOpsAndApply(DAG, Subtarget, DL, VT,
{ Mul.getOperand(0), Mul.getOperand(1) },
PMADDBuilder);
}
// Try to turn (add (umax X, C), -C) into (psubus X, C)
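// For example (illustrative constants): (add (umax X, 64), -64) computes
// max(X, 64) - 64 == max(X - 64, 0), i.e. an unsigned saturating subtract,
// which lowers to PSUBUSB/PSUBUSW for i8/i16 vectors.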
static SDValue combineAddToSUBUS(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
EVT VT = N->getValueType(0);
// psubus is available in SSE2 for i8 and i16 vectors.
if (!VT.isVector() || VT.getVectorNumElements() < 2 ||
!isPowerOf2_32(VT.getVectorNumElements()) ||
!(VT.getVectorElementType() == MVT::i8 ||
VT.getVectorElementType() == MVT::i16))
return SDValue();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
if (Op0.getOpcode() != ISD::UMAX)
return SDValue();
// The add should have a constant that is the negative of the max.
// TODO: Handle build_vectors with undef elements.
auto MatchUSUBSAT = [](ConstantSDNode *Max, ConstantSDNode *Op) {
return Max->getAPIntValue() == (-Op->getAPIntValue());
};
if (!ISD::matchBinaryPredicate(Op0.getOperand(1), Op1, MatchUSUBSAT))
return SDValue();
SDLoc DL(N);
return DAG.getNode(ISD::USUBSAT, DL, VT, Op0.getOperand(0),
Op0.getOperand(1));
}
// Attempt to turn this pattern into PMADDWD.
// (mul (add (zext (build_vector)), (zext (build_vector))),
// (add (zext (build_vector)), (zext (build_vector)))
static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
const SDLoc &DL, EVT VT,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
return SDValue();
if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
VT.getVectorNumElements() < 4 ||
!isPowerOf2_32(VT.getVectorNumElements()))
return SDValue();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
// All inputs need to be sign extends.
// TODO: Support ZERO_EXTEND from known positive?
if (N00.getOpcode() != ISD::SIGN_EXTEND ||
N01.getOpcode() != ISD::SIGN_EXTEND ||
N10.getOpcode() != ISD::SIGN_EXTEND ||
N11.getOpcode() != ISD::SIGN_EXTEND)
return SDValue();
// Peek through the extends.
N00 = N00.getOperand(0);
N01 = N01.getOperand(0);
N10 = N10.getOperand(0);
N11 = N11.getOperand(0);
// Must be extending from vXi16.
EVT InVT = N00.getValueType();
if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
N10.getValueType() != InVT || N11.getValueType() != InVT)
return SDValue();
// All inputs should be build_vectors.
if (N00.getOpcode() != ISD::BUILD_VECTOR ||
N01.getOpcode() != ISD::BUILD_VECTOR ||
N10.getOpcode() != ISD::BUILD_VECTOR ||
N11.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
// For each element, we need to ensure that the odd element from one vector
// is multiplied by the odd element of the other vector, and that the even
// element from one vector is multiplied by the even element from the other
// vector. So for each element i, we need to make sure the following
// operation is performed:
// A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
SDValue In0, In1;
for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
SDValue N00Elt = N00.getOperand(i);
SDValue N01Elt = N01.getOperand(i);
SDValue N10Elt = N10.getOperand(i);
SDValue N11Elt = N11.getOperand(i);
// TODO: Be more tolerant to undefs.
if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
return SDValue();
unsigned IdxN00 = ConstN00Elt->getZExtValue();
unsigned IdxN01 = ConstN01Elt->getZExtValue();
unsigned IdxN10 = ConstN10Elt->getZExtValue();
unsigned IdxN11 = ConstN11Elt->getZExtValue();
// Add is commutative so indices can be reordered.
if (IdxN00 > IdxN10) {
std::swap(IdxN00, IdxN10);
std::swap(IdxN01, IdxN11);
}
// N0 indices must be the even elements. N1 indices must be the next odd elements.
if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
return SDValue();
SDValue N00In = N00Elt.getOperand(0);
SDValue N01In = N01Elt.getOperand(0);
SDValue N10In = N10Elt.getOperand(0);
SDValue N11In = N11Elt.getOperand(0);
// First time we find an input capture it.
if (!In0) {
In0 = N00In;
In1 = N01In;
}
// Mul is commutative so the input vectors can be in any order.
// Canonicalize to make the compares easier.
if (In0 != N00In)
std::swap(N00In, N01In);
if (In0 != N10In)
std::swap(N10In, N11In);
if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
return SDValue();
}
auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
// Shrink by adding truncate nodes and let DAGCombine fold with the
// sources.
EVT InVT = Ops[0].getValueType();
assert(InVT.getScalarType() == MVT::i16 &&
"Unexpected scalar element type");
assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
InVT.getVectorNumElements() / 2);
return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
};
return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
PMADDBuilder);
}
static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
const SDNodeFlags Flags = N->getFlags();
if (Flags.hasVectorReduction()) {
if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
return Sad;
if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
return MAdd;
}
EVT VT = N->getValueType(0);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
return MAdd;
if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
return MAdd;
// Try to synthesize horizontal adds from adds of shuffles.
if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
VT == MVT::v8i32) &&
Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, true) &&
shouldUseHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
HADDBuilder);
}
if (SDValue V = combineIncDecVector(N, DAG))
return V;
if (SDValue V = combineAddToSUBUS(N, DAG, Subtarget))
return V;
return combineAddOrSubToADCOrSBB(N, DAG);
}
static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// PSUBUS is supported, starting from SSE2, but truncation for v8i32
// is only worth it with SSSE3 (PSHUFB).
if (!(Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) &&
!(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
!(Subtarget.hasAVX() && (VT == MVT::v32i8 || VT == MVT::v16i16)) &&
!(Subtarget.useBWIRegs() && (VT == MVT::v64i8 || VT == MVT::v32i16 ||
VT == MVT::v16i32 || VT == MVT::v8i64)))
return SDValue();
SDValue SubusLHS, SubusRHS;
// Try to find umax(a,b) - b or a - umin(a,b) patterns;
// they may be converted to subus(a,b).
// TODO: Need to add IR canonicalization for this code.
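// (Both forms are equivalent to an unsigned saturating subtract:
//  umax(a,b) - b is a - b when a >= b and 0 otherwise, and likewise
//  a - umin(a,b) is a - b when a >= b and 0 otherwise.)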
if (Op0.getOpcode() == ISD::UMAX) {
SubusRHS = Op1;
SDValue MaxLHS = Op0.getOperand(0);
SDValue MaxRHS = Op0.getOperand(1);
if (MaxLHS == Op1)
SubusLHS = MaxRHS;
else if (MaxRHS == Op1)
SubusLHS = MaxLHS;
else
return SDValue();
} else if (Op1.getOpcode() == ISD::UMIN) {
SubusLHS = Op0;
SDValue MinLHS = Op1.getOperand(0);
SDValue MinRHS = Op1.getOperand(1);
if (MinLHS == Op0)
SubusRHS = MinRHS;
else if (MinRHS == Op0)
SubusRHS = MinLHS;
else
return SDValue();
} else
return SDValue();
auto USUBSATBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(ISD::USUBSAT, DL, Ops[0].getValueType(), Ops);
};
// PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
// special preprocessing in some cases.
if (VT != MVT::v8i32 && VT != MVT::v16i32 && VT != MVT::v8i64)
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
{ SubusLHS, SubusRHS }, USUBSATBuilder);
// The special preprocessing case can only be applied
// if the value was zero extended from 16 bits,
// so we require the upper 16 bits to be zero for 32-bit
// values, or the upper 48 bits for 64-bit values.
KnownBits Known = DAG.computeKnownBits(SubusLHS);
unsigned NumZeros = Known.countMinLeadingZeros();
if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
return SDValue();
EVT ExtType = SubusLHS.getValueType();
EVT ShrinkedType;
if (VT == MVT::v8i32 || VT == MVT::v8i64)
ShrinkedType = MVT::v8i16;
else
ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
// If SubusLHS is zero extended, truncate SubusRHS to its
// size: SubusRHS = umin(0xFFF.., SubusRHS).
SDValue SaturationConst =
DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
ShrinkedType.getScalarSizeInBits()),
SDLoc(SubusLHS), ExtType);
SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
SaturationConst);
SDValue NewSubusLHS =
DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
SDValue Psubus =
SplitOpsAndApply(DAG, Subtarget, SDLoc(N), ShrinkedType,
{ NewSubusLHS, NewSubusRHS }, USUBSATBuilder);
// Zero extend the result; it may be used somewhere as 32 bits.
// If it is not, the zext and a following trunc will shrink it back down.
return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
}
static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
// X86 can't encode an immediate LHS of a sub. See if we can push the
// negation into a preceding instruction.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
// If the RHS of the sub is a XOR with one use and a constant, invert the
// immediate. Then add one to the LHS of the sub so we can turn
// X-Y -> X+~Y+1, saving one register.
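// (Worked example with made-up constants: 5 - (Z ^ 3) becomes
//  (Z ^ ~3) + 6, since ~(Z ^ 3) == Z ^ ~3 and the +1 folds into the new
//  constant addend.)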
if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
isa<ConstantSDNode>(Op1.getOperand(1))) {
APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
EVT VT = Op0.getValueType();
SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
Op1.getOperand(0),
DAG.getConstant(~XorC, SDLoc(Op1), VT));
return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
}
}
// Try to synthesize horizontal subs from subs of shuffles.
EVT VT = N->getValueType(0);
if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
VT == MVT::v8i32) &&
Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, false) &&
shouldUseHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
HSUBBuilder);
}
if (SDValue V = combineIncDecVector(N, DAG))
return V;
// Try to create PSUBUS if SUB's argument is max/min
if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
return V;
return combineAddOrSubToADCOrSBB(N, DAG);
}
static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = N->getSimpleValueType(0);
SDLoc DL(N);
if (N->getOperand(0) == N->getOperand(1)) {
if (N->getOpcode() == X86ISD::PCMPEQ)
return DAG.getConstant(-1, DL, VT);
if (N->getOpcode() == X86ISD::PCMPGT)
return DAG.getConstant(0, DL, VT);
}
return SDValue();
}
static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
MVT OpVT = N->getSimpleValueType(0);
bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
SDLoc dl(N);
SDValue Vec = N->getOperand(0);
SDValue SubVec = N->getOperand(1);
unsigned IdxVal = N->getConstantOperandVal(2);
MVT SubVecVT = SubVec.getSimpleValueType();
if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
// Inserting zeros into zeros is a nop.
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
return getZeroVector(OpVT, Subtarget, DAG, dl);
// If we're inserting into a zero vector and then into a larger zero vector,
// just insert into the larger zero vector directly.
if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
unsigned Idx2Val = SubVec.getConstantOperandVal(2);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
getZeroVector(OpVT, Subtarget, DAG, dl),
SubVec.getOperand(1),
DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
}
// If we're inserting into a zero vector, our input was extracted from an
// insert into a zero vector of the same type, and the extraction was at
// least as large as the original insertion, just insert the original
// subvector into a zero vector.
if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
SubVec.getConstantOperandVal(1) == 0 &&
SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
SDValue Ins = SubVec.getOperand(0);
if (Ins.getConstantOperandVal(2) == 0 &&
ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
getZeroVector(OpVT, Subtarget, DAG, dl),
Ins.getOperand(1), N->getOperand(2));
}
// If we're inserting a bitcast into zeros, rewrite the insert and move the
// bitcast to the other side. This helps with detecting zero extending
// during isel.
// TODO: Is this useful for other indices than 0?
if (!IsI1Vector && SubVec.getOpcode() == ISD::BITCAST && IdxVal == 0) {
MVT CastVT = SubVec.getOperand(0).getSimpleValueType();
unsigned NumElems = OpVT.getSizeInBits() / CastVT.getScalarSizeInBits();
MVT NewVT = MVT::getVectorVT(CastVT.getVectorElementType(), NumElems);
SDValue Insert = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT,
DAG.getBitcast(NewVT, Vec),
SubVec.getOperand(0), N->getOperand(2));
return DAG.getBitcast(OpVT, Insert);
}
}
// Stop here if this is an i1 vector.
if (IsI1Vector)
return SDValue();
// If this is an insert of an extract, combine to a shuffle. Don't do this
// if the insert or extract can be represented with a subregister operation.
if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
SubVec.getOperand(0).getSimpleValueType() == OpVT &&
(IdxVal != 0 || !Vec.isUndef())) {
int ExtIdxVal = SubVec.getConstantOperandVal(1);
if (ExtIdxVal != 0) {
int VecNumElts = OpVT.getVectorNumElements();
int SubVecNumElts = SubVecVT.getVectorNumElements();
SmallVector<int, 64> Mask(VecNumElts);
// First create an identity shuffle mask.
for (int i = 0; i != VecNumElts; ++i)
Mask[i] = i;
// Now insert the extracted portion.
for (int i = 0; i != SubVecNumElts; ++i)
Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
}
}
// Fold two 16-byte or 32-byte subvector loads into one 32-byte or 64-byte
// load:
// (insert_subvector (insert_subvector undef, (load16 addr), 0),
// (load16 addr + 16), Elts/2)
// --> load32 addr
// or:
// (insert_subvector (insert_subvector undef, (load32 addr), 0),
// (load32 addr + 32), Elts/2)
// --> load64 addr
// or a 16-byte or 32-byte broadcast:
// (insert_subvector (insert_subvector undef, (load16 addr), 0),
// (load16 addr), Elts/2)
// --> X86SubVBroadcast(load16 addr)
// or:
// (insert_subvector (insert_subvector undef, (load32 addr), 0),
// (load32 addr), Elts/2)
// --> X86SubVBroadcast(load32 addr)
if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
OpVT.getSizeInBits() == SubVecVT.getSizeInBits() * 2) {
if (isNullConstant(Vec.getOperand(2))) {
SDValue SubVec2 = Vec.getOperand(1);
// If needed, look through bitcasts to get to the load.
if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(SubVec2))) {
bool Fast;
unsigned Alignment = FirstLd->getAlignment();
unsigned AS = FirstLd->getAddressSpace();
const X86TargetLowering *TLI = Subtarget.getTargetLowering();
if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
OpVT, AS, Alignment, &Fast) && Fast) {
SDValue Ops[] = {SubVec2, SubVec};
if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG,
Subtarget, false))
return Ld;
}
}
// If lower/upper loads are the same and there's no other use of the lower
// load, then splat the loaded value with a broadcast.
if (auto *Ld = dyn_cast<LoadSDNode>(peekThroughOneUseBitcasts(SubVec2)))
if (SubVec2 == SubVec && ISD::isNormalLoad(Ld) && Vec.hasOneUse())
return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT, SubVec);
// If this is a subv_broadcast inserted into both halves, use a larger
// subv_broadcast.
if (SubVec.getOpcode() == X86ISD::SUBV_BROADCAST && SubVec == SubVec2)
return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT,
SubVec.getOperand(0));
// If we're inserting all zeros into the upper half, change this to
// an insert into an all zeros vector. We will match this to a move
// with implicit upper bit zeroing during isel.
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
getZeroVector(OpVT, Subtarget, DAG, dl), SubVec2,
Vec.getOperand(2));
// If we are inserting into both halves of the vector, the starting
// vector should be undef. If it isn't, make it so. Only do this if the
// early insert has no other uses.
// TODO: Should this be a generic DAG combine?
if (!Vec.getOperand(0).isUndef() && Vec.hasOneUse()) {
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, DAG.getUNDEF(OpVT),
SubVec2, Vec.getOperand(2));
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec, SubVec,
N->getOperand(2));
}
}
}
return SDValue();
}
static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
// For AVX1 only, if we are extracting from a 256-bit and+not (which will
// eventually get combined/lowered into ANDNP) with a concatenated operand,
// split the 'and' into 128-bit ops to avoid the concatenate and extract.
// We let generic combining take over from there to simplify the
// insert/extract and 'not'.
// This pattern emerges during AVX1 legalization. We handle it before lowering
// to avoid complications like splitting constant vector loads.
// Capture the original wide type in the likely case that we need to bitcast
// back to this type.
EVT VT = N->getValueType(0);
EVT WideVecVT = N->getOperand(0).getValueType();
SDValue WideVec = peekThroughBitcasts(N->getOperand(0));
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
TLI.isTypeLegal(WideVecVT) &&
WideVecVT.getSizeInBits() == 256 && WideVec.getOpcode() == ISD::AND) {
auto isConcatenatedNot = [] (SDValue V) {
V = peekThroughBitcasts(V);
if (!isBitwiseNot(V))
return false;
SDValue NotOp = V->getOperand(0);
return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
};
if (isConcatenatedNot(WideVec.getOperand(0)) ||
isConcatenatedNot(WideVec.getOperand(1))) {
// extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
SDValue Concat = split256IntArith(WideVec, DAG);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
DAG.getBitcast(WideVecVT, Concat), N->getOperand(1));
}
}
if (DCI.isBeforeLegalizeOps())
return SDValue();
MVT OpVT = N->getSimpleValueType(0);
SDValue InVec = N->getOperand(0);
unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
if (ISD::isBuildVectorAllZeros(InVec.getNode()))
return getZeroVector(OpVT, Subtarget, DAG, SDLoc(N));
if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
if (OpVT.getScalarType() == MVT::i1)
return DAG.getConstant(1, SDLoc(N), OpVT);
return getOnesVector(OpVT, DAG, SDLoc(N));
}
if (InVec.getOpcode() == ISD::BUILD_VECTOR)
return DAG.getBuildVector(
OpVT, SDLoc(N),
InVec.getNode()->ops().slice(IdxVal, OpVT.getVectorNumElements()));
// If we're extracting the lowest subvector and we're the only user,
// we may be able to perform this with a smaller vector width.
if (IdxVal == 0 && InVec.hasOneUse()) {
unsigned InOpcode = InVec.getOpcode();
if (OpVT == MVT::v2f64 && InVec.getValueType() == MVT::v4f64) {
// v2f64 CVTDQ2PD(v4i32).
if (InOpcode == ISD::SINT_TO_FP &&
InVec.getOperand(0).getValueType() == MVT::v4i32) {
return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), OpVT, InVec.getOperand(0));
}
// v2f64 CVTPS2PD(v4f32).
if (InOpcode == ISD::FP_EXTEND &&
InVec.getOperand(0).getValueType() == MVT::v4f32) {
return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), OpVT, InVec.getOperand(0));
}
}
if ((InOpcode == ISD::ZERO_EXTEND || InOpcode == ISD::SIGN_EXTEND) &&
OpVT.is128BitVector() &&
InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
unsigned ExtOp =
InOpcode == ISD::ZERO_EXTEND ? ISD::ZERO_EXTEND_VECTOR_INREG
: ISD::SIGN_EXTEND_VECTOR_INREG;
return DAG.getNode(ExtOp, SDLoc(N), OpVT, InVec.getOperand(0));
}
if ((InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
OpVT.is128BitVector() &&
InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
return DAG.getNode(InOpcode, SDLoc(N), OpVT, InVec.getOperand(0));
}
if (InOpcode == ISD::BITCAST) {
// TODO - do this for target shuffles in general.
SDValue InVecBC = peekThroughOneUseBitcasts(InVec);
if (InVecBC.getOpcode() == X86ISD::PSHUFB && OpVT.is128BitVector()) {
SDLoc DL(N);
SDValue SubPSHUFB =
DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
extract128BitVector(InVecBC.getOperand(0), 0, DAG, DL),
extract128BitVector(InVecBC.getOperand(1), 0, DAG, DL));
return DAG.getBitcast(OpVT, SubPSHUFB);
}
}
}
return SDValue();
}
static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
SDValue Src = N->getOperand(0);
// If this is a scalar_to_vector to v1i1 from an AND with 1, bypass the AND.
// This occurs frequently in our masked scalar intrinsic code and our
// floating point select lowering with AVX512.
// TODO: SimplifyDemandedBits instead?
if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
if (C->getAPIntValue().isOneValue())
return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), MVT::v1i1,
Src.getOperand(0));
// Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
if (C->isNullValue())
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
Src.getOperand(0), Src.getOperand(1));
return SDValue();
}
// Simplify PMULDQ and PMULUDQ operations.
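// These nodes multiply only the low 32 bits of each 64-bit element to form
// a 64-bit product, which is why the combines below can ignore everything
// above bit 31 of each operand element.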
static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
// Canonicalize constant to RHS.
if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
!DAG.isConstantIntBuildVectorOrConstantInt(RHS))
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
// Multiply by zero.
if (ISD::isBuildVectorAllZeros(RHS.getNode()))
return RHS;
// Aggressively peek through ops to get at the demanded low bits.
APInt DemandedMask = APInt::getLowBitsSet(64, 32);
SDValue DemandedLHS = DAG.GetDemandedBits(LHS, DemandedMask);
SDValue DemandedRHS = DAG.GetDemandedBits(RHS, DemandedMask);
if (DemandedLHS || DemandedRHS)
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
DemandedLHS ? DemandedLHS : LHS,
DemandedRHS ? DemandedRHS : RHS);
// PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
return SDValue(N, 0);
return SDValue();
}
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
case ISD::SCALAR_TO_VECTOR:
return combineScalarToVector(N, DAG);
case ISD::EXTRACT_VECTOR_ELT:
case X86ISD::PEXTRW:
case X86ISD::PEXTRB:
return combineExtractVectorElt(N, DAG, DCI, Subtarget);
case ISD::INSERT_SUBVECTOR:
return combineInsertSubvector(N, DAG, DCI, Subtarget);
case ISD::EXTRACT_SUBVECTOR:
return combineExtractSubvector(N, DAG, DCI, Subtarget);
case ISD::VSELECT:
case ISD::SELECT:
case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
case X86ISD::CMP: return combineCMP(N, DAG);
case ISD::ADD: return combineAdd(N, DAG, Subtarget);
case ISD::SUB: return combineSub(N, DAG, Subtarget);
case X86ISD::SBB: return combineSBB(N, DAG);
case X86ISD::ADC: return combineADC(N, DAG, DCI);
case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: return combineShift(N, DAG, DCI, Subtarget);
case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
case ISD::STORE: return combineStore(N, DAG, Subtarget);
case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget);
case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
case ISD::FADD:
case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
case X86ISD::FXOR:
case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
case X86ISD::FMIN:
case X86ISD::FMAX: return combineFMinFMax(N, DAG);
case ISD::FMINNUM:
case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
case X86ISD::CVTSI2P:
case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
case X86ISD::BT: return combineBT(N, DAG, DCI);
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
case X86ISD::PACKSS:
case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
case X86ISD::VSHL:
case X86ISD::VSRA:
case X86ISD::VSRL:
return combineVectorShiftVar(N, DAG, DCI, Subtarget);
case X86ISD::VSHLI:
case X86ISD::VSRAI:
case X86ISD::VSRLI:
return combineVectorShiftImm(N, DAG, DCI, Subtarget);
case X86ISD::PINSRB:
case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
case X86ISD::INSERTPS:
case X86ISD::EXTRQI:
case X86ISD::INSERTQI:
case X86ISD::PALIGNR:
case X86ISD::VSHLDQ:
case X86ISD::VSRLDQ:
case X86ISD::BLENDI:
case X86ISD::UNPCKH:
case X86ISD::UNPCKL:
case X86ISD::MOVHLPS:
case X86ISD::MOVLHPS:
case X86ISD::PSHUFB:
case X86ISD::PSHUFD:
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
case X86ISD::MOVSHDUP:
case X86ISD::MOVSLDUP:
case X86ISD::MOVDDUP:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
case X86ISD::VBROADCAST:
case X86ISD::VPPERM:
case X86ISD::VPERMI:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
case X86ISD::VPERMIL2:
case X86ISD::VPERMILPI:
case X86ISD::VPERMILPV:
case X86ISD::VPERM2X128:
case X86ISD::SHUF128:
case X86ISD::VZEXT_MOVL:
case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
case X86ISD::FMADD_RND:
case X86ISD::FMSUB:
case X86ISD::FMSUB_RND:
case X86ISD::FNMADD:
case X86ISD::FNMADD_RND:
case X86ISD::FNMSUB:
case X86ISD::FNMSUB_RND:
case ISD::FMA: return combineFMA(N, DAG, Subtarget);
case X86ISD::FMADDSUB_RND:
case X86ISD::FMSUBADD_RND:
case X86ISD::FMADDSUB:
case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, Subtarget);
case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI);
case X86ISD::MGATHER:
case X86ISD::MSCATTER:
case ISD::MGATHER:
case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget);
case X86ISD::PCMPEQ:
case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
case X86ISD::PMULDQ:
case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI);
}
return SDValue();
}
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
if (!isTypeLegal(VT))
return false;
// There are no vXi8 shifts.
if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
return false;
// 8-bit multiply is probably not much cheaper than 32-bit multiply, and
// we have specializations to turn 32-bit multiply into LEA or other ops.
// Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
// check for a constant operand to the multiply.
if (Opc == ISD::MUL && VT == MVT::i8)
return false;
// i16 instruction encodings are longer and some i16 instructions are slow,
// so those are not desirable.
if (VT == MVT::i16) {
switch (Opc) {
default:
break;
case ISD::LOAD:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
case ISD::SHL:
case ISD::SRL:
case ISD::SUB:
case ISD::ADD:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
return false;
}
}
// Any legal type not explicitly accounted for above here is desirable.
return true;
}
SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
SDValue Value, SDValue Addr,
SelectionDAG &DAG) const {
const Module *M = DAG.getMachineFunction().getMMI().getModule();
Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
if (IsCFProtectionSupported) {
// If control-flow branch protection is enabled, we need to add a
// notrack prefix to the indirect branch.
// In order to do that we create an NT_BRIND SDNode.
// Upon ISEL, the pattern will convert it to a jmp with the NoTrack prefix.
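// (Roughly: instead of "jmp *%rax" this emits "notrack jmp *%rax", so CET
// indirect branch tracking does not require an ENDBR at the jump table
// targets; the register is just illustrative.)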
return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
}
return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
}
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
EVT VT = Op.getValueType();
bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
isa<ConstantSDNode>(Op.getOperand(1));
// i16 is legal, but undesirable since i16 instruction encodings are longer
// and some i16 instructions are slow.
// 8-bit multiply-by-constant can usually be expanded to something cheaper
// using LEA and/or other ALU ops.
if (VT != MVT::i16 && !Is8BitMulByConstant)
return false;
auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
if (!Op.hasOneUse())
return false;
SDNode *User = *Op->use_begin();
if (!ISD::isNormalStore(User))
return false;
auto *Ld = cast<LoadSDNode>(Load);
auto *St = cast<StoreSDNode>(User);
return Ld->getBasePtr() == St->getBasePtr();
};
auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
return false;
if (!Op.hasOneUse())
return false;
SDNode *User = *Op->use_begin();
if (User->getOpcode() != ISD::ATOMIC_STORE)
return false;
auto *Ld = cast<AtomicSDNode>(Load);
auto *St = cast<AtomicSDNode>(User);
return Ld->getBasePtr() == St->getBasePtr();
};
bool Commute = false;
switch (Op.getOpcode()) {
default: return false;
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
break;
case ISD::SHL:
case ISD::SRL: {
SDValue N0 = Op.getOperand(0);
// Look out for (store (shl (load), x)).
if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
return false;
break;
}
case ISD::ADD:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
Commute = true;
LLVM_FALLTHROUGH;
case ISD::SUB: {
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
// Avoid disabling potential load folding opportunities.
if (MayFoldLoad(N1) &&
(!Commute || !isa<ConstantSDNode>(N0) ||
(Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
return false;
if (MayFoldLoad(N0) &&
((Commute && !isa<ConstantSDNode>(N1)) ||
(Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
return false;
if (IsFoldableAtomicRMW(N0, Op) ||
(Commute && IsFoldableAtomicRMW(N1, Op)))
return false;
}
}
PVT = MVT::i32;
return true;
}
bool X86TargetLowering::
isDesirableToCombineBuildVectorToShuffleTruncate(
ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
"Element count mismatch");
assert(
Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
"Shuffle Mask expected to be legal");
// For 32-bit elements VPERMD is better than shuffle+truncate.
// TODO: After we improve lowerBuildVector, add an exception for VPERMW.
if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
return false;
if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
return false;
return true;
}
//===----------------------------------------------------------------------===//
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
// Helper to match a string separated by whitespace.
static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
for (StringRef Piece : Pieces) {
if (!S.startswith(Piece)) // Check if the piece matches.
return false;
S = S.substr(Piece.size());
StringRef::size_type Pos = S.find_first_not_of(" \t");
if (Pos == 0) // We matched a prefix.
return false;
S = S.substr(Pos);
}
return S.empty();
}
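// Returns true when the split clobber list names "~{cc}", "~{flags}" and
// "~{fpsr}" (and "~{dirflag}" as well when there are four pieces).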
static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
if (AsmPieces.size() == 3)
return true;
else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
return true;
}
}
return false;
}
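// Recognize simple inline-asm byte-swap idioms (bswap/bswapl/bswapq, the
// rorw/rolw $8 and rorw/rorl/rorw sequences, and the bswap %eax / bswap %edx /
// xchgl pair for i64) and lower the whole asm call to llvm.bswap instead.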
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
const std::string &AsmStr = IA->getAsmString();
IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
return false;
// TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
SmallVector<StringRef, 4> AsmPieces;
SplitString(AsmStr, AsmPieces, ";\n");
switch (AsmPieces.size()) {
default: return false;
case 1:
// FIXME: this should verify that we are targeting a 486 or better. If not,
// we will turn this bswap into something that will be lowered to logical
// ops instead of emitting the bswap asm. For now, we don't support 486 or
// lower so don't worry about this.
// bswap $0
if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
// No need to check constraints, nothing other than the equivalent of
// "=r,0" would be valid here.
return IntrinsicLowering::LowerToByteSwap(CI);
}
// rorw $$8, ${0:w} --> llvm.bswap.i16
if (CI->getType()->isIntegerTy(16) &&
IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
(matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
AsmPieces.clear();
StringRef ConstraintsStr = IA->getConstraintString();
SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
array_pod_sort(AsmPieces.begin(), AsmPieces.end());
if (clobbersFlagRegisters(AsmPieces))
return IntrinsicLowering::LowerToByteSwap(CI);
}
break;
case 3:
if (CI->getType()->isIntegerTy(32) &&
IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
AsmPieces.clear();
StringRef ConstraintsStr = IA->getConstraintString();
SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
array_pod_sort(AsmPieces.begin(), AsmPieces.end());
if (clobbersFlagRegisters(AsmPieces))
return IntrinsicLowering::LowerToByteSwap(CI);
}
if (CI->getType()->isIntegerTy(64)) {
InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
if (Constraints.size() >= 2 &&
Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
// bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
return IntrinsicLowering::LowerToByteSwap(CI);
}
}
break;
}
return false;
}
/// Given a constraint letter, return the type of constraint for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'R':
case 'q':
case 'Q':
case 'f':
case 't':
case 'u':
case 'y':
case 'x':
case 'v':
case 'Y':
case 'l':
case 'k': // AVX512 masking registers.
return C_RegisterClass;
case 'a':
case 'b':
case 'c':
case 'd':
case 'S':
case 'D':
case 'A':
return C_Register;
case 'I':
case 'J':
case 'K':
case 'L':
case 'M':
case 'N':
case 'G':
case 'C':
case 'e':
case 'Z':
return C_Other;
default:
break;
}
}
else if (Constraint.size() == 2) {
switch (Constraint[0]) {
default:
break;
case 'Y':
switch (Constraint[1]) {
default:
break;
case 'z':
case '0':
return C_Register;
case 'i':
case 'm':
case 'k':
case 't':
case '2':
return C_RegisterClass;
}
}
}
return TargetLowering::getConstraintType(Constraint);
}
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const {
ConstraintWeight weight = CW_Invalid;
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
switch (*constraint) {
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
LLVM_FALLTHROUGH;
case 'R':
case 'q':
case 'Q':
case 'a':
case 'b':
case 'c':
case 'd':
case 'S':
case 'D':
case 'A':
if (CallOperandVal->getType()->isIntegerTy())
weight = CW_SpecificReg;
break;
case 'f':
case 't':
case 'u':
if (type->isFloatingPointTy())
weight = CW_SpecificReg;
break;
case 'y':
if (type->isX86_MMXTy() && Subtarget.hasMMX())
weight = CW_SpecificReg;
break;
case 'Y': {
unsigned Size = StringRef(constraint).size();
// Pick 'i' as the default next char, since 'Y' and 'Yi' are synonymous when matching a plain 'Y'.
char NextChar = Size == 2 ? constraint[1] : 'i';
if (Size > 2)
break;
switch (NextChar) {
default:
return CW_Invalid;
// XMM0
case 'z':
case '0':
if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
return CW_SpecificReg;
return CW_Invalid;
// Conditional OpMask regs (AVX512)
case 'k':
if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
return CW_Register;
return CW_Invalid;
// Any MMX reg
case 'm':
if (type->isX86_MMXTy() && Subtarget.hasMMX())
return weight;
return CW_Invalid;
// Any SSE reg when ISA >= SSE2, same as 'Y'
case 'i':
case 't':
case '2':
if (!Subtarget.hasSSE2())
return CW_Invalid;
break;
}
// Fall through (handle "Y" constraint).
LLVM_FALLTHROUGH;
}
case 'v':
if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
weight = CW_Register;
LLVM_FALLTHROUGH;
case 'x':
if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
weight = CW_Register;
break;
case 'k':
// Enable conditional vector operations using %k<#> registers.
if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
weight = CW_Register;
break;
case 'I':
if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
if (C->getZExtValue() <= 31)
weight = CW_Constant;
}
break;
case 'J':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if (C->getZExtValue() <= 63)
weight = CW_Constant;
}
break;
case 'K':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
weight = CW_Constant;
}
break;
case 'L':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
weight = CW_Constant;
}
break;
case 'M':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if (C->getZExtValue() <= 3)
weight = CW_Constant;
}
break;
case 'N':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if (C->getZExtValue() <= 0xff)
weight = CW_Constant;
}
break;
case 'G':
case 'C':
if (isa<ConstantFP>(CallOperandVal)) {
weight = CW_Constant;
}
break;
case 'e':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if ((C->getSExtValue() >= -0x80000000LL) &&
(C->getSExtValue() <= 0x7fffffffLL))
weight = CW_Constant;
}
break;
case 'Z':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if (C->getZExtValue() <= 0xffffffff)
weight = CW_Constant;
}
break;
}
return weight;
}
/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
if (ConstraintVT.isFloatingPoint()) {
if (Subtarget.hasSSE2())
return "Y";
if (Subtarget.hasSSE1())
return "x";
}
return TargetLowering::LowerXConstraint(ConstraintVT);
}
/// Lower the specified operand into the Ops vector.
/// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result;
// Only support length 1 constraints for now.
if (Constraint.length() > 1) return;
char ConstraintLetter = Constraint[0];
switch (ConstraintLetter) {
default: break;
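// The cases below accept immediates in the fixed ranges the constraints
// require: 'I' [0,31], 'J' [0,63], 'K' signed 8-bit, 'L' 0xff/0xffff (and
// 0xffffffff in 64-bit mode), 'M' [0,3], 'N' [0,255], 'O' [0,127]; anything
// out of range is rejected by returning without adding to Ops.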
case 'I':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 31) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'J':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 63) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'K':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (isInt<8>(C->getSExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'L':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
(Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'M':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 3) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'N':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 255) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'O':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 127) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'e': {
// 32-bit signed value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
C->getSExtValue())) {
// Widen to 64 bits here to get it sign extended.
Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
break;
}
// FIXME gcc accepts some relocatable values here too, but only in certain
// memory models; it's complicated.
}
return;
}
case 'Z': {
// 32-bit unsigned value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
C->getZExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
// FIXME gcc accepts some relocatable values here too, but only in certain
// memory models; it's complicated.
return;
}
case 'i': {
// Literal immediates are always ok.
if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
// Widen to 64 bits here to get it sign extended.
Result = DAG.getTargetConstant(CST->getSExtValue(), SDLoc(Op), MVT::i64);
break;
}
// In any sort of PIC mode addresses need to be computed at runtime by
// adding in a register or some sort of table lookup. These can't
// be used as immediates.
if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
return;
// If we are in non-pic codegen mode, we allow the address of a global (with
// an optional displacement) to be used with 'i'.
GlobalAddressSDNode *GA = nullptr;
int64_t Offset = 0;
// Match either (GA), (GA+C), (GA+C1+C2), etc.
while (1) {
if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
Offset += GA->getOffset();
break;
} else if (Op.getOpcode() == ISD::ADD) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
Offset += C->getZExtValue();
Op = Op.getOperand(0);
continue;
}
} else if (Op.getOpcode() == ISD::SUB) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
Offset += -C->getZExtValue();
Op = Op.getOperand(0);
continue;
}
}
// Otherwise, this isn't something we can handle, reject it.
return;
}
const GlobalValue *GV = GA->getGlobal();
// If we require an extra load to get this address, as in PIC mode, we
// can't accept it.
if (isGlobalStubReference(Subtarget.classifyGlobalReference(GV)))
return;
Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
GA->getValueType(0), Offset);
break;
}
}
if (Result.getNode()) {
Ops.push_back(Result);
return;
}
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
/// Check if \p RC is a general purpose register class.
/// I.e., GR* or one of their variant.
static bool isGRClass(const TargetRegisterClass &RC) {
return RC.hasSuperClassEq(&X86::GR8RegClass) ||
RC.hasSuperClassEq(&X86::GR16RegClass) ||
RC.hasSuperClassEq(&X86::GR32RegClass) ||
RC.hasSuperClassEq(&X86::GR64RegClass) ||
RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
}
/// Check if \p RC is a vector register class.
/// I.e., FR* / VR* or one of their variant.
static bool isFRClass(const TargetRegisterClass &RC) {
return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
RC.hasSuperClassEq(&X86::FR64XRegClass) ||
RC.hasSuperClassEq(&X86::VR128XRegClass) ||
RC.hasSuperClassEq(&X86::VR256XRegClass) ||
RC.hasSuperClassEq(&X86::VR512RegClass);
}
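// Map an inline asm register constraint (single letters, the two-letter 'Y'
// variants, or explicit names such as {st(N)} and {flags}) to a physical
// register and/or register class suitable for the given VT.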
std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint,
MVT VT) const {
// First, see if this is a constraint that directly corresponds to an LLVM
// register class.
if (Constraint.size() == 1) {
// GCC Constraint Letters
switch (Constraint[0]) {
default: break;
// TODO: Slight differences here in allocation order and leaving
// RIP in the class. Do they matter any more here than they do
// in the normal allocation?
case 'k':
if (Subtarget.hasAVX512()) {
// Only supported in AVX512 or later.
switch (VT.SimpleTy) {
default: break;
case MVT::i32:
return std::make_pair(0U, &X86::VK32RegClass);
case MVT::i16:
return std::make_pair(0U, &X86::VK16RegClass);
case MVT::i8:
return std::make_pair(0U, &X86::VK8RegClass);
case MVT::i1:
return std::make_pair(0U, &X86::VK1RegClass);
case MVT::i64:
return std::make_pair(0U, &X86::VK64RegClass);
}
}
break;
case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
if (Subtarget.is64Bit()) {
if (VT == MVT::i32 || VT == MVT::f32)
return std::make_pair(0U, &X86::GR32RegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16RegClass);
if (VT == MVT::i8 || VT == MVT::i1)
return std::make_pair(0U, &X86::GR8RegClass);
if (VT == MVT::i64 || VT == MVT::f64)
return std::make_pair(0U, &X86::GR64RegClass);
break;
}
LLVM_FALLTHROUGH;
// 32-bit fallthrough
case 'Q': // Q_REGS
if (VT == MVT::i32 || VT == MVT::f32)
return std::make_pair(0U, &X86::GR32_ABCDRegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16_ABCDRegClass);
if (VT == MVT::i8 || VT == MVT::i1)
return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
if (VT == MVT::i64)
return std::make_pair(0U, &X86::GR64_ABCDRegClass);
break;
case 'r': // GENERAL_REGS
case 'l': // INDEX_REGS
if (VT == MVT::i8 || VT == MVT::i1)
return std::make_pair(0U, &X86::GR8RegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16RegClass);
if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
return std::make_pair(0U, &X86::GR32RegClass);
return std::make_pair(0U, &X86::GR64RegClass);
case 'R': // LEGACY_REGS
if (VT == MVT::i8 || VT == MVT::i1)
return std::make_pair(0U, &X86::GR8_NOREXRegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16_NOREXRegClass);
if (VT == MVT::i32 || !Subtarget.is64Bit())
return std::make_pair(0U, &X86::GR32_NOREXRegClass);
return std::make_pair(0U, &X86::GR64_NOREXRegClass);
case 'f': // FP Stack registers.
// If SSE is enabled for this VT, use f80 to ensure the isel moves the
// value to the correct fpstack register class.
if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
return std::make_pair(0U, &X86::RFP32RegClass);
if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
return std::make_pair(0U, &X86::RFP64RegClass);
return std::make_pair(0U, &X86::RFP80RegClass);
case 'y': // MMX_REGS if MMX allowed.
if (!Subtarget.hasMMX()) break;
return std::make_pair(0U, &X86::VR64RegClass);
case 'Y': // SSE_REGS if SSE2 allowed
if (!Subtarget.hasSSE2()) break;
LLVM_FALLTHROUGH;
case 'v':
case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
if (!Subtarget.hasSSE1()) break;
bool VConstraint = (Constraint[0] == 'v');
switch (VT.SimpleTy) {
default: break;
// Scalar SSE types.
case MVT::f32:
case MVT::i32:
if (VConstraint && Subtarget.hasAVX512() && Subtarget.hasVLX())
return std::make_pair(0U, &X86::FR32XRegClass);
return std::make_pair(0U, &X86::FR32RegClass);
case MVT::f64:
case MVT::i64:
if (VConstraint && Subtarget.hasVLX())
return std::make_pair(0U, &X86::FR64XRegClass);
return std::make_pair(0U, &X86::FR64RegClass);
// TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
// Vector types.
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
if (VConstraint && Subtarget.hasVLX())
return std::make_pair(0U, &X86::VR128XRegClass);
return std::make_pair(0U, &X86::VR128RegClass);
// AVX types.
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
case MVT::v4i64:
case MVT::v8f32:
case MVT::v4f64:
if (VConstraint && Subtarget.hasVLX())
return std::make_pair(0U, &X86::VR256XRegClass);
return std::make_pair(0U, &X86::VR256RegClass);
case MVT::v8f64:
case MVT::v16f32:
case MVT::v16i32:
case MVT::v8i64:
return std::make_pair(0U, &X86::VR512RegClass);
}
break;
}
} else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
switch (Constraint[1]) {
default:
break;
case 'i':
case 't':
case '2':
return getRegForInlineAsmConstraint(TRI, "Y", VT);
case 'm':
if (!Subtarget.hasMMX()) break;
return std::make_pair(0U, &X86::VR64RegClass);
case 'z':
case '0':
if (!Subtarget.hasSSE1()) break;
return std::make_pair(X86::XMM0, &X86::VR128RegClass);
case 'k':
// This register class doesn't allocate k0 for masked vector operations.
if (Subtarget.hasAVX512()) { // Only supported in AVX512.
switch (VT.SimpleTy) {
default: break;
case MVT::i32:
return std::make_pair(0U, &X86::VK32WMRegClass);
case MVT::i16:
return std::make_pair(0U, &X86::VK16WMRegClass);
case MVT::i8:
return std::make_pair(0U, &X86::VK8WMRegClass);
case MVT::i1:
return std::make_pair(0U, &X86::VK1WMRegClass);
case MVT::i64:
return std::make_pair(0U, &X86::VK64WMRegClass);
}
}
break;
}
}
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass*> Res;
Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
// Not found as a standard register?
if (!Res.second) {
// Map st(0) -> st(7) -> ST0
if (Constraint.size() == 7 && Constraint[0] == '{' &&
tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
Constraint[3] == '(' &&
(Constraint[4] >= '0' && Constraint[4] <= '7') &&
Constraint[5] == ')' && Constraint[6] == '}') {
// st(7) is not allocatable and thus not a member of RFP80. Return a
// singleton class in cases where we have a reference to it.
if (Constraint[4] == '7')
return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
return std::make_pair(X86::FP0 + Constraint[4] - '0',
&X86::RFP80RegClass);
}
// GCC allows "st(0)" to be called just plain "st".
if (StringRef("{st}").equals_lower(Constraint))
return std::make_pair(X86::FP0, &X86::RFP80RegClass);
// flags -> EFLAGS
if (StringRef("{flags}").equals_lower(Constraint))
return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
+ // dirflag -> DF
+ if (StringRef("{dirflag}").equals_lower(Constraint))
+ return std::make_pair(X86::DF, &X86::DFCCRRegClass);
+
+ // fpsr -> FPSW
+ if (StringRef("{fpsr}").equals_lower(Constraint))
+ return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
+
// 'A' means [ER]AX + [ER]DX.
if (Constraint == "A") {
if (Subtarget.is64Bit())
return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
"Expecting 64, 32 or 16 bit subtarget");
return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
}
return Res;
}
// Make sure it isn't a register that requires 64-bit mode.
if (!Subtarget.is64Bit() &&
(isFRClass(*Res.second) || isGRClass(*Res.second)) &&
TRI->getEncodingValue(Res.first) >= 8) {
// Register requires REX prefix, but we're in 32-bit mode.
return std::make_pair(0, nullptr);
}
// Make sure it isn't a register that requires AVX512.
if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
TRI->getEncodingValue(Res.first) & 0x10) {
// Register requires EVEX prefix.
return std::make_pair(0, nullptr);
}
// Otherwise, check to see if this is a register class of the wrong value
// type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
// turn into {ax},{dx}.
// MVT::Other is used to specify clobber names.
if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
return Res; // Correct type already, nothing to do.
// Get a matching integer of the correct size, i.e. "ax" with MVT::i32 should
// return "eax". This should even work for things like getting 64-bit integer
// registers when given an f64 type.
const TargetRegisterClass *Class = Res.second;
// The generic code will match the first register class that contains the
// given register. Thus, based on the ordering of the tablegened file,
// the "plain" GR classes might not come first.
// Therefore, use a helper method.
if (isGRClass(*Class)) {
unsigned Size = VT.getSizeInBits();
if (Size == 1) Size = 8;
unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
if (DestReg > 0) {
bool is64Bit = Subtarget.is64Bit();
const TargetRegisterClass *RC =
Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
: Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
: Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
: Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
: nullptr;
if (Size == 64 && !is64Bit) {
// Model GCC's behavior here and select a fixed pair of 32-bit
// registers.
switch (Res.first) {
case X86::EAX:
return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
case X86::EDX:
return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
case X86::ECX:
return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
case X86::EBX:
return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
case X86::ESI:
return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
case X86::EDI:
return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
case X86::EBP:
return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
default:
return std::make_pair(0, nullptr);
}
}
if (RC && RC->contains(DestReg))
return std::make_pair(DestReg, RC);
return Res;
}
// No register found/type mismatch.
return std::make_pair(0, nullptr);
} else if (isFRClass(*Class)) {
// Handle references to XMM physical registers that got mapped into the
// wrong class. This can happen with constraints like {xmm0} where the
// target independent register mapper will just pick the first match it can
// find, ignoring the required type.
// TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
if (VT == MVT::f32 || VT == MVT::i32)
Res.second = &X86::FR32RegClass;
else if (VT == MVT::f64 || VT == MVT::i64)
Res.second = &X86::FR64RegClass;
else if (TRI->isTypeLegalForClass(X86::VR128RegClass, VT))
Res.second = &X86::VR128RegClass;
else if (TRI->isTypeLegalForClass(X86::VR256RegClass, VT))
Res.second = &X86::VR256RegClass;
else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
Res.second = &X86::VR512RegClass;
else {
// Type mismatch and not a clobber: return an error.
Res.first = 0;
Res.second = nullptr;
}
}
return Res;
}
int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS) const {
// Scaling factors are not free at all.
// An indexed folded instruction, i.e., inst (reg1, reg2, scale),
// will take 2 allocations in the out of order engine instead of 1
// for plain addressing mode, i.e. inst (reg1).
// E.g.,
// vaddps (%rsi,%rdx), %ymm0, %ymm1
// Requires two allocations (one for the load, one for the computation)
// whereas:
// vaddps (%rsi), %ymm0, %ymm1
// Requires just 1 allocation, i.e., freeing allocations for other operations
// and having fewer micro-operations to execute.
//
// For some X86 architectures, this is even worse because for instance for
// stores, the complex addressing mode forces the instruction to use the
// "load" ports instead of the dedicated "store" port.
// E.g., on Haswell:
// vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
// vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
if (isLegalAddressingMode(DL, AM, Ty, AS))
// Scale represents reg2 * scale, thus account for 1
// as soon as we use a second register.
return AM.Scale != 0;
return -1;
}
bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
// Integer division on x86 is expensive. However, when aggressively optimizing
// for code size, we prefer to use a div instruction, as it is usually smaller
// than the alternative sequence.
// The exception to this is vector division. Since x86 doesn't have vector
// integer division, leaving the division as-is is a loss even in terms of
// size, because it will have to be scalarized, while the alternative code
// sequence can be performed in vector form.
bool OptSize =
Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
return OptSize && !VT.isVector();
}
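// Split-CSR support (used for CXX_FAST_TLS): mark the function as split-CSR
// here, and below copy the callee-saved registers through virtual registers
// at entry and restore them before each exit instead of using normal spills.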
void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
if (!Subtarget.is64Bit())
return;
// Update IsSplitCSR in X86MachineFunctionInfo.
X86MachineFunctionInfo *AFI =
Entry->getParent()->getInfo<X86MachineFunctionInfo>();
AFI->setIsSplitCSR(true);
}
void X86TargetLowering::insertCopiesSplitCSR(
MachineBasicBlock *Entry,
const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
if (!IStart)
return;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
MachineBasicBlock::iterator MBBI = Entry->begin();
for (const MCPhysReg *I = IStart; *I; ++I) {
const TargetRegisterClass *RC = nullptr;
if (X86::GR64RegClass.contains(*I))
RC = &X86::GR64RegClass;
else
llvm_unreachable("Unexpected register class in CSRsViaCopy!");
unsigned NewVR = MRI->createVirtualRegister(RC);
// Create copy from CSR to a virtual register.
// FIXME: this currently does not emit CFI pseudo-instructions, it works
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
.addReg(*I);
// Insert the copy-back instructions right before the terminator.
for (auto *Exit : Exits)
BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
TII->get(TargetOpcode::COPY), *I)
.addReg(NewVR);
}
}
bool X86TargetLowering::supportSwiftError() const {
return Subtarget.is64Bit();
}
/// Returns the name of the symbol used to emit stack probes or the empty
/// string if not applicable.
StringRef X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
// If the function specifically requests stack probes, emit them.
if (MF.getFunction().hasFnAttribute("probe-stack"))
return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
// Generally, if we aren't on Windows, the platform ABI does not include
// support for stack probes, so don't emit them.
if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
return "";
// We need a stack probe to conform to the Windows ABI. Choose the right
// symbol.
if (Subtarget.is64Bit())
return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
}
Index: vendor/llvm/dist-release_80/lib/Target/X86/X86InstrFPStack.td
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/X86InstrFPStack.td (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/X86InstrFPStack.td (revision 344166)
@@ -1,742 +1,742 @@
//===- X86InstrFPStack.td - FPU Instruction Set ------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 x87 FPU instruction set, defining the
// instructions, and properties of the instructions which are needed for code
// generation, machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// FPStack specific DAG Nodes.
//===----------------------------------------------------------------------===//
def SDTX86FpGet2 : SDTypeProfile<2, 0, [SDTCisVT<0, f80>,
SDTCisVT<1, f80>]>;
def SDTX86Fld : SDTypeProfile<1, 2, [SDTCisFP<0>,
SDTCisPtrTy<1>,
SDTCisVT<2, OtherVT>]>;
def SDTX86Fst : SDTypeProfile<0, 3, [SDTCisFP<0>,
SDTCisPtrTy<1>,
SDTCisVT<2, OtherVT>]>;
def SDTX86Fild : SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisPtrTy<1>,
SDTCisVT<2, OtherVT>]>;
def SDTX86Fnstsw : SDTypeProfile<1, 1, [SDTCisVT<0, i16>, SDTCisVT<1, i16>]>;
def SDTX86FpToIMem : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisPtrTy<1>]>;
def SDTX86CwdStore : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def X86fld : SDNode<"X86ISD::FLD", SDTX86Fld,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fst : SDNode<"X86ISD::FST", SDTX86Fst,
[SDNPHasChain, SDNPInGlue, SDNPMayStore,
SDNPMemOperand]>;
def X86fild : SDNode<"X86ISD::FILD", SDTX86Fild,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fildflag : SDNode<"X86ISD::FILD_FLAG", SDTX86Fild,
[SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
SDNPMemOperand]>;
def X86fp_stsw : SDNode<"X86ISD::FNSTSW16r", SDTX86Fnstsw>;
def X86fp_to_i16mem : SDNode<"X86ISD::FP_TO_INT16_IN_MEM", SDTX86FpToIMem,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_to_i32mem : SDNode<"X86ISD::FP_TO_INT32_IN_MEM", SDTX86FpToIMem,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_to_i64mem : SDNode<"X86ISD::FP_TO_INT64_IN_MEM", SDTX86FpToIMem,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_cwd_get16 : SDNode<"X86ISD::FNSTCW16m", SDTX86CwdStore,
[SDNPHasChain, SDNPMayStore, SDNPSideEffect,
SDNPMemOperand]>;
//===----------------------------------------------------------------------===//
// FPStack pattern fragments
//===----------------------------------------------------------------------===//
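// Immediate leaf fragments matching the exact FP constants +0.0, -0.0, +1.0
// and -1.0; fpimm0 and fpimm1 feed the constant-load pseudos defined later in
// this file.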
def fpimm0 : FPImmLeaf<fAny, [{
return Imm.isExactlyValue(+0.0);
}]>;
def fpimmneg0 : FPImmLeaf<fAny, [{
return Imm.isExactlyValue(-0.0);
}]>;
def fpimm1 : FPImmLeaf<fAny, [{
return Imm.isExactlyValue(+1.0);
}]>;
def fpimmneg1 : FPImmLeaf<fAny, [{
return Imm.isExactlyValue(-1.0);
}]>;
// Some 'special' instructions - expanded after instruction selection.
let usesCustomInserter = 1, hasNoSchedulingInfo = 1 in {
def FP32_TO_INT16_IN_MEM : PseudoI<(outs), (ins i16mem:$dst, RFP32:$src),
[(X86fp_to_i16mem RFP32:$src, addr:$dst)]>;
def FP32_TO_INT32_IN_MEM : PseudoI<(outs), (ins i32mem:$dst, RFP32:$src),
[(X86fp_to_i32mem RFP32:$src, addr:$dst)]>;
def FP32_TO_INT64_IN_MEM : PseudoI<(outs), (ins i64mem:$dst, RFP32:$src),
[(X86fp_to_i64mem RFP32:$src, addr:$dst)]>;
def FP64_TO_INT16_IN_MEM : PseudoI<(outs), (ins i16mem:$dst, RFP64:$src),
[(X86fp_to_i16mem RFP64:$src, addr:$dst)]>;
def FP64_TO_INT32_IN_MEM : PseudoI<(outs), (ins i32mem:$dst, RFP64:$src),
[(X86fp_to_i32mem RFP64:$src, addr:$dst)]>;
def FP64_TO_INT64_IN_MEM : PseudoI<(outs), (ins i64mem:$dst, RFP64:$src),
[(X86fp_to_i64mem RFP64:$src, addr:$dst)]>;
def FP80_TO_INT16_IN_MEM : PseudoI<(outs), (ins i16mem:$dst, RFP80:$src),
[(X86fp_to_i16mem RFP80:$src, addr:$dst)]>;
def FP80_TO_INT32_IN_MEM : PseudoI<(outs), (ins i32mem:$dst, RFP80:$src),
[(X86fp_to_i32mem RFP80:$src, addr:$dst)]>;
def FP80_TO_INT64_IN_MEM : PseudoI<(outs), (ins i64mem:$dst, RFP80:$src),
[(X86fp_to_i64mem RFP80:$src, addr:$dst)]>;
}
// All FP Stack operations are represented with four instructions here. The
// first three instructions, generated by the instruction selector, use "RFP32",
// "RFP64" or "RFP80" registers: traditional register files to reference 32-bit,
// 64-bit or 80-bit floating point values. These sizes apply to the values,
// not the registers, which are always 80 bits; RFP32, RFP64 and RFP80 can be
// copied to each other without losing information. These instructions are all
// pseudo instructions and use the "_Fp" suffix.
// In some cases there are additional variants with a mixture of different
// register sizes.
// The fourth instruction is defined with FPI, which is the actual instruction
// emitted by the assembler. These use "RST" registers, although frequently
// the actual register(s) used are implicit. These are always 80 bits.
// The FP stackifier pass converts one to the other after register allocation
// occurs.
//
// Note that the FpI instruction should have instruction selection info (e.g.
// a pattern) and the FPI instruction should have emission info (e.g. opcode
// encoding and asm printing info).
// FpIf32, FpIf64 - Floating Point Pseudo Instruction template.
// f32 instructions can use SSE1 and are predicated on FPStackf32 == !SSE1.
// f64 instructions can use SSE2 and are predicated on FPStackf64 == !SSE2.
// f80 instructions cannot use SSE and use neither of these.
class FpIf32<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
FpI_<outs, ins, fp, pattern>, Requires<[FPStackf32]>;
class FpIf64<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
FpI_<outs, ins, fp, pattern>, Requires<[FPStackf64]>;
// Factoring for arithmetic.
multiclass FPBinary_rr<SDNode OpNode> {
// Register op register -> register
// These are separated out because they have no reversed form.
def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2), TwoArgFP,
[(set RFP32:$dst, (OpNode RFP32:$src1, RFP32:$src2))]>;
def _Fp64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2), TwoArgFP,
[(set RFP64:$dst, (OpNode RFP64:$src1, RFP64:$src2))]>;
def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2), TwoArgFP,
[(set RFP80:$dst, (OpNode RFP80:$src1, RFP80:$src2))]>;
}
// The FopST0 series are not included here because of the irregularities
// in where the 'r' goes in assembly output.
// These instructions cannot address 80-bit memory.
multiclass FPBinary<SDNode OpNode, Format fp, string asmstring,
bit Forward = 1> {
let mayLoad = 1, hasSideEffects = 1 in {
// ST(0) = ST(0) + [mem]
def _Fp32m : FpIf32<(outs RFP32:$dst),
(ins RFP32:$src1, f32mem:$src2), OneArgFPRW,
[!if(Forward,
(set RFP32:$dst,
(OpNode RFP32:$src1, (loadf32 addr:$src2))),
(set RFP32:$dst,
(OpNode (loadf32 addr:$src2), RFP32:$src1)))]>;
def _Fp64m : FpIf64<(outs RFP64:$dst),
(ins RFP64:$src1, f64mem:$src2), OneArgFPRW,
[!if(Forward,
(set RFP64:$dst,
(OpNode RFP64:$src1, (loadf64 addr:$src2))),
(set RFP64:$dst,
(OpNode (loadf64 addr:$src2), RFP64:$src1)))]>;
def _Fp64m32: FpIf64<(outs RFP64:$dst),
(ins RFP64:$src1, f32mem:$src2), OneArgFPRW,
[!if(Forward,
(set RFP64:$dst,
(OpNode RFP64:$src1, (f64 (extloadf32 addr:$src2)))),
(set RFP64:$dst,
(OpNode (f64 (extloadf32 addr:$src2)), RFP64:$src1)))]>;
def _Fp80m32: FpI_<(outs RFP80:$dst),
(ins RFP80:$src1, f32mem:$src2), OneArgFPRW,
[!if(Forward,
(set RFP80:$dst,
(OpNode RFP80:$src1, (f80 (extloadf32 addr:$src2)))),
(set RFP80:$dst,
(OpNode (f80 (extloadf32 addr:$src2)), RFP80:$src1)))]>;
def _Fp80m64: FpI_<(outs RFP80:$dst),
(ins RFP80:$src1, f64mem:$src2), OneArgFPRW,
[!if(Forward,
(set RFP80:$dst,
(OpNode RFP80:$src1, (f80 (extloadf64 addr:$src2)))),
(set RFP80:$dst,
(OpNode (f80 (extloadf64 addr:$src2)), RFP80:$src1)))]>;
def _F32m : FPI<0xD8, fp, (outs), (ins f32mem:$src),
!strconcat("f", asmstring, "{s}\t$src")>;
def _F64m : FPI<0xDC, fp, (outs), (ins f64mem:$src),
!strconcat("f", asmstring, "{l}\t$src")>;
// ST(0) = ST(0) + [memint]
def _FpI16m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i16mem:$src2),
OneArgFPRW,
[!if(Forward,
(set RFP32:$dst,
(OpNode RFP32:$src1, (X86fild addr:$src2, i16))),
(set RFP32:$dst,
(OpNode (X86fild addr:$src2, i16), RFP32:$src1)))]>;
def _FpI32m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i32mem:$src2),
OneArgFPRW,
[!if(Forward,
(set RFP32:$dst,
(OpNode RFP32:$src1, (X86fild addr:$src2, i32))),
(set RFP32:$dst,
(OpNode (X86fild addr:$src2, i32), RFP32:$src1)))]>;
def _FpI16m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i16mem:$src2),
OneArgFPRW,
[!if(Forward,
(set RFP64:$dst,
(OpNode RFP64:$src1, (X86fild addr:$src2, i16))),
(set RFP64:$dst,
(OpNode (X86fild addr:$src2, i16), RFP64:$src1)))]>;
def _FpI32m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i32mem:$src2),
OneArgFPRW,
[!if(Forward,
(set RFP64:$dst,
(OpNode RFP64:$src1, (X86fild addr:$src2, i32))),
(set RFP64:$dst,
(OpNode (X86fild addr:$src2, i32), RFP64:$src1)))]>;
def _FpI16m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i16mem:$src2),
OneArgFPRW,
[!if(Forward,
(set RFP80:$dst,
(OpNode RFP80:$src1, (X86fild addr:$src2, i16))),
(set RFP80:$dst,
(OpNode (X86fild addr:$src2, i16), RFP80:$src1)))]>;
def _FpI32m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i32mem:$src2),
OneArgFPRW,
[!if(Forward,
(set RFP80:$dst,
(OpNode RFP80:$src1, (X86fild addr:$src2, i32))),
(set RFP80:$dst,
(OpNode (X86fild addr:$src2, i32), RFP80:$src1)))]>;
def _FI16m : FPI<0xDE, fp, (outs), (ins i16mem:$src),
!strconcat("fi", asmstring, "{s}\t$src")>;
def _FI32m : FPI<0xDA, fp, (outs), (ins i32mem:$src),
!strconcat("fi", asmstring, "{l}\t$src")>;
} // mayLoad = 1, hasSideEffects = 1
}
-let Defs = [FPSW] in {
+let Defs = [FPSW], Uses = [FPCW] in {
// FPBinary_rr just defines pseudo-instructions, no need to set a scheduling
// resources.
let hasNoSchedulingInfo = 1 in {
defm ADD : FPBinary_rr<fadd>;
defm SUB : FPBinary_rr<fsub>;
defm MUL : FPBinary_rr<fmul>;
defm DIV : FPBinary_rr<fdiv>;
}
// Sets the scheduling resources for the actual NAME#_F<size>m definitions.
let SchedRW = [WriteFAddLd] in {
defm ADD : FPBinary<fadd, MRM0m, "add">;
defm SUB : FPBinary<fsub, MRM4m, "sub">;
defm SUBR: FPBinary<fsub ,MRM5m, "subr", 0>;
}
let SchedRW = [WriteFMulLd] in {
defm MUL : FPBinary<fmul, MRM1m, "mul">;
}
let SchedRW = [WriteFDivLd] in {
defm DIV : FPBinary<fdiv, MRM6m, "div">;
defm DIVR: FPBinary<fdiv, MRM7m, "divr", 0>;
}
} // Defs = [FPSW]
class FPST0rInst<Format fp, string asm>
- : FPI<0xD8, fp, (outs), (ins RST:$op), asm>;
+ : FPI<0xD8, fp, (outs), (ins RSTi:$op), asm>;
class FPrST0Inst<Format fp, string asm>
- : FPI<0xDC, fp, (outs), (ins RST:$op), asm>;
+ : FPI<0xDC, fp, (outs), (ins RSTi:$op), asm>;
class FPrST0PInst<Format fp, string asm>
- : FPI<0xDE, fp, (outs), (ins RST:$op), asm>;
+ : FPI<0xDE, fp, (outs), (ins RSTi:$op), asm>;
// NOTE: GAS and apparently all other AT&T style assemblers have a broken notion
// of some of the 'reverse' forms of the fsub and fdiv instructions. As such,
// we have to put some 'r's in and take them out of weird places.
-let SchedRW = [WriteFAdd] in {
-def ADD_FST0r : FPST0rInst <MRM0r, "fadd\t$op">;
-def ADD_FrST0 : FPrST0Inst <MRM0r, "fadd\t{%st(0), $op|$op, st(0)}">;
-def ADD_FPrST0 : FPrST0PInst<MRM0r, "faddp\t$op">;
-def SUBR_FST0r : FPST0rInst <MRM5r, "fsubr\t$op">;
-def SUB_FrST0 : FPrST0Inst <MRM5r, "fsub{r}\t{%st(0), $op|$op, st(0)}">;
-def SUB_FPrST0 : FPrST0PInst<MRM5r, "fsub{r}p\t$op">;
-def SUB_FST0r : FPST0rInst <MRM4r, "fsub\t$op">;
-def SUBR_FrST0 : FPrST0Inst <MRM4r, "fsub{|r}\t{%st(0), $op|$op, st(0)}">;
-def SUBR_FPrST0 : FPrST0PInst<MRM4r, "fsub{|r}p\t$op">;
+let SchedRW = [WriteFAdd], Defs = [FPSW], Uses = [FPCW] in {
+def ADD_FST0r : FPST0rInst <MRM0r, "fadd\t{$op, %st|st, $op}">;
+def ADD_FrST0 : FPrST0Inst <MRM0r, "fadd\t{%st, $op|$op, st}">;
+def ADD_FPrST0 : FPrST0PInst<MRM0r, "faddp\t{%st, $op|$op, st}">;
+def SUBR_FST0r : FPST0rInst <MRM5r, "fsubr\t{$op, %st|st, $op}">;
+def SUB_FrST0 : FPrST0Inst <MRM5r, "fsub{r}\t{%st, $op|$op, st}">;
+def SUB_FPrST0 : FPrST0PInst<MRM5r, "fsub{r}p\t{%st, $op|$op, st}">;
+def SUB_FST0r : FPST0rInst <MRM4r, "fsub\t{$op, %st|st, $op}">;
+def SUBR_FrST0 : FPrST0Inst <MRM4r, "fsub{|r}\t{%st, $op|$op, st}">;
+def SUBR_FPrST0 : FPrST0PInst<MRM4r, "fsub{|r}p\t{%st, $op|$op, st}">;
} // SchedRW
-let SchedRW = [WriteFCom] in {
+let SchedRW = [WriteFCom], Defs = [FPSW], Uses = [FPCW] in {
def COM_FST0r : FPST0rInst <MRM2r, "fcom\t$op">;
def COMP_FST0r : FPST0rInst <MRM3r, "fcomp\t$op">;
} // SchedRW
-let SchedRW = [WriteFMul] in {
-def MUL_FST0r : FPST0rInst <MRM1r, "fmul\t$op">;
-def MUL_FrST0 : FPrST0Inst <MRM1r, "fmul\t{%st(0), $op|$op, st(0)}">;
-def MUL_FPrST0 : FPrST0PInst<MRM1r, "fmulp\t$op">;
+let SchedRW = [WriteFMul], Defs = [FPSW], Uses = [FPCW] in {
+def MUL_FST0r : FPST0rInst <MRM1r, "fmul\t{$op, %st|st, $op}">;
+def MUL_FrST0 : FPrST0Inst <MRM1r, "fmul\t{%st, $op|$op, st}">;
+def MUL_FPrST0 : FPrST0PInst<MRM1r, "fmulp\t{%st, $op|$op, st}">;
} // SchedRW
-let SchedRW = [WriteFDiv] in {
-def DIVR_FST0r : FPST0rInst <MRM7r, "fdivr\t$op">;
-def DIV_FrST0 : FPrST0Inst <MRM7r, "fdiv{r}\t{%st(0), $op|$op, st(0)}">;
-def DIV_FPrST0 : FPrST0PInst<MRM7r, "fdiv{r}p\t$op">;
-def DIV_FST0r : FPST0rInst <MRM6r, "fdiv\t$op">;
-def DIVR_FrST0 : FPrST0Inst <MRM6r, "fdiv{|r}\t{%st(0), $op|$op, st(0)}">;
-def DIVR_FPrST0 : FPrST0PInst<MRM6r, "fdiv{|r}p\t$op">;
+let SchedRW = [WriteFDiv], Defs = [FPSW], Uses = [FPCW] in {
+def DIVR_FST0r : FPST0rInst <MRM7r, "fdivr\t{$op, %st|st, $op}">;
+def DIV_FrST0 : FPrST0Inst <MRM7r, "fdiv{r}\t{%st, $op|$op, st}">;
+def DIV_FPrST0 : FPrST0PInst<MRM7r, "fdiv{r}p\t{%st, $op|$op, st}">;
+def DIV_FST0r : FPST0rInst <MRM6r, "fdiv\t{$op, %st|st, $op}">;
+def DIVR_FrST0 : FPrST0Inst <MRM6r, "fdiv{|r}\t{%st, $op|$op, st}">;
+def DIVR_FPrST0 : FPrST0PInst<MRM6r, "fdiv{|r}p\t{%st, $op|$op, st}">;
} // SchedRW
// Unary operations.
multiclass FPUnary<SDNode OpNode, Format fp, string asmstring> {
def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src), OneArgFPRW,
[(set RFP32:$dst, (OpNode RFP32:$src))]>;
def _Fp64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src), OneArgFPRW,
[(set RFP64:$dst, (OpNode RFP64:$src))]>;
def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src), OneArgFPRW,
[(set RFP80:$dst, (OpNode RFP80:$src))]>;
def _F : FPI<0xD9, fp, (outs), (ins), asmstring>;
}
-let Defs = [FPSW] in {
+let Defs = [FPSW], Uses = [FPCW] in {
let SchedRW = [WriteFSign] in {
defm CHS : FPUnary<fneg, MRM_E0, "fchs">;
defm ABS : FPUnary<fabs, MRM_E1, "fabs">;
}
let SchedRW = [WriteFSqrt80] in
defm SQRT: FPUnary<fsqrt,MRM_FA, "fsqrt">;
let SchedRW = [WriteMicrocoded] in {
defm SIN : FPUnary<fsin, MRM_FE, "fsin">;
defm COS : FPUnary<fcos, MRM_FF, "fcos">;
}
let SchedRW = [WriteFCom] in {
let hasSideEffects = 0 in {
def TST_Fp32 : FpIf32<(outs), (ins RFP32:$src), OneArgFP, []>;
def TST_Fp64 : FpIf64<(outs), (ins RFP64:$src), OneArgFP, []>;
def TST_Fp80 : FpI_<(outs), (ins RFP80:$src), OneArgFP, []>;
} // hasSideEffects
def TST_F : FPI<0xD9, MRM_E4, (outs), (ins), "ftst">;
} // SchedRW
} // Defs = [FPSW]
// Versions of FP instructions that take a single memory operand. Added for the
// disassembler; remove as they are included with patterns elsewhere.
-let SchedRW = [WriteFComLd] in {
+let SchedRW = [WriteFComLd], Defs = [FPSW], Uses = [FPCW] in {
def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom{s}\t$src">;
def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp{s}\t$src">;
def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom{l}\t$src">;
def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp{l}\t$src">;
def FICOM16m : FPI<0xDE, MRM2m, (outs), (ins i16mem:$src), "ficom{s}\t$src">;
def FICOMP16m: FPI<0xDE, MRM3m, (outs), (ins i16mem:$src), "ficomp{s}\t$src">;
def FICOM32m : FPI<0xDA, MRM2m, (outs), (ins i32mem:$src), "ficom{l}\t$src">;
def FICOMP32m: FPI<0xDA, MRM3m, (outs), (ins i32mem:$src), "ficomp{l}\t$src">;
} // SchedRW
let SchedRW = [WriteMicrocoded] in {
def FLDENVm : FPI<0xD9, MRM4m, (outs), (ins f32mem:$src), "fldenv\t$src">;
def FSTENVm : FPI<0xD9, MRM6m, (outs), (ins f32mem:$dst), "fnstenv\t$dst">;
def FRSTORm : FPI<0xDD, MRM4m, (outs), (ins f32mem:$dst), "frstor\t$dst">;
def FSAVEm : FPI<0xDD, MRM6m, (outs), (ins f32mem:$dst), "fnsave\t$dst">;
def FNSTSWm : FPI<0xDD, MRM7m, (outs), (ins i16mem:$dst), "fnstsw\t$dst">;
def FBLDm : FPI<0xDF, MRM4m, (outs), (ins f80mem:$src), "fbld\t$src">;
def FBSTPm : FPI<0xDF, MRM6m, (outs), (ins f80mem:$dst), "fbstp\t$dst">;
} // SchedRW
// Floating point cmovs.
class FpIf32CMov<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
FpI_<outs, ins, fp, pattern>, Requires<[FPStackf32, HasCMov]>;
class FpIf64CMov<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
FpI_<outs, ins, fp, pattern>, Requires<[FPStackf64, HasCMov]>;
multiclass FPCMov<PatLeaf cc> {
def _Fp32 : FpIf32CMov<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2),
CondMovFP,
[(set RFP32:$dst, (X86cmov RFP32:$src1, RFP32:$src2,
cc, EFLAGS))]>;
def _Fp64 : FpIf64CMov<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2),
CondMovFP,
[(set RFP64:$dst, (X86cmov RFP64:$src1, RFP64:$src2,
cc, EFLAGS))]>;
def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2),
CondMovFP,
[(set RFP80:$dst, (X86cmov RFP80:$src1, RFP80:$src2,
cc, EFLAGS))]>,
Requires<[HasCMov]>;
}
let Defs = [FPSW] in {
let SchedRW = [WriteFCMOV] in {
let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
defm CMOVB : FPCMov<X86_COND_B>;
defm CMOVBE : FPCMov<X86_COND_BE>;
defm CMOVE : FPCMov<X86_COND_E>;
defm CMOVP : FPCMov<X86_COND_P>;
defm CMOVNB : FPCMov<X86_COND_AE>;
defm CMOVNBE: FPCMov<X86_COND_A>;
defm CMOVNE : FPCMov<X86_COND_NE>;
defm CMOVNP : FPCMov<X86_COND_NP>;
} // Uses = [EFLAGS], Constraints = "$src1 = $dst"
let Predicates = [HasCMov] in {
// These are not factored because there's no clean way to pass DA/DB.
-def CMOVB_F : FPI<0xDA, MRM0r, (outs), (ins RST:$op),
- "fcmovb\t{$op, %st(0)|st(0), $op}">;
-def CMOVBE_F : FPI<0xDA, MRM2r, (outs), (ins RST:$op),
- "fcmovbe\t{$op, %st(0)|st(0), $op}">;
-def CMOVE_F : FPI<0xDA, MRM1r, (outs), (ins RST:$op),
- "fcmove\t{$op, %st(0)|st(0), $op}">;
-def CMOVP_F : FPI<0xDA, MRM3r, (outs), (ins RST:$op),
- "fcmovu\t{$op, %st(0)|st(0), $op}">;
-def CMOVNB_F : FPI<0xDB, MRM0r, (outs), (ins RST:$op),
- "fcmovnb\t{$op, %st(0)|st(0), $op}">;
-def CMOVNBE_F: FPI<0xDB, MRM2r, (outs), (ins RST:$op),
- "fcmovnbe\t{$op, %st(0)|st(0), $op}">;
-def CMOVNE_F : FPI<0xDB, MRM1r, (outs), (ins RST:$op),
- "fcmovne\t{$op, %st(0)|st(0), $op}">;
-def CMOVNP_F : FPI<0xDB, MRM3r, (outs), (ins RST:$op),
- "fcmovnu\t{$op, %st(0)|st(0), $op}">;
+def CMOVB_F : FPI<0xDA, MRM0r, (outs), (ins RSTi:$op),
+ "fcmovb\t{$op, %st|st, $op}">;
+def CMOVBE_F : FPI<0xDA, MRM2r, (outs), (ins RSTi:$op),
+ "fcmovbe\t{$op, %st|st, $op}">;
+def CMOVE_F : FPI<0xDA, MRM1r, (outs), (ins RSTi:$op),
+ "fcmove\t{$op, %st|st, $op}">;
+def CMOVP_F : FPI<0xDA, MRM3r, (outs), (ins RSTi:$op),
+ "fcmovu\t{$op, %st|st, $op}">;
+def CMOVNB_F : FPI<0xDB, MRM0r, (outs), (ins RSTi:$op),
+ "fcmovnb\t{$op, %st|st, $op}">;
+def CMOVNBE_F: FPI<0xDB, MRM2r, (outs), (ins RSTi:$op),
+ "fcmovnbe\t{$op, %st|st, $op}">;
+def CMOVNE_F : FPI<0xDB, MRM1r, (outs), (ins RSTi:$op),
+ "fcmovne\t{$op, %st|st, $op}">;
+def CMOVNP_F : FPI<0xDB, MRM3r, (outs), (ins RSTi:$op),
+ "fcmovnu\t{$op, %st|st, $op}">;
} // Predicates = [HasCMov]
} // SchedRW
// Floating point loads & stores.
let SchedRW = [WriteLoad] in {
let canFoldAsLoad = 1 in {
def LD_Fp32m : FpIf32<(outs RFP32:$dst), (ins f32mem:$src), ZeroArgFP,
[(set RFP32:$dst, (loadf32 addr:$src))]>;
let isReMaterializable = 1 in
def LD_Fp64m : FpIf64<(outs RFP64:$dst), (ins f64mem:$src), ZeroArgFP,
[(set RFP64:$dst, (loadf64 addr:$src))]>;
def LD_Fp80m : FpI_<(outs RFP80:$dst), (ins f80mem:$src), ZeroArgFP,
[(set RFP80:$dst, (loadf80 addr:$src))]>;
} // canFoldAsLoad
def LD_Fp32m64 : FpIf64<(outs RFP64:$dst), (ins f32mem:$src), ZeroArgFP,
[(set RFP64:$dst, (f64 (extloadf32 addr:$src)))]>;
def LD_Fp64m80 : FpI_<(outs RFP80:$dst), (ins f64mem:$src), ZeroArgFP,
[(set RFP80:$dst, (f80 (extloadf64 addr:$src)))]>;
def LD_Fp32m80 : FpI_<(outs RFP80:$dst), (ins f32mem:$src), ZeroArgFP,
[(set RFP80:$dst, (f80 (extloadf32 addr:$src)))]>;
def ILD_Fp16m32: FpIf32<(outs RFP32:$dst), (ins i16mem:$src), ZeroArgFP,
[(set RFP32:$dst, (X86fild addr:$src, i16))]>;
def ILD_Fp32m32: FpIf32<(outs RFP32:$dst), (ins i32mem:$src), ZeroArgFP,
[(set RFP32:$dst, (X86fild addr:$src, i32))]>;
def ILD_Fp64m32: FpIf32<(outs RFP32:$dst), (ins i64mem:$src), ZeroArgFP,
[(set RFP32:$dst, (X86fild addr:$src, i64))]>;
def ILD_Fp16m64: FpIf64<(outs RFP64:$dst), (ins i16mem:$src), ZeroArgFP,
[(set RFP64:$dst, (X86fild addr:$src, i16))]>;
def ILD_Fp32m64: FpIf64<(outs RFP64:$dst), (ins i32mem:$src), ZeroArgFP,
[(set RFP64:$dst, (X86fild addr:$src, i32))]>;
def ILD_Fp64m64: FpIf64<(outs RFP64:$dst), (ins i64mem:$src), ZeroArgFP,
[(set RFP64:$dst, (X86fild addr:$src, i64))]>;
def ILD_Fp16m80: FpI_<(outs RFP80:$dst), (ins i16mem:$src), ZeroArgFP,
[(set RFP80:$dst, (X86fild addr:$src, i16))]>;
def ILD_Fp32m80: FpI_<(outs RFP80:$dst), (ins i32mem:$src), ZeroArgFP,
[(set RFP80:$dst, (X86fild addr:$src, i32))]>;
def ILD_Fp64m80: FpI_<(outs RFP80:$dst), (ins i64mem:$src), ZeroArgFP,
[(set RFP80:$dst, (X86fild addr:$src, i64))]>;
} // SchedRW
-let SchedRW = [WriteStore] in {
+let SchedRW = [WriteStore], Uses = [FPCW] in {
def ST_Fp32m : FpIf32<(outs), (ins f32mem:$op, RFP32:$src), OneArgFP,
[(store RFP32:$src, addr:$op)]>;
def ST_Fp64m32 : FpIf64<(outs), (ins f32mem:$op, RFP64:$src), OneArgFP,
[(truncstoref32 RFP64:$src, addr:$op)]>;
def ST_Fp64m : FpIf64<(outs), (ins f64mem:$op, RFP64:$src), OneArgFP,
[(store RFP64:$src, addr:$op)]>;
def ST_Fp80m32 : FpI_<(outs), (ins f32mem:$op, RFP80:$src), OneArgFP,
[(truncstoref32 RFP80:$src, addr:$op)]>;
def ST_Fp80m64 : FpI_<(outs), (ins f64mem:$op, RFP80:$src), OneArgFP,
[(truncstoref64 RFP80:$src, addr:$op)]>;
// FST does not support 80-bit memory target; FSTP must be used.
let mayStore = 1, hasSideEffects = 0 in {
def ST_FpP32m : FpIf32<(outs), (ins f32mem:$op, RFP32:$src), OneArgFP, []>;
def ST_FpP64m32 : FpIf64<(outs), (ins f32mem:$op, RFP64:$src), OneArgFP, []>;
def ST_FpP64m : FpIf64<(outs), (ins f64mem:$op, RFP64:$src), OneArgFP, []>;
def ST_FpP80m32 : FpI_<(outs), (ins f32mem:$op, RFP80:$src), OneArgFP, []>;
def ST_FpP80m64 : FpI_<(outs), (ins f64mem:$op, RFP80:$src), OneArgFP, []>;
} // mayStore
def ST_FpP80m : FpI_<(outs), (ins f80mem:$op, RFP80:$src), OneArgFP,
[(store RFP80:$src, addr:$op)]>;
let mayStore = 1, hasSideEffects = 0 in {
def IST_Fp16m32 : FpIf32<(outs), (ins i16mem:$op, RFP32:$src), OneArgFP, []>;
def IST_Fp32m32 : FpIf32<(outs), (ins i32mem:$op, RFP32:$src), OneArgFP, []>;
def IST_Fp64m32 : FpIf32<(outs), (ins i64mem:$op, RFP32:$src), OneArgFP, []>;
def IST_Fp16m64 : FpIf64<(outs), (ins i16mem:$op, RFP64:$src), OneArgFP, []>;
def IST_Fp32m64 : FpIf64<(outs), (ins i32mem:$op, RFP64:$src), OneArgFP, []>;
def IST_Fp64m64 : FpIf64<(outs), (ins i64mem:$op, RFP64:$src), OneArgFP, []>;
def IST_Fp16m80 : FpI_<(outs), (ins i16mem:$op, RFP80:$src), OneArgFP, []>;
def IST_Fp32m80 : FpI_<(outs), (ins i32mem:$op, RFP80:$src), OneArgFP, []>;
def IST_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP, []>;
} // mayStore
-} // SchedRW
+} // SchedRW, Uses = [FPCW]
let mayLoad = 1, SchedRW = [WriteLoad] in {
def LD_F32m : FPI<0xD9, MRM0m, (outs), (ins f32mem:$src), "fld{s}\t$src">;
def LD_F64m : FPI<0xDD, MRM0m, (outs), (ins f64mem:$src), "fld{l}\t$src">;
def LD_F80m : FPI<0xDB, MRM5m, (outs), (ins f80mem:$src), "fld{t}\t$src">;
def ILD_F16m : FPI<0xDF, MRM0m, (outs), (ins i16mem:$src), "fild{s}\t$src">;
def ILD_F32m : FPI<0xDB, MRM0m, (outs), (ins i32mem:$src), "fild{l}\t$src">;
def ILD_F64m : FPI<0xDF, MRM5m, (outs), (ins i64mem:$src), "fild{ll}\t$src">;
}
-let mayStore = 1, SchedRW = [WriteStore] in {
+let mayStore = 1, SchedRW = [WriteStore], Uses = [FPCW] in {
def ST_F32m : FPI<0xD9, MRM2m, (outs), (ins f32mem:$dst), "fst{s}\t$dst">;
def ST_F64m : FPI<0xDD, MRM2m, (outs), (ins f64mem:$dst), "fst{l}\t$dst">;
def ST_FP32m : FPI<0xD9, MRM3m, (outs), (ins f32mem:$dst), "fstp{s}\t$dst">;
def ST_FP64m : FPI<0xDD, MRM3m, (outs), (ins f64mem:$dst), "fstp{l}\t$dst">;
def ST_FP80m : FPI<0xDB, MRM7m, (outs), (ins f80mem:$dst), "fstp{t}\t$dst">;
def IST_F16m : FPI<0xDF, MRM2m, (outs), (ins i16mem:$dst), "fist{s}\t$dst">;
def IST_F32m : FPI<0xDB, MRM2m, (outs), (ins i32mem:$dst), "fist{l}\t$dst">;
def IST_FP16m : FPI<0xDF, MRM3m, (outs), (ins i16mem:$dst), "fistp{s}\t$dst">;
def IST_FP32m : FPI<0xDB, MRM3m, (outs), (ins i32mem:$dst), "fistp{l}\t$dst">;
def IST_FP64m : FPI<0xDF, MRM7m, (outs), (ins i64mem:$dst), "fistp{ll}\t$dst">;
}
// FISTTP requires SSE3 even though it's a FPStack op.
-let Predicates = [HasSSE3], SchedRW = [WriteStore] in {
+let Predicates = [HasSSE3], SchedRW = [WriteStore], Uses = [FPCW] in {
def ISTT_Fp16m32 : FpI_<(outs), (ins i16mem:$op, RFP32:$src), OneArgFP,
[(X86fp_to_i16mem RFP32:$src, addr:$op)]>;
def ISTT_Fp32m32 : FpI_<(outs), (ins i32mem:$op, RFP32:$src), OneArgFP,
[(X86fp_to_i32mem RFP32:$src, addr:$op)]>;
def ISTT_Fp64m32 : FpI_<(outs), (ins i64mem:$op, RFP32:$src), OneArgFP,
[(X86fp_to_i64mem RFP32:$src, addr:$op)]>;
def ISTT_Fp16m64 : FpI_<(outs), (ins i16mem:$op, RFP64:$src), OneArgFP,
[(X86fp_to_i16mem RFP64:$src, addr:$op)]>;
def ISTT_Fp32m64 : FpI_<(outs), (ins i32mem:$op, RFP64:$src), OneArgFP,
[(X86fp_to_i32mem RFP64:$src, addr:$op)]>;
def ISTT_Fp64m64 : FpI_<(outs), (ins i64mem:$op, RFP64:$src), OneArgFP,
[(X86fp_to_i64mem RFP64:$src, addr:$op)]>;
def ISTT_Fp16m80 : FpI_<(outs), (ins i16mem:$op, RFP80:$src), OneArgFP,
[(X86fp_to_i16mem RFP80:$src, addr:$op)]>;
def ISTT_Fp32m80 : FpI_<(outs), (ins i32mem:$op, RFP80:$src), OneArgFP,
[(X86fp_to_i32mem RFP80:$src, addr:$op)]>;
def ISTT_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP,
[(X86fp_to_i64mem RFP80:$src, addr:$op)]>;
} // Predicates = [HasSSE3]
-let mayStore = 1, SchedRW = [WriteStore] in {
+let mayStore = 1, SchedRW = [WriteStore], Uses = [FPCW] in {
def ISTT_FP16m : FPI<0xDF, MRM1m, (outs), (ins i16mem:$dst), "fisttp{s}\t$dst">;
def ISTT_FP32m : FPI<0xDB, MRM1m, (outs), (ins i32mem:$dst), "fisttp{l}\t$dst">;
def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst), "fisttp{ll}\t$dst">;
}
// FP Stack manipulation instructions.
let SchedRW = [WriteMove] in {
-def LD_Frr : FPI<0xD9, MRM0r, (outs), (ins RST:$op), "fld\t$op">;
-def ST_Frr : FPI<0xDD, MRM2r, (outs), (ins RST:$op), "fst\t$op">;
-def ST_FPrr : FPI<0xDD, MRM3r, (outs), (ins RST:$op), "fstp\t$op">;
-def XCH_F : FPI<0xD9, MRM1r, (outs), (ins RST:$op), "fxch\t$op">;
+def LD_Frr : FPI<0xD9, MRM0r, (outs), (ins RSTi:$op), "fld\t$op">;
+def ST_Frr : FPI<0xDD, MRM2r, (outs), (ins RSTi:$op), "fst\t$op">;
+def ST_FPrr : FPI<0xDD, MRM3r, (outs), (ins RSTi:$op), "fstp\t$op">;
+def XCH_F : FPI<0xD9, MRM1r, (outs), (ins RSTi:$op), "fxch\t$op">;
}
// Floating point constant loads.
let isReMaterializable = 1, SchedRW = [WriteZero] in {
def LD_Fp032 : FpIf32<(outs RFP32:$dst), (ins), ZeroArgFP,
[(set RFP32:$dst, fpimm0)]>;
def LD_Fp132 : FpIf32<(outs RFP32:$dst), (ins), ZeroArgFP,
[(set RFP32:$dst, fpimm1)]>;
def LD_Fp064 : FpIf64<(outs RFP64:$dst), (ins), ZeroArgFP,
[(set RFP64:$dst, fpimm0)]>;
def LD_Fp164 : FpIf64<(outs RFP64:$dst), (ins), ZeroArgFP,
[(set RFP64:$dst, fpimm1)]>;
def LD_Fp080 : FpI_<(outs RFP80:$dst), (ins), ZeroArgFP,
[(set RFP80:$dst, fpimm0)]>;
def LD_Fp180 : FpI_<(outs RFP80:$dst), (ins), ZeroArgFP,
[(set RFP80:$dst, fpimm1)]>;
}
let SchedRW = [WriteFLD0] in
def LD_F0 : FPI<0xD9, MRM_EE, (outs), (ins), "fldz">;
let SchedRW = [WriteFLD1] in
def LD_F1 : FPI<0xD9, MRM_E8, (outs), (ins), "fld1">;
-let SchedRW = [WriteFLDC], Defs = [FPSW] in {
+let SchedRW = [WriteFLDC] in {
def FLDL2T : I<0xD9, MRM_E9, (outs), (ins), "fldl2t", []>;
def FLDL2E : I<0xD9, MRM_EA, (outs), (ins), "fldl2e", []>;
def FLDPI : I<0xD9, MRM_EB, (outs), (ins), "fldpi", []>;
def FLDLG2 : I<0xD9, MRM_EC, (outs), (ins), "fldlg2", []>;
def FLDLN2 : I<0xD9, MRM_ED, (outs), (ins), "fldln2", []>;
} // SchedRW
// Floating point compares.
-let SchedRW = [WriteFCom] in {
+let SchedRW = [WriteFCom], Uses = [FPCW] in {
def UCOM_Fpr32 : FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP,
[(set FPSW, (trunc (X86cmp RFP32:$lhs, RFP32:$rhs)))]>;
def UCOM_Fpr64 : FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP,
[(set FPSW, (trunc (X86cmp RFP64:$lhs, RFP64:$rhs)))]>;
def UCOM_Fpr80 : FpI_ <(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP,
[(set FPSW, (trunc (X86cmp RFP80:$lhs, RFP80:$rhs)))]>;
} // SchedRW
} // Defs = [FPSW]
let SchedRW = [WriteFCom] in {
// CC = ST(0) cmp ST(i)
-let Defs = [EFLAGS, FPSW] in {
-let Predicates = [FPStackf32, HasCMov] in
-def UCOM_FpIr32: FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP,
- [(set EFLAGS, (X86cmp RFP32:$lhs, RFP32:$rhs))]>;
-let Predicates = [FPStackf64, HasCMov] in
-def UCOM_FpIr64: FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP,
- [(set EFLAGS, (X86cmp RFP64:$lhs, RFP64:$rhs))]>;
-let Predicates = [HasCMov] in
+let Defs = [EFLAGS, FPSW], Uses = [FPCW] in {
+def UCOM_FpIr32: FpI_<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP,
+ [(set EFLAGS, (X86cmp RFP32:$lhs, RFP32:$rhs))]>,
+ Requires<[FPStackf32, HasCMov]>;
+def UCOM_FpIr64: FpI_<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP,
+ [(set EFLAGS, (X86cmp RFP64:$lhs, RFP64:$rhs))]>,
+ Requires<[FPStackf64, HasCMov]>;
def UCOM_FpIr80: FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP,
- [(set EFLAGS, (X86cmp RFP80:$lhs, RFP80:$rhs))]>;
+ [(set EFLAGS, (X86cmp RFP80:$lhs, RFP80:$rhs))]>,
+ Requires<[HasCMov]>;
}
-let Defs = [FPSW], Uses = [ST0] in {
+let Defs = [FPSW], Uses = [ST0, FPCW] in {
def UCOM_Fr : FPI<0xDD, MRM4r, // FPSW = cmp ST(0) with ST(i)
- (outs), (ins RST:$reg), "fucom\t$reg">;
+ (outs), (ins RSTi:$reg), "fucom\t$reg">;
def UCOM_FPr : FPI<0xDD, MRM5r, // FPSW = cmp ST(0) with ST(i), pop
- (outs), (ins RST:$reg), "fucomp\t$reg">;
+ (outs), (ins RSTi:$reg), "fucomp\t$reg">;
def UCOM_FPPr : FPI<0xDA, MRM_E9, // cmp ST(0) with ST(1), pop, pop
(outs), (ins), "fucompp">;
}
-let Defs = [EFLAGS, FPSW], Uses = [ST0] in {
+let Defs = [EFLAGS, FPSW], Uses = [ST0, FPCW] in {
def UCOM_FIr : FPI<0xDB, MRM5r, // CC = cmp ST(0) with ST(i)
- (outs), (ins RST:$reg), "fucomi\t$reg">;
+ (outs), (ins RSTi:$reg), "fucomi\t{$reg, %st|st, $reg}">;
def UCOM_FIPr : FPI<0xDF, MRM5r, // CC = cmp ST(0) with ST(i), pop
- (outs), (ins RST:$reg), "fucompi\t$reg">;
-}
+ (outs), (ins RSTi:$reg), "fucompi\t{$reg, %st|st, $reg}">;
-let Defs = [EFLAGS, FPSW] in {
-def COM_FIr : FPI<0xDB, MRM6r, (outs), (ins RST:$reg), "fcomi\t$reg">;
-def COM_FIPr : FPI<0xDF, MRM6r, (outs), (ins RST:$reg), "fcompi\t$reg">;
+def COM_FIr : FPI<0xDB, MRM6r, (outs), (ins RSTi:$reg),
+ "fcomi\t{$reg, %st|st, $reg}">;
+def COM_FIPr : FPI<0xDF, MRM6r, (outs), (ins RSTi:$reg),
+ "fcompi\t{$reg, %st|st, $reg}">;
}
} // SchedRW
// Floating point flag ops.
let SchedRW = [WriteALU] in {
let Defs = [AX], Uses = [FPSW] in
def FNSTSW16r : I<0xDF, MRM_E0, // AX = fp flags
(outs), (ins), "fnstsw\t{%ax|ax}",
[(set AX, (X86fp_stsw FPSW))]>;
-let Defs = [FPSW] in
+let Defs = [FPSW], Uses = [FPCW] in
def FNSTCW16m : I<0xD9, MRM7m, // [mem16] = X87 control word
(outs), (ins i16mem:$dst), "fnstcw\t$dst",
[(X86fp_cwd_get16 addr:$dst)]>;
} // SchedRW
-let Defs = [FPSW], mayLoad = 1 in
+let Defs = [FPSW,FPCW], mayLoad = 1 in
def FLDCW16m : I<0xD9, MRM5m, // X87 control word = [mem16]
(outs), (ins i16mem:$dst), "fldcw\t$dst", []>,
Sched<[WriteLoad]>;
// FPU control instructions
let SchedRW = [WriteMicrocoded] in {
let Defs = [FPSW] in {
def FNINIT : I<0xDB, MRM_E3, (outs), (ins), "fninit", []>;
-def FFREE : FPI<0xDD, MRM0r, (outs), (ins RST:$reg), "ffree\t$reg">;
-def FFREEP : FPI<0xDF, MRM0r, (outs), (ins RST:$reg), "ffreep\t$reg">;
+def FFREE : FPI<0xDD, MRM0r, (outs), (ins RSTi:$reg), "ffree\t$reg">;
+def FFREEP : FPI<0xDF, MRM0r, (outs), (ins RSTi:$reg), "ffreep\t$reg">;
// Clear exceptions
def FNCLEX : I<0xDB, MRM_E2, (outs), (ins), "fnclex", []>;
} // Defs = [FPSW]
} // SchedRW
// Operand-less floating-point instructions for the disassembler.
def FNOP : I<0xD9, MRM_D0, (outs), (ins), "fnop", []>, Sched<[WriteNop]>;
let SchedRW = [WriteMicrocoded] in {
let Defs = [FPSW] in {
def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", []>;
def FXAM : I<0xD9, MRM_E5, (outs), (ins), "fxam", []>;
def F2XM1 : I<0xD9, MRM_F0, (outs), (ins), "f2xm1", []>;
def FYL2X : I<0xD9, MRM_F1, (outs), (ins), "fyl2x", []>;
def FPTAN : I<0xD9, MRM_F2, (outs), (ins), "fptan", []>;
def FPATAN : I<0xD9, MRM_F3, (outs), (ins), "fpatan", []>;
def FXTRACT : I<0xD9, MRM_F4, (outs), (ins), "fxtract", []>;
def FPREM1 : I<0xD9, MRM_F5, (outs), (ins), "fprem1", []>;
def FDECSTP : I<0xD9, MRM_F6, (outs), (ins), "fdecstp", []>;
def FINCSTP : I<0xD9, MRM_F7, (outs), (ins), "fincstp", []>;
def FPREM : I<0xD9, MRM_F8, (outs), (ins), "fprem", []>;
def FYL2XP1 : I<0xD9, MRM_F9, (outs), (ins), "fyl2xp1", []>;
def FSINCOS : I<0xD9, MRM_FB, (outs), (ins), "fsincos", []>;
def FRNDINT : I<0xD9, MRM_FC, (outs), (ins), "frndint", []>;
def FSCALE : I<0xD9, MRM_FD, (outs), (ins), "fscale", []>;
def FCOMPP : I<0xDE, MRM_D9, (outs), (ins), "fcompp", []>;
} // Defs = [FPSW]
def FXSAVE : I<0xAE, MRM0m, (outs), (ins opaquemem:$dst),
"fxsave\t$dst", [(int_x86_fxsave addr:$dst)]>, TB,
Requires<[HasFXSR]>;
def FXSAVE64 : RI<0xAE, MRM0m, (outs), (ins opaquemem:$dst),
"fxsave64\t$dst", [(int_x86_fxsave64 addr:$dst)]>,
TB, Requires<[HasFXSR, In64BitMode]>;
def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaquemem:$src),
"fxrstor\t$src", [(int_x86_fxrstor addr:$src)]>,
TB, Requires<[HasFXSR]>;
def FXRSTOR64 : RI<0xAE, MRM1m, (outs), (ins opaquemem:$src),
"fxrstor64\t$src", [(int_x86_fxrstor64 addr:$src)]>,
TB, Requires<[HasFXSR, In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
// Required for RET of f32 / f64 / f80 values.
def : Pat<(X86fld addr:$src, f32), (LD_Fp32m addr:$src)>;
def : Pat<(X86fld addr:$src, f64), (LD_Fp64m addr:$src)>;
def : Pat<(X86fld addr:$src, f80), (LD_Fp80m addr:$src)>;
// Required for CALL which return f32 / f64 / f80 values.
def : Pat<(X86fst RFP32:$src, addr:$op, f32), (ST_Fp32m addr:$op, RFP32:$src)>;
def : Pat<(X86fst RFP64:$src, addr:$op, f32), (ST_Fp64m32 addr:$op,
RFP64:$src)>;
def : Pat<(X86fst RFP64:$src, addr:$op, f64), (ST_Fp64m addr:$op, RFP64:$src)>;
def : Pat<(X86fst RFP80:$src, addr:$op, f32), (ST_Fp80m32 addr:$op,
RFP80:$src)>;
def : Pat<(X86fst RFP80:$src, addr:$op, f64), (ST_Fp80m64 addr:$op,
RFP80:$src)>;
def : Pat<(X86fst RFP80:$src, addr:$op, f80), (ST_FpP80m addr:$op,
RFP80:$src)>;
// Floating point constant -0.0 and -1.0
def : Pat<(f32 fpimmneg0), (CHS_Fp32 (LD_Fp032))>, Requires<[FPStackf32]>;
def : Pat<(f32 fpimmneg1), (CHS_Fp32 (LD_Fp132))>, Requires<[FPStackf32]>;
def : Pat<(f64 fpimmneg0), (CHS_Fp64 (LD_Fp064))>, Requires<[FPStackf64]>;
def : Pat<(f64 fpimmneg1), (CHS_Fp64 (LD_Fp164))>, Requires<[FPStackf64]>;
def : Pat<(f80 fpimmneg0), (CHS_Fp80 (LD_Fp080))>;
def : Pat<(f80 fpimmneg1), (CHS_Fp80 (LD_Fp180))>;
// Used to convert i64 to f64 since there isn't an SSE version.
def : Pat<(X86fildflag addr:$src, i64), (ILD_Fp64m64 addr:$src)>;
// FP extensions map onto simple pseudo-value conversions if they are to/from
// the FP stack.
def : Pat<(f64 (fpextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP64)>,
Requires<[FPStackf32]>;
def : Pat<(f80 (fpextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP80)>,
Requires<[FPStackf32]>;
def : Pat<(f80 (fpextend RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP80)>,
Requires<[FPStackf64]>;
// FP truncations map onto simple pseudo-value conversions if they are to/from
// the FP stack. We have validated that only value-preserving truncations make
// it through isel.
def : Pat<(f32 (fpround RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP32)>,
Requires<[FPStackf32]>;
def : Pat<(f32 (fpround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP32)>,
Requires<[FPStackf32]>;
def : Pat<(f64 (fpround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP64)>,
Requires<[FPStackf64]>;
Index: vendor/llvm/dist-release_80/lib/Target/X86/X86InstrInfo.td
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/X86InstrInfo.td (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/X86InstrInfo.td (revision 344166)
@@ -1,3474 +1,3474 @@
//===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 instruction set, defining the instructions, and
// properties of the instructions which are needed for code generation, machine
// code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// X86 specific DAG Nodes.
//
def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisSameAs<1, 2>]>;
def SDTX86Cmps : SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
//def SDTX86Cmpss : SDTypeProfile<1, 3, [SDTCisVT<0, f32>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
def SDTX86Cmov : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
// Unary and binary operator instructions that set EFLAGS as a side-effect.
def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
[SDTCisSameAs<0, 2>,
SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
[SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>,
SDTCisInt<0>, SDTCisVT<1, i32>]>;
// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
[SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>,
SDTCisInt<0>,
SDTCisVT<1, i32>,
SDTCisVT<4, i32>]>;
// RES1, RES2, FLAGS = op LHS, RHS
def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
[SDTCisSameAs<0, 1>,
SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>,
SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTX86BrCond : SDTypeProfile<0, 3,
[SDTCisVT<0, OtherVT>,
SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
def SDTX86SetCC : SDTypeProfile<1, 2,
[SDTCisVT<0, i8>,
SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
def SDTX86SetCC_C : SDTypeProfile<1, 2,
[SDTCisInt<0>,
SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;
def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
SDTCisVT<2, i8>]>;
def SDTX86caspair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86caspairSaveEbx8 : SDTypeProfile<1, 3,
[SDTCisVT<0, i32>, SDTCisPtrTy<1>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDTX86caspairSaveRbx16 : SDTypeProfile<1, 3,
[SDTCisVT<0, i64>, SDTCisPtrTy<1>,
SDTCisVT<2, i64>, SDTCisVT<3, i64>]>;
def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
SDTCisPtrTy<1>,
SDTCisInt<2>]>;
def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
SDTCisPtrTy<1>]>;
def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
SDTCisVT<1, i32>]>;
def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
SDTCisVT<1, i32>]>;
def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
SDTCisVT<1, iPTR>,
SDTCisVT<2, iPTR>]>;
def SDT_X86VAARG_64 : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
SDTCisPtrTy<1>,
SDTCisVT<2, i32>,
SDTCisVT<3, i8>,
SDTCisVT<4, i32>]>;
def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
def SDTX86Void : SDTypeProfile<0, 0, []>;
def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
[SDNPHasChain,SDNPSideEffect]>;
def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
[SDNPHasChain]>;
def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>;
def X86shrd : SDNode<"X86ISD::SHRD", SDTIntShiftDOp>;
def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
[SDNPHasChain]>;
def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;
def X86sahf : SDNode<"X86ISD::SAHF", SDTX86sahf>;
def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
[SDNPHasChain, SDNPSideEffect]>;
def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
[SDNPHasChain, SDNPSideEffect]>;
def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86caspair,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86cas8save_ebx : SDNode<"X86ISD::LCMPXCHG8_SAVE_EBX_DAG",
SDTX86caspairSaveEbx8,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue,
SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def X86cas16save_rbx : SDNode<"X86ISD::LCMPXCHG16_SAVE_RBX_DAG",
SDTX86caspairSaveRbx16,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue,
SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
[SDNPHasChain, SDNPOptInGlue]>;
def X86vastart_save_xmm_regs :
SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
SDT_X86VASTART_SAVE_XMM_REGS,
[SDNPHasChain, SDNPVariadic]>;
def X86vaarg64 :
SDNode<"X86ISD::VAARG_64", SDT_X86VAARG_64,
[SDNPHasChain, SDNPMayLoad, SDNPMayStore,
SDNPMemOperand]>;
def X86callseq_start :
SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
[SDNPHasChain, SDNPOutGlue]>;
def X86callseq_end :
SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
[SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
SDNPVariadic]>;
def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
[SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
SDNPVariadic]>;
def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
[SDNPHasChain]>;
def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad]>;
def X86rdtsc : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
[SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86rdtscp : SDNode<"X86ISD::RDTSCP_DAG", SDTX86Void,
[SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86rdpmc : SDNode<"X86ISD::RDPMC_DAG", SDTX86Void,
[SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
SDTCisInt<1>]>>;
def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
[SDNPHasChain]>;
def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
SDTypeProfile<1, 1, [SDTCisInt<0>,
SDTCisPtrTy<1>]>,
[SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
[SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
SDTypeProfile<0, 0, []>,
[SDNPHasChain, SDNPSideEffect]>;
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
[SDNPCommutative]>;
def X86adc_flag : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
def X86sbb_flag : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;
def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad,
SDNPMemOperand]>;
def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad,
SDNPMemOperand]>;
def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad,
SDNPMemOperand]>;
def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad,
SDNPMemOperand]>;
def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad,
SDNPMemOperand]>;
def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntBinOp>;
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
[SDNPHasChain, SDNPOutGlue]>;
def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
[SDNPHasChain]>;
def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def X86lwpins : SDNode<"X86ISD::LWPINS",
SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;
def X86umwait : SDNode<"X86ISD::UMWAIT",
SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
[SDNPHasChain, SDNPSideEffect]>;
def X86tpause : SDNode<"X86ISD::TPAUSE",
SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
[SDNPHasChain, SDNPSideEffect]>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
//
// A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
// the index operand of an address, to conform to x86 encoding restrictions.
def ptr_rc_nosp : PointerLikeRegClass<1>;
// *mem - Operand definitions for the funky X86 addressing mode operands.
//
def X86MemAsmOperand : AsmOperandClass {
let Name = "Mem";
}
let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
// Gather mem operands
def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }
def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
def X86Mem256_RC512Operand : AsmOperandClass { let Name = "Mem256_RC512"; }
def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
}
def X86AbsMemAsmOperand : AsmOperandClass {
let Name = "AbsMem";
let SuperClasses = [X86MemAsmOperand];
}
class X86MemOperand<string printMethod,
AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
let PrintMethod = printMethod;
let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
let ParserMatchClass = parserMatchClass;
let OperandType = "OPERAND_MEMORY";
}
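// For illustration: the five MIOperandInfo slots above are the usual x86
// address components, so an AT&T-syntax address such as 8(%rax,%rbx,4) is
// carried as base = %rax, scale = 4, index = %rbx, disp = 8, plus an empty
// segment register.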
// Gather mem operands
class X86VMemOperand<RegisterClass RC, string printMethod,
AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
}
def anymem : X86MemOperand<"printanymem">;
// FIXME: Right now we allow any size during parsing, but we might want to
// restrict to only unsized memory.
def opaquemem : X86MemOperand<"printopaquemem">;
def i8mem : X86MemOperand<"printi8mem", X86Mem8AsmOperand>;
def i16mem : X86MemOperand<"printi16mem", X86Mem16AsmOperand>;
def i32mem : X86MemOperand<"printi32mem", X86Mem32AsmOperand>;
def i64mem : X86MemOperand<"printi64mem", X86Mem64AsmOperand>;
def i128mem : X86MemOperand<"printi128mem", X86Mem128AsmOperand>;
def i256mem : X86MemOperand<"printi256mem", X86Mem256AsmOperand>;
def i512mem : X86MemOperand<"printi512mem", X86Mem512AsmOperand>;
def f32mem : X86MemOperand<"printf32mem", X86Mem32AsmOperand>;
def f64mem : X86MemOperand<"printf64mem", X86Mem64AsmOperand>;
def f80mem : X86MemOperand<"printf80mem", X86Mem80AsmOperand>;
def f128mem : X86MemOperand<"printf128mem", X86Mem128AsmOperand>;
def f256mem : X86MemOperand<"printf256mem", X86Mem256AsmOperand>;
def f512mem : X86MemOperand<"printf512mem", X86Mem512AsmOperand>;
def v512mem : X86VMemOperand<VR512, "printf512mem", X86Mem512AsmOperand>;
// Gather mem operands
def vx64mem : X86VMemOperand<VR128, "printi64mem", X86Mem64_RC128Operand>;
def vx128mem : X86VMemOperand<VR128, "printi128mem", X86Mem128_RC128Operand>;
def vx256mem : X86VMemOperand<VR128, "printi256mem", X86Mem256_RC128Operand>;
def vy128mem : X86VMemOperand<VR256, "printi128mem", X86Mem128_RC256Operand>;
def vy256mem : X86VMemOperand<VR256, "printi256mem", X86Mem256_RC256Operand>;
def vx64xmem : X86VMemOperand<VR128X, "printi64mem", X86Mem64_RC128XOperand>;
def vx128xmem : X86VMemOperand<VR128X, "printi128mem", X86Mem128_RC128XOperand>;
def vx256xmem : X86VMemOperand<VR128X, "printi256mem", X86Mem256_RC128XOperand>;
def vy128xmem : X86VMemOperand<VR256X, "printi128mem", X86Mem128_RC256XOperand>;
def vy256xmem : X86VMemOperand<VR256X, "printi256mem", X86Mem256_RC256XOperand>;
def vy512xmem : X86VMemOperand<VR256X, "printi512mem", X86Mem512_RC256XOperand>;
def vz256mem : X86VMemOperand<VR512, "printi256mem", X86Mem256_RC512Operand>;
def vz512mem : X86VMemOperand<VR512, "printi512mem", X86Mem512_RC512Operand>;
// A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
// of a plain GPR, so that it doesn't potentially require a REX prefix.
def ptr_rc_norex : PointerLikeRegClass<2>;
def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
def i8mem_NOREX : Operand<iPTR> {
let PrintMethod = "printi8mem";
let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
SEGMENT_REG);
let ParserMatchClass = X86Mem8AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
// GPRs available for tailcall.
// It represents GR32_TC, GR64_TC or GR64_TCW64.
def ptr_rc_tailcall : PointerLikeRegClass<4>;
// Special i32mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
def i32mem_TC : Operand<i32> {
let PrintMethod = "printi32mem";
let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem32AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
def i64mem_TC : Operand<i64> {
let PrintMethod = "printi64mem";
let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
ptr_rc_tailcall, i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem64AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
let OperandType = "OPERAND_PCREL",
ParserMatchClass = X86AbsMemAsmOperand,
PrintMethod = "printPCRelImm" in {
def i32imm_pcrel : Operand<i32>;
def i16imm_pcrel : Operand<i16>;
// Branch targets have OtherVT type and print as pc-relative values.
def brtarget : Operand<OtherVT>;
def brtarget8 : Operand<OtherVT>;
}
// Special parser to detect 16-bit mode to select 16-bit displacement.
def X86AbsMem16AsmOperand : AsmOperandClass {
let Name = "AbsMem16";
let RenderMethod = "addAbsMemOperands";
let SuperClasses = [X86AbsMemAsmOperand];
}
// Branch targets have OtherVT type and print as pc-relative values.
let OperandType = "OPERAND_PCREL",
PrintMethod = "printPCRelImm" in {
let ParserMatchClass = X86AbsMem16AsmOperand in
def brtarget16 : Operand<OtherVT>;
let ParserMatchClass = X86AbsMemAsmOperand in
def brtarget32 : Operand<OtherVT>;
}
let RenderMethod = "addSrcIdxOperands" in {
def X86SrcIdx8Operand : AsmOperandClass {
let Name = "SrcIdx8";
let SuperClasses = [X86Mem8AsmOperand];
}
def X86SrcIdx16Operand : AsmOperandClass {
let Name = "SrcIdx16";
let SuperClasses = [X86Mem16AsmOperand];
}
def X86SrcIdx32Operand : AsmOperandClass {
let Name = "SrcIdx32";
let SuperClasses = [X86Mem32AsmOperand];
}
def X86SrcIdx64Operand : AsmOperandClass {
let Name = "SrcIdx64";
let SuperClasses = [X86Mem64AsmOperand];
}
} // RenderMethod = "addSrcIdxOperands"
let RenderMethod = "addDstIdxOperands" in {
def X86DstIdx8Operand : AsmOperandClass {
let Name = "DstIdx8";
let SuperClasses = [X86Mem8AsmOperand];
}
def X86DstIdx16Operand : AsmOperandClass {
let Name = "DstIdx16";
let SuperClasses = [X86Mem16AsmOperand];
}
def X86DstIdx32Operand : AsmOperandClass {
let Name = "DstIdx32";
let SuperClasses = [X86Mem32AsmOperand];
}
def X86DstIdx64Operand : AsmOperandClass {
let Name = "DstIdx64";
let SuperClasses = [X86Mem64AsmOperand];
}
} // RenderMethod = "addDstIdxOperands"
let RenderMethod = "addMemOffsOperands" in {
def X86MemOffs16_8AsmOperand : AsmOperandClass {
let Name = "MemOffs16_8";
let SuperClasses = [X86Mem8AsmOperand];
}
def X86MemOffs16_16AsmOperand : AsmOperandClass {
let Name = "MemOffs16_16";
let SuperClasses = [X86Mem16AsmOperand];
}
def X86MemOffs16_32AsmOperand : AsmOperandClass {
let Name = "MemOffs16_32";
let SuperClasses = [X86Mem32AsmOperand];
}
def X86MemOffs32_8AsmOperand : AsmOperandClass {
let Name = "MemOffs32_8";
let SuperClasses = [X86Mem8AsmOperand];
}
def X86MemOffs32_16AsmOperand : AsmOperandClass {
let Name = "MemOffs32_16";
let SuperClasses = [X86Mem16AsmOperand];
}
def X86MemOffs32_32AsmOperand : AsmOperandClass {
let Name = "MemOffs32_32";
let SuperClasses = [X86Mem32AsmOperand];
}
def X86MemOffs32_64AsmOperand : AsmOperandClass {
let Name = "MemOffs32_64";
let SuperClasses = [X86Mem64AsmOperand];
}
def X86MemOffs64_8AsmOperand : AsmOperandClass {
let Name = "MemOffs64_8";
let SuperClasses = [X86Mem8AsmOperand];
}
def X86MemOffs64_16AsmOperand : AsmOperandClass {
let Name = "MemOffs64_16";
let SuperClasses = [X86Mem16AsmOperand];
}
def X86MemOffs64_32AsmOperand : AsmOperandClass {
let Name = "MemOffs64_32";
let SuperClasses = [X86Mem32AsmOperand];
}
def X86MemOffs64_64AsmOperand : AsmOperandClass {
let Name = "MemOffs64_64";
let SuperClasses = [X86Mem64AsmOperand];
}
} // RenderMethod = "addMemOffsOperands"
class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
}
class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
let MIOperandInfo = (ops ptr_rc);
}
def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>;
def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>;
def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
class X86MemOffsOperand<Operand immOperand, string printMethod,
AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
let MIOperandInfo = (ops immOperand, SEGMENT_REG);
}
def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
X86MemOffs16_8AsmOperand>;
def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
X86MemOffs16_16AsmOperand>;
def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
X86MemOffs16_32AsmOperand>;
def offset32_8 : X86MemOffsOperand<i32imm, "printMemOffs8",
X86MemOffs32_8AsmOperand>;
def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
X86MemOffs32_16AsmOperand>;
def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
X86MemOffs32_32AsmOperand>;
def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
X86MemOffs32_64AsmOperand>;
def offset64_8 : X86MemOffsOperand<i64imm, "printMemOffs8",
X86MemOffs64_8AsmOperand>;
def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
X86MemOffs64_16AsmOperand>;
def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
X86MemOffs64_32AsmOperand>;
def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
X86MemOffs64_64AsmOperand>;
def SSECC : Operand<i8> {
let PrintMethod = "printSSEAVXCC";
let OperandType = "OPERAND_IMMEDIATE";
}
def AVXCC : Operand<i8> {
let PrintMethod = "printSSEAVXCC";
let OperandType = "OPERAND_IMMEDIATE";
}
def AVX512ICC : Operand<i8> {
let PrintMethod = "printSSEAVXCC";
let OperandType = "OPERAND_IMMEDIATE";
}
def XOPCC : Operand<i8> {
let PrintMethod = "printXOPCC";
let OperandType = "OPERAND_IMMEDIATE";
}
class ImmSExtAsmOperandClass : AsmOperandClass {
let SuperClasses = [ImmAsmOperand];
let RenderMethod = "addImmOperands";
}
def X86GR32orGR64AsmOperand : AsmOperandClass {
let Name = "GR32orGR64";
}
def GR32orGR64 : RegisterOperand<GR32> {
let ParserMatchClass = X86GR32orGR64AsmOperand;
}
def AVX512RCOperand : AsmOperandClass {
let Name = "AVX512RC";
}
def AVX512RC : Operand<i32> {
let PrintMethod = "printRoundingControl";
let OperandType = "OPERAND_IMMEDIATE";
let ParserMatchClass = AVX512RCOperand;
}
// Sign-extended immediate classes. We don't need to define the full lattice
// here because there is no instruction with an ambiguity between ImmSExti64i32
// and ImmSExti32i8.
//
// The strange ranges come from the fact that the assembler always works with
// 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
// (which will be a -1ULL), and "0xFFFF" (-1 in 16 bits).
// [0, 0x7FFFFFFF] |
// [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti64i32";
}
// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti16i8";
let SuperClasses = [ImmSExti64i32AsmOperand];
}
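// Worked example for the ranges above (illustration only): writing "-1" for a
// 16-bit operand gives the assembler 0xFFFFFFFFFFFFFFFF, which falls in
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]; writing "0xFFFF" falls in
// [0x000000000000FF80, 0x000000000000FFFF]. Either spelling can therefore
// select the sign-extended 8-bit immediate form.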
// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti32i8";
}
// [0, 0x0000007F] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti64i8";
let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
ImmSExti64i32AsmOperand];
}
// Unsigned immediate used by SSE/AVX instructions
// [0, 0xFF]
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmUnsignedi8AsmOperand : AsmOperandClass {
let Name = "ImmUnsignedi8";
let RenderMethod = "addImmOperands";
}
// A couple of more descriptive operand definitions.
// 16-bits but only 8 bits are significant.
def i16i8imm : Operand<i16> {
let ParserMatchClass = ImmSExti16i8AsmOperand;
let OperandType = "OPERAND_IMMEDIATE";
}
// 32-bits but only 8 bits are significant.
def i32i8imm : Operand<i32> {
let ParserMatchClass = ImmSExti32i8AsmOperand;
let OperandType = "OPERAND_IMMEDIATE";
}
// 64-bits but only 32 bits are significant.
def i64i32imm : Operand<i64> {
let ParserMatchClass = ImmSExti64i32AsmOperand;
let OperandType = "OPERAND_IMMEDIATE";
}
// 64-bits but only 8 bits are significant.
def i64i8imm : Operand<i64> {
let ParserMatchClass = ImmSExti64i8AsmOperand;
let OperandType = "OPERAND_IMMEDIATE";
}
// Unsigned 8-bit immediate used by SSE/AVX instructions.
def u8imm : Operand<i8> {
let PrintMethod = "printU8Imm";
let ParserMatchClass = ImmUnsignedi8AsmOperand;
let OperandType = "OPERAND_IMMEDIATE";
}
// 32-bit immediate but only 8-bits are significant and they are unsigned.
// Used by some SSE/AVX instructions that use intrinsics.
def i32u8imm : Operand<i32> {
let PrintMethod = "printU8Imm";
let ParserMatchClass = ImmUnsignedi8AsmOperand;
let OperandType = "OPERAND_IMMEDIATE";
}
// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
let PrintMethod = "printPCRelImm";
let ParserMatchClass = X86AbsMemAsmOperand;
let OperandType = "OPERAND_PCREL";
}
def lea64_32mem : Operand<i32> {
let PrintMethod = "printanymem";
let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
let ParserMatchClass = X86MemAsmOperand;
}
// Memory operands that use 64-bit pointers in both ILP32 and LP64.
def lea64mem : Operand<i64> {
let PrintMethod = "printanymem";
let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
let ParserMatchClass = X86MemAsmOperand;
}
//===----------------------------------------------------------------------===//
// X86 Complex Pattern Definitions.
//
// Define X86-specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex],
[]>;
// In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
[add, sub, mul, X86mul_imm, shl, or,
frameindex, X86WrapperRIP],
[]>;
def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
[tglobaltlsaddr], []>;
def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
[tglobaltlsaddr], []>;
def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex,
X86WrapperRIP], []>;
def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
[tglobaltlsaddr], []>;
def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
[tglobaltlsaddr], []>;
def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;
// A relocatable immediate is either an immediate operand or an operand that can
// be relocated by the linker to an immediate, such as a regular symbol in
// non-PIC code.
def relocImm : ComplexPattern<iAny, 1, "selectRelocImm", [imm, X86Wrapper], [],
0>;
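// For illustration (hypothetical symbol name; non-PIC code assumed): in
//   movl $some_global, %eax
// the operand is a symbol rather than a literal constant, but the linker
// resolves it to a plain 32-bit immediate, which relocImm matches via the
// X86Wrapper alternative above.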
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
def TruePredicate : Predicate<"true">;
def HasCMov : Predicate<"Subtarget->hasCMov()">;
def NoCMov : Predicate<"!Subtarget->hasCMov()">;
def HasMMX : Predicate<"Subtarget->hasMMX()">;
def Has3DNow : Predicate<"Subtarget->has3DNow()">;
def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;
def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
def NoAVX : Predicate<"!Subtarget->hasAVX()">;
def HasAVX : Predicate<"Subtarget->hasAVX()">;
def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
def HasCDI : Predicate<"Subtarget->hasCDI()">;
def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
def HasPFI : Predicate<"Subtarget->hasPFI()">;
def HasERI : Predicate<"Subtarget->hasERI()">;
def HasDQI : Predicate<"Subtarget->hasDQI()">;
def NoDQI : Predicate<"!Subtarget->hasDQI()">;
def HasBWI : Predicate<"Subtarget->hasBWI()">;
def NoBWI : Predicate<"!Subtarget->hasBWI()">;
def HasVLX : Predicate<"Subtarget->hasVLX()">;
def NoVLX : Predicate<"!Subtarget->hasVLX()">;
def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
def PKU : Predicate<"Subtarget->hasPKU()">;
def HasVNNI : Predicate<"Subtarget->hasVNNI()">;
def HasBITALG : Predicate<"Subtarget->hasBITALG()">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES : Predicate<"Subtarget->hasAES()">;
def HasVAES : Predicate<"Subtarget->hasVAES()">;
def NoVLX_Or_NoVAES : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVAES()">;
def HasFXSR : Predicate<"Subtarget->hasFXSR()">;
def HasXSAVE : Predicate<"Subtarget->hasXSAVE()">;
def HasXSAVEOPT : Predicate<"Subtarget->hasXSAVEOPT()">;
def HasXSAVEC : Predicate<"Subtarget->hasXSAVEC()">;
def HasXSAVES : Predicate<"Subtarget->hasXSAVES()">;
def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
def NoVLX_Or_NoVPCLMULQDQ :
Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">;
def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">;
def HasGFNI : Predicate<"Subtarget->hasGFNI()">;
def HasFMA : Predicate<"Subtarget->hasFMA()">;
def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
def NoFMA4 : Predicate<"!Subtarget->hasFMA4()">;
def HasXOP : Predicate<"Subtarget->hasXOP()">;
def HasTBM : Predicate<"Subtarget->hasTBM()">;
def NoTBM : Predicate<"!Subtarget->hasTBM()">;
def HasLWP : Predicate<"Subtarget->hasLWP()">;
def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
def HasF16C : Predicate<"Subtarget->hasF16C()">;
def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
def HasBMI : Predicate<"Subtarget->hasBMI()">;
def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
def NoBMI2 : Predicate<"!Subtarget->hasBMI2()">;
def HasVBMI : Predicate<"Subtarget->hasVBMI()">;
def HasVBMI2 : Predicate<"Subtarget->hasVBMI2()">;
def HasIFMA : Predicate<"Subtarget->hasIFMA()">;
def HasRTM : Predicate<"Subtarget->hasRTM()">;
def HasADX : Predicate<"Subtarget->hasADX()">;
def HasSHA : Predicate<"Subtarget->hasSHA()">;
def HasSGX : Predicate<"Subtarget->hasSGX()">;
def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">;
def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">;
def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">;
def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">;
def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
def HasCLDEMOTE : Predicate<"Subtarget->hasCLDEMOTE()">;
def HasMOVDIRI : Predicate<"Subtarget->hasMOVDIRI()">;
def HasMOVDIR64B : Predicate<"Subtarget->hasMOVDIR64B()">;
def HasPTWRITE : Predicate<"Subtarget->hasPTWRITE()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
def HasMPX : Predicate<"Subtarget->hasMPX()">;
def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">;
def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
def HasCLWB : Predicate<"Subtarget->hasCLWB()">;
def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">;
def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
AssemblerPredicate<"!Mode64Bit", "Not 64-bit mode">;
def In64BitMode : Predicate<"Subtarget->is64Bit()">,
AssemblerPredicate<"Mode64Bit", "64-bit mode">;
def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
def In16BitMode : Predicate<"Subtarget->is16Bit()">,
AssemblerPredicate<"Mode16Bit", "16-bit mode">;
def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
AssemblerPredicate<"!Mode16Bit", "Not 16-bit mode">;
def In32BitMode : Predicate<"Subtarget->is32Bit()">,
AssemblerPredicate<"Mode32Bit", "32-bit mode">;
def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
"Subtarget->getFrameLowering()->hasFP(*MF)"> {
let RecomputePerFunction = 1;
}
def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
"TM.getCodeModel() == CodeModel::Kernel">;
def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
def OptForSize : Predicate<"MF->getFunction().optForSize()">;
def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
"MF->getFunction().optForSize()">;
def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().optForSize() || "
"!Subtarget->hasSSE41()">;
}
def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
def FavorMemIndirectCall : Predicate<"!Subtarget->slowTwoMemOps()">;
def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
def HasMFence : Predicate<"Subtarget->hasMFence()">;
def UseRetpolineIndirectCalls : Predicate<"Subtarget->useRetpolineIndirectCalls()">;
def NotUseRetpolineIndirectCalls : Predicate<"!Subtarget->useRetpolineIndirectCalls()">;
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
//
include "X86InstrFormats.td"
//===----------------------------------------------------------------------===//
// Pattern fragments.
//
// X86 specific condition code. These correspond to CondCode in
// X86InstrInfo.h. They must be kept in synch.
def X86_COND_A : PatLeaf<(i8 0)>; // alt. COND_NBE
def X86_COND_AE : PatLeaf<(i8 1)>; // alt. COND_NC
def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
def X86_COND_BE : PatLeaf<(i8 3)>; // alt. COND_NA
def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
def X86_COND_G : PatLeaf<(i8 5)>; // alt. COND_NLE
def X86_COND_GE : PatLeaf<(i8 6)>; // alt. COND_NL
def X86_COND_L : PatLeaf<(i8 7)>; // alt. COND_NGE
def X86_COND_LE : PatLeaf<(i8 8)>; // alt. COND_NG
def X86_COND_NE : PatLeaf<(i8 9)>; // alt. COND_NZ
def X86_COND_NO : PatLeaf<(i8 10)>;
def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
def X86_COND_NS : PatLeaf<(i8 12)>;
def X86_COND_O : PatLeaf<(i8 13)>;
def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
def X86_COND_S : PatLeaf<(i8 15)>;
def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
// FIXME: Ideally we would just replace the above i*immSExt* matchers with
// relocImm-based matchers, but then FastISel would be unable to use them.
def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
return isSExtRelocImm<8>(N);
}]>;
def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
return isSExtRelocImm<32>(N);
}]>;
// If we have multiple users of an immediate, it's much smaller to reuse
// the register, rather than encode the immediate in every instruction.
// This has the risk of increasing register pressure from stretched live
// ranges, however, the immediates should be trivial to rematerialize by
// the RA in the event of high register pressure.
// TODO : This is currently enabled for stores and binary ops. There are more
// cases for which this can be enabled, though this catches the bulk of the
// issues.
// TODO2 : This should really also be enabled under O2, but there's currently
// an issue with RA where we don't pull the constants into their users
// when we rematerialize them. I'll follow-up on enabling O2 after we fix that
// issue.
// TODO3 : This is currently limited to single basic blocks (DAG creation
// pulls block immediates to the top and merges them if necessary).
// Eventually, it would be nice to allow ConstantHoisting to merge constants
// globally for potentially added savings.
//
def imm8_su : PatLeaf<(i8 relocImm), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def imm16_su : PatLeaf<(i16 relocImm), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def imm32_su : PatLeaf<(i32 relocImm), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
// i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// unsigned field.
def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
def i64immZExt32SExt8 : ImmLeaf<i64, [{
return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
}]>;
// Helper fragments for loads.
// It's safe to fold a zextload/extload from i1 as a regular i8 load. The
// upper bits are guaranteed to be zero and we were going to emit a MOV8rm
// which might get folded during peephole anyway.
def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::LoadExtType ExtType = LD->getExtensionType();
return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
ExtType == ISD::ZEXTLOAD;
}]>;
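// Sketch of how a fragment like this is consumed (hypothetical def name,
// shown only as a comment): a byte-load instruction pattern such as
//   def EX_MOV8rm : I<0x8A, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$src),
//                     "mov{b}\t{$src, $dst|$dst, $src}",
//                     [(set GR8:$dst, (loadi8 addr:$src))]>;
// lets isel fold the extending i1 loads described above through loadi8.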
// It's always safe to treat an anyext i16 load as an i32 load if the i16 is
// known to be 32-bit aligned or better. Ditto for i8 to i16.
def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD)
return true;
if (ExtType == ISD::EXTLOAD)
return LD->getAlignment() >= 2 && !LD->isVolatile();
return false;
}]>;
def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD)
return true;
if (ExtType == ISD::EXTLOAD)
return LD->getAlignment() >= 4 && !LD->isVolatile();
return false;
}]>;
def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
LoadSDNode *Ld = cast<LoadSDNode>(N);
return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
}]>;
def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
LoadSDNode *Ld = cast<LoadSDNode>(N);
return Subtarget->hasSSEUnalignedMem() ||
Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
}]>;
def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
// An 'and' node with a single use.
def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
return N->hasOneUse();
}]>;
// An 'srl' node with a single use.
def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
return N->hasOneUse();
}]>;
// A 'trunc' node with a single use.
def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
return N->hasOneUse();
}]>;
//===----------------------------------------------------------------------===//
// Instruction list.
//
// Nop
let hasSideEffects = 0, SchedRW = [WriteNop] in {
def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
"nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
"nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
"nop{q}\t$zero", []>, TB, NotMemoryFoldable,
Requires<[In64BitMode]>;
// Also allow register so we can assemble/disassemble
def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
"nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
"nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
"nop{q}\t$zero", []>, TB, NotMemoryFoldable,
Requires<[In64BitMode]>;
}
// Constructing a stack frame.
def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
"enter\t$len, $lvl", []>, Sched<[WriteMicrocoded]>;
let SchedRW = [WriteALU] in {
let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
Requires<[Not64BitMode]>;
let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
Requires<[In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions.
//
let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1,
SchedRW = [WriteSystem] in
def Int_eh_sjlj_setup_dispatch
: PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
let mayLoad = 1, SchedRW = [WriteLoad] in {
def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
OpSize16;
def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
OpSize32, Requires<[Not64BitMode]>;
// Long form for the disassembler.
let isCodeGenOnly = 1, ForceDisassemble = 1 in {
def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
OpSize16, NotMemoryFoldable;
def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
} // isCodeGenOnly = 1, ForceDisassemble = 1
} // mayLoad, SchedRW
let mayStore = 1, mayLoad = 1, SchedRW = [WriteCopy] in {
def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", []>,
OpSize16;
def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", []>,
OpSize32, Requires<[Not64BitMode]>;
} // mayStore, mayLoad, SchedRW
let mayStore = 1, SchedRW = [WriteStore] in {
def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
OpSize16;
def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
OpSize32, Requires<[Not64BitMode]>;
// Long form for the disassembler.
let isCodeGenOnly = 1, ForceDisassemble = 1 in {
def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
OpSize16, NotMemoryFoldable;
def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
} // isCodeGenOnly = 1, ForceDisassemble = 1
def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
"push{w}\t$imm", []>, OpSize16;
def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
"push{w}\t$imm", []>, OpSize16;
def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
"push{l}\t$imm", []>, OpSize32,
Requires<[Not64BitMode]>;
def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
"push{l}\t$imm", []>, OpSize32,
Requires<[Not64BitMode]>;
} // mayStore, SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src", []>,
OpSize16;
def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src", []>,
OpSize32, Requires<[Not64BitMode]>;
} // mayLoad, mayStore, SchedRW
}
let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
SchedRW = [WriteRMW], Defs = [ESP] in {
let Uses = [ESP] in
def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
[(set GR32:$dst, (int_x86_flags_read_u32))]>,
Requires<[Not64BitMode]>;
let Uses = [RSP] in
def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
[(set GR64:$dst, (int_x86_flags_read_u64))]>,
Requires<[In64BitMode]>;
}
let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
SchedRW = [WriteRMW] in {
let Defs = [ESP, EFLAGS, DF], Uses = [ESP] in
def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
[(int_x86_flags_write_u32 GR32:$src)]>,
Requires<[Not64BitMode]>;
let Defs = [RSP, EFLAGS, DF], Uses = [RSP] in
def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
[(int_x86_flags_write_u64 GR64:$src)]>,
Requires<[In64BitMode]>;
}
let Defs = [ESP, EFLAGS, DF], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
SchedRW = [WriteLoad] in {
def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize16;
def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>, OpSize32,
Requires<[Not64BitMode]>;
}
let Defs = [ESP], Uses = [ESP, EFLAGS, DF], mayStore = 1, hasSideEffects=0,
SchedRW = [WriteStore] in {
def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize16;
def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>, OpSize32,
Requires<[Not64BitMode]>;
}
let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
let mayLoad = 1, SchedRW = [WriteLoad] in {
def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
OpSize32, Requires<[In64BitMode]>;
// Long form for the disassembler.
let isCodeGenOnly = 1, ForceDisassemble = 1 in {
def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
} // isCodeGenOnly = 1, ForceDisassemble = 1
} // mayLoad, SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", []>,
OpSize32, Requires<[In64BitMode]>;
let mayStore = 1, SchedRW = [WriteStore] in {
def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
OpSize32, Requires<[In64BitMode]>;
// Long form for the disassembler.
let isCodeGenOnly = 1, ForceDisassemble = 1 in {
def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
} // isCodeGenOnly = 1, ForceDisassemble = 1
} // mayStore, SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
OpSize32, Requires<[In64BitMode]>;
} // mayLoad, mayStore, SchedRW
}
let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
SchedRW = [WriteStore] in {
def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
"push{q}\t$imm", []>, OpSize32,
Requires<[In64BitMode]>;
def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
"push{q}\t$imm", []>, OpSize32,
Requires<[In64BitMode]>;
}
let Defs = [RSP, EFLAGS, DF], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
let Defs = [RSP], Uses = [RSP, EFLAGS, DF], mayStore = 1, hasSideEffects=0 in
def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", []>,
OpSize32, Requires<[Not64BitMode]>;
def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", []>,
OpSize16, Requires<[Not64BitMode]>;
}
let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", []>,
OpSize32, Requires<[Not64BitMode]>;
def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", []>,
OpSize16, Requires<[Not64BitMode]>;
}
let Constraints = "$src = $dst", SchedRW = [WriteBSWAP32] in {
// This instruction is a consequence of BSWAP32r observing operand size. The
// encoding is valid, but the behavior is undefined.
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
def BSWAP16r_BAD : I<0xC8, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
"bswap{w}\t$dst", []>, OpSize16, TB;
// GR32 = bswap GR32
def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
"bswap{l}\t$dst",
[(set GR32:$dst, (bswap GR32:$src))]>, OpSize32, TB;
let SchedRW = [WriteBSWAP64] in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
"bswap{q}\t$dst",
[(set GR64:$dst, (bswap GR64:$src))]>, TB;
} // Constraints = "$src = $dst", SchedRW
// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
PS, OpSize16, Sched<[WriteBSF]>;
def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
PS, OpSize16, Sched<[WriteBSFLd]>;
def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
PS, OpSize32, Sched<[WriteBSF]>;
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
PS, OpSize32, Sched<[WriteBSFLd]>;
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
PS, Sched<[WriteBSF]>;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
PS, Sched<[WriteBSFLd]>;
def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
PS, OpSize16, Sched<[WriteBSR]>;
def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
PS, OpSize16, Sched<[WriteBSRLd]>;
def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
PS, OpSize32, Sched<[WriteBSR]>;
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
PS, OpSize32, Sched<[WriteBSRLd]>;
def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
PS, Sched<[WriteBSR]>;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
PS, Sched<[WriteBSRLd]>;
} // Defs = [EFLAGS]
let SchedRW = [WriteMicrocoded] in {
let Defs = [EDI,ESI], Uses = [EDI,ESI,DF] in {
def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
"movsb\t{$src, $dst|$dst, $src}", []>;
def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
"movsw\t{$src, $dst|$dst, $src}", []>, OpSize16;
def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
"movs{l|d}\t{$src, $dst|$dst, $src}", []>, OpSize32;
def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
"movsq\t{$src, $dst|$dst, $src}", []>,
Requires<[In64BitMode]>;
}
let Defs = [EDI], Uses = [AL,EDI,DF] in
def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
"stosb\t{%al, $dst|$dst, al}", []>;
let Defs = [EDI], Uses = [AX,EDI,DF] in
def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
"stosw\t{%ax, $dst|$dst, ax}", []>, OpSize16;
let Defs = [EDI], Uses = [EAX,EDI,DF] in
def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
"stos{l|d}\t{%eax, $dst|$dst, eax}", []>, OpSize32;
let Defs = [RDI], Uses = [RAX,RDI,DF] in
def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
"stosq\t{%rax, $dst|$dst, rax}", []>,
Requires<[In64BitMode]>;
let Defs = [EDI,EFLAGS], Uses = [AL,EDI,DF] in
def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
"scasb\t{$dst, %al|al, $dst}", []>;
let Defs = [EDI,EFLAGS], Uses = [AX,EDI,DF] in
def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
"scasw\t{$dst, %ax|ax, $dst}", []>, OpSize16;
let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,DF] in
def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
"scas{l|d}\t{$dst, %eax|eax, $dst}", []>, OpSize32;
let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,DF] in
def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
"scasq\t{$dst, %rax|rax, $dst}", []>,
Requires<[In64BitMode]>;
let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,DF] in {
def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
"cmpsb\t{$dst, $src|$src, $dst}", []>;
def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
"cmpsw\t{$dst, $src|$src, $dst}", []>, OpSize16;
def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
"cmps{l|d}\t{$dst, $src|$src, $dst}", []>, OpSize32;
def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
"cmpsq\t{$dst, $src|$src, $dst}", []>,
Requires<[In64BitMode]>;
}
} // SchedRW
//===----------------------------------------------------------------------===//
// Move Instructions.
//
let SchedRW = [WriteMove] in {
let hasSideEffects = 0, isMoveReg = 1 in {
def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
"mov{b}\t{$src, $dst|$dst, $src}", []>;
def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", []>;
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
"mov{b}\t{$src, $dst|$dst, $src}",
[(set GR8:$dst, imm:$src)]>;
def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, imm:$src)]>, OpSize16;
def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, relocImm:$src)]>, OpSize32;
def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, i64immSExt32:$src)]>;
}
let isReMaterializable = 1, isMoveImm = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
"movabs{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, relocImm:$src)]>;
}
// Longer forms that use a ModR/M byte. Needed for the disassembler.
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
"mov{b}\t{$src, $dst|$dst, $src}", []>,
FoldGenData<"MOV8ri">;
def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
"mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
FoldGenData<"MOV16ri">;
def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
FoldGenData<"MOV32ri">;
}
} // SchedRW
let SchedRW = [WriteStore] in {
def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
"mov{b}\t{$src, $dst|$dst, $src}",
[(store (i8 imm8_su:$src), addr:$dst)]>;
def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
[(store (i16 imm16_su:$src), addr:$dst)]>, OpSize16;
def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(store (i32 imm32_su:$src), addr:$dst)]>, OpSize32;
def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
[(store i64immSExt32_su:$src, addr:$dst)]>,
Requires<[In64BitMode]>;
} // SchedRW
let hasSideEffects = 0 in {
/// Memory offset versions of moves. The immediate is an address-mode-sized
/// offset from the segment base.
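// For example (illustrative encoding): loading AL from absolute address
// 0x1234 in 32-bit mode uses MOV8ao32 and encodes as A0 34 12 00 00; the
// 32-bit offset follows the opcode directly, with no ModR/M byte.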
let SchedRW = [WriteALU] in {
let mayLoad = 1 in {
let Defs = [AL] in
def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
"mov{b}\t{$src, %al|al, $src}", []>,
AdSize32;
let Defs = [AX] in
def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
"mov{w}\t{$src, %ax|ax, $src}", []>,
OpSize16, AdSize32;
let Defs = [EAX] in
def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
"mov{l}\t{$src, %eax|eax, $src}", []>,
OpSize32, AdSize32;
let Defs = [RAX] in
def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
"mov{q}\t{$src, %rax|rax, $src}", []>,
AdSize32;
let Defs = [AL] in
def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
"mov{b}\t{$src, %al|al, $src}", []>, AdSize16;
let Defs = [AX] in
def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
"mov{w}\t{$src, %ax|ax, $src}", []>,
OpSize16, AdSize16;
let Defs = [EAX] in
def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
"mov{l}\t{$src, %eax|eax, $src}", []>,
AdSize16, OpSize32;
} // mayLoad
let mayStore = 1 in {
let Uses = [AL] in
def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
"mov{b}\t{%al, $dst|$dst, al}", []>, AdSize32;
let Uses = [AX] in
def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
"mov{w}\t{%ax, $dst|$dst, ax}", []>,
OpSize16, AdSize32;
let Uses = [EAX] in
def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
"mov{l}\t{%eax, $dst|$dst, eax}", []>,
OpSize32, AdSize32;
let Uses = [RAX] in
def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
"mov{q}\t{%rax, $dst|$dst, rax}", []>,
AdSize32;
let Uses = [AL] in
def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
"mov{b}\t{%al, $dst|$dst, al}", []>, AdSize16;
let Uses = [AX] in
def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
"mov{w}\t{%ax, $dst|$dst, ax}", []>,
OpSize16, AdSize16;
let Uses = [EAX] in
def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
"mov{l}\t{%eax, $dst|$dst, eax}", []>,
OpSize32, AdSize16;
} // mayStore
// These forms all have full 64-bit absolute addresses in their instructions
// and use the movabs mnemonic to indicate this specific form.
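// For example (illustrative): "movabs 0x112233445566, %al" can assemble to
// MOV8ao64, with the full 8-byte address encoded after the A0 opcode
// (AdSize64), since a 32-bit offset could not hold that address.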
let mayLoad = 1 in {
let Defs = [AL] in
def MOV8ao64 : Ii64<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
"movabs{b}\t{$src, %al|al, $src}", []>,
AdSize64;
let Defs = [AX] in
def MOV16ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
"movabs{w}\t{$src, %ax|ax, $src}", []>,
OpSize16, AdSize64;
let Defs = [EAX] in
def MOV32ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
"movabs{l}\t{$src, %eax|eax, $src}", []>,
OpSize32, AdSize64;
let Defs = [RAX] in
def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
"movabs{q}\t{$src, %rax|rax, $src}", []>,
AdSize64;
} // mayLoad
let mayStore = 1 in {
let Uses = [AL] in
def MOV8o64a : Ii64<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
"movabs{b}\t{%al, $dst|$dst, al}", []>,
AdSize64;
let Uses = [AX] in
def MOV16o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
"movabs{w}\t{%ax, $dst|$dst, ax}", []>,
OpSize16, AdSize64;
let Uses = [EAX] in
def MOV32o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
"movabs{l}\t{%eax, $dst|$dst, eax}", []>,
OpSize32, AdSize64;
let Uses = [RAX] in
def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
"movabs{q}\t{%rax, $dst|$dst, rax}", []>,
AdSize64;
} // mayStore
} // SchedRW
} // hasSideEffects = 0
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
SchedRW = [WriteMove], isMoveReg = 1 in {
def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
"mov{b}\t{$src, $dst|$dst, $src}", []>,
FoldGenData<"MOV8rr">;
def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
FoldGenData<"MOV16rr">;
def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
FoldGenData<"MOV32rr">;
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", []>,
FoldGenData<"MOV64rr">;
}
// Reversed version with ".s" suffix for GAS compatibility.
def : InstAlias<"mov{b}.s\t{$src, $dst|$dst, $src}",
(MOV8rr_REV GR8:$dst, GR8:$src), 0>;
def : InstAlias<"mov{w}.s\t{$src, $dst|$dst, $src}",
(MOV16rr_REV GR16:$dst, GR16:$src), 0>;
def : InstAlias<"mov{l}.s\t{$src, $dst|$dst, $src}",
(MOV32rr_REV GR32:$dst, GR32:$src), 0>;
def : InstAlias<"mov{q}.s\t{$src, $dst|$dst, $src}",
(MOV64rr_REV GR64:$dst, GR64:$src), 0>;
def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
(MOV8rr_REV GR8:$dst, GR8:$src), 0, "att">;
def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
(MOV16rr_REV GR16:$dst, GR16:$src), 0, "att">;
def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
(MOV32rr_REV GR32:$dst, GR32:$src), 0, "att">;
def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
(MOV64rr_REV GR64:$dst, GR64:$src), 0, "att">;
let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
"mov{b}\t{$src, $dst|$dst, $src}",
[(set GR8:$dst, (loadi8 addr:$src))]>;
def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (loadi16 addr:$src))]>, OpSize16;
def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (loadi32 addr:$src))]>, OpSize32;
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (load addr:$src))]>;
}
let SchedRW = [WriteStore] in {
def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
"mov{b}\t{$src, $dst|$dst, $src}",
[(store GR8:$src, addr:$dst)]>;
def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
[(store GR16:$src, addr:$dst)]>, OpSize16;
def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(store GR32:$src, addr:$dst)]>, OpSize32;
def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
[(store GR64:$src, addr:$dst)]>;
} // SchedRW
// Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
// that they can be used for copying and storing h registers, which can't be
// encoded when a REX prefix is present.
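// For example (illustrative): "mov %ah, %dil" is not encodable at all, since
// selecting DIL requires a REX prefix and, with REX present, the register
// encoding that normally means AH refers to SPL instead; hence the _NOREX
// operand classes below.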
let isCodeGenOnly = 1 in {
let hasSideEffects = 0, isMoveReg = 1 in
def MOV8rr_NOREX : I<0x88, MRMDestReg,
(outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
"mov{b}\t{$src, $dst|$dst, $src}", []>,
Sched<[WriteMove]>;
let mayStore = 1, hasSideEffects = 0 in
def MOV8mr_NOREX : I<0x88, MRMDestMem,
(outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
"mov{b}\t{$src, $dst|$dst, $src}", []>,
Sched<[WriteStore]>;
let mayLoad = 1, hasSideEffects = 0,
canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
(outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
"mov{b}\t{$src, $dst|$dst, $src}", []>,
Sched<[WriteLoad]>;
}
// Condition code ops, incl. set if equal/not equal/...
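// For example (illustrative): lahf copies the low byte of EFLAGS into AH
// (SF ZF 0 AF 0 PF 1 CF, from bit 7 down to bit 0), and sahf writes those
// five flag bits back from AH, which is what the Uses/Defs below model.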
let SchedRW = [WriteLAHFSAHF] in {
let Defs = [EFLAGS], Uses = [AH] in
def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf",
[(set EFLAGS, (X86sahf AH))]>,
Requires<[HasLAHFSAHF]>;
let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, // AH = flags
Requires<[HasLAHFSAHF]>;
} // SchedRW
//===----------------------------------------------------------------------===//
// Bit tests instructions: BT, BTS, BTR, BTC.
let Defs = [EFLAGS] in {
let SchedRW = [WriteBitTest] in {
def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
OpSize16, TB, NotMemoryFoldable;
def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
OpSize32, TB, NotMemoryFoldable;
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB,
NotMemoryFoldable;
} // SchedRW
// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Make these instructions disassembly
// only for now. These instructions are also slow on modern CPUs so that's
// another reason to avoid generating them.
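// For example (illustrative): "btl %eax, (%rdi)" with EAX = 100 tests bit 4
// of the dword at 12(%rdi); the register index selects memory beyond the
// addressed dword instead of being masked to the operand width.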
let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteBitTestRegLd] in {
def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[]>, OpSize16, TB, NotMemoryFoldable;
def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[]>, OpSize32, TB, NotMemoryFoldable;
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
[]>, TB, NotMemoryFoldable;
}
let SchedRW = [WriteBitTest] in {
def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))]>,
OpSize16, TB;
def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))]>,
OpSize32, TB;
def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
} // SchedRW
// Note that these forms aren't slow: the slowness mentioned above only applies
// when the other operand is in a register. When it's an immediate, bt is still fast.
let SchedRW = [WriteBitTestImmLd] in {
def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi16 addr:$src1),
i16immSExt8:$src2))]>,
OpSize16, TB;
def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi32 addr:$src1),
i32immSExt8:$src2))]>,
OpSize32, TB;
def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi64 addr:$src1),
i64immSExt8:$src2))]>, TB,
Requires<[In64BitMode]>;
} // SchedRW
let hasSideEffects = 0 in {
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, TB, NotMemoryFoldable;
def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"btc{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, TB, NotMemoryFoldable;
def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
NotMemoryFoldable;
} // SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, TB, NotMemoryFoldable;
def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"btc{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, TB, NotMemoryFoldable;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
NotMemoryFoldable;
}
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTC16ri8 : Ii8<0xBA, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
def BTC32ri8 : Ii8<0xBA, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
Requires<[In64BitMode]>;
}
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, TB, NotMemoryFoldable;
def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"btr{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, TB, NotMemoryFoldable;
def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
NotMemoryFoldable;
} // SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, TB, NotMemoryFoldable;
def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"btr{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, TB, NotMemoryFoldable;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
NotMemoryFoldable;
}
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTR16ri8 : Ii8<0xBA, MRM6r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, TB;
def BTR32ri8 : Ii8<0xBA, MRM6r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"btr{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, TB;
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, TB;
def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"btr{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, TB;
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
Requires<[In64BitMode]>;
}
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, TB, NotMemoryFoldable;
def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"bts{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, TB, NotMemoryFoldable;
def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
NotMemoryFoldable;
} // SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, TB, NotMemoryFoldable;
def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"bts{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, TB, NotMemoryFoldable;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
NotMemoryFoldable;
}
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTS16ri8 : Ii8<0xBA, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
def BTS32ri8 : Ii8<0xBA, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
Requires<[In64BitMode]>;
}
} // hasSideEffects = 0
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
// Atomic support
//
// Atomic swap. These are just normal xchg instructions, but since a memory
// operand is referenced, the exchange is automatically atomic (xchg with a
// memory operand asserts an implicit lock).
multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag> {
let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
(ins GR8:$val, i8mem:$ptr),
!strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
[(set
GR8:$dst,
(!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$val, i16mem:$ptr),
!strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
[(set
GR16:$dst,
(!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
OpSize16;
def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$val, i32mem:$ptr),
!strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
[(set
GR32:$dst,
(!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
OpSize32;
def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$val, i64mem:$ptr),
!strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
[(set
GR64:$dst,
(!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
}
}
defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">, NotMemoryFoldable;
// Swap between registers.
let SchedRW = [WriteXCHG] in {
let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
(ins GR8:$src1, GR8:$src2),
"xchg{b}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
(ins GR16:$src1, GR16:$src2),
"xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
OpSize16, NotMemoryFoldable;
def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
(ins GR32:$src1, GR32:$src2),
"xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
OpSize32, NotMemoryFoldable;
def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
(ins GR64:$src1 ,GR64:$src2),
"xchg{q}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
}
// Swap between the accumulator (AX/EAX/RAX) and other registers.
let Constraints = "$src = $dst", hasSideEffects = 0 in {
let Uses = [AX], Defs = [AX] in
def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
"xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
let Uses = [EAX], Defs = [EAX] in
def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
"xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
let Uses = [RAX], Defs = [RAX] in
def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
"xchg{q}\t{$src, %rax|rax, $src}", []>;
}
} // SchedRW
let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
Defs = [EFLAGS], SchedRW = [WriteXCHG] in {
def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
(ins GR8:$src1, GR8:$src2),
"xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
(ins GR16:$src1, GR16:$src2),
"xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
(ins GR32:$src1, GR32:$src2),
"xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
(ins GR64:$src1, GR64:$src2),
"xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // SchedRW
let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
(ins GR8:$val, i8mem:$ptr),
"xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$val, i16mem:$ptr),
"xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
OpSize16;
def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$val, i32mem:$ptr),
"xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
OpSize32;
def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$val, i64mem:$ptr),
"xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
}
let SchedRW = [WriteCMPXCHG], hasSideEffects = 0 in {
let Defs = [AL, EFLAGS], Uses = [AL] in
def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
"cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
NotMemoryFoldable;
let Defs = [AX, EFLAGS], Uses = [AX] in
def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
NotMemoryFoldable;
let Defs = [EAX, EFLAGS], Uses = [EAX] in
def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
NotMemoryFoldable;
let Defs = [RAX, EFLAGS], Uses = [RAX] in
def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
NotMemoryFoldable;
} // SchedRW, hasSideEffects
let SchedRW = [WriteCMPXCHGRMW], mayLoad = 1, mayStore = 1,
hasSideEffects = 0 in {
let Defs = [AL, EFLAGS], Uses = [AL] in
def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
"cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
NotMemoryFoldable;
let Defs = [AX, EFLAGS], Uses = [AX] in
def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
NotMemoryFoldable;
let Defs = [EAX, EFLAGS], Uses = [EAX] in
def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
NotMemoryFoldable;
let Defs = [RAX, EFLAGS], Uses = [RAX] in
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
NotMemoryFoldable;
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
"cmpxchg8b\t$dst", []>, TB;
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
"cmpxchg16b\t$dst", []>,
TB, Requires<[HasCmpxchg16b, In64BitMode]>;
} // SchedRW, mayLoad, mayStore, hasSideEffects
// Lock instruction prefix
let SchedRW = [WriteMicrocoded] in
def LOCK_PREFIX : I<0xF0, RawFrm, (outs), (ins), "lock", []>;
let SchedRW = [WriteNop] in {
// Rex64 instruction prefix
def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>,
Requires<[In64BitMode]>;
// Data16 instruction prefix
def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>;
} // SchedRW
// Repeat string operation instruction prefixes
let Defs = [ECX], Uses = [ECX,DF], SchedRW = [WriteMicrocoded] in {
// Repeat (used with INS, OUTS, MOVS, LODS and STOS)
def REP_PREFIX : I<0xF3, RawFrm, (outs), (ins), "rep", []>;
// Repeat while not equal (used with CMPS and SCAS)
def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>;
}
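// For example (illustrative): "rep movsb" repeats MOVSB, decrementing the
// count register (ECX here) each iteration and stopping when it reaches zero;
// "repne scasb" additionally stops as soon as a match sets ZF.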
// String manipulation instructions
let SchedRW = [WriteMicrocoded] in {
let Defs = [AL,ESI], Uses = [ESI,DF] in
def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
"lodsb\t{$src, %al|al, $src}", []>;
let Defs = [AX,ESI], Uses = [ESI,DF] in
def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
"lodsw\t{$src, %ax|ax, $src}", []>, OpSize16;
let Defs = [EAX,ESI], Uses = [ESI,DF] in
def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
"lods{l|d}\t{$src, %eax|eax, $src}", []>, OpSize32;
let Defs = [RAX,ESI], Uses = [ESI,DF] in
def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
"lodsq\t{$src, %rax|rax, $src}", []>,
Requires<[In64BitMode]>;
}
let SchedRW = [WriteSystem] in {
let Defs = [ESI], Uses = [DX,ESI,DF] in {
def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
"outsb\t{$src, %dx|dx, $src}", []>;
def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
"outsw\t{$src, %dx|dx, $src}", []>, OpSize16;
def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
"outs{l|d}\t{$src, %dx|dx, $src}", []>, OpSize32;
}
let Defs = [EDI], Uses = [DX,EDI,DF] in {
def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
"insb\t{%dx, $dst|$dst, dx}", []>;
def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
"insw\t{%dx, $dst|$dst, dx}", []>, OpSize16;
def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
"ins{l|d}\t{%dx, $dst|$dst, dx}", []>, OpSize32;
}
}
// EFLAGS management instructions.
let SchedRW = [WriteALU], Defs = [EFLAGS], Uses = [EFLAGS] in {
def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
}
// DF management instructions.
let SchedRW = [WriteALU], Defs = [DF] in {
def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
}
// Table lookup instructions
let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>, Sched<[WriteLoad]>;
let SchedRW = [WriteMicrocoded] in {
// ASCII Adjust After Addition
let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>,
Requires<[Not64BitMode]>;
// ASCII Adjust AX Before Division
let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
"aad\t$src", []>, Requires<[Not64BitMode]>;
// ASCII Adjust AX After Multiply
let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
"aam\t$src", []>, Requires<[Not64BitMode]>;
// ASCII Adjust AL After Subtraction - sets AX and the AF/CF flags
let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>,
Requires<[Not64BitMode]>;
// Decimal Adjust AL after Addition
let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>,
Requires<[Not64BitMode]>;
// Decimal Adjust AL after Subtraction
let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>,
Requires<[Not64BitMode]>;
} // SchedRW
let SchedRW = [WriteSystem] in {
// Check Array Index Against Bounds
// Note: "bound" does not have reversed operands in at&t syntax.
def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bound\t$dst, $src", []>, OpSize16,
Requires<[Not64BitMode]>;
def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bound\t$dst, $src", []>, OpSize32,
Requires<[Not64BitMode]>;
// Adjust RPL Field of Segment Selector
def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"arpl\t{$src, $dst|$dst, $src}", []>,
Requires<[Not64BitMode]>, NotMemoryFoldable;
let mayStore = 1 in
def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"arpl\t{$src, $dst|$dst, $src}", []>,
Requires<[Not64BitMode]>, NotMemoryFoldable;
} // SchedRW
//===----------------------------------------------------------------------===//
// MOVBE Instructions
//
let Predicates = [HasMOVBE] in {
let SchedRW = [WriteALULd] in {
def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
OpSize16, T8PS;
def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
OpSize32, T8PS;
def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
T8PS;
}
let SchedRW = [WriteStore] in {
def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(store (bswap GR16:$src), addr:$dst)]>,
OpSize16, T8PS;
def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(store (bswap GR32:$src), addr:$dst)]>,
OpSize32, T8PS;
def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(store (bswap GR64:$src), addr:$dst)]>,
T8PS;
}
}
//===----------------------------------------------------------------------===//
// RDRAND Instruction
//
let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
"rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
OpSize16, PS;
def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
"rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
OpSize32, PS;
def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
"rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
PS;
}
//===----------------------------------------------------------------------===//
// RDSEED Instruction
//
let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
[(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, PS;
def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, PS;
def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86rdseed))]>, PS;
}
//===----------------------------------------------------------------------===//
// LZCNT Instruction
//
let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"lzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>,
XS, OpSize16, Sched<[WriteLZCNT]>;
def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"lzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctlz (loadi16 addr:$src))),
(implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteLZCNTLd]>;
def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"lzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>,
XS, OpSize32, Sched<[WriteLZCNT]>;
def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"lzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctlz (loadi32 addr:$src))),
(implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteLZCNTLd]>;
def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"lzcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
XS, Sched<[WriteLZCNT]>;
def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"lzcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctlz (loadi64 addr:$src))),
(implicit EFLAGS)]>, XS, Sched<[WriteLZCNTLd]>;
}
//===----------------------------------------------------------------------===//
// BMI Instructions
//
let Predicates = [HasBMI], Defs = [EFLAGS] in {
def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"tzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>,
XS, OpSize16, Sched<[WriteTZCNT]>;
def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"tzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (cttz (loadi16 addr:$src))),
(implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteTZCNTLd]>;
def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"tzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>,
XS, OpSize32, Sched<[WriteTZCNT]>;
def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"tzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (cttz (loadi32 addr:$src))),
(implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteTZCNTLd]>;
def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"tzcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
XS, Sched<[WriteTZCNT]>;
def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"tzcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (cttz (loadi64 addr:$src))),
(implicit EFLAGS)]>, XS, Sched<[WriteTZCNTLd]>;
}
multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
RegisterClass RC, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
T8PS, VEX_4V, Sched<[WriteBLS]>;
let mayLoad = 1 in
def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
T8PS, VEX_4V, Sched<[WriteBLS.Folded]>;
}
}
let Predicates = [HasBMI], Defs = [EFLAGS] in {
defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem>;
defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem>, VEX_W;
defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem>;
defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem>, VEX_W;
defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem>;
defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem>, VEX_W;
}
//===----------------------------------------------------------------------===//
// Pattern fragments to auto-generate BMI instructions.
//===----------------------------------------------------------------------===//
def or_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
(X86or_flag node:$lhs, node:$rhs), [{
return hasNoCarryFlagUses(SDValue(N, 1));
}]>;
def xor_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
(X86xor_flag node:$lhs, node:$rhs), [{
return hasNoCarryFlagUses(SDValue(N, 1));
}]>;
let Predicates = [HasBMI] in {
// FIXME: patterns for the load versions are not implemented
def : Pat<(and GR32:$src, (add GR32:$src, -1)),
(BLSR32rr GR32:$src)>;
def : Pat<(and GR64:$src, (add GR64:$src, -1)),
(BLSR64rr GR64:$src)>;
def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
(BLSMSK32rr GR32:$src)>;
def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
(BLSMSK64rr GR64:$src)>;
def : Pat<(and GR32:$src, (ineg GR32:$src)),
(BLSI32rr GR32:$src)>;
def : Pat<(and GR64:$src, (ineg GR64:$src)),
(BLSI64rr GR64:$src)>;
// Versions to match flag-producing ops.
// X86and_flag nodes are rarely created. Those should use CMP+AND. We do
// TESTrr matching in PostProcessISelDAG to allow BLSR/BLSI to be formed.
def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, -1)),
(BLSMSK32rr GR32:$src)>;
def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, -1)),
(BLSMSK64rr GR64:$src)>;
}
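// As a worked illustration of the identities matched above (with x = 0b10110):
//   x & (x - 1) = 0b10100  (BLSR:   clear the lowest set bit)
//   x ^ (x - 1) = 0b00011  (BLSMSK: mask up to and including the lowest set bit)
//   x & -x      = 0b00010  (BLSI:   isolate the lowest set bit)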
multiclass bmi_bextr<bits<8> opc, string mnemonic, RegisterClass RC,
X86MemOperand x86memop, SDNode OpNode,
PatFrag ld_frag, X86FoldableSchedWrite Sched> {
def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
T8PS, VEX, Sched<[Sched]>;
def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
(implicit EFLAGS)]>, T8PS, VEX,
Sched<[Sched.Folded,
// x86memop:$src1
ReadDefault, ReadDefault, ReadDefault, ReadDefault,
ReadDefault,
// RC:$src2
Sched.ReadAfterFold]>;
}
let Predicates = [HasBMI], Defs = [EFLAGS] in {
defm BEXTR32 : bmi_bextr<0xF7, "bextr{l}", GR32, i32mem,
X86bextr, loadi32, WriteBEXTR>;
defm BEXTR64 : bmi_bextr<0xF7, "bextr{q}", GR64, i64mem,
X86bextr, loadi64, WriteBEXTR>, VEX_W;
}
multiclass bmi_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
X86MemOperand x86memop, Intrinsic Int,
PatFrag ld_frag, X86FoldableSchedWrite Sched> {
def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
T8PS, VEX, Sched<[Sched]>;
def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
(implicit EFLAGS)]>, T8PS, VEX,
Sched<[Sched.Folded,
// x86memop:$src1
ReadDefault, ReadDefault, ReadDefault, ReadDefault,
ReadDefault,
// RC:$src2
Sched.ReadAfterFold]>;
}
let Predicates = [HasBMI2], Defs = [EFLAGS] in {
defm BZHI32 : bmi_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
X86bzhi, loadi32, WriteBZHI>;
defm BZHI64 : bmi_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
X86bzhi, loadi64, WriteBZHI>, VEX_W;
}
def CountTrailingOnes : SDNodeXForm<imm, [{
// Count the trailing ones in the immediate.
return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
}]>;
def BEXTRMaskXForm : SDNodeXForm<imm, [{
unsigned Length = countTrailingOnes(N->getZExtValue());
return getI32Imm(Length << 8, SDLoc(N));
}]>;
def AndMask64 : ImmLeaf<i64, [{
return isMask_64(Imm) && !isUInt<32>(Imm);
}]>;
// Use BEXTR for 64-bit 'and' with large immediate 'mask'.
let Predicates = [HasBMI, NoBMI2, NoTBM] in {
def : Pat<(and GR64:$src, AndMask64:$mask),
(BEXTR64rr GR64:$src,
(SUBREG_TO_REG (i64 0),
(MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
(BEXTR64rm addr:$src,
(SUBREG_TO_REG (i64 0),
(MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
}
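// For example (illustrative): a mask of 0xFFFFFFFFFF (40 low bits set) is
// rewritten as BEXTR with control 40 << 8 = 0x2800, i.e. start = 0 and
// length = 40, which extracts exactly the low 40 bits of the source.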
// Use BZHI for 64-bit 'and' with large immediate 'mask'.
let Predicates = [HasBMI2, NoTBM] in {
def : Pat<(and GR64:$src, AndMask64:$mask),
(BZHI64rr GR64:$src,
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
(MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
(BZHI64rm addr:$src,
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
(MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
}
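// For example (illustrative): the same 40-low-bit mask (0xFFFFFFFFFF) becomes
// BZHI with index 40 (its trailing-ones count), zeroing every bit from bit 40
// upward. BZHI only reads bits 7:0 of the index operand, which is why the
// upper bits can be left as IMPLICIT_DEF above.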
multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
X86MemOperand x86memop, Intrinsic Int,
PatFrag ld_frag> {
def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int RC:$src1, RC:$src2))]>,
VEX_4V, Sched<[WriteALU]>;
def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))]>,
VEX_4V, Sched<[WriteALU.Folded, WriteALU.ReadAfterFold]>;
}
let Predicates = [HasBMI2] in {
defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
int_x86_bmi_pdep_32, loadi32>, T8XD;
defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
int_x86_bmi_pdep_64, loadi64>, T8XD, VEX_W;
defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
int_x86_bmi_pext_32, loadi32>, T8XS;
defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
int_x86_bmi_pext_64, loadi64>, T8XS, VEX_W;
}
//===----------------------------------------------------------------------===//
// TBM Instructions
//
let Predicates = [HasTBM], Defs = [EFLAGS] in {
multiclass tbm_ternary_imm<bits<8> opc, RegisterClass RC, string OpcodeStr,
X86MemOperand x86memop, PatFrag ld_frag,
SDNode OpNode, Operand immtype,
SDPatternOperator immoperator,
X86FoldableSchedWrite Sched> {
def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
!strconcat(OpcodeStr,
"\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
[(set RC:$dst, (OpNode RC:$src1, immoperator:$cntl))]>,
XOP, XOPA, Sched<[Sched]>;
def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
(ins x86memop:$src1, immtype:$cntl),
!strconcat(OpcodeStr,
"\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
[(set RC:$dst, (OpNode (ld_frag addr:$src1), immoperator:$cntl))]>,
XOP, XOPA, Sched<[Sched.Folded]>;
}
defm BEXTRI32 : tbm_ternary_imm<0x10, GR32, "bextr{l}", i32mem, loadi32,
X86bextr, i32imm, imm, WriteBEXTR>;
let ImmT = Imm32S in
defm BEXTRI64 : tbm_ternary_imm<0x10, GR64, "bextr{q}", i64mem, loadi64,
X86bextr, i64i32imm,
i64immSExt32, WriteBEXTR>, VEX_W;
multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
RegisterClass RC, string OpcodeStr,
X86MemOperand x86memop, X86FoldableSchedWrite Sched> {
let hasSideEffects = 0 in {
def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
!strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
XOP_4V, XOP9, Sched<[Sched]>;
let mayLoad = 1 in
def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
XOP_4V, XOP9, Sched<[Sched.Folded]>;
}
}
multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
X86FoldableSchedWrite Sched,
Format FormReg, Format FormMem> {
defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr#"{l}",
i32mem, Sched>;
defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr#"{q}",
i64mem, Sched>, VEX_W;
}
defm BLCFILL : tbm_binary_intr<0x01, "blcfill", WriteALU, MRM1r, MRM1m>;
defm BLCI : tbm_binary_intr<0x02, "blci", WriteALU, MRM6r, MRM6m>;
defm BLCIC : tbm_binary_intr<0x01, "blcic", WriteALU, MRM5r, MRM5m>;
defm BLCMSK : tbm_binary_intr<0x02, "blcmsk", WriteALU, MRM1r, MRM1m>;
defm BLCS : tbm_binary_intr<0x01, "blcs", WriteALU, MRM3r, MRM3m>;
defm BLSFILL : tbm_binary_intr<0x01, "blsfill", WriteALU, MRM2r, MRM2m>;
defm BLSIC : tbm_binary_intr<0x01, "blsic", WriteALU, MRM6r, MRM6m>;
defm T1MSKC : tbm_binary_intr<0x01, "t1mskc", WriteALU, MRM7r, MRM7m>;
defm TZMSK : tbm_binary_intr<0x01, "tzmsk", WriteALU, MRM4r, MRM4m>;
} // HasTBM, EFLAGS
// Use BEXTRI for 64-bit 'and' with large immediate 'mask'.
let Predicates = [HasTBM] in {
def : Pat<(and GR64:$src, AndMask64:$mask),
(BEXTRI64ri GR64:$src, (BEXTRMaskXForm imm:$mask))>;
def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
(BEXTRI64mi addr:$src, (BEXTRMaskXForm imm:$mask))>;
}
//===----------------------------------------------------------------------===//
// Lightweight Profiling Instructions
let Predicates = [HasLWP], SchedRW = [WriteSystem] in {
def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
[(int_x86_llwpcb GR32:$src)]>, XOP, XOP9;
def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
[(set GR32:$dst, (int_x86_slwpcb))]>, XOP, XOP9;
def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
[(int_x86_llwpcb GR64:$src)]>, XOP, XOP9, VEX_W;
def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
[(set GR64:$dst, (int_x86_slwpcb))]>, XOP, XOP9, VEX_W;
multiclass lwpins_intr<RegisterClass RC> {
def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
"lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
[(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, imm:$cntl))]>,
XOP_4V, XOPA;
let mayLoad = 1 in
def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
"lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
[(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), imm:$cntl))]>,
XOP_4V, XOPA;
}
let Defs = [EFLAGS] in {
defm LWPINS32 : lwpins_intr<GR32>;
defm LWPINS64 : lwpins_intr<GR64>, VEX_W;
} // EFLAGS
multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
"lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
[(Int RC:$src0, GR32:$src1, imm:$cntl)]>, XOP_4V, XOPA;
let mayLoad = 1 in
def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
"lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
[(Int RC:$src0, (loadi32 addr:$src1), imm:$cntl)]>,
XOP_4V, XOPA;
}
defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, VEX_W;
} // HasLWP, SchedRW
//===----------------------------------------------------------------------===//
// MONITORX/MWAITX Instructions
//
let SchedRW = [ WriteSystem ] in {
let usesCustomInserter = 1 in {
def MONITORX : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
[(int_x86_monitorx addr:$src1, GR32:$src2, GR32:$src3)]>,
Requires<[ HasMWAITX ]>;
}
let Uses = [ EAX, ECX, EDX ] in {
def MONITORXrrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
TB, Requires<[ HasMWAITX ]>;
}
let Uses = [ ECX, EAX, EBX ] in {
def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
[(int_x86_mwaitx ECX, EAX, EBX)]>,
TB, Requires<[ HasMWAITX ]>;
}
} // SchedRW
def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
Requires<[ Not64BitMode ]>;
def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
Requires<[ In64BitMode ]>;
def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORXrrr)>,
Requires<[ Not64BitMode ]>;
def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORXrrr)>,
Requires<[ In64BitMode ]>;
//===----------------------------------------------------------------------===//
// WAITPKG Instructions
//
let SchedRW = [WriteSystem] in {
def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
"umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
"umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
XS, AdSize32, Requires<[HasWAITPKG]>;
def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
"umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
let Uses = [EAX, EDX], Defs = [EFLAGS] in {
def UMWAIT : I<0xAE, MRM6r,
(outs), (ins GR32orGR64:$src), "umwait\t$src",
[(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
XD, Requires<[HasWAITPKG]>;
def TPAUSE : I<0xAE, MRM6r,
(outs), (ins GR32orGR64:$src), "tpause\t$src",
[(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
PD, Requires<[HasWAITPKG]>, NotMemoryFoldable;
}
} // SchedRW
//===----------------------------------------------------------------------===//
// MOVDIRI - Move doubleword/quadword as direct store
//
let SchedRW = [WriteStore] in {
def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore32 addr:$dst, GR32:$src)]>,
T8, Requires<[HasMOVDIRI]>;
def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore64 addr:$dst, GR64:$src)]>,
T8, Requires<[In64BitMode, HasMOVDIRI]>;
} // SchedRW
//===----------------------------------------------------------------------===//
// MOVDIR64B - Move 64 bytes as direct store
//
let SchedRW = [WriteStore] in {
def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
"movdir64b\t{$src, $dst|$dst, $src}", []>,
T8PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
"movdir64b\t{$src, $dst|$dst, $src}",
[(int_x86_movdir64b GR32:$dst, addr:$src)]>,
T8PD, AdSize32, Requires<[HasMOVDIR64B]>;
def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
"movdir64b\t{$src, $dst|$dst, $src}",
[(int_x86_movdir64b GR64:$dst, addr:$src)]>,
T8PD, AdSize64, Requires<[HasMOVDIR64B, In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
// CLZERO Instruction
//
let SchedRW = [WriteSystem] in {
let Uses = [EAX] in
def CLZEROr : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
TB, Requires<[HasCLZERO]>;
let usesCustomInserter = 1 in {
def CLZERO : PseudoI<(outs), (ins i32mem:$src1),
[(int_x86_clzero addr:$src1)]>, Requires<[HasCLZERO]>;
}
} // SchedRW
def : InstAlias<"clzero\t{%eax|eax}", (CLZEROr)>, Requires<[Not64BitMode]>;
def : InstAlias<"clzero\t{%rax|rax}", (CLZEROr)>, Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Pattern fragments to auto generate TBM instructions.
//===----------------------------------------------------------------------===//
let Predicates = [HasTBM] in {
// FIXME: patterns for the load versions are not implemented
def : Pat<(and GR32:$src, (add GR32:$src, 1)),
(BLCFILL32rr GR32:$src)>;
def : Pat<(and GR64:$src, (add GR64:$src, 1)),
(BLCFILL64rr GR64:$src)>;
def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
(BLCI32rr GR32:$src)>;
def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
(BLCI64rr GR64:$src)>;
// Extra patterns because opt can optimize the above patterns to this.
def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
(BLCI32rr GR32:$src)>;
def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
(BLCI64rr GR64:$src)>;
def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
(BLCIC32rr GR32:$src)>;
def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
(BLCIC64rr GR64:$src)>;
def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
(BLCMSK32rr GR32:$src)>;
def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
(BLCMSK64rr GR64:$src)>;
def : Pat<(or GR32:$src, (add GR32:$src, 1)),
(BLCS32rr GR32:$src)>;
def : Pat<(or GR64:$src, (add GR64:$src, 1)),
(BLCS64rr GR64:$src)>;
def : Pat<(or GR32:$src, (add GR32:$src, -1)),
(BLSFILL32rr GR32:$src)>;
def : Pat<(or GR64:$src, (add GR64:$src, -1)),
(BLSFILL64rr GR64:$src)>;
def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
(BLSIC32rr GR32:$src)>;
def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
(BLSIC64rr GR64:$src)>;
def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
(T1MSKC32rr GR32:$src)>;
def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
(T1MSKC64rr GR64:$src)>;
def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
(TZMSK32rr GR32:$src)>;
def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
(TZMSK64rr GR64:$src)>;
// Patterns to match flag producing ops.
// X86and_flag nodes are rarely created. Those should use CMP+AND. We do
// TESTrr matching in PostProcessISelDAG to allow BLSR/BLSI to be formed.
def : Pat<(or_flag_nocf GR32:$src, (not (add GR32:$src, 1))),
(BLCI32rr GR32:$src)>;
def : Pat<(or_flag_nocf GR64:$src, (not (add GR64:$src, 1))),
(BLCI64rr GR64:$src)>;
// Extra patterns because opt can optimize the above patterns to this.
def : Pat<(or_flag_nocf GR32:$src, (sub -2, GR32:$src)),
(BLCI32rr GR32:$src)>;
def : Pat<(or_flag_nocf GR64:$src, (sub -2, GR64:$src)),
(BLCI64rr GR64:$src)>;
def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, 1)),
(BLCMSK32rr GR32:$src)>;
def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, 1)),
(BLCMSK64rr GR64:$src)>;
def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, 1)),
(BLCS32rr GR32:$src)>;
def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, 1)),
(BLCS64rr GR64:$src)>;
def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, -1)),
(BLSFILL32rr GR32:$src)>;
def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, -1)),
(BLSFILL64rr GR64:$src)>;
def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
(BLSIC32rr GR32:$src)>;
def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
(BLSIC64rr GR64:$src)>;
def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
(T1MSKC32rr GR32:$src)>;
def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
(T1MSKC64rr GR64:$src)>;
} // HasTBM
//===----------------------------------------------------------------------===//
// Memory Instructions
//
let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
"clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
[(int_x86_clwb addr:$src)]>, PD, NotMemoryFoldable;
let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
[(int_x86_cldemote addr:$src)]>, TB;
//===----------------------------------------------------------------------===//
// Subsystems.
//===----------------------------------------------------------------------===//
include "X86InstrArithmetic.td"
include "X86InstrCMovSetCC.td"
include "X86InstrExtension.td"
include "X86InstrControl.td"
include "X86InstrShiftRotate.td"
// X87 Floating Point Stack.
include "X86InstrFPStack.td"
// SIMD support (SSE, MMX and AVX)
include "X86InstrFragmentsSIMD.td"
// FMA - Fused Multiply-Add support (requires FMA)
include "X86InstrFMA.td"
// XOP
include "X86InstrXOP.td"
// SSE, MMX and 3DNow! vector support.
include "X86InstrSSE.td"
include "X86InstrAVX512.td"
include "X86InstrMMX.td"
include "X86Instr3DNow.td"
// MPX instructions
include "X86InstrMPX.td"
include "X86InstrVMX.td"
include "X86InstrSVM.td"
include "X86InstrTSX.td"
include "X86InstrSGX.td"
// System instructions.
include "X86InstrSystem.td"
// Compiler Pseudo Instructions and Pat Patterns
include "X86InstrCompiler.td"
include "X86InstrVecCompiler.td"
//===----------------------------------------------------------------------===//
// Assembler Mnemonic Aliases
//===----------------------------------------------------------------------===//
def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"cbw", "cbtw", "att">;
def : MnemonicAlias<"cwde", "cwtl", "att">;
def : MnemonicAlias<"cwd", "cwtd", "att">;
def : MnemonicAlias<"cdq", "cltd", "att">;
def : MnemonicAlias<"cdqe", "cltq", "att">;
def : MnemonicAlias<"cqo", "cqto", "att">;
// In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;
def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"loopz", "loope">;
def : MnemonicAlias<"loopnz", "loopne">;
def : MnemonicAlias<"pop", "popw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popf", "popfw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popf", "popfq", "intel">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popfd", "popfl", "att">;
def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In64BitMode]>;
// FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
// all modes. However: "push (addr)" and "push $42" should default to
// pushl/pushq depending on the current mode. Similar for "pop %bx"
def : MnemonicAlias<"push", "pushw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushf", "pushfw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushf", "pushfq", "intel">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushfd", "pushfl", "att">;
def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popad", "popal", "intel">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"popa", "popaw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pusha", "pushaw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popa", "popal", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pusha", "pushal", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"popa", "popaw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"repe", "rep">;
def : MnemonicAlias<"repz", "rep">;
def : MnemonicAlias<"repnz", "repne">;
def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;
// Apply 'ret' behavior to 'retn'
def : MnemonicAlias<"retn", "retw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"retn", "retl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"retn", "retq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"retn", "ret", "intel">;
def : MnemonicAlias<"sal", "shl", "intel">;
def : MnemonicAlias<"salb", "shlb", "att">;
def : MnemonicAlias<"salw", "shlw", "att">;
def : MnemonicAlias<"sall", "shll", "att">;
def : MnemonicAlias<"salq", "shlq", "att">;
def : MnemonicAlias<"smovb", "movsb", "att">;
def : MnemonicAlias<"smovw", "movsw", "att">;
def : MnemonicAlias<"smovl", "movsl", "att">;
def : MnemonicAlias<"smovq", "movsq", "att">;
def : MnemonicAlias<"ud2a", "ud2", "att">;
def : MnemonicAlias<"verrw", "verr", "att">;
// MS recognizes 'xacquire'/'xrelease' as 'acquire'/'release'
def : MnemonicAlias<"acquire", "xacquire", "intel">;
def : MnemonicAlias<"release", "xrelease", "intel">;
// System instruction aliases.
def : MnemonicAlias<"iret", "iretw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"iret", "iretl", "att">, Requires<[Not16BitMode]>;
def : MnemonicAlias<"sysret", "sysretl", "att">;
def : MnemonicAlias<"sysexit", "sysexitl", "att">;
def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lidt", "lidtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lidt", "lidtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sidt", "sidtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sidt", "sidtd", "intel">, Requires<[In32BitMode]>;
// Floating point stack aliases.
def : MnemonicAlias<"fcmovz", "fcmove", "att">;
def : MnemonicAlias<"fcmova", "fcmovnbe", "att">;
def : MnemonicAlias<"fcmovnae", "fcmovb", "att">;
def : MnemonicAlias<"fcmovna", "fcmovbe", "att">;
def : MnemonicAlias<"fcmovae", "fcmovnb", "att">;
def : MnemonicAlias<"fcomip", "fcompi">;
def : MnemonicAlias<"fildq", "fildll", "att">;
def : MnemonicAlias<"fistpq", "fistpll", "att">;
def : MnemonicAlias<"fisttpq", "fisttpll", "att">;
def : MnemonicAlias<"fldcww", "fldcw", "att">;
def : MnemonicAlias<"fnstcww", "fnstcw", "att">;
def : MnemonicAlias<"fnstsww", "fnstsw", "att">;
def : MnemonicAlias<"fucomip", "fucompi">;
def : MnemonicAlias<"fwait", "wait">;
def : MnemonicAlias<"fxsaveq", "fxsave64", "att">;
def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">;
def : MnemonicAlias<"xsaveq", "xsave64", "att">;
def : MnemonicAlias<"xrstorq", "xrstor64", "att">;
def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
def : MnemonicAlias<"xrstorsq", "xrstors64", "att">;
def : MnemonicAlias<"xsavecq", "xsavec64", "att">;
def : MnemonicAlias<"xsavesq", "xsaves64", "att">;
class CondCodeAlias<string Prefix, string Suffix, string OldCond, string NewCond,
string VariantName>
: MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
!strconcat(Prefix, NewCond, Suffix), VariantName>;
/// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
/// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
/// example "setz" -> "sete".
multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
string V = ""> {
def C : CondCodeAlias<Prefix, Suffix, "c", "b", V>; // setc -> setb
def Z : CondCodeAlias<Prefix, Suffix, "z" , "e", V>; // setz -> sete
def NA : CondCodeAlias<Prefix, Suffix, "na", "be", V>; // setna -> setbe
def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae", V>; // setnb -> setae
def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae", V>; // setnc -> setae
def NG : CondCodeAlias<Prefix, Suffix, "ng", "le", V>; // setng -> setle
def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge", V>; // setnl -> setge
def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne", V>; // setnz -> setne
def PE : CondCodeAlias<Prefix, Suffix, "pe", "p", V>; // setpe -> setp
def PO : CondCodeAlias<Prefix, Suffix, "po", "np", V>; // setpo -> setnp
def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b", V>; // setnae -> setb
def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a", V>; // setnbe -> seta
def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l", V>; // setnge -> setl
def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g", V>; // setnle -> setg
}
// Aliases for set<CC>
defm : IntegerCondCodeMnemonicAlias<"set", "">;
// Aliases for j<CC>
defm : IntegerCondCodeMnemonicAlias<"j", "">;
// Aliases for cmov<CC>{w,l,q}
defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
// No size suffix for intel-style asm.
defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
//===----------------------------------------------------------------------===//
// Assembler Instruction Aliases
//===----------------------------------------------------------------------===//
// aad/aam default to base 10 if no operand is specified.
def : InstAlias<"aad", (AAD8i8 10)>, Requires<[Not64BitMode]>;
def : InstAlias<"aam", (AAM8i8 10)>, Requires<[Not64BitMode]>;
// Disambiguate the mem/imm form of bt-without-a-suffix as btl.
// Likewise for btc/btr/bts.
def : InstAlias<"bt\t{$imm, $mem|$mem, $imm}",
(BT32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
def : InstAlias<"btc\t{$imm, $mem|$mem, $imm}",
(BTC32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
def : InstAlias<"btr\t{$imm, $mem|$mem, $imm}",
(BTR32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
def : InstAlias<"bts\t{$imm, $mem|$mem, $imm}",
(BTS32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
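// Illustrative example (not from upstream): a suffix-less form such as
//   bt $3, (%eax)
// is accepted here and matched as "btl $3, (%eax)" (BT32mi8); btc/btr/bts
// behave the same way.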
// clr aliases.
def : InstAlias<"clr{b}\t$reg", (XOR8rr GR8 :$reg, GR8 :$reg), 0>;
def : InstAlias<"clr{w}\t$reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
def : InstAlias<"clr{l}\t$reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
def : InstAlias<"clr{q}\t$reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
// lods aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"lodsb\t$src", (LODSB srcidx8:$src), 0>;
def : InstAlias<"lodsw\t$src", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods{l|d}\t$src", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lodsq\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
def : InstAlias<"lods\t$src", (LODSB srcidx8:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
// stos aliases. Accept the source being omitted because it's implicit in
// the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the source.
def : InstAlias<"stosb\t$dst", (STOSB dstidx8:$dst), 0>;
def : InstAlias<"stosw\t$dst", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos{l|d}\t$dst", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stosq\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
// scas aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"scasb\t$dst", (SCASB dstidx8:$dst), 0>;
def : InstAlias<"scasw\t$dst", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas{l|d}\t$dst", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scasq\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
// cmps aliases. Accept the mnemonic suffix being omitted because it's
// implicit in the destination.
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
// movs aliases. Accept the mnemonic suffix being omitted because it's
// implicit in the destination.
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
// div and idiv aliases for explicit A register.
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m i8mem :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>;
def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r GR8 :$src)>;
def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>;
def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m i8mem :$src)>;
def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>;
def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
// Various unary fpstack operations default to operating on ST1.
// For example, "fxch" -> "fxch %st(1)"
def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>;
def: InstAlias<"fadd", (ADD_FPrST0 ST1), 0>;
def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>;
def : InstAlias<"fsub{r|}p", (SUB_FPrST0 ST1), 0>;
def : InstAlias<"fmul", (MUL_FPrST0 ST1), 0>;
def : InstAlias<"fmulp", (MUL_FPrST0 ST1), 0>;
def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>;
def : InstAlias<"fdiv{r|}p", (DIV_FPrST0 ST1), 0>;
def : InstAlias<"fxch", (XCH_F ST1), 0>;
def : InstAlias<"fcom", (COM_FST0r ST1), 0>;
def : InstAlias<"fcomp", (COMP_FST0r ST1), 0>;
def : InstAlias<"fcomi", (COM_FIr ST1), 0>;
def : InstAlias<"fcompi", (COM_FIPr ST1), 0>;
def : InstAlias<"fucom", (UCOM_Fr ST1), 0>;
def : InstAlias<"fucomp", (UCOM_FPr ST1), 0>;
def : InstAlias<"fucomi", (UCOM_FIr ST1), 0>;
def : InstAlias<"fucompi", (UCOM_FIPr ST1), 0>;
// Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
// For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
// instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// gas.
multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
- def : InstAlias<!strconcat(Mnemonic, "\t{$op, %st(0)|st(0), $op}"),
- (Inst RST:$op), EmitAlias>;
- def : InstAlias<!strconcat(Mnemonic, "\t{%st(0), %st(0)|st(0), st(0)}"),
+ def : InstAlias<!strconcat(Mnemonic, "\t$op"),
+ (Inst RSTi:$op), EmitAlias>;
+ def : InstAlias<!strconcat(Mnemonic, "\t{%st, %st|st, st}"),
(Inst ST0), EmitAlias>;
}
-defm : FpUnaryAlias<"fadd", ADD_FST0r>;
+defm : FpUnaryAlias<"fadd", ADD_FST0r, 0>;
defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
-defm : FpUnaryAlias<"fsub", SUB_FST0r>;
-defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0>;
-defm : FpUnaryAlias<"fsubr", SUBR_FST0r>;
-defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0>;
-defm : FpUnaryAlias<"fmul", MUL_FST0r>;
-defm : FpUnaryAlias<"fmulp", MUL_FPrST0>;
-defm : FpUnaryAlias<"fdiv", DIV_FST0r>;
-defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0>;
-defm : FpUnaryAlias<"fdivr", DIVR_FST0r>;
-defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0>;
+defm : FpUnaryAlias<"fsub", SUB_FST0r, 0>;
+defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0, 0>;
+defm : FpUnaryAlias<"fsubr", SUBR_FST0r, 0>;
+defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0, 0>;
+defm : FpUnaryAlias<"fmul", MUL_FST0r, 0>;
+defm : FpUnaryAlias<"fmulp", MUL_FPrST0, 0>;
+defm : FpUnaryAlias<"fdiv", DIV_FST0r, 0>;
+defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0, 0>;
+defm : FpUnaryAlias<"fdivr", DIVR_FST0r, 0>;
+defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0, 0>;
defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
-defm : FpUnaryAlias<"fcompi", COM_FIPr>;
-defm : FpUnaryAlias<"fucompi", UCOM_FIPr>;
+defm : FpUnaryAlias<"fcompi", COM_FIPr, 0>;
+defm : FpUnaryAlias<"fucompi", UCOM_FIPr, 0>;
-// Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they
+// Handle "f{mulp,addp} $op, %st(0)" the same as "f{mulp,addp} $op", since they
// commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
// solely because gas supports it.
-def : InstAlias<"faddp\t{%st(0), $op|$op, st(0)}", (ADD_FPrST0 RST:$op), 0>;
-def : InstAlias<"fmulp\t{%st(0), $op|$op, st(0)}", (MUL_FPrST0 RST:$op)>;
-def : InstAlias<"fsub{|r}p\t{%st(0), $op|$op, st(0)}", (SUBR_FPrST0 RST:$op)>;
-def : InstAlias<"fsub{r|}p\t{%st(0), $op|$op, st(0)}", (SUB_FPrST0 RST:$op)>;
-def : InstAlias<"fdiv{|r}p\t{%st(0), $op|$op, st(0)}", (DIVR_FPrST0 RST:$op)>;
-def : InstAlias<"fdiv{r|}p\t{%st(0), $op|$op, st(0)}", (DIV_FPrST0 RST:$op)>;
+def : InstAlias<"faddp\t{$op, %st|st, $op}", (ADD_FPrST0 RSTi:$op), 0>;
+def : InstAlias<"fmulp\t{$op, %st|st, $op}", (MUL_FPrST0 RSTi:$op), 0>;
+def : InstAlias<"fsub{|r}p\t{$op, %st|st, $op}", (SUBR_FPrST0 RSTi:$op), 0>;
+def : InstAlias<"fsub{r|}p\t{$op, %st|st, $op}", (SUB_FPrST0 RSTi:$op), 0>;
+def : InstAlias<"fdiv{|r}p\t{$op, %st|st, $op}", (DIVR_FPrST0 RSTi:$op), 0>;
+def : InstAlias<"fdiv{r|}p\t{$op, %st|st, $op}", (DIV_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fnstsw" , (FNSTSW16r), 0>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"lcall\t{*}$dst", (FARCALL32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst", (FARJMP32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"lcall\t{*}$dst", (FARCALL16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst", (FARJMP16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP64m i64mem:$dst), 0, "att">, Requires<[In64BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP32m i32mem:$dst), 0, "att">, Requires<[In32BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP16m i16mem:$dst), 0, "att">, Requires<[In16BitMode]>;
// "imul <imm>, B" is an alias for "imul <imm>, B, B".
def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
// ins aliases. Accept the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0, "intel">;
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0, "intel">;
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0, "intel">;
// outs aliases. Accept the mnemonic suffix being omitted because it's implicit
// in the source.
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0, "intel">;
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0, "intel">;
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0, "intel">;
// inb %dx -> inb %al, %dx
def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
// jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
def : InstAlias<"call\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"jmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
// Match 'movq <largeimm>, <reg>' as an alias for movabsq.
def : InstAlias<"mov{q}\t{$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
// Match 'movd GR64, MMX' as an alias for movq to be compatible with gas,
// which supports this due to an old AMD documentation bug when 64-bit mode was
// created.
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
(MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
(MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsx aliases
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0, "att">;
// movzx aliases
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr8 GR64:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr16 GR64:$dst, GR16:$src), 0, "att">;
// Note: No GR32->GR64 movzx form.
// outb %dx -> outb %al, %dx
def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>;
def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
def : InstAlias<"outb\t$port", (OUT8ir u8imm:$port), 0>;
def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;
// 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
// effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
// errors, since its encoding is the most compact.
def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
// shld/shrd op,op -> shld op, op, CL
def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;
def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
/* FIXME: This is disabled because the asm matcher is currently incapable of
* matching a fixed immediate like $1.
// "shl X, $1" is an alias for "shl X".
multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
(!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
(!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
(!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
(!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
(!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
(!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
(!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
(!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
}
defm : ShiftRotateByOneAlias<"rcl", "RCL">;
defm : ShiftRotateByOneAlias<"rcr", "RCR">;
defm : ShiftRotateByOneAlias<"rol", "ROL">;
defm : ShiftRotateByOneAlias<"ror", "ROR">;
FIXME */
// test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
def : InstAlias<"test{b}\t{$mem, $val|$val, $mem}",
(TEST8mr i8mem :$mem, GR8 :$val), 0>;
def : InstAlias<"test{w}\t{$mem, $val|$val, $mem}",
(TEST16mr i16mem:$mem, GR16:$val), 0>;
def : InstAlias<"test{l}\t{$mem, $val|$val, $mem}",
(TEST32mr i32mem:$mem, GR32:$val), 0>;
def : InstAlias<"test{q}\t{$mem, $val|$val, $mem}",
(TEST64mr i64mem:$mem, GR64:$val), 0>;
// xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
(XCHG8rm GR8 :$val, i8mem :$mem), 0>;
def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
(XCHG16rm GR16:$val, i16mem:$mem), 0>;
def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
(XCHG32rm GR32:$val, i32mem:$mem), 0>;
def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
(XCHG64rm GR64:$val, i64mem:$mem), 0>;
// xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src), 0>;
def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
// In 64-bit mode, xchg %eax, %eax can't be encoded with the 0x90 opcode we
// would get by default because it's defined as NOP. But xchg %eax, %eax
// implicitly zeroes the upper 32 bits, so alias to the longer encoding.
def : InstAlias<"xchg{l}\t{%eax, %eax|eax, eax}",
(XCHG32rr EAX, EAX), 0>, Requires<[In64BitMode]>;
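// Illustrative (not from upstream): in 64-bit mode this makes
//   xchg %eax, %eax
// assemble as the two-byte 87 C0 form rather than the one-byte 90 (NOP),
// preserving the upper-32-bit zeroing noted above.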
// xchg %rax, %rax is a nop in x86-64 and can be encoded as such. Without this
// we emit an unneeded REX.w prefix.
def : InstAlias<"xchg{q}\t{%rax, %rax|rax, rax}", (NOOP), 0>;
// These aliases exist to get the parser to prioritize matching 8-bit
// immediate encodings over matching the implicit ax/eax/rax encodings. By
// explicitly mentioning the A register here, these entries will be ordered
// first due to the more explicit immediate type.
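// Illustrative example (not from upstream): for "add $4, %ax" the entries
// below make the matcher prefer the sign-extended imm8 form (ADD16ri8,
// opcode 0x83 /0) over the implicit-AX imm16 form (opcode 0x05).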
def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}", (OR16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}", (OR32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}", (OR64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;
Index: vendor/llvm/dist-release_80/lib/Target/X86/X86RegisterInfo.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/X86RegisterInfo.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/X86RegisterInfo.cpp (revision 344166)
@@ -1,762 +1,765 @@
//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//
#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"
static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
X86RegisterInfo::X86RegisterInfo(const Triple &TT)
: X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
X86_MC::getDwarfRegFlavour(TT, false),
X86_MC::getDwarfRegFlavour(TT, true),
(TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
X86_MC::initLLVMToSEHAndCVRegMapping(this);
// Cache some information.
Is64Bit = TT.isArch64Bit();
IsWin64 = Is64Bit && TT.isOSWindows();
// Use a callee-saved register as the base pointer. These registers must
// not conflict with any ABI requirements. For example, in 32-bit mode PIC
// requires the GOT pointer to be in EBX before function calls via the PLT.
if (Is64Bit) {
SlotSize = 8;
// This matches the simplified 32-bit pointer code in the data layout
// computation.
// FIXME: Should use the data layout?
bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
} else {
SlotSize = 4;
StackPtr = X86::ESP;
FramePtr = X86::EBP;
BasePtr = X86::ESI;
}
}
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
// ExecutionDomainFix, BreakFalseDeps and PostRAScheduler require liveness.
return true;
}
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
return getEncodingValue(i);
}
const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
unsigned Idx) const {
// The sub_8bit sub-register index is more constrained in 32-bit mode.
// It behaves just like the sub_8bit_hi index.
if (!Is64Bit && Idx == X86::sub_8bit)
Idx = X86::sub_8bit_hi;
// Forward to TableGen's default version.
return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
const TargetRegisterClass *B,
unsigned SubIdx) const {
// The sub_8bit sub-register index is more constrained in 32-bit mode.
if (!Is64Bit && SubIdx == X86::sub_8bit) {
A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
if (!A)
return nullptr;
}
return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}
const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
const MachineFunction &MF) const {
// Don't allow super-classes of GR8_NOREX. This class is only used after
// extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
// to the full GR8 register class in 64-bit mode, so we cannot allow the
// register class inflation.
//
// The GR8_NOREX class is always used in a way that won't be constrained to a
// sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
// full GR8 class.
if (RC == &X86::GR8_NOREXRegClass)
return RC;
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
const TargetRegisterClass *Super = RC;
TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
do {
switch (Super->getID()) {
case X86::FR32RegClassID:
case X86::FR64RegClassID:
// If AVX-512 isn't supported we should only inflate to these classes.
if (!Subtarget.hasAVX512() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::VR128RegClassID:
case X86::VR256RegClassID:
// If VLX isn't supported we should only inflate to these classes.
if (!Subtarget.hasVLX() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::VR128XRegClassID:
case X86::VR256XRegClassID:
// If VLX isn't supported we shouldn't inflate to these classes.
if (Subtarget.hasVLX() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::FR32XRegClassID:
case X86::FR64XRegClassID:
// If AVX-512 isn't supported we shouldn't inflate to these classes.
if (Subtarget.hasAVX512() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::GR8RegClassID:
case X86::GR16RegClassID:
case X86::GR32RegClassID:
case X86::GR64RegClassID:
case X86::RFP32RegClassID:
case X86::RFP64RegClassID:
case X86::RFP80RegClassID:
case X86::VR512RegClassID:
// Don't return a super-class that would shrink the spill size.
// That can happen with the vector and float classes.
if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
}
Super = *I++;
} while (Super);
return RC;
}
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
unsigned Kind) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
switch (Kind) {
default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
case 0: // Normal GPRs.
if (Subtarget.isTarget64BitLP64())
return &X86::GR64RegClass;
// If the target is 64-bit but we have been told to use 32-bit addresses,
// we can still use 64-bit registers as long as we know the high bits
// are zeros.
// Reflect that in the returned register class.
if (Is64Bit) {
// When the target also allows a 64-bit frame pointer and we do have a
// frame, it is fine to use it for the address accesses as well.
const X86FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
? &X86::LOW32_ADDR_ACCESS_RBPRegClass
: &X86::LOW32_ADDR_ACCESSRegClass;
}
return &X86::GR32RegClass;
case 1: // Normal GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOSPRegClass;
// NOSP does not contain RIP, so no special case here.
return &X86::GR32_NOSPRegClass;
case 2: // NOREX GPRs.
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOREXRegClass;
return &X86::GR32_NOREXRegClass;
case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOREX_NOSPRegClass;
// NOSP does not contain RIP, so no special case here.
return &X86::GR32_NOREX_NOSPRegClass;
case 4: // Available for tailcall (not callee-saved GPRs).
return getGPRsForTailCall(MF);
}
}
const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
const Function &F = MF.getFunction();
if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
return &X86::GR64_TCW64RegClass;
else if (Is64Bit)
return &X86::GR64_TCRegClass;
bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
if (hasHipeCC)
return &X86::GR32RegClass;
return &X86::GR32_TCRegClass;
}
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
if (RC == &X86::CCRRegClass) {
if (Is64Bit)
return &X86::GR64RegClass;
else
return &X86::GR32RegClass;
}
return RC;
}
unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
const X86FrameLowering *TFI = getFrameLowering(MF);
unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
switch (RC->getID()) {
default:
return 0;
case X86::GR32RegClassID:
return 4 - FPDiff;
case X86::GR64RegClassID:
return 12 - FPDiff;
case X86::VR128RegClassID:
return Is64Bit ? 10 : 4;
case X86::VR64RegClassID:
return 4;
}
}
const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "MachineFunction required");
const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
const Function &F = MF->getFunction();
bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
bool CallsEHReturn = MF->callsEHReturn();
CallingConv::ID CC = F.getCallingConv();
// If the function carries the NoCallerSavedRegisters attribute, switch to the
// X86_INTR calling convention because it provides the CSR list we need.
if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
CC = CallingConv::X86_INTR;
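// Illustrative (not from upstream): this is the path taken for functions
// declared in C/C++ with __attribute__((no_caller_saved_registers)); they
// then pick up the X86_INTR save list selected in the switch below.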
switch (CC) {
case CallingConv::GHC:
case CallingConv::HiPE:
return CSR_NoRegs_SaveList;
case CallingConv::AnyReg:
if (HasAVX)
return CSR_64_AllRegs_AVX_SaveList;
return CSR_64_AllRegs_SaveList;
case CallingConv::PreserveMost:
return CSR_64_RT_MostRegs_SaveList;
case CallingConv::PreserveAll:
if (HasAVX)
return CSR_64_RT_AllRegs_AVX_SaveList;
return CSR_64_RT_AllRegs_SaveList;
case CallingConv::CXX_FAST_TLS:
if (Is64Bit)
return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
break;
case CallingConv::Intel_OCL_BI: {
if (HasAVX512 && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
if (HasAVX512 && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX512_SaveList;
if (HasAVX && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
if (HasAVX && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX_SaveList;
if (!HasAVX && !IsWin64 && Is64Bit)
return CSR_64_Intel_OCL_BI_SaveList;
break;
}
case CallingConv::HHVM:
return CSR_64_HHVM_SaveList;
case CallingConv::X86_RegCall:
if (Is64Bit) {
if (IsWin64) {
return (HasSSE ? CSR_Win64_RegCall_SaveList :
CSR_Win64_RegCall_NoSSE_SaveList);
} else {
return (HasSSE ? CSR_SysV64_RegCall_SaveList :
CSR_SysV64_RegCall_NoSSE_SaveList);
}
} else {
return (HasSSE ? CSR_32_RegCall_SaveList :
CSR_32_RegCall_NoSSE_SaveList);
}
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_SaveList;
break;
case CallingConv::Win64:
if (!HasSSE)
return CSR_Win64_NoSSE_SaveList;
return CSR_Win64_SaveList;
case CallingConv::X86_64_SysV:
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
return CSR_64_SaveList;
case CallingConv::X86_INTR:
if (Is64Bit) {
if (HasAVX512)
return CSR_64_AllRegs_AVX512_SaveList;
if (HasAVX)
return CSR_64_AllRegs_AVX_SaveList;
if (HasSSE)
return CSR_64_AllRegs_SaveList;
return CSR_64_AllRegs_NoSSE_SaveList;
} else {
if (HasAVX512)
return CSR_32_AllRegs_AVX512_SaveList;
if (HasAVX)
return CSR_32_AllRegs_AVX_SaveList;
if (HasSSE)
return CSR_32_AllRegs_SSE_SaveList;
return CSR_32_AllRegs_SaveList;
}
default:
break;
}
if (Is64Bit) {
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_SaveList
: CSR_64_SwiftError_SaveList;
if (IsWin64)
return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
return CSR_64_SaveList;
}
return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
}
const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
return nullptr;
}
const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
switch (CC) {
case CallingConv::GHC:
case CallingConv::HiPE:
return CSR_NoRegs_RegMask;
case CallingConv::AnyReg:
if (HasAVX)
return CSR_64_AllRegs_AVX_RegMask;
return CSR_64_AllRegs_RegMask;
case CallingConv::PreserveMost:
return CSR_64_RT_MostRegs_RegMask;
case CallingConv::PreserveAll:
if (HasAVX)
return CSR_64_RT_AllRegs_AVX_RegMask;
return CSR_64_RT_AllRegs_RegMask;
case CallingConv::CXX_FAST_TLS:
if (Is64Bit)
return CSR_64_TLS_Darwin_RegMask;
break;
case CallingConv::Intel_OCL_BI: {
if (HasAVX512 && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
if (HasAVX512 && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX512_RegMask;
if (HasAVX && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
if (HasAVX && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX_RegMask;
if (!HasAVX && !IsWin64 && Is64Bit)
return CSR_64_Intel_OCL_BI_RegMask;
break;
}
case CallingConv::HHVM:
return CSR_64_HHVM_RegMask;
case CallingConv::X86_RegCall:
if (Is64Bit) {
if (IsWin64) {
return (HasSSE ? CSR_Win64_RegCall_RegMask :
CSR_Win64_RegCall_NoSSE_RegMask);
} else {
return (HasSSE ? CSR_SysV64_RegCall_RegMask :
CSR_SysV64_RegCall_NoSSE_RegMask);
}
} else {
return (HasSSE ? CSR_32_RegCall_RegMask :
CSR_32_RegCall_NoSSE_RegMask);
}
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_RegMask;
break;
case CallingConv::Win64:
return CSR_Win64_RegMask;
case CallingConv::X86_64_SysV:
return CSR_64_RegMask;
case CallingConv::X86_INTR:
if (Is64Bit) {
if (HasAVX512)
return CSR_64_AllRegs_AVX512_RegMask;
if (HasAVX)
return CSR_64_AllRegs_AVX_RegMask;
if (HasSSE)
return CSR_64_AllRegs_RegMask;
return CSR_64_AllRegs_NoSSE_RegMask;
} else {
if (HasAVX512)
return CSR_32_AllRegs_AVX512_RegMask;
if (HasAVX)
return CSR_32_AllRegs_AVX_RegMask;
if (HasSSE)
return CSR_32_AllRegs_SSE_RegMask;
return CSR_32_AllRegs_RegMask;
}
default:
break;
}
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
// callsEHReturn().
if (Is64Bit) {
const Function &F = MF.getFunction();
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
}
return CSR_32_RegMask;
}
const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
return CSR_NoRegs_RegMask;
}
const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
return CSR_64_TLS_Darwin_RegMask;
}
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
const X86FrameLowering *TFI = getFrameLowering(MF);
+ // Set the floating point control register as reserved.
+ Reserved.set(X86::FPCW);
+
// Set the stack-pointer register and its aliases as reserved.
for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
// Set the Shadow Stack Pointer as reserved.
Reserved.set(X86::SSP);
// Set the instruction pointer register and its aliases as reserved.
for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
// Set the frame-pointer register and its aliases as reserved if needed.
if (TFI->hasFP(MF)) {
for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
}
// Set the base-pointer register and its aliases as reserved if needed.
if (hasBasePointer(MF)) {
CallingConv::ID CC = MF.getFunction().getCallingConv();
const uint32_t *RegMask = getCallPreservedMask(MF, CC);
if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
report_fatal_error(
"Stack realignment in presence of dynamic allocas is not supported with"
"this calling convention.");
unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
I.isValid(); ++I)
Reserved.set(*I);
}
// Mark the segment registers as reserved.
Reserved.set(X86::CS);
Reserved.set(X86::SS);
Reserved.set(X86::DS);
Reserved.set(X86::ES);
Reserved.set(X86::FS);
Reserved.set(X86::GS);
// Mark the floating point stack registers as reserved.
for (unsigned n = 0; n != 8; ++n)
Reserved.set(X86::ST0 + n);
// Reserve the registers that only exist in 64-bit mode.
if (!Is64Bit) {
// These 8-bit registers are part of the x86-64 extension even though their
// super-registers are the old 32-bit registers.
Reserved.set(X86::SIL);
Reserved.set(X86::DIL);
Reserved.set(X86::BPL);
Reserved.set(X86::SPL);
Reserved.set(X86::SIH);
Reserved.set(X86::DIH);
Reserved.set(X86::BPH);
Reserved.set(X86::SPH);
for (unsigned n = 0; n != 8; ++n) {
// R8, R9, ...
for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
// XMM8, XMM9, ...
for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
}
}
if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
for (unsigned n = 16; n != 32; ++n) {
for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
}
}
assert(checkAllSuperRegsMarked(Reserved,
{X86::SIL, X86::DIL, X86::BPL, X86::SPL,
X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
return Reserved;
}
void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
// Check if the EFLAGS register is marked as live-out. This shouldn't happen,
// because the calling convention defines the EFLAGS register as NOT
// preserved.
//
// Unfortunately, EFLAGS shows up as live-out after branch folding. Add an
// assert to track this, and clear the register afterwards to avoid
// unnecessary crashes during release builds.
assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
"EFLAGS are not live-out from a patchpoint.");
// Also clean other registers that don't need preserving (IP).
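// Note (descriptive comment, not in the upstream source): the live-out mask
// packs one bit per register into 32-bit words, so word Reg / 32 holds bit
// Reg % 32, and clearing that bit drops Reg from the live-out set.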
for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
Mask[Reg / 32] &= ~(1U << (Reg % 32));
}
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
static bool CantUseSP(const MachineFrameInfo &MFI) {
return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
}
bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
if (!EnableBasePointer)
return false;
// When we need stack realignment, we can't address the stack from the frame
// pointer. When we have dynamic allocas or stack-adjusting inline asm, we
// can't address variables from the stack pointer. MS inline asm can
// reference locals while also adjusting the stack pointer. When we can't
// use both the SP and the FP, we need a separate base pointer register.
bool CantUseFP = needsStackRealignment(MF);
return CantUseFP && CantUseSP(MFI);
}
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
if (!TargetRegisterInfo::canRealignStack(MF))
return false;
const MachineFrameInfo &MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
// Stack realignment requires a frame pointer. If we already started
// register allocation with frame pointer elimination, it is too late now.
if (!MRI->canReserveReg(FramePtr))
return false;
// If a base pointer is necessary, check that it isn't too late to reserve
// it.
if (CantUseSP(MFI))
return MRI->canReserveReg(BasePtr);
return true;
}
bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
unsigned Reg, int &FrameIdx) const {
// Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
// this function is neither used nor tested.
llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}
// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
// TODO: In this case we should be really trying first to entirely eliminate
// this instruction which is a plain copy.
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
MachineInstr &MI = *II;
unsigned Opc = II->getOpcode();
// Check if this is a LEA of the form 'lea (%esp), %ebx'
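// Assumption for readability (not in the upstream source): the memory form is
// taken to follow the usual x86 operand layout, i.e. operand 0 is the
// destination, 1 the base register, 2 the scale (must be 1), 3 the index
// register (must be absent), 4 the displacement (must be 0) and 5 the segment
// register (must be absent).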
if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
MI.getOperand(2).getImm() != 1 ||
MI.getOperand(3).getReg() != X86::NoRegister ||
MI.getOperand(4).getImm() != 0 ||
MI.getOperand(5).getReg() != X86::NoRegister)
return false;
unsigned BasePtr = MI.getOperand(1).getReg();
// In X32 mode, ensure the base pointer is a 32-bit operand, so the LEA will
// be replaced with a 32-bit operand MOV, which zero-extends into the upper
// 32 bits of the super-register.
if (Opc == X86::LEA64_32r)
BasePtr = getX86SubSuperRegister(BasePtr, 32);
unsigned NewDestReg = MI.getOperand(0).getReg();
const X86InstrInfo *TII =
MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
MI.getOperand(1).isKill());
MI.eraseFromParent();
return true;
}
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
const X86FrameLowering *TFI = getFrameLowering(MF);
int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
// Determine base register and offset.
int FIOffset;
unsigned BasePtr;
if (MI.isReturn()) {
assert((!needsStackRealignment(MF) ||
MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
"Return instruction can only reference SP relative frame objects");
FIOffset = TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0);
} else {
FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr);
}
// LOCAL_ESCAPE uses a single offset, with no register. It only works in the
// simple FP case, and doesn't work with stack realignment. On 32-bit, the
// offset is from the traditional base pointer location. On 64-bit, the
// offset is from the SP at the end of the prologue, not the FP location. This
// matches the behavior of llvm.frameaddress.
unsigned Opc = MI.getOpcode();
if (Opc == TargetOpcode::LOCAL_ESCAPE) {
MachineOperand &FI = MI.getOperand(FIOperandNum);
FI.ChangeToImmediate(FIOffset);
return;
}
// For LEA64_32r, when BasePtr is 32 bits (X32) we can use the full-size
// 64-bit register as the source operand; the semantics are the same and the
// destination is still 32 bits. This saves one byte per LEA since the 0x67
// prefix is avoided.
// Don't change BasePtr since it is used later for stack adjustment.
unsigned MachineBasePtr = BasePtr;
if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);
// This must be part of a four operand memory reference. Replace the
// FrameIndex with base register. Add an offset to the offset.
MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);
if (BasePtr == StackPtr)
FIOffset += SPAdj;
// The frame index format for stackmaps and patchpoints is different from the
// X86 format. It only has a FI and an offset.
if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
assert(BasePtr == FramePtr && "Expected the FP as base register");
int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
return;
}
if (MI.getOperand(FIOperandNum+3).isImm()) {
// Offset is a 32-bit integer.
int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
int Offset = FIOffset + Imm;
assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
"Requesting 64-bit offset in 32-bit immediate!");
if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
} else {
// Offset is symbolic. This is extremely rare.
uint64_t Offset = FIOffset +
(uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
MI.getOperand(FIOperandNum + 3).setOffset(Offset);
}
}
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const X86FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
unsigned FrameReg = getFrameRegister(MF);
if (Subtarget.isTarget64BitILP32())
FrameReg = getX86SubSuperRegister(FrameReg, 32);
return FrameReg;
}
Index: vendor/llvm/dist-release_80/lib/Target/X86/X86RegisterInfo.td
===================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/X86RegisterInfo.td (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Target/X86/X86RegisterInfo.td (revision 344166)
@@ -1,608 +1,614 @@
//===- X86RegisterInfo.td - Describe the X86 Register File --*- tablegen -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 Register file, defining the registers themselves,
// aliases between the registers, and the register classes built out of the
// registers.
//
//===----------------------------------------------------------------------===//
class X86Reg<string n, bits<16> Enc, list<Register> subregs = []> : Register<n> {
let Namespace = "X86";
let HWEncoding = Enc;
let SubRegs = subregs;
}
// Subregister indices.
let Namespace = "X86" in {
def sub_8bit : SubRegIndex<8>;
def sub_8bit_hi : SubRegIndex<8, 8>;
def sub_8bit_hi_phony : SubRegIndex<8, 8>;
def sub_16bit : SubRegIndex<16>;
def sub_16bit_hi : SubRegIndex<16, 16>;
def sub_32bit : SubRegIndex<32>;
def sub_xmm : SubRegIndex<128>;
def sub_ymm : SubRegIndex<256>;
}
//===----------------------------------------------------------------------===//
// Register definitions...
//
// In the register alias definitions below, we define which registers alias
// which others. We only specify which registers the small registers alias,
// because the register file generator is smart enough to figure out that
// AL aliases AX if we tell it that AX aliases AL (for example).
// Dwarf numbering is different for 32-bit and 64-bit, and there are
// variations by target as well. Currently the first entry is for X86-64, the
// second for EH on X86-32/Darwin, and the third is the 'generic' one
// (X86-32/Linux and debug information on X86-32/Darwin).
// 8-bit registers
// Low registers
def AL : X86Reg<"al", 0>;
def DL : X86Reg<"dl", 2>;
def CL : X86Reg<"cl", 1>;
def BL : X86Reg<"bl", 3>;
// High registers. On x86-64, these cannot be used in any instruction
// with a REX prefix.
def AH : X86Reg<"ah", 4>;
def DH : X86Reg<"dh", 6>;
def CH : X86Reg<"ch", 5>;
def BH : X86Reg<"bh", 7>;
// X86-64 only, requires REX.
let CostPerUse = 1 in {
def SIL : X86Reg<"sil", 6>;
def DIL : X86Reg<"dil", 7>;
def BPL : X86Reg<"bpl", 5>;
def SPL : X86Reg<"spl", 4>;
def R8B : X86Reg<"r8b", 8>;
def R9B : X86Reg<"r9b", 9>;
def R10B : X86Reg<"r10b", 10>;
def R11B : X86Reg<"r11b", 11>;
def R12B : X86Reg<"r12b", 12>;
def R13B : X86Reg<"r13b", 13>;
def R14B : X86Reg<"r14b", 14>;
def R15B : X86Reg<"r15b", 15>;
}
let isArtificial = 1 in {
// High byte of the low 16 bits of the super-register:
def SIH : X86Reg<"", -1>;
def DIH : X86Reg<"", -1>;
def BPH : X86Reg<"", -1>;
def SPH : X86Reg<"", -1>;
def R8BH : X86Reg<"", -1>;
def R9BH : X86Reg<"", -1>;
def R10BH : X86Reg<"", -1>;
def R11BH : X86Reg<"", -1>;
def R12BH : X86Reg<"", -1>;
def R13BH : X86Reg<"", -1>;
def R14BH : X86Reg<"", -1>;
def R15BH : X86Reg<"", -1>;
// High word of the low 32 bits of the super-register:
def HAX : X86Reg<"", -1>;
def HDX : X86Reg<"", -1>;
def HCX : X86Reg<"", -1>;
def HBX : X86Reg<"", -1>;
def HSI : X86Reg<"", -1>;
def HDI : X86Reg<"", -1>;
def HBP : X86Reg<"", -1>;
def HSP : X86Reg<"", -1>;
def HIP : X86Reg<"", -1>;
def R8WH : X86Reg<"", -1>;
def R9WH : X86Reg<"", -1>;
def R10WH : X86Reg<"", -1>;
def R11WH : X86Reg<"", -1>;
def R12WH : X86Reg<"", -1>;
def R13WH : X86Reg<"", -1>;
def R14WH : X86Reg<"", -1>;
def R15WH : X86Reg<"", -1>;
}
// 16-bit registers
let SubRegIndices = [sub_8bit, sub_8bit_hi], CoveredBySubRegs = 1 in {
def AX : X86Reg<"ax", 0, [AL,AH]>;
def DX : X86Reg<"dx", 2, [DL,DH]>;
def CX : X86Reg<"cx", 1, [CL,CH]>;
def BX : X86Reg<"bx", 3, [BL,BH]>;
}
let SubRegIndices = [sub_8bit, sub_8bit_hi_phony], CoveredBySubRegs = 1 in {
def SI : X86Reg<"si", 6, [SIL,SIH]>;
def DI : X86Reg<"di", 7, [DIL,DIH]>;
def BP : X86Reg<"bp", 5, [BPL,BPH]>;
def SP : X86Reg<"sp", 4, [SPL,SPH]>;
}
def IP : X86Reg<"ip", 0>;
// X86-64 only, requires REX.
let SubRegIndices = [sub_8bit, sub_8bit_hi_phony], CostPerUse = 1,
CoveredBySubRegs = 1 in {
def R8W : X86Reg<"r8w", 8, [R8B,R8BH]>;
def R9W : X86Reg<"r9w", 9, [R9B,R9BH]>;
def R10W : X86Reg<"r10w", 10, [R10B,R10BH]>;
def R11W : X86Reg<"r11w", 11, [R11B,R11BH]>;
def R12W : X86Reg<"r12w", 12, [R12B,R12BH]>;
def R13W : X86Reg<"r13w", 13, [R13B,R13BH]>;
def R14W : X86Reg<"r14w", 14, [R14B,R14BH]>;
def R15W : X86Reg<"r15w", 15, [R15B,R15BH]>;
}
// 32-bit registers
let SubRegIndices = [sub_16bit, sub_16bit_hi], CoveredBySubRegs = 1 in {
def EAX : X86Reg<"eax", 0, [AX, HAX]>, DwarfRegNum<[-2, 0, 0]>;
def EDX : X86Reg<"edx", 2, [DX, HDX]>, DwarfRegNum<[-2, 2, 2]>;
def ECX : X86Reg<"ecx", 1, [CX, HCX]>, DwarfRegNum<[-2, 1, 1]>;
def EBX : X86Reg<"ebx", 3, [BX, HBX]>, DwarfRegNum<[-2, 3, 3]>;
def ESI : X86Reg<"esi", 6, [SI, HSI]>, DwarfRegNum<[-2, 6, 6]>;
def EDI : X86Reg<"edi", 7, [DI, HDI]>, DwarfRegNum<[-2, 7, 7]>;
def EBP : X86Reg<"ebp", 5, [BP, HBP]>, DwarfRegNum<[-2, 4, 5]>;
def ESP : X86Reg<"esp", 4, [SP, HSP]>, DwarfRegNum<[-2, 5, 4]>;
def EIP : X86Reg<"eip", 0, [IP, HIP]>, DwarfRegNum<[-2, 8, 8]>;
}
// X86-64 only, requires REX
let SubRegIndices = [sub_16bit, sub_16bit_hi], CostPerUse = 1,
CoveredBySubRegs = 1 in {
def R8D : X86Reg<"r8d", 8, [R8W,R8WH]>;
def R9D : X86Reg<"r9d", 9, [R9W,R9WH]>;
def R10D : X86Reg<"r10d", 10, [R10W,R10WH]>;
def R11D : X86Reg<"r11d", 11, [R11W,R11WH]>;
def R12D : X86Reg<"r12d", 12, [R12W,R12WH]>;
def R13D : X86Reg<"r13d", 13, [R13W,R13WH]>;
def R14D : X86Reg<"r14d", 14, [R14W,R14WH]>;
def R15D : X86Reg<"r15d", 15, [R15W,R15WH]>;
}
// 64-bit registers, X86-64 only
let SubRegIndices = [sub_32bit] in {
def RAX : X86Reg<"rax", 0, [EAX]>, DwarfRegNum<[0, -2, -2]>;
def RDX : X86Reg<"rdx", 2, [EDX]>, DwarfRegNum<[1, -2, -2]>;
def RCX : X86Reg<"rcx", 1, [ECX]>, DwarfRegNum<[2, -2, -2]>;
def RBX : X86Reg<"rbx", 3, [EBX]>, DwarfRegNum<[3, -2, -2]>;
def RSI : X86Reg<"rsi", 6, [ESI]>, DwarfRegNum<[4, -2, -2]>;
def RDI : X86Reg<"rdi", 7, [EDI]>, DwarfRegNum<[5, -2, -2]>;
def RBP : X86Reg<"rbp", 5, [EBP]>, DwarfRegNum<[6, -2, -2]>;
def RSP : X86Reg<"rsp", 4, [ESP]>, DwarfRegNum<[7, -2, -2]>;
// These also require REX.
let CostPerUse = 1 in {
def R8 : X86Reg<"r8", 8, [R8D]>, DwarfRegNum<[ 8, -2, -2]>;
def R9 : X86Reg<"r9", 9, [R9D]>, DwarfRegNum<[ 9, -2, -2]>;
def R10 : X86Reg<"r10", 10, [R10D]>, DwarfRegNum<[10, -2, -2]>;
def R11 : X86Reg<"r11", 11, [R11D]>, DwarfRegNum<[11, -2, -2]>;
def R12 : X86Reg<"r12", 12, [R12D]>, DwarfRegNum<[12, -2, -2]>;
def R13 : X86Reg<"r13", 13, [R13D]>, DwarfRegNum<[13, -2, -2]>;
def R14 : X86Reg<"r14", 14, [R14D]>, DwarfRegNum<[14, -2, -2]>;
def R15 : X86Reg<"r15", 15, [R15D]>, DwarfRegNum<[15, -2, -2]>;
def RIP : X86Reg<"rip", 0, [EIP]>, DwarfRegNum<[16, -2, -2]>;
}}
// MMX Registers. These are actually aliased to ST0 .. ST7
def MM0 : X86Reg<"mm0", 0>, DwarfRegNum<[41, 29, 29]>;
def MM1 : X86Reg<"mm1", 1>, DwarfRegNum<[42, 30, 30]>;
def MM2 : X86Reg<"mm2", 2>, DwarfRegNum<[43, 31, 31]>;
def MM3 : X86Reg<"mm3", 3>, DwarfRegNum<[44, 32, 32]>;
def MM4 : X86Reg<"mm4", 4>, DwarfRegNum<[45, 33, 33]>;
def MM5 : X86Reg<"mm5", 5>, DwarfRegNum<[46, 34, 34]>;
def MM6 : X86Reg<"mm6", 6>, DwarfRegNum<[47, 35, 35]>;
def MM7 : X86Reg<"mm7", 7>, DwarfRegNum<[48, 36, 36]>;
// Pseudo Floating Point registers
def FP0 : X86Reg<"fp0", 0>;
def FP1 : X86Reg<"fp1", 0>;
def FP2 : X86Reg<"fp2", 0>;
def FP3 : X86Reg<"fp3", 0>;
def FP4 : X86Reg<"fp4", 0>;
def FP5 : X86Reg<"fp5", 0>;
def FP6 : X86Reg<"fp6", 0>;
def FP7 : X86Reg<"fp7", 0>;
// XMM Registers, used by the various SSE instruction set extensions.
def XMM0: X86Reg<"xmm0", 0>, DwarfRegNum<[17, 21, 21]>;
def XMM1: X86Reg<"xmm1", 1>, DwarfRegNum<[18, 22, 22]>;
def XMM2: X86Reg<"xmm2", 2>, DwarfRegNum<[19, 23, 23]>;
def XMM3: X86Reg<"xmm3", 3>, DwarfRegNum<[20, 24, 24]>;
def XMM4: X86Reg<"xmm4", 4>, DwarfRegNum<[21, 25, 25]>;
def XMM5: X86Reg<"xmm5", 5>, DwarfRegNum<[22, 26, 26]>;
def XMM6: X86Reg<"xmm6", 6>, DwarfRegNum<[23, 27, 27]>;
def XMM7: X86Reg<"xmm7", 7>, DwarfRegNum<[24, 28, 28]>;
// X86-64 only
let CostPerUse = 1 in {
def XMM8: X86Reg<"xmm8", 8>, DwarfRegNum<[25, -2, -2]>;
def XMM9: X86Reg<"xmm9", 9>, DwarfRegNum<[26, -2, -2]>;
def XMM10: X86Reg<"xmm10", 10>, DwarfRegNum<[27, -2, -2]>;
def XMM11: X86Reg<"xmm11", 11>, DwarfRegNum<[28, -2, -2]>;
def XMM12: X86Reg<"xmm12", 12>, DwarfRegNum<[29, -2, -2]>;
def XMM13: X86Reg<"xmm13", 13>, DwarfRegNum<[30, -2, -2]>;
def XMM14: X86Reg<"xmm14", 14>, DwarfRegNum<[31, -2, -2]>;
def XMM15: X86Reg<"xmm15", 15>, DwarfRegNum<[32, -2, -2]>;
def XMM16: X86Reg<"xmm16", 16>, DwarfRegNum<[67, -2, -2]>;
def XMM17: X86Reg<"xmm17", 17>, DwarfRegNum<[68, -2, -2]>;
def XMM18: X86Reg<"xmm18", 18>, DwarfRegNum<[69, -2, -2]>;
def XMM19: X86Reg<"xmm19", 19>, DwarfRegNum<[70, -2, -2]>;
def XMM20: X86Reg<"xmm20", 20>, DwarfRegNum<[71, -2, -2]>;
def XMM21: X86Reg<"xmm21", 21>, DwarfRegNum<[72, -2, -2]>;
def XMM22: X86Reg<"xmm22", 22>, DwarfRegNum<[73, -2, -2]>;
def XMM23: X86Reg<"xmm23", 23>, DwarfRegNum<[74, -2, -2]>;
def XMM24: X86Reg<"xmm24", 24>, DwarfRegNum<[75, -2, -2]>;
def XMM25: X86Reg<"xmm25", 25>, DwarfRegNum<[76, -2, -2]>;
def XMM26: X86Reg<"xmm26", 26>, DwarfRegNum<[77, -2, -2]>;
def XMM27: X86Reg<"xmm27", 27>, DwarfRegNum<[78, -2, -2]>;
def XMM28: X86Reg<"xmm28", 28>, DwarfRegNum<[79, -2, -2]>;
def XMM29: X86Reg<"xmm29", 29>, DwarfRegNum<[80, -2, -2]>;
def XMM30: X86Reg<"xmm30", 30>, DwarfRegNum<[81, -2, -2]>;
def XMM31: X86Reg<"xmm31", 31>, DwarfRegNum<[82, -2, -2]>;
} // CostPerUse
// YMM0-15 registers, used by AVX instructions and
// YMM16-31 registers, used by AVX-512 instructions.
let SubRegIndices = [sub_xmm] in {
foreach Index = 0-31 in {
def YMM#Index : X86Reg<"ymm"#Index, Index, [!cast<X86Reg>("XMM"#Index)]>,
DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
}
}
// ZMM Registers, used by AVX-512 instructions.
let SubRegIndices = [sub_ymm] in {
foreach Index = 0-31 in {
def ZMM#Index : X86Reg<"zmm"#Index, Index, [!cast<X86Reg>("YMM"#Index)]>,
DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
}
}
// Mask Registers, used by AVX-512 instructions.
def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, 93, 93]>;
def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, 94, 94]>;
def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, 95, 95]>;
def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, 96, 96]>;
def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, 97, 97]>;
def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, 98, 98]>;
def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, 99, 99]>;
def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, 100, 100]>;
// Floating point stack registers. These don't map one-to-one to the FP
// pseudo registers, but we still mark them as aliasing FP registers. That
// way both kinds can be live without exceeding the stack depth. ST registers
// are only live around inline assembly.
-def ST0 : X86Reg<"st(0)", 0>, DwarfRegNum<[33, 12, 11]>;
+def ST0 : X86Reg<"st", 0>, DwarfRegNum<[33, 12, 11]>;
def ST1 : X86Reg<"st(1)", 1>, DwarfRegNum<[34, 13, 12]>;
def ST2 : X86Reg<"st(2)", 2>, DwarfRegNum<[35, 14, 13]>;
def ST3 : X86Reg<"st(3)", 3>, DwarfRegNum<[36, 15, 14]>;
def ST4 : X86Reg<"st(4)", 4>, DwarfRegNum<[37, 16, 15]>;
def ST5 : X86Reg<"st(5)", 5>, DwarfRegNum<[38, 17, 16]>;
def ST6 : X86Reg<"st(6)", 6>, DwarfRegNum<[39, 18, 17]>;
def ST7 : X86Reg<"st(7)", 7>, DwarfRegNum<[40, 19, 18]>;
// Floating-point status word
-def FPSW : X86Reg<"fpsw", 0>;
+def FPSW : X86Reg<"fpsr", 0>;
+// Floating-point control word
+def FPCW : X86Reg<"fpcr", 0>;
+
// Status flags register.
//
// Note that some flags that are commonly thought of as part of the status
// flags register are modeled separately. Typically this is due to instructions
// reading and updating those flags independently of all the others. We don't
// want to create false dependencies between these instructions and so we use
// a separate register to model them.
def EFLAGS : X86Reg<"flags", 0>;
// The direction flag.
def DF : X86Reg<"dirflag", 0>;
// Segment registers
def CS : X86Reg<"cs", 1>;
def DS : X86Reg<"ds", 3>;
def SS : X86Reg<"ss", 2>;
def ES : X86Reg<"es", 0>;
def FS : X86Reg<"fs", 4>;
def GS : X86Reg<"gs", 5>;
// Debug registers
def DR0 : X86Reg<"dr0", 0>;
def DR1 : X86Reg<"dr1", 1>;
def DR2 : X86Reg<"dr2", 2>;
def DR3 : X86Reg<"dr3", 3>;
def DR4 : X86Reg<"dr4", 4>;
def DR5 : X86Reg<"dr5", 5>;
def DR6 : X86Reg<"dr6", 6>;
def DR7 : X86Reg<"dr7", 7>;
def DR8 : X86Reg<"dr8", 8>;
def DR9 : X86Reg<"dr9", 9>;
def DR10 : X86Reg<"dr10", 10>;
def DR11 : X86Reg<"dr11", 11>;
def DR12 : X86Reg<"dr12", 12>;
def DR13 : X86Reg<"dr13", 13>;
def DR14 : X86Reg<"dr14", 14>;
def DR15 : X86Reg<"dr15", 15>;
// Control registers
def CR0 : X86Reg<"cr0", 0>;
def CR1 : X86Reg<"cr1", 1>;
def CR2 : X86Reg<"cr2", 2>;
def CR3 : X86Reg<"cr3", 3>;
def CR4 : X86Reg<"cr4", 4>;
def CR5 : X86Reg<"cr5", 5>;
def CR6 : X86Reg<"cr6", 6>;
def CR7 : X86Reg<"cr7", 7>;
def CR8 : X86Reg<"cr8", 8>;
def CR9 : X86Reg<"cr9", 9>;
def CR10 : X86Reg<"cr10", 10>;
def CR11 : X86Reg<"cr11", 11>;
def CR12 : X86Reg<"cr12", 12>;
def CR13 : X86Reg<"cr13", 13>;
def CR14 : X86Reg<"cr14", 14>;
def CR15 : X86Reg<"cr15", 15>;
// Pseudo index registers
def EIZ : X86Reg<"eiz", 4>;
def RIZ : X86Reg<"riz", 4>;
// Bound registers, used in MPX instructions
def BND0 : X86Reg<"bnd0", 0>;
def BND1 : X86Reg<"bnd1", 1>;
def BND2 : X86Reg<"bnd2", 2>;
def BND3 : X86Reg<"bnd3", 3>;
// CET registers - Shadow Stack Pointer
def SSP : X86Reg<"ssp", 0>;
//===----------------------------------------------------------------------===//
// Register Class Definitions... now that we have all of the pieces, define the
// top-level register classes. The order specified in the register list is
// implicitly defined to be the register allocation order.
//
// List call-clobbered registers before callee-save registers. RBX, RBP, (and
// R12, R13, R14, and R15 for X86-64) are callee-save registers.
// In 64-bit mode, there are 12 additional i8 registers: SIL, DIL, BPL, SPL,
// and R8B, ... R15B.
// Allocate R12 and R13 last, as these require an extra byte when
// encoded in x86_64 instructions.
// FIXME: Allow AH, CH, DH, BH to be used as general-purpose registers in
// 64-bit mode. The main complication is that they cannot be encoded in an
// instruction requiring a REX prefix, while SIL, DIL, BPL, R8D, etc.
// require a REX prefix. For example, "addb %ah, %dil" and "movzbl %ah, %r8d"
// cannot be encoded.
def GR8 : RegisterClass<"X86", [i8], 8,
(add AL, CL, DL, AH, CH, DH, BL, BH, SIL, DIL, BPL, SPL,
R8B, R9B, R10B, R11B, R14B, R15B, R12B, R13B)> {
let AltOrders = [(sub GR8, AH, BH, CH, DH)];
let AltOrderSelect = [{
return MF.getSubtarget<X86Subtarget>().is64Bit();
}];
}
let isAllocatable = 0 in
def GRH8 : RegisterClass<"X86", [i8], 8,
(add SIH, DIH, BPH, SPH, R8BH, R9BH, R10BH, R11BH,
R12BH, R13BH, R14BH, R15BH)>;
def GR16 : RegisterClass<"X86", [i16], 16,
(add AX, CX, DX, SI, DI, BX, BP, SP,
R8W, R9W, R10W, R11W, R14W, R15W, R12W, R13W)>;
let isAllocatable = 0 in
def GRH16 : RegisterClass<"X86", [i16], 16,
(add HAX, HCX, HDX, HSI, HDI, HBX, HBP, HSP, HIP,
R8WH, R9WH, R10WH, R11WH, R12WH, R13WH, R14WH,
R15WH)>;
def GR32 : RegisterClass<"X86", [i32], 32,
(add EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP,
R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D)>;
// GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
// RIP isn't really a register and it can't be used anywhere except in an
// address, but it doesn't cause trouble.
// FIXME: it *does* cause trouble - CheckBaseRegAndIndexReg() has extra
// tests because of the inclusion of RIP in this register class.
def GR64 : RegisterClass<"X86", [i64], 64,
(add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP, RSP, RIP)>;
// Segment registers for use by MOV instructions (and others) that have a
// segment register as one operand. Always contain a 16-bit segment
// descriptor.
def SEGMENT_REG : RegisterClass<"X86", [i16], 16, (add CS, DS, SS, ES, FS, GS)>;
// Debug registers.
def DEBUG_REG : RegisterClass<"X86", [i32], 32, (sequence "DR%u", 0, 15)>;
// Control registers.
def CONTROL_REG : RegisterClass<"X86", [i64], 64, (sequence "CR%u", 0, 15)>;
// GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD, GR32_ABCD, GR64_ABCD - Subclasses of
// GR8, GR16, GR32, and GR64 which contain just the "a", "b", "c", and "d"
// registers. On x86-32, GR16_ABCD and GR32_ABCD are classes for registers
// that support 8-bit subreg operations. On x86-64, GR16_ABCD, GR32_ABCD,
// and GR64_ABCD are classes for registers that support 8-bit h-register
// operations.
def GR8_ABCD_L : RegisterClass<"X86", [i8], 8, (add AL, CL, DL, BL)>;
def GR8_ABCD_H : RegisterClass<"X86", [i8], 8, (add AH, CH, DH, BH)>;
def GR16_ABCD : RegisterClass<"X86", [i16], 16, (add AX, CX, DX, BX)>;
def GR32_ABCD : RegisterClass<"X86", [i32], 32, (add EAX, ECX, EDX, EBX)>;
def GR64_ABCD : RegisterClass<"X86", [i64], 64, (add RAX, RCX, RDX, RBX)>;
def GR32_TC : RegisterClass<"X86", [i32], 32, (add EAX, ECX, EDX, ESP)>;
def GR64_TC : RegisterClass<"X86", [i64], 64, (add RAX, RCX, RDX, RSI, RDI,
R8, R9, R11, RIP, RSP)>;
def GR64_TCW64 : RegisterClass<"X86", [i64], 64, (add RAX, RCX, RDX,
R8, R9, R10, R11,
RIP, RSP)>;
// GR8_NOREX - GR8 registers which do not require a REX prefix.
def GR8_NOREX : RegisterClass<"X86", [i8], 8,
(add AL, CL, DL, AH, CH, DH, BL, BH)> {
let AltOrders = [(sub GR8_NOREX, AH, BH, CH, DH)];
let AltOrderSelect = [{
return MF.getSubtarget<X86Subtarget>().is64Bit();
}];
}
// GR16_NOREX - GR16 registers which do not require a REX prefix.
def GR16_NOREX : RegisterClass<"X86", [i16], 16,
(add AX, CX, DX, SI, DI, BX, BP, SP)>;
// GR32_NOREX - GR32 registers which do not require a REX prefix.
def GR32_NOREX : RegisterClass<"X86", [i32], 32,
(add EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP)>;
// GR64_NOREX - GR64 registers which do not require a REX prefix.
def GR64_NOREX : RegisterClass<"X86", [i64], 64,
(add RAX, RCX, RDX, RSI, RDI, RBX, RBP, RSP, RIP)>;
// GR32_NOSP - GR32 registers except ESP.
def GR32_NOSP : RegisterClass<"X86", [i32], 32, (sub GR32, ESP)>;
// GR64_NOSP - GR64 registers except RSP (and RIP).
def GR64_NOSP : RegisterClass<"X86", [i64], 64, (sub GR64, RSP, RIP)>;
// GR32_NOREX_NOSP - GR32 registers which do not require a REX prefix except
// ESP.
def GR32_NOREX_NOSP : RegisterClass<"X86", [i32], 32,
(and GR32_NOREX, GR32_NOSP)>;
// GR64_NOREX_NOSP - GR64_NOREX registers except RSP.
def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
(and GR64_NOREX, GR64_NOSP)>;
// Register classes used for ABIs that use 32-bit address accesses,
// while using the whole x86_64 ISA.
// In such cases, it is fine to use RIP as we are sure the 32 high
// bits are not set. We do not need variants for NOSP as RIP is not
// allowed there.
// RIP is not spilled anywhere for now, so stick to 32-bit alignment
// to save on memory space.
// FIXME: We could allow all 64bit registers, but we would need
// something to check that the 32 high bits are not set,
// which we do not have right now.
def LOW32_ADDR_ACCESS : RegisterClass<"X86", [i32], 32, (add GR32, RIP)>;
// When RBP is used as a base pointer in a 32-bit address environment,
// it is also safe to use the full register to access addresses.
// Since RBP will never be spilled, stick to 32-bit alignment to save
// on memory consumption.
def LOW32_ADDR_ACCESS_RBP : RegisterClass<"X86", [i32], 32,
(add LOW32_ADDR_ACCESS, RBP)>;
// A class to support the 'A' assembler constraint: [ER]AX then [ER]DX.
def GR32_AD : RegisterClass<"X86", [i32], 32, (add EAX, EDX)>;
def GR64_AD : RegisterClass<"X86", [i64], 64, (add RAX, RDX)>;
// Classes to support the 64-bit assembler constraint tied to a fixed
// register in 32-bit mode. The second register is always the next in
// the list. Wrap around causes an error.
def GR32_DC : RegisterClass<"X86", [i32], 32, (add EDX, ECX)>;
def GR32_CB : RegisterClass<"X86", [i32], 32, (add ECX, EBX)>;
def GR32_BSI : RegisterClass<"X86", [i32], 32, (add EBX, ESI)>;
def GR32_SIDI : RegisterClass<"X86", [i32], 32, (add ESI, EDI)>;
def GR32_DIBP : RegisterClass<"X86", [i32], 32, (add EDI, EBP)>;
def GR32_BPSP : RegisterClass<"X86", [i32], 32, (add EBP, ESP)>;
// Scalar SSE2 floating point registers.
def FR32 : RegisterClass<"X86", [f32], 32, (sequence "XMM%u", 0, 15)>;
def FR64 : RegisterClass<"X86", [f64], 64, (add FR32)>;
// FIXME: This sets up the floating point register files as though they are f64
// values, though they really are f80 values. This will cause us to spill
// values as 64-bit quantities instead of 80-bit quantities, which is much much
// faster on common hardware. In reality, this should be controlled by a
// command line option or something.
def RFP32 : RegisterClass<"X86",[f32], 32, (sequence "FP%u", 0, 6)>;
def RFP64 : RegisterClass<"X86",[f64], 32, (add RFP32)>;
def RFP80 : RegisterClass<"X86",[f80], 32, (add RFP32)>;
// st(7) may not be allocatable.
def RFP80_7 : RegisterClass<"X86",[f80], 32, (add FP7)> {
let isAllocatable = 0;
}
// Floating point stack registers (these are not allocatable by the
// register allocator - the floating point stackifier is responsible
// for transforming FPn allocations to STn registers)
def RST : RegisterClass<"X86", [f80, f64, f32], 32, (sequence "ST%u", 0, 7)> {
let isAllocatable = 0;
}
+
+// Helper to allow %st to print as %st(0) when it is encoded in the instruction.
+def RSTi : RegisterOperand<RST, "printSTiRegOperand">;
// Generic vector registers: VR64 and VR128.
// Ensure that float types are declared first - only float is legal on SSE1.
def VR64: RegisterClass<"X86", [x86mmx], 64, (sequence "MM%u", 0, 7)>;
def VR128 : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
128, (add FR32)>;
def VR256 : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 15)>;
// Special classes that help the assembly parser choose some alternate
// instructions to favor 2-byte VEX encodings.
def VR128L : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
128, (sequence "XMM%u", 0, 7)>;
def VR128H : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
128, (sequence "XMM%u", 8, 15)>;
def VR256L : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 7)>;
def VR256H : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 8, 15)>;
// Status flags registers.
def CCR : RegisterClass<"X86", [i32], 32, (add EFLAGS)> {
let CopyCost = -1; // Don't allow copying of status registers.
let isAllocatable = 0;
}
def FPCCR : RegisterClass<"X86", [i16], 16, (add FPSW)> {
let CopyCost = -1; // Don't allow copying of status registers.
let isAllocatable = 0;
}
def DFCCR : RegisterClass<"X86", [i32], 32, (add DF)> {
let CopyCost = -1; // Don't allow copying of status registers.
let isAllocatable = 0;
}
// AVX-512 vector/mask registers.
def VR512 : RegisterClass<"X86", [v16f32, v8f64, v64i8, v32i16, v16i32, v8i64],
512, (sequence "ZMM%u", 0, 31)>;
// Scalar AVX-512 floating point registers.
def FR32X : RegisterClass<"X86", [f32], 32, (sequence "XMM%u", 0, 31)>;
def FR64X : RegisterClass<"X86", [f64], 64, (add FR32X)>;
// Extended VR128 and VR256 for AVX-512 instructions
def VR128X : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
128, (add FR32X)>;
def VR256X : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 31)>;
// Mask registers
def VK1 : RegisterClass<"X86", [v1i1], 16, (sequence "K%u", 0, 7)> {let Size = 16;}
def VK2 : RegisterClass<"X86", [v2i1], 16, (add VK1)> {let Size = 16;}
def VK4 : RegisterClass<"X86", [v4i1], 16, (add VK2)> {let Size = 16;}
def VK8 : RegisterClass<"X86", [v8i1], 16, (add VK4)> {let Size = 16;}
def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)> {let Size = 16;}
def VK32 : RegisterClass<"X86", [v32i1], 32, (add VK16)> {let Size = 32;}
def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
def VK1WM : RegisterClass<"X86", [v1i1], 16, (sub VK1, K0)> {let Size = 16;}
def VK2WM : RegisterClass<"X86", [v2i1], 16, (sub VK2, K0)> {let Size = 16;}
def VK4WM : RegisterClass<"X86", [v4i1], 16, (sub VK4, K0)> {let Size = 16;}
def VK8WM : RegisterClass<"X86", [v8i1], 16, (sub VK8, K0)> {let Size = 16;}
def VK16WM : RegisterClass<"X86", [v16i1], 16, (add VK8WM)> {let Size = 16;}
def VK32WM : RegisterClass<"X86", [v32i1], 32, (add VK16WM)> {let Size = 32;}
def VK64WM : RegisterClass<"X86", [v64i1], 64, (add VK32WM)> {let Size = 64;}
// Bound registers
def BNDR : RegisterClass<"X86", [v2i64], 128, (sequence "BND%u", 0, 3)>;
Index: vendor/llvm/dist-release_80/lib/Transforms/Instrumentation/MemorySanitizer.cpp
===================================================================
--- vendor/llvm/dist-release_80/lib/Transforms/Instrumentation/MemorySanitizer.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/lib/Transforms/Instrumentation/MemorySanitizer.cpp (revision 344166)
@@ -1,4470 +1,4492 @@
//===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instruction (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1-byte) clean store, and it is also good for performance.
///
/// Atomic handling.
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations can not be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. Current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
///
/// Instrumenting inline assembly.
///
/// For inline assembly code LLVM has little idea about which memory locations
/// become initialized depending on the arguments. It can be possible to figure
/// out which arguments are meant to point to inputs and outputs, but the
/// actual semantics can be only visible at runtime. In the Linux kernel it's
/// also possible that the arguments only indicate the offset for a base taken
/// from a segment register, so it's dangerous to treat any asm() arguments as
/// pointers. We take a conservative approach generating calls to
/// __msan_instrument_asm_store(ptr, size),
/// which defers the memory unpoisoning to the runtime library.
/// The latter can perform more complex address checks to figure out whether
/// it's safe to touch the shadow memory.
/// Like with atomic operations, we call __msan_instrument_asm_store() before
/// the assembly call, so that changes to the shadow memory will be seen by
/// other threads together with main memory initialization.
///
/// KernelMemorySanitizer (KMSAN) implementation.
///
/// The major differences between KMSAN and MSan instrumentation are:
/// - KMSAN always tracks the origins and implies msan-keep-going=true;
/// - KMSAN allocates shadow and origin memory for each page separately, so
/// there are no explicit accesses to shadow and origin in the
/// instrumentation.
/// Shadow and origin values for a particular X-byte memory location
/// (X=1,2,4,8) are accessed through pointers obtained via the
/// __msan_metadata_ptr_for_load_X(ptr)
/// __msan_metadata_ptr_for_store_X(ptr)
/// functions. The corresponding functions check that the X-byte accesses
/// are possible and return the pointers to shadow and origin memory.
/// Arbitrary sized accesses are handled with:
/// __msan_metadata_ptr_for_load_n(ptr, size)
/// __msan_metadata_ptr_for_store_n(ptr, size);
/// - TLS variables are stored in a single per-task struct. A call to a
/// function __msan_get_context_state() returning a pointer to that struct
/// is inserted into every instrumented function before the entry block;
/// - __msan_warning() takes a 32-bit origin parameter;
/// - local variables are poisoned with __msan_poison_alloca() upon function
/// entry and unpoisoned with __msan_unpoison_alloca() before leaving the
/// function;
/// - the pass doesn't declare any global variables or add global constructors
/// to the translation unit.
///
/// Also, KMSAN currently ignores uninitialized memory passed into inline asm
/// calls, making sure we're on the safe side wrt. possible false positives.
///
/// KernelMemorySanitizer only supports X86_64 at the moment.
///
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
using namespace llvm;
#define DEBUG_TYPE "msan"
static const unsigned kOriginSize = 4;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
// These constants must be kept in sync with the ones in msan.h.
static const unsigned kParamTLSSize = 800;
static const unsigned kRetvalTLSSize = 800;
// Accesses sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;
/// Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
cl::desc("Track origins (allocation sites) of poisoned memory"),
cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
cl::desc("keep going after reporting a UMR"),
cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
cl::desc("poison uninitialized stack variables"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
cl::desc("poison uninitialized stack variables with a call"),
cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
cl::desc("poison uninitialized stack variables with the given pattern"),
cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
cl::desc("poison undef temps"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
cl::desc("exact handling of relational integer ICmp"),
cl::Hidden, cl::init(false));
// When compiling the Linux kernel, we sometimes see false positives related to
// MSan being unable to understand that inline assembly calls may initialize
// local variables.
// This flag makes the compiler conservatively unpoison every memory location
// passed into an assembly call. Note that this may cause false positives.
// Because it's impossible to figure out the array sizes, we can only unpoison
// the first sizeof(type) bytes for each type* pointer.
// The instrumentation is only enabled in KMSAN builds, and only if
// -msan-handle-asm-conservative is on. This is done because we may want to
// quickly disable assembly instrumentation when it breaks.
static cl::opt<bool> ClHandleAsmConservative(
"msan-handle-asm-conservative",
cl::desc("conservative handling of inline assembly"), cl::Hidden,
cl::init(true));
// This flag controls whether we check the shadow of the address
// operand of a load or store. Such bugs are very rare, since a load from
// a garbage address typically results in SEGV, but they still happen
// (e.g. only the lower bits of the address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
cl::desc("report accesses through a pointer which has poisoned shadow"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
cl::desc("print out instructions with default strict semantics"),
cl::Hidden, cl::init(false));
static cl::opt<int> ClInstrumentationWithCallThreshold(
"msan-instrumentation-with-call-threshold",
cl::desc(
"If the function being instrumented requires more than "
"this number of checks and origin stores, use callbacks instead of "
"inline checks (-1 means never use callbacks)."),
cl::Hidden, cl::init(3500));
static cl::opt<bool>
ClEnableKmsan("msan-kernel",
cl::desc("Enable KernelMemorySanitizer instrumentation"),
cl::Hidden, cl::init(false));
// This is an experiment to enable handling of cases where shadow is a non-zero
// compile-time constant. For some unexplained reason such cases were silently
// ignored in the instrumentation.
static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
cl::desc("Insert checks for constant shadow values"),
cl::Hidden, cl::init(false));
// This is off by default because of a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool> ClWithComdat("msan-with-comdat",
cl::desc("Place MSan constructors in comdat sections"),
cl::Hidden, cl::init(false));
// These options allow specifying custom memory map parameters.
// See MemoryMapParams for details.
static cl::opt<unsigned long long> ClAndMask("msan-and-mask",
cl::desc("Define custom MSan AndMask"),
cl::Hidden, cl::init(0));
static cl::opt<unsigned long long> ClXorMask("msan-xor-mask",
cl::desc("Define custom MSan XorMask"),
cl::Hidden, cl::init(0));
static cl::opt<unsigned long long> ClShadowBase("msan-shadow-base",
cl::desc("Define custom MSan ShadowBase"),
cl::Hidden, cl::init(0));
static cl::opt<unsigned long long> ClOriginBase("msan-origin-base",
cl::desc("Define custom MSan OriginBase"),
cl::Hidden, cl::init(0));
+static const char *const kMsanModuleCtorName = "msan.module_ctor";
static const char *const kMsanInitName = "__msan_init";
namespace {
// Memory map parameters used in application-to-shadow address calculation.
// Offset = (Addr & ~AndMask) ^ XorMask
// Shadow = ShadowBase + Offset
// Origin = OriginBase + Offset
struct MemoryMapParams {
uint64_t AndMask;
uint64_t XorMask;
uint64_t ShadowBase;
uint64_t OriginBase;
};
struct PlatformMemoryMapParams {
const MemoryMapParams *bits32;
const MemoryMapParams *bits64;
};
} // end anonymous namespace
// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
0x000080000000, // AndMask
0, // XorMask (not used)
0, // ShadowBase (not used)
0x000040000000, // OriginBase
};
// x86_64 Linux
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
0x400000000000, // AndMask
0, // XorMask (not used)
0, // ShadowBase (not used)
0x200000000000, // OriginBase
#else
0, // AndMask (not used)
0x500000000000, // XorMask
0, // ShadowBase (not used)
0x100000000000, // OriginBase
#endif
};
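// Illustrative example (not part of the upstream source): with the default
// x86_64 Linux parameters above (AndMask = 0, XorMask = 0x500000000000,
// ShadowBase = 0, OriginBase = 0x100000000000), a hypothetical application
// address Addr = 0x7fff00001234 maps to
//   Offset = (Addr & ~0) ^ 0x500000000000 = 0x2fff00001234
//   Shadow = 0 + Offset                   = 0x2fff00001234
//   Origin = 0x100000000000 + Offset      = 0x3fff00001234
// following the Offset/Shadow/Origin formula documented above (the runtime
// may additionally align the origin address to 4 bytes).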
// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
0, // AndMask (not used)
0x008000000000, // XorMask
0, // ShadowBase (not used)
0x002000000000, // OriginBase
};
// ppc64 Linux
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
0xE00000000000, // AndMask
0x100000000000, // XorMask
0x080000000000, // ShadowBase
0x1C0000000000, // OriginBase
};
// aarch64 Linux
static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
0, // AndMask (not used)
0x06000000000, // XorMask
0, // ShadowBase (not used)
0x01000000000, // OriginBase
};
// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
0x000180000000, // AndMask
0x000040000000, // XorMask
0x000020000000, // ShadowBase
0x000700000000, // OriginBase
};
// x86_64 FreeBSD
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
0xc00000000000, // AndMask
0x200000000000, // XorMask
0x100000000000, // ShadowBase
0x380000000000, // OriginBase
};
// x86_64 NetBSD
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
0, // AndMask
0x500000000000, // XorMask
0, // ShadowBase
0x100000000000, // OriginBase
};
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
&Linux_I386_MemoryMapParams,
&Linux_X86_64_MemoryMapParams,
};
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
nullptr,
&Linux_MIPS64_MemoryMapParams,
};
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
nullptr,
&Linux_PowerPC64_MemoryMapParams,
};
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
nullptr,
&Linux_AArch64_MemoryMapParams,
};
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
&FreeBSD_I386_MemoryMapParams,
&FreeBSD_X86_64_MemoryMapParams,
};
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
nullptr,
&NetBSD_X86_64_MemoryMapParams,
};
namespace {
/// Instrument functions of a module to detect uninitialized reads.
///
/// Instantiating MemorySanitizer inserts the msan runtime library API function
/// declarations into the module if they don't exist already. Instantiating
/// ensures the __msan_init function is in the list of global constructors for
/// the module.
class MemorySanitizer {
public:
MemorySanitizer(Module &M, int TrackOrigins = 0, bool Recover = false,
bool EnableKmsan = false) {
this->CompileKernel =
ClEnableKmsan.getNumOccurrences() > 0 ? ClEnableKmsan : EnableKmsan;
if (ClTrackOrigins.getNumOccurrences() > 0)
this->TrackOrigins = ClTrackOrigins;
else
this->TrackOrigins = this->CompileKernel ? 2 : TrackOrigins;
this->Recover = ClKeepGoing.getNumOccurrences() > 0
? ClKeepGoing
: (this->CompileKernel | Recover);
initializeModule(M);
}
// MSan cannot be moved or copied because of MapParams.
MemorySanitizer(MemorySanitizer &&) = delete;
MemorySanitizer &operator=(MemorySanitizer &&) = delete;
MemorySanitizer(const MemorySanitizer &) = delete;
MemorySanitizer &operator=(const MemorySanitizer &) = delete;
bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);
private:
friend struct MemorySanitizerVisitor;
friend struct VarArgAMD64Helper;
friend struct VarArgMIPS64Helper;
friend struct VarArgAArch64Helper;
friend struct VarArgPowerPC64Helper;
void initializeModule(Module &M);
void initializeCallbacks(Module &M);
void createKernelApi(Module &M);
void createUserspaceApi(Module &M);
/// True if we're compiling the Linux kernel.
bool CompileKernel;
/// Track origins (allocation points) of uninitialized values.
int TrackOrigins;
bool Recover;
LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
// XxxTLS variables represent the per-thread state in MSan and per-task state
// in KMSAN.
// For the userspace these point to thread-local globals. In the kernel land
// they point to the members of a per-task struct obtained via a call to
// __msan_get_context_state().
/// Thread-local shadow storage for function parameters.
Value *ParamTLS;
/// Thread-local origin storage for function parameters.
Value *ParamOriginTLS;
/// Thread-local shadow storage for function return value.
Value *RetvalTLS;
/// Thread-local origin storage for function return value.
Value *RetvalOriginTLS;
/// Thread-local shadow storage for in-register va_arg function
/// parameters (x86_64-specific).
Value *VAArgTLS;
/// Thread-local shadow storage for in-register va_arg function
/// parameters (x86_64-specific).
Value *VAArgOriginTLS;
/// Thread-local shadow storage for va_arg overflow area
/// (x86_64-specific).
Value *VAArgOverflowSizeTLS;
/// Thread-local space used to pass origin value to the UMR reporting
/// function.
Value *OriginTLS;
/// Are the instrumentation callbacks set up?
bool CallbacksInitialized = false;
/// The run-time callback to print a warning.
Value *WarningFn;
// These arrays are indexed by log2(AccessSize).
Value *MaybeWarningFn[kNumberOfAccessSizes];
Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
/// Run-time helper that generates a new origin value for a stack
/// allocation.
Value *MsanSetAllocaOrigin4Fn;
/// Run-time helper that poisons stack on function entry.
Value *MsanPoisonStackFn;
/// Run-time helper that records a store (or any event) of an
/// uninitialized value and returns an updated origin id encoding this info.
Value *MsanChainOriginFn;
/// MSan runtime replacements for memmove, memcpy and memset.
Value *MemmoveFn, *MemcpyFn, *MemsetFn;
/// KMSAN callback for task-local function argument shadow.
Value *MsanGetContextStateFn;
/// Functions for poisoning/unpoisoning local variables
Value *MsanPoisonAllocaFn, *MsanUnpoisonAllocaFn;
/// Each of the MsanMetadataPtrXxx functions returns a pair of shadow/origin
/// pointers.
Value *MsanMetadataPtrForLoadN, *MsanMetadataPtrForStoreN;
Value *MsanMetadataPtrForLoad_1_8[4];
Value *MsanMetadataPtrForStore_1_8[4];
Value *MsanInstrumentAsmStoreFn;
/// Helper to choose between different MsanMetadataPtrXxx().
Value *getKmsanShadowOriginAccessFn(bool isStore, int size);
/// Memory map parameters used in application-to-shadow calculation.
const MemoryMapParams *MapParams;
/// Custom memory map parameters used when -msan-shadow-base or
/// -msan-origin-base is provided.
MemoryMapParams CustomMapParams;
MDNode *ColdCallWeights;
/// Branch weights for origin store.
MDNode *OriginStoreWeights;
/// An empty volatile inline asm that prevents callback merge.
InlineAsm *EmptyAsm;
+
+ Function *MsanCtorFunction;
};
/// A legacy function pass for msan instrumentation.
///
/// Instruments functions to detect uninitialized reads.
struct MemorySanitizerLegacyPass : public FunctionPass {
// Pass identification, replacement for typeid.
static char ID;
MemorySanitizerLegacyPass(int TrackOrigins = 0, bool Recover = false,
bool EnableKmsan = false)
: FunctionPass(ID), TrackOrigins(TrackOrigins), Recover(Recover),
EnableKmsan(EnableKmsan) {}
StringRef getPassName() const override { return "MemorySanitizerLegacyPass"; }
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
bool runOnFunction(Function &F) override {
return MSan->sanitizeFunction(
F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());
}
bool doInitialization(Module &M) override;
Optional<MemorySanitizer> MSan;
int TrackOrigins;
bool Recover;
bool EnableKmsan;
};
} // end anonymous namespace
PreservedAnalyses MemorySanitizerPass::run(Function &F,
FunctionAnalysisManager &FAM) {
MemorySanitizer Msan(*F.getParent(), TrackOrigins, Recover, EnableKmsan);
if (Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
return PreservedAnalyses::none();
return PreservedAnalyses::all();
}
char MemorySanitizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(MemorySanitizerLegacyPass, "msan",
"MemorySanitizer: detects uninitialized reads.", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(MemorySanitizerLegacyPass, "msan",
"MemorySanitizer: detects uninitialized reads.", false,
false)
FunctionPass *llvm::createMemorySanitizerLegacyPassPass(int TrackOrigins,
bool Recover,
bool CompileKernel) {
return new MemorySanitizerLegacyPass(TrackOrigins, Recover, CompileKernel);
}
/// Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
StringRef Str) {
Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
GlobalValue::PrivateLinkage, StrConst, "");
}
/// Create KMSAN API callbacks.
void MemorySanitizer::createKernelApi(Module &M) {
IRBuilder<> IRB(*C);
// These will be initialized in insertKmsanPrologue().
RetvalTLS = nullptr;
RetvalOriginTLS = nullptr;
ParamTLS = nullptr;
ParamOriginTLS = nullptr;
VAArgTLS = nullptr;
VAArgOriginTLS = nullptr;
VAArgOverflowSizeTLS = nullptr;
// OriginTLS is unused in the kernel.
OriginTLS = nullptr;
// __msan_warning() in the kernel takes an origin.
WarningFn = M.getOrInsertFunction("__msan_warning", IRB.getVoidTy(),
IRB.getInt32Ty());
// Requests the per-task context state (kmsan_context_state*) from the
// runtime library.
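// Note: the field order of this struct type matches the GEP indices used in
// insertKmsanPrologue() below (param shadow, retval shadow, va_arg shadow,
// va_arg origins, va_arg overflow size, param origins, retval origin).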
MsanGetContextStateFn = M.getOrInsertFunction(
"__msan_get_context_state",
PointerType::get(
StructType::get(ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
ArrayType::get(IRB.getInt64Ty(),
kParamTLSSize / 8), /* va_arg_origin */
IRB.getInt64Ty(),
ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
OriginTy),
0));
Type *RetTy = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
PointerType::get(IRB.getInt32Ty(), 0));
for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
std::string name_load =
"__msan_metadata_ptr_for_load_" + std::to_string(size);
std::string name_store =
"__msan_metadata_ptr_for_store_" + std::to_string(size);
MsanMetadataPtrForLoad_1_8[ind] = M.getOrInsertFunction(
name_load, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
MsanMetadataPtrForStore_1_8[ind] = M.getOrInsertFunction(
name_store, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
}
MsanMetadataPtrForLoadN = M.getOrInsertFunction(
"__msan_metadata_ptr_for_load_n", RetTy,
PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
MsanMetadataPtrForStoreN = M.getOrInsertFunction(
"__msan_metadata_ptr_for_store_n", RetTy,
PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
// Functions for poisoning and unpoisoning memory.
MsanPoisonAllocaFn =
M.getOrInsertFunction("__msan_poison_alloca", IRB.getVoidTy(),
IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy());
MsanUnpoisonAllocaFn = M.getOrInsertFunction(
"__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
}
static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
return M.getOrInsertGlobal(Name, Ty, [&] {
return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
nullptr, Name, nullptr,
GlobalVariable::InitialExecTLSModel);
});
}
/// Insert declarations for userspace-specific functions and globals.
void MemorySanitizer::createUserspaceApi(Module &M) {
IRBuilder<> IRB(*C);
// Create the callback.
// FIXME: this function should have "Cold" calling conv,
// which is not yet implemented.
StringRef WarningFnName = Recover ? "__msan_warning"
: "__msan_warning_noreturn";
WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
// Create the global TLS variables.
RetvalTLS =
getOrInsertGlobal(M, "__msan_retval_tls",
ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));
RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);
ParamTLS =
getOrInsertGlobal(M, "__msan_param_tls",
ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
ParamOriginTLS =
getOrInsertGlobal(M, "__msan_param_origin_tls",
ArrayType::get(OriginTy, kParamTLSSize / 4));
VAArgTLS =
getOrInsertGlobal(M, "__msan_va_arg_tls",
ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
VAArgOriginTLS =
getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
ArrayType::get(OriginTy, kParamTLSSize / 4));
VAArgOverflowSizeTLS =
getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls", IRB.getInt64Ty());
OriginTLS = getOrInsertGlobal(M, "__msan_origin_tls", IRB.getInt32Ty());
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
unsigned AccessSize = 1 << AccessSizeIndex;
std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
IRB.getInt32Ty());
FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
IRB.getInt8PtrTy(), IRB.getInt32Ty());
}
MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
"__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
IRB.getInt8PtrTy(), IntptrTy);
MsanPoisonStackFn =
M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
IRB.getInt8PtrTy(), IntptrTy);
}
/// Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
// Only do this once.
if (CallbacksInitialized)
return;
IRBuilder<> IRB(*C);
// Initialize callbacks that are common for kernel and userspace
// instrumentation.
MsanChainOriginFn = M.getOrInsertFunction(
"__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
MemmoveFn = M.getOrInsertFunction(
"__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IntptrTy);
MemcpyFn = M.getOrInsertFunction(
"__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
IntptrTy);
MemsetFn = M.getOrInsertFunction(
"__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
IntptrTy);
// We insert an empty inline asm after __msan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
StringRef(""), StringRef(""),
/*hasSideEffects=*/true);
MsanInstrumentAsmStoreFn =
M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
if (CompileKernel) {
createKernelApi(M);
} else {
createUserspaceApi(M);
}
CallbacksInitialized = true;
}
Value *MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore, int size) {
Value **Fns =
isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
switch (size) {
case 1:
return Fns[0];
case 2:
return Fns[1];
case 4:
return Fns[2];
case 8:
return Fns[3];
default:
return nullptr;
}
}
/// Module-level initialization.
+///
+/// Inserts a call to __msan_init into the module's constructor list.
void MemorySanitizer::initializeModule(Module &M) {
auto &DL = M.getDataLayout();
bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
// Check the overrides first
if (ShadowPassed || OriginPassed) {
CustomMapParams.AndMask = ClAndMask;
CustomMapParams.XorMask = ClXorMask;
CustomMapParams.ShadowBase = ClShadowBase;
CustomMapParams.OriginBase = ClOriginBase;
MapParams = &CustomMapParams;
} else {
Triple TargetTriple(M.getTargetTriple());
switch (TargetTriple.getOS()) {
case Triple::FreeBSD:
switch (TargetTriple.getArch()) {
case Triple::x86_64:
MapParams = FreeBSD_X86_MemoryMapParams.bits64;
break;
case Triple::x86:
MapParams = FreeBSD_X86_MemoryMapParams.bits32;
break;
default:
report_fatal_error("unsupported architecture");
}
break;
case Triple::NetBSD:
switch (TargetTriple.getArch()) {
case Triple::x86_64:
MapParams = NetBSD_X86_MemoryMapParams.bits64;
break;
default:
report_fatal_error("unsupported architecture");
}
break;
case Triple::Linux:
switch (TargetTriple.getArch()) {
case Triple::x86_64:
MapParams = Linux_X86_MemoryMapParams.bits64;
break;
case Triple::x86:
MapParams = Linux_X86_MemoryMapParams.bits32;
break;
case Triple::mips64:
case Triple::mips64el:
MapParams = Linux_MIPS_MemoryMapParams.bits64;
break;
case Triple::ppc64:
case Triple::ppc64le:
MapParams = Linux_PowerPC_MemoryMapParams.bits64;
break;
case Triple::aarch64:
case Triple::aarch64_be:
MapParams = Linux_ARM_MemoryMapParams.bits64;
break;
default:
report_fatal_error("unsupported architecture");
}
break;
default:
report_fatal_error("unsupported operating system");
}
}
C = &(M.getContext());
IRBuilder<> IRB(*C);
IntptrTy = IRB.getIntPtrTy(DL);
OriginTy = IRB.getInt32Ty();
ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
if (!CompileKernel) {
- getOrCreateInitFunction(M, kMsanInitName);
+ std::tie(MsanCtorFunction, std::ignore) =
+ getOrCreateSanitizerCtorAndInitFunctions(
+ M, kMsanModuleCtorName, kMsanInitName,
+ /*InitArgTypes=*/{},
+ /*InitArgs=*/{},
+ // This callback is invoked when the functions are created the first
+ // time. Hook them into the global ctors list in that case:
+ [&](Function *Ctor, Function *) {
+ if (!ClWithComdat) {
+ appendToGlobalCtors(M, Ctor, 0);
+ return;
+ }
+ Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
+ Ctor->setComdat(MsanCtorComdat);
+ appendToGlobalCtors(M, Ctor, 0, Ctor);
+ });
if (TrackOrigins)
M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
return new GlobalVariable(
M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
IRB.getInt32(TrackOrigins), "__msan_track_origins");
});
if (Recover)
M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
return new GlobalVariable(M, IRB.getInt32Ty(), true,
GlobalValue::WeakODRLinkage,
IRB.getInt32(Recover), "__msan_keep_going");
});
}
}
bool MemorySanitizerLegacyPass::doInitialization(Module &M) {
MSan.emplace(M, TrackOrigins, Recover, EnableKmsan);
return true;
}
namespace {
/// A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
virtual ~VarArgHelper() = default;
/// Visit a CallSite.
virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
/// Visit a va_start call.
virtual void visitVAStartInst(VAStartInst &I) = 0;
/// Visit a va_copy call.
virtual void visitVACopyInst(VACopyInst &I) = 0;
/// Finalize function instrumentation.
///
/// This method is called after visiting all interesting (see above)
/// instructions in a function.
virtual void finalizeInstrumentation() = 0;
};
struct MemorySanitizerVisitor;
} // end anonymous namespace
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
MemorySanitizerVisitor &Visitor);
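/// Map a shadow size in bits to an index into the per-size callback arrays
/// (MaybeWarningFn / MaybeStoreOriginFn): 1-8 bits -> 0, 9-16 -> 1,
/// 17-32 -> 2, 33-64 -> 3, i.e. the access size in bytes, log2, rounded up.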
static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
if (TypeSize <= 8) return 0;
return Log2_32_Ceil((TypeSize + 7) / 8);
}
namespace {
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function &F;
MemorySanitizer &MS;
SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
ValueMap<Value*, Value*> ShadowMap, OriginMap;
std::unique_ptr<VarArgHelper> VAHelper;
const TargetLibraryInfo *TLI;
BasicBlock *ActualFnStart;
// The following flags disable parts of MSan instrumentation based on
// blacklist contents and command-line options.
bool InsertChecks;
bool PropagateShadow;
bool PoisonStack;
bool PoisonUndef;
bool CheckReturnValue;
struct ShadowOriginAndInsertPoint {
Value *Shadow;
Value *Origin;
Instruction *OrigIns;
ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
: Shadow(S), Origin(O), OrigIns(I) {}
};
SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
SmallVector<StoreInst *, 16> StoreList;
MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
const TargetLibraryInfo &TLI)
: F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
InsertChecks = SanitizeFunction;
PropagateShadow = SanitizeFunction;
PoisonStack = SanitizeFunction && ClPoisonStack;
PoisonUndef = SanitizeFunction && ClPoisonUndef;
// FIXME: Consider using SpecialCaseList to specify a list of functions that
// must always return fully initialized values. For now, we hardcode "main".
CheckReturnValue = SanitizeFunction && (F.getName() == "main");
MS.initializeCallbacks(*F.getParent());
if (MS.CompileKernel)
ActualFnStart = insertKmsanPrologue(F);
else
ActualFnStart = &F.getEntryBlock();
LLVM_DEBUG(if (!InsertChecks) dbgs()
<< "MemorySanitizer is not inserting checks into '"
<< F.getName() << "'\n");
}
Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
if (MS.TrackOrigins <= 1) return V;
return IRB.CreateCall(MS.MsanChainOriginFn, V);
}
Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
if (IntptrSize == kOriginSize) return Origin;
assert(IntptrSize == kOriginSize * 2);
Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
}
/// Fill memory range with the given origin value.
void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
unsigned Size, unsigned Alignment) {
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
assert(IntptrAlignment >= kMinOriginAlignment);
assert(IntptrSize >= kOriginSize);
unsigned Ofs = 0;
unsigned CurrentAlignment = Alignment;
if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
Value *IntptrOrigin = originToIntptr(IRB, Origin);
Value *IntptrOriginPtr =
IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
for (unsigned i = 0; i < Size / IntptrSize; ++i) {
Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
: IntptrOriginPtr;
IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
Ofs += IntptrSize / kOriginSize;
CurrentAlignment = IntptrAlignment;
}
}
for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
Value *GEP =
i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
CurrentAlignment = kMinOriginAlignment;
}
}
void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
Value *OriginPtr, unsigned Alignment, bool AsCall) {
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
if (Shadow->getType()->isAggregateType()) {
paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
OriginAlignment);
} else {
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
if (ConstantShadow) {
if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
OriginAlignment);
return;
}
unsigned TypeSizeInBits =
DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
Value *ConvertedShadow2 = IRB.CreateZExt(
ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
IRB.CreateCall(Fn, {ConvertedShadow2,
IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
Origin});
} else {
Value *Cmp = IRB.CreateICmpNE(
ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
IRBuilder<> IRBNew(CheckTerm);
paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
OriginAlignment);
}
}
}
void materializeStores(bool InstrumentWithCalls) {
for (StoreInst *SI : StoreList) {
IRBuilder<> IRB(SI);
Value *Val = SI->getValueOperand();
Value *Addr = SI->getPointerOperand();
Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
Value *ShadowPtr, *OriginPtr;
Type *ShadowTy = Shadow->getType();
unsigned Alignment = SI->getAlignment();
unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
LLVM_DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
(void)NewSI;
if (SI->isAtomic())
SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
if (MS.TrackOrigins && !SI->isAtomic())
storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
OriginAlignment, InstrumentWithCalls);
}
}
/// Helper function to insert a warning at IRB's current insert point.
void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
if (!Origin)
Origin = (Value *)IRB.getInt32(0);
if (MS.CompileKernel) {
IRB.CreateCall(MS.WarningFn, Origin);
} else {
if (MS.TrackOrigins) {
IRB.CreateStore(Origin, MS.OriginTLS);
}
IRB.CreateCall(MS.WarningFn, {});
}
IRB.CreateCall(MS.EmptyAsm, {});
// FIXME: Insert UnreachableInst if !MS.Recover?
// This may invalidate some of the following checks and needs to be done
// at the very end.
}
void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
bool AsCall) {
IRBuilder<> IRB(OrigIns);
LLVM_DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
LLVM_DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
if (ConstantShadow) {
if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
insertWarningFn(IRB, Origin);
}
return;
}
const DataLayout &DL = OrigIns->getModule()->getDataLayout();
unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
Value *Fn = MS.MaybeWarningFn[SizeIndex];
Value *ConvertedShadow2 =
IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
? Origin
: (Value *)IRB.getInt32(0)});
} else {
Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
getCleanShadow(ConvertedShadow), "_mscmp");
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
Cmp, OrigIns,
/* Unreachable */ !MS.Recover, MS.ColdCallWeights);
IRB.SetInsertPoint(CheckTerm);
insertWarningFn(IRB, Origin);
LLVM_DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
}
}
void materializeChecks(bool InstrumentWithCalls) {
for (const auto &ShadowData : InstrumentationList) {
Instruction *OrigIns = ShadowData.OrigIns;
Value *Shadow = ShadowData.Shadow;
Value *Origin = ShadowData.Origin;
materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
}
LLVM_DEBUG(dbgs() << "DONE:\n" << F);
}
BasicBlock *insertKmsanPrologue(Function &F) {
BasicBlock *ret =
SplitBlock(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHI());
IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
Constant *Zero = IRB.getInt32(0);
MS.ParamTLS =
IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(0)}, "param_shadow");
MS.RetvalTLS =
IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(1)}, "retval_shadow");
MS.VAArgTLS =
IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(2)}, "va_arg_shadow");
MS.VAArgOriginTLS =
IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(3)}, "va_arg_origin");
MS.VAArgOverflowSizeTLS = IRB.CreateGEP(
ContextState, {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
MS.ParamOriginTLS =
IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(5)}, "param_origin");
MS.RetvalOriginTLS =
IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(6)}, "retval_origin");
return ret;
}
/// Add MemorySanitizer instrumentation to a function.
bool runOnFunction() {
// In the presence of unreachable blocks, we may see Phi nodes with
// incoming nodes from such blocks. Since InstVisitor skips unreachable
// blocks, such nodes will not have any shadow value associated with them.
// It's easier to remove unreachable blocks than deal with missing shadow.
removeUnreachableBlocks(F);
// Iterate all BBs in depth-first order and create shadow instructions
// for all instructions (where applicable).
// For PHI nodes we create dummy shadow PHIs which will be finalized later.
for (BasicBlock *BB : depth_first(ActualFnStart))
visit(*BB);
// Finalize PHI nodes.
for (PHINode *PN : ShadowPHINodes) {
PHINode *PNS = cast<PHINode>(getShadow(PN));
PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
size_t NumValues = PN->getNumIncomingValues();
for (size_t v = 0; v < NumValues; v++) {
PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
}
}
VAHelper->finalizeInstrumentation();
bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
InstrumentationList.size() + StoreList.size() >
(unsigned)ClInstrumentationWithCallThreshold;
// Insert shadow value checks.
materializeChecks(InstrumentWithCalls);
// Delayed instrumentation of StoreInst.
// This may not add new address checks.
materializeStores(InstrumentWithCalls);
return true;
}
/// Compute the shadow type that corresponds to a given Value.
Type *getShadowTy(Value *V) {
return getShadowTy(V->getType());
}
/// Compute the shadow type that corresponds to a given Type.
Type *getShadowTy(Type *OrigTy) {
if (!OrigTy->isSized()) {
return nullptr;
}
// For integer types, the shadow is the same as the original type.
// This may return weird-sized types like i1.
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
return IT;
const DataLayout &DL = F.getParent()->getDataLayout();
if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
return VectorType::get(IntegerType::get(*MS.C, EltSize),
VT->getNumElements());
}
if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
return ArrayType::get(getShadowTy(AT->getElementType()),
AT->getNumElements());
}
if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
SmallVector<Type*, 4> Elements;
for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
Elements.push_back(getShadowTy(ST->getElementType(i)));
StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
return Res;
}
uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
return IntegerType::get(*MS.C, TypeSize);
}
/// Flatten a vector type.
Type *getShadowTyNoVec(Type *ty) {
if (VectorType *vt = dyn_cast<VectorType>(ty))
return IntegerType::get(*MS.C, vt->getBitWidth());
return ty;
}
/// Convert a shadow value to its flattened variant.
Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
Type *Ty = V->getType();
Type *NoVecTy = getShadowTyNoVec(Ty);
if (Ty == NoVecTy) return V;
return IRB.CreateBitCast(V, NoVecTy);
}
/// Compute the integer shadow offset that corresponds to a given
/// application address.
///
/// Offset = (Addr & ~AndMask) ^ XorMask
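///
/// For example, when only XorMask is set (AndMask == 0), the AND is skipped
/// and the offset is simply Addr ^ XorMask; conversely, with XorMask == 0 the
/// offset is Addr & ~AndMask.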
Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
uint64_t AndMask = MS.MapParams->AndMask;
if (AndMask)
OffsetLong =
IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
uint64_t XorMask = MS.MapParams->XorMask;
if (XorMask)
OffsetLong =
IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
return OffsetLong;
}
/// Compute the shadow and origin addresses corresponding to a given
/// application address.
///
/// Shadow = ShadowBase + Offset
/// Origin = (OriginBase + Offset) & ~3ULL
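///
/// For example, with ShadowBase == 0 the shadow address is just the offset
/// computed above; the origin address is additionally aligned down to
/// kMinOriginAlignment when the access alignment does not already guarantee
/// it, since origins live in aligned 4-byte slots.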
std::pair<Value *, Value *> getShadowOriginPtrUserspace(Value *Addr,
IRBuilder<> &IRB,
Type *ShadowTy,
unsigned Alignment) {
Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
Value *ShadowLong = ShadowOffset;
uint64_t ShadowBase = MS.MapParams->ShadowBase;
if (ShadowBase != 0) {
ShadowLong =
IRB.CreateAdd(ShadowLong,
ConstantInt::get(MS.IntptrTy, ShadowBase));
}
Value *ShadowPtr =
IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
Value *OriginPtr = nullptr;
if (MS.TrackOrigins) {
Value *OriginLong = ShadowOffset;
uint64_t OriginBase = MS.MapParams->OriginBase;
if (OriginBase != 0)
OriginLong = IRB.CreateAdd(OriginLong,
ConstantInt::get(MS.IntptrTy, OriginBase));
if (Alignment < kMinOriginAlignment) {
uint64_t Mask = kMinOriginAlignment - 1;
OriginLong =
IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
}
OriginPtr =
IRB.CreateIntToPtr(OriginLong, PointerType::get(IRB.getInt32Ty(), 0));
}
return std::make_pair(ShadowPtr, OriginPtr);
}
std::pair<Value *, Value *>
getShadowOriginPtrKernel(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
unsigned Alignment, bool isStore) {
Value *ShadowOriginPtrs;
const DataLayout &DL = F.getParent()->getDataLayout();
int Size = DL.getTypeStoreSize(ShadowTy);
Value *Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
Value *AddrCast =
IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
if (Getter) {
ShadowOriginPtrs = IRB.CreateCall(Getter, AddrCast);
} else {
Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
ShadowOriginPtrs = IRB.CreateCall(isStore ? MS.MsanMetadataPtrForStoreN
: MS.MsanMetadataPtrForLoadN,
{AddrCast, SizeVal});
}
Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
return std::make_pair(ShadowPtr, OriginPtr);
}
std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
Type *ShadowTy,
unsigned Alignment,
bool isStore) {
std::pair<Value *, Value *> ret;
if (MS.CompileKernel)
ret = getShadowOriginPtrKernel(Addr, IRB, ShadowTy, Alignment, isStore);
else
ret = getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
return ret;
}
/// Compute the shadow address for a given function argument.
///
/// Shadow = ParamTLS+ArgOffset.
Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
int ArgOffset) {
Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
if (ArgOffset)
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
"_msarg");
}
/// Compute the origin address for a given function argument.
Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
int ArgOffset) {
if (!MS.TrackOrigins)
return nullptr;
Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
if (ArgOffset)
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
"_msarg_o");
}
/// Compute the shadow address for a retval.
Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
return IRB.CreatePointerCast(MS.RetvalTLS,
PointerType::get(getShadowTy(A), 0),
"_msret");
}
/// Compute the origin address for a retval.
Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
// We keep a single origin for the entire retval. Might be too optimistic.
return MS.RetvalOriginTLS;
}
/// Set SV to be the shadow value for V.
void setShadow(Value *V, Value *SV) {
assert(!ShadowMap.count(V) && "Values may only have one shadow");
ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
}
/// Set Origin to be the origin value for V.
void setOrigin(Value *V, Value *Origin) {
if (!MS.TrackOrigins) return;
assert(!OriginMap.count(V) && "Values may only have one origin");
LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
OriginMap[V] = Origin;
}
Constant *getCleanShadow(Type *OrigTy) {
Type *ShadowTy = getShadowTy(OrigTy);
if (!ShadowTy)
return nullptr;
return Constant::getNullValue(ShadowTy);
}
/// Create a clean shadow value for a given value.
///
/// Clean shadow (all zeroes) means all bits of the value are defined
/// (initialized).
Constant *getCleanShadow(Value *V) {
return getCleanShadow(V->getType());
}
/// Create a dirty shadow of a given shadow type.
Constant *getPoisonedShadow(Type *ShadowTy) {
assert(ShadowTy);
if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
return Constant::getAllOnesValue(ShadowTy);
if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
SmallVector<Constant *, 4> Vals(AT->getNumElements(),
getPoisonedShadow(AT->getElementType()));
return ConstantArray::get(AT, Vals);
}
if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
SmallVector<Constant *, 4> Vals;
for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
return ConstantStruct::get(ST, Vals);
}
llvm_unreachable("Unexpected shadow type");
}
/// Create a dirty shadow for a given value.
Constant *getPoisonedShadow(Value *V) {
Type *ShadowTy = getShadowTy(V);
if (!ShadowTy)
return nullptr;
return getPoisonedShadow(ShadowTy);
}
/// Create a clean (zero) origin.
Value *getCleanOrigin() {
return Constant::getNullValue(MS.OriginTy);
}
/// Get the shadow value for a given Value.
///
/// This function either returns the value set earlier with setShadow,
/// or extracts it from ParamTLS (for function arguments).
Value *getShadow(Value *V) {
if (!PropagateShadow) return getCleanShadow(V);
if (Instruction *I = dyn_cast<Instruction>(V)) {
if (I->getMetadata("nosanitize"))
return getCleanShadow(V);
// For instructions the shadow is already stored in the map.
Value *Shadow = ShadowMap[V];
if (!Shadow) {
LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
(void)I;
assert(Shadow && "No shadow for a value");
}
return Shadow;
}
if (UndefValue *U = dyn_cast<UndefValue>(V)) {
Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
(void)U;
return AllOnes;
}
if (Argument *A = dyn_cast<Argument>(V)) {
// For arguments we compute the shadow on demand and store it in the map.
Value **ShadowPtr = &ShadowMap[V];
if (*ShadowPtr)
return *ShadowPtr;
Function *F = A->getParent();
IRBuilder<> EntryIRB(ActualFnStart->getFirstNonPHI());
unsigned ArgOffset = 0;
const DataLayout &DL = F->getParent()->getDataLayout();
for (auto &FArg : F->args()) {
if (!FArg.getType()->isSized()) {
LLVM_DEBUG(dbgs() << "Arg is not sized\n");
continue;
}
unsigned Size =
FArg.hasByValAttr()
? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
: DL.getTypeAllocSize(FArg.getType());
if (A == &FArg) {
bool Overflow = ArgOffset + Size > kParamTLSSize;
Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
if (FArg.hasByValAttr()) {
// ByVal pointer itself has clean shadow. We copy the actual
// argument shadow to the underlying memory.
// Figure out maximal valid memcpy alignment.
unsigned ArgAlign = FArg.getParamAlignment();
if (ArgAlign == 0) {
Type *EltType = A->getType()->getPointerElementType();
ArgAlign = DL.getABITypeAlignment(EltType);
}
Value *CpShadowPtr =
getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
/*isStore*/ true)
.first;
// TODO(glider): need to copy origins.
if (Overflow) {
// ParamTLS overflow.
EntryIRB.CreateMemSet(
CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
Size, ArgAlign);
} else {
unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
CopyAlign, Size);
LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
(void)Cpy;
}
*ShadowPtr = getCleanShadow(V);
} else {
if (Overflow) {
// ParamTLS overflow.
*ShadowPtr = getCleanShadow(V);
} else {
*ShadowPtr =
EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
}
}
LLVM_DEBUG(dbgs()
<< " ARG: " << FArg << " ==> " << **ShadowPtr << "\n");
if (MS.TrackOrigins && !Overflow) {
Value *OriginPtr =
getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
} else {
setOrigin(A, getCleanOrigin());
}
}
ArgOffset += alignTo(Size, kShadowTLSAlignment);
}
assert(*ShadowPtr && "Could not find shadow for an argument");
return *ShadowPtr;
}
// For everything else the shadow is zero.
return getCleanShadow(V);
}
/// Get the shadow for i-th argument of the instruction I.
Value *getShadow(Instruction *I, int i) {
return getShadow(I->getOperand(i));
}
/// Get the origin for a value.
Value *getOrigin(Value *V) {
if (!MS.TrackOrigins) return nullptr;
if (!PropagateShadow) return getCleanOrigin();
if (isa<Constant>(V)) return getCleanOrigin();
assert((isa<Instruction>(V) || isa<Argument>(V)) &&
"Unexpected value type in getOrigin()");
if (Instruction *I = dyn_cast<Instruction>(V)) {
if (I->getMetadata("nosanitize"))
return getCleanOrigin();
}
Value *Origin = OriginMap[V];
assert(Origin && "Missing origin");
return Origin;
}
/// Get the origin for i-th argument of the instruction I.
Value *getOrigin(Instruction *I, int i) {
return getOrigin(I->getOperand(i));
}
/// Remember the place where a shadow check should be inserted.
///
/// This location will be later instrumented with a check that will print a
/// UMR warning at runtime if the shadow value is not 0.
void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
assert(Shadow);
if (!InsertChecks) return;
#ifndef NDEBUG
Type *ShadowTy = Shadow->getType();
assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
"Can only insert checks for integer and vector shadow types");
#endif
InstrumentationList.push_back(
ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
}
/// Remember the place where a shadow check should be inserted.
///
/// This location will be later instrumented with a check that will print a
/// UMR warning at runtime if the value is not fully defined.
void insertShadowCheck(Value *Val, Instruction *OrigIns) {
assert(Val);
Value *Shadow, *Origin;
if (ClCheckConstantShadow) {
Shadow = getShadow(Val);
if (!Shadow) return;
Origin = getOrigin(Val);
} else {
Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
if (!Shadow) return;
Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
}
insertShadowCheck(Shadow, Origin, OrigIns);
}
AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
switch (a) {
case AtomicOrdering::NotAtomic:
return AtomicOrdering::NotAtomic;
case AtomicOrdering::Unordered:
case AtomicOrdering::Monotonic:
case AtomicOrdering::Release:
return AtomicOrdering::Release;
case AtomicOrdering::Acquire:
case AtomicOrdering::AcquireRelease:
return AtomicOrdering::AcquireRelease;
case AtomicOrdering::SequentiallyConsistent:
return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
switch (a) {
case AtomicOrdering::NotAtomic:
return AtomicOrdering::NotAtomic;
case AtomicOrdering::Unordered:
case AtomicOrdering::Monotonic:
case AtomicOrdering::Acquire:
return AtomicOrdering::Acquire;
case AtomicOrdering::Release:
case AtomicOrdering::AcquireRelease:
return AtomicOrdering::AcquireRelease;
case AtomicOrdering::SequentiallyConsistent:
return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
// ------------------- Visitors.
using InstVisitor<MemorySanitizerVisitor>::visit;
void visit(Instruction &I) {
if (!I.getMetadata("nosanitize"))
InstVisitor<MemorySanitizerVisitor>::visit(I);
}
/// Instrument LoadInst
///
/// Loads the corresponding shadow and (optionally) origin.
/// Optionally, checks that the load address is fully defined.
void visitLoadInst(LoadInst &I) {
assert(I.getType()->isSized() && "Load type must have size");
assert(!I.getMetadata("nosanitize"));
IRBuilder<> IRB(I.getNextNode());
Type *ShadowTy = getShadowTy(&I);
Value *Addr = I.getPointerOperand();
Value *ShadowPtr, *OriginPtr;
unsigned Alignment = I.getAlignment();
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
if (ClCheckAccessAddress)
insertShadowCheck(I.getPointerOperand(), &I);
if (I.isAtomic())
I.setOrdering(addAcquireOrdering(I.getOrdering()));
if (MS.TrackOrigins) {
if (PropagateShadow) {
unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
setOrigin(&I, IRB.CreateAlignedLoad(OriginPtr, OriginAlignment));
} else {
setOrigin(&I, getCleanOrigin());
}
}
}
/// Instrument StoreInst
///
/// Stores the corresponding shadow and (optionally) origin.
/// Optionally, checks that the store address is fully defined.
void visitStoreInst(StoreInst &I) {
StoreList.push_back(&I);
if (ClCheckAccessAddress)
insertShadowCheck(I.getPointerOperand(), &I);
}
void handleCASOrRMW(Instruction &I) {
assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
IRBuilder<> IRB(&I);
Value *Addr = I.getOperand(0);
Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, I.getType(),
/*Alignment*/ 1, /*isStore*/ true)
.first;
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
// Only test the conditional argument of cmpxchg instruction.
// The other argument can potentially be uninitialized, but we cannot
// detect this situation reliably without possible false positives.
if (isa<AtomicCmpXchgInst>(I))
insertShadowCheck(I.getOperand(1), &I);
IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
void visitAtomicRMWInst(AtomicRMWInst &I) {
handleCASOrRMW(I);
I.setOrdering(addReleaseOrdering(I.getOrdering()));
}
void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
handleCASOrRMW(I);
I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
}
// Vector manipulation.
void visitExtractElementInst(ExtractElementInst &I) {
insertShadowCheck(I.getOperand(1), &I);
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
"_msprop"));
setOrigin(&I, getOrigin(&I, 0));
}
void visitInsertElementInst(InsertElementInst &I) {
insertShadowCheck(I.getOperand(2), &I);
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
I.getOperand(2), "_msprop"));
setOriginForNaryOp(I);
}
void visitShuffleVectorInst(ShuffleVectorInst &I) {
insertShadowCheck(I.getOperand(2), &I);
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
I.getOperand(2), "_msprop"));
setOriginForNaryOp(I);
}
// Casts.
void visitSExtInst(SExtInst &I) {
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
setOrigin(&I, getOrigin(&I, 0));
}
void visitZExtInst(ZExtInst &I) {
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
setOrigin(&I, getOrigin(&I, 0));
}
void visitTruncInst(TruncInst &I) {
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
setOrigin(&I, getOrigin(&I, 0));
}
void visitBitCastInst(BitCastInst &I) {
// Special case: if this is the bitcast (there is exactly 1 allowed) between
// a musttail call and a ret, don't instrument. New instructions are not
// allowed after a musttail call.
if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
if (CI->isMustTailCall())
return;
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
setOrigin(&I, getOrigin(&I, 0));
}
void visitPtrToIntInst(PtrToIntInst &I) {
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
"_msprop_ptrtoint"));
setOrigin(&I, getOrigin(&I, 0));
}
void visitIntToPtrInst(IntToPtrInst &I) {
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
"_msprop_inttoptr"));
setOrigin(&I, getOrigin(&I, 0));
}
void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
/// Propagate shadow for bitwise AND.
///
/// This code is exact, i.e. if, for example, a bit in the left argument
/// is defined and 0, then neither the value nor the definedness of the
/// corresponding bit in B affects the resulting shadow.
void visitAnd(BinaryOperator &I) {
IRBuilder<> IRB(&I);
// "And" of 0 and a poisoned value results in unpoisoned value.
// 1&1 => 1; 0&1 => 0; p&1 => p;
// 1&0 => 0; 0&0 => 0; p&0 => 0;
// 1&p => p; 0&p => 0; p&p => p;
// S = (S1 & S2) | (V1 & S2) | (S1 & V2)
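// For example, if A == 0b10 is fully defined (S1 == 0) and the high bit of B
// is poisoned (S2 == 0b10), then S == 0 | (0b10 & 0b10) | 0 == 0b10: only the
// high result bit is poisoned, and the low bit is a defined 0 because A's low
// bit is a defined 0.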
Value *S1 = getShadow(&I, 0);
Value *S2 = getShadow(&I, 1);
Value *V1 = I.getOperand(0);
Value *V2 = I.getOperand(1);
if (V1->getType() != S1->getType()) {
V1 = IRB.CreateIntCast(V1, S1->getType(), false);
V2 = IRB.CreateIntCast(V2, S2->getType(), false);
}
Value *S1S2 = IRB.CreateAnd(S1, S2);
Value *V1S2 = IRB.CreateAnd(V1, S2);
Value *S1V2 = IRB.CreateAnd(S1, V2);
setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
setOriginForNaryOp(I);
}
void visitOr(BinaryOperator &I) {
IRBuilder<> IRB(&I);
// "Or" of 1 and a poisoned value results in unpoisoned value.
// 1|1 => 1; 0|1 => 1; p|1 => 1;
// 1|0 => 1; 0|0 => 0; p|0 => p;
// 1|p => 1; 0|p => p; p|p => p;
// S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
Value *S1 = getShadow(&I, 0);
Value *S2 = getShadow(&I, 1);
Value *V1 = IRB.CreateNot(I.getOperand(0));
Value *V2 = IRB.CreateNot(I.getOperand(1));
if (V1->getType() != S1->getType()) {
V1 = IRB.CreateIntCast(V1, S1->getType(), false);
V2 = IRB.CreateIntCast(V2, S2->getType(), false);
}
Value *S1S2 = IRB.CreateAnd(S1, S2);
Value *V1S2 = IRB.CreateAnd(V1, S2);
Value *S1V2 = IRB.CreateAnd(S1, V2);
setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
setOriginForNaryOp(I);
}
/// Default propagation of shadow and/or origin.
///
/// This class implements the general case of shadow propagation, used in all
/// cases where we don't know and/or don't care about what the operation
/// actually does. It converts all input shadow values to a common type
/// (extending or truncating as necessary), and bitwise OR's them.
///
/// This is much cheaper than inserting checks (i.e. requiring inputs to be
/// fully initialized), and less prone to false positives.
///
/// This class also implements the general case of origin propagation. For a
/// Nary operation, result origin is set to the origin of an argument that is
/// not entirely initialized. If there is more than one such argument, the
/// rightmost of them is picked. It does not matter which one is picked if all
/// arguments are initialized.
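///
/// Typical usage (see handleShadowOr() and setOriginForNaryOp() below) is to
/// Add() every operand of an instruction and then call Done() on it.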
template <bool CombineShadow>
class Combiner {
Value *Shadow = nullptr;
Value *Origin = nullptr;
IRBuilder<> &IRB;
MemorySanitizerVisitor *MSV;
public:
Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
: IRB(IRB), MSV(MSV) {}
/// Add a pair of shadow and origin values to the mix.
Combiner &Add(Value *OpShadow, Value *OpOrigin) {
if (CombineShadow) {
assert(OpShadow);
if (!Shadow)
Shadow = OpShadow;
else {
OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
}
}
if (MSV->MS.TrackOrigins) {
assert(OpOrigin);
if (!Origin) {
Origin = OpOrigin;
} else {
Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
// No point in adding something that might result in 0 origin value.
if (!ConstOrigin || !ConstOrigin->isNullValue()) {
Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
Value *Cond =
IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
}
}
}
return *this;
}
/// Add an application value to the mix.
Combiner &Add(Value *V) {
Value *OpShadow = MSV->getShadow(V);
Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
return Add(OpShadow, OpOrigin);
}
/// Set the current combined values as the given instruction's shadow
/// and origin.
void Done(Instruction *I) {
if (CombineShadow) {
assert(Shadow);
Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
MSV->setShadow(I, Shadow);
}
if (MSV->MS.TrackOrigins) {
assert(Origin);
MSV->setOrigin(I, Origin);
}
}
};
using ShadowAndOriginCombiner = Combiner<true>;
using OriginCombiner = Combiner<false>;
/// Propagate origin for arbitrary operation.
void setOriginForNaryOp(Instruction &I) {
if (!MS.TrackOrigins) return;
IRBuilder<> IRB(&I);
OriginCombiner OC(this, IRB);
for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
OC.Add(OI->get());
OC.Done(&I);
}
size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
"Vector of pointers is not a valid shadow type");
return Ty->isVectorTy() ?
Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
Ty->getPrimitiveSizeInBits();
}
/// Cast between two shadow types, extending or truncating as
/// necessary.
Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
bool Signed = false) {
Type *srcTy = V->getType();
size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
if (srcSizeInBits > 1 && dstSizeInBits == 1)
return IRB.CreateICmpNE(V, getCleanShadow(V));
if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
return IRB.CreateIntCast(V, dstTy, Signed);
if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
return IRB.CreateIntCast(V, dstTy, Signed);
Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
Value *V2 =
IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
return IRB.CreateBitCast(V2, dstTy);
// TODO: handle struct types.
}
/// Cast an application value to the type of its own shadow.
Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
Type *ShadowTy = getShadowTy(V);
if (V->getType() == ShadowTy)
return V;
if (V->getType()->isPtrOrPtrVectorTy())
return IRB.CreatePtrToInt(V, ShadowTy);
else
return IRB.CreateBitCast(V, ShadowTy);
}
/// Propagate shadow for arbitrary operation.
void handleShadowOr(Instruction &I) {
IRBuilder<> IRB(&I);
ShadowAndOriginCombiner SC(this, IRB);
for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
SC.Add(OI->get());
SC.Done(&I);
}
// Handle multiplication by constant.
//
// Handle a special case of multiplication by constant that may have one or
// more zeros in the lower bits. This makes the corresponding number of lower
// bits of the result zero as well. We model it by shifting the other operand
// of the result zero as well. We model it by shifting the other operand
// shadow left by the required number of bits. Effectively, we transform
// (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
// We use multiplication by 2**N instead of shift to cover the case of
// multiplication by 0, which may occur in some elements of a vector operand.
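// For example, for X * 24 (24 == 3 * 2**3) the three low bits of the product
// are always zero, so the shadow of the result is computed as Sx * 8, i.e.
// Sx shifted left by three bits.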
void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
Value *OtherArg) {
Constant *ShadowMul;
Type *Ty = ConstArg->getType();
if (Ty->isVectorTy()) {
unsigned NumElements = Ty->getVectorNumElements();
Type *EltTy = Ty->getSequentialElementType();
SmallVector<Constant *, 16> Elements;
for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
if (ConstantInt *Elt =
dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
const APInt &V = Elt->getValue();
APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
Elements.push_back(ConstantInt::get(EltTy, V2));
} else {
Elements.push_back(ConstantInt::get(EltTy, 1));
}
}
ShadowMul = ConstantVector::get(Elements);
} else {
if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
const APInt &V = Elt->getValue();
APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
ShadowMul = ConstantInt::get(Ty, V2);
} else {
ShadowMul = ConstantInt::get(Ty, 1);
}
}
IRBuilder<> IRB(&I);
setShadow(&I,
IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
setOrigin(&I, getOrigin(OtherArg));
}
void visitMul(BinaryOperator &I) {
Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
if (constOp0 && !constOp1)
handleMulByConstant(I, constOp0, I.getOperand(1));
else if (constOp1 && !constOp0)
handleMulByConstant(I, constOp1, I.getOperand(0));
else
handleShadowOr(I);
}
void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
void visitSub(BinaryOperator &I) { handleShadowOr(I); }
void visitXor(BinaryOperator &I) { handleShadowOr(I); }
void handleIntegerDiv(Instruction &I) {
IRBuilder<> IRB(&I);
// Strict on the second argument.
insertShadowCheck(I.getOperand(1), &I);
setShadow(&I, getShadow(&I, 0));
setOrigin(&I, getOrigin(&I, 0));
}
void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
// Floating point division is side-effect free. We cannot require that the
// divisor is fully initialized and must propagate shadow. See PR37523.
void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
/// Instrument == and != comparisons.
///
/// Sometimes the comparison result is known even if some of the bits of the
/// arguments are not.
void handleEqualityComparison(ICmpInst &I) {
IRBuilder<> IRB(&I);
Value *A = I.getOperand(0);
Value *B = I.getOperand(1);
Value *Sa = getShadow(A);
Value *Sb = getShadow(B);
// Get rid of pointers and vectors of pointers.
// For ints (and vectors of ints), types of A and Sa match,
// and this is a no-op.
A = IRB.CreatePointerCast(A, Sa->getType());
B = IRB.CreatePointerCast(B, Sb->getType());
// A == B <==> (C = A^B) == 0
// A != B <==> (C = A^B) != 0
// Sc = Sa | Sb
Value *C = IRB.CreateXor(A, B);
Value *Sc = IRB.CreateOr(Sa, Sb);
// Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
// Result is defined if one of the following is true
// * there is a defined 1 bit in C
// * C is fully defined
// Si = !(C & ~Sc) && Sc
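// For example, if A == 0b10 is fully defined and B has a defined 0 high bit
// but a poisoned low bit (Sb == 0b01), then C == A ^ B has a defined 1 in its
// high bit (C & ~Sc == 0b10 != 0), so A != B regardless of the poisoned bit
// and the result is defined (Si == 0).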
Value *Zero = Constant::getNullValue(Sc->getType());
Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
Value *Si =
IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
IRB.CreateICmpEQ(
IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
Si->setName("_msprop_icmp");
setShadow(&I, Si);
setOriginForNaryOp(I);
}
/// Build the lowest possible value of V, taking into account V's
/// uninitialized bits.
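///
/// For example, in the unsigned case, if A has value bits 0b100 and a
/// poisoned middle bit (Sa == 0b010), the possible values are {4, 6}: this
/// returns A & ~Sa == 0b100 (4), while getHighestPossibleValue() below
/// returns A | Sa == 0b110 (6).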
Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
bool isSigned) {
if (isSigned) {
// Split shadow into sign bit and other bits.
Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
// Maximize the undefined shadow bit, minimize other undefined bits.
return
IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
} else {
// Minimize undefined bits.
return IRB.CreateAnd(A, IRB.CreateNot(Sa));
}
}
/// Build the highest possible value of V, taking into account V's
/// uninitialized bits.
Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
bool isSigned) {
if (isSigned) {
// Split shadow into sign bit and other bits.
Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
// Minimize the undefined shadow bit, maximize other undefined bits.
return
IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
} else {
// Maximize undefined bits.
return IRB.CreateOr(A, Sa);
}
}
/// Instrument relational comparisons.
///
/// This function does exact shadow propagation for all relational
/// comparisons of integers, pointers and vectors of those.
/// FIXME: output seems suboptimal when one of the operands is a constant
void handleRelationalComparisonExact(ICmpInst &I) {
IRBuilder<> IRB(&I);
Value *A = I.getOperand(0);
Value *B = I.getOperand(1);
Value *Sa = getShadow(A);
Value *Sb = getShadow(B);
// Get rid of pointers and vectors of pointers.
// For ints (and vectors of ints), types of A and Sa match,
// and this is a no-op.
A = IRB.CreatePointerCast(A, Sa->getType());
B = IRB.CreatePointerCast(B, Sb->getType());
// Let [a0, a1] be the interval of possible values of A, taking into account
// its undefined bits. Let [b0, b1] be the interval of possible values of B.
// Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
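// For example, for unsigned A with two poisoned low bits and value bits 0b1??
// (so A is somewhere in [4, 7]) and a fully defined B == 8, both 4 < 8 and
// 7 < 8 hold, so (A < B) is a defined true; with B == 6 the two extremes
// disagree and the result is poisoned.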
bool IsSigned = I.isSigned();
Value *S1 = IRB.CreateICmp(I.getPredicate(),
getLowestPossibleValue(IRB, A, Sa, IsSigned),
getHighestPossibleValue(IRB, B, Sb, IsSigned));
Value *S2 = IRB.CreateICmp(I.getPredicate(),
getHighestPossibleValue(IRB, A, Sa, IsSigned),
getLowestPossibleValue(IRB, B, Sb, IsSigned));
Value *Si = IRB.CreateXor(S1, S2);
setShadow(&I, Si);
setOriginForNaryOp(I);
}
/// Instrument signed relational comparisons.
///
/// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
/// bit of the shadow. Everything else is delegated to handleShadowOr().
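///
/// The result of such a test depends only on the sign bit of the operand, so
/// it is poisoned exactly when the sign bit of the operand's shadow is set;
/// the signed "shadow < 0" comparison below extracts precisely that bit.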
void handleSignedRelationalComparison(ICmpInst &I) {
Constant *constOp;
Value *op = nullptr;
CmpInst::Predicate pre;
if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
op = I.getOperand(0);
pre = I.getPredicate();
} else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
op = I.getOperand(1);
pre = I.getSwappedPredicate();
} else {
handleShadowOr(I);
return;
}
if ((constOp->isNullValue() &&
(pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
(constOp->isAllOnesValue() &&
(pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
IRBuilder<> IRB(&I);
Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
"_msprop_icmp_s");
setShadow(&I, Shadow);
setOrigin(&I, getOrigin(op));
} else {
handleShadowOr(I);
}
}
void visitICmpInst(ICmpInst &I) {
if (!ClHandleICmp) {
handleShadowOr(I);
return;
}
if (I.isEquality()) {
handleEqualityComparison(I);
return;
}
assert(I.isRelational());
if (ClHandleICmpExact) {
handleRelationalComparisonExact(I);
return;
}
if (I.isSigned()) {
handleSignedRelationalComparison(I);
return;
}
assert(I.isUnsigned());
if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
handleRelationalComparisonExact(I);
return;
}
handleShadowOr(I);
}
void visitFCmpInst(FCmpInst &I) {
handleShadowOr(I);
}
void handleShift(BinaryOperator &I) {
IRBuilder<> IRB(&I);
// If any of the S2 bits are poisoned, the whole thing is poisoned.
// Otherwise perform the same shift on S1.
Value *S1 = getShadow(&I, 0);
Value *S2 = getShadow(&I, 1);
Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
S2->getType());
Value *V2 = I.getOperand(1);
Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
setShadow(&I, IRB.CreateOr(Shift, S2Conv));
setOriginForNaryOp(I);
}
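// Sketch of the shadow code emitted above for "%r = shl i32 %a, %b"
// (illustrative; %Sa/%Sb are the hypothetical shadows of %a/%b):
//   %s2ne   = icmp ne i32 %Sb, 0
//   %s2conv = sext i1 %s2ne to i32   ; all-ones if the shift amount is poisoned
//   %shift  = shl i32 %Sa, %b        ; shift %a's shadow by the real amount
//   %Sr     = or i32 %shift, %s2conv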
void visitShl(BinaryOperator &I) { handleShift(I); }
void visitAShr(BinaryOperator &I) { handleShift(I); }
void visitLShr(BinaryOperator &I) { handleShift(I); }
/// Instrument llvm.memmove
///
/// At this point we don't know if llvm.memmove will be inlined or not.
/// If we don't instrument it and it gets inlined,
/// our interceptor will not kick in and we will lose the memmove.
/// If we instrument the call here, but it does not get inlined,
/// we will memmove the shadow twice, which is bad in the case
/// of overlapping regions. So, we simply lower the intrinsic to a call.
///
/// Similar situation exists for memcpy and memset.
void visitMemMoveInst(MemMoveInst &I) {
IRBuilder<> IRB(&I);
IRB.CreateCall(
MS.MemmoveFn,
{IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
// Similar to memmove: avoid copying shadow twice.
// This is somewhat unfortunate as it may slow down small constant memcpys.
// FIXME: consider doing manual inline for small constant sizes and proper
// alignment.
void visitMemCpyInst(MemCpyInst &I) {
IRBuilder<> IRB(&I);
IRB.CreateCall(
MS.MemcpyFn,
{IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
// Same as memcpy.
void visitMemSetInst(MemSetInst &I) {
IRBuilder<> IRB(&I);
IRB.CreateCall(
MS.MemsetFn,
{IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
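// Note (illustrative): after the rewrites above, the three memory intrinsics
// become plain calls to the __msan_memmove/__msan_memcpy/__msan_memset
// runtime helpers, which are expected to update the application memory and
// its shadow in one place, so the shadow is never copied twice.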
void visitVAStartInst(VAStartInst &I) {
VAHelper->visitVAStartInst(I);
}
void visitVACopyInst(VACopyInst &I) {
VAHelper->visitVACopyInst(I);
}
/// Handle vector store-like intrinsics.
///
/// Instrument intrinsics that look like a simple SIMD store: writes memory,
/// has 1 pointer argument and 1 vector argument, returns void.
bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value* Addr = I.getArgOperand(0);
Value *Shadow = getShadow(&I, 1);
Value *ShadowPtr, *OriginPtr;
// We don't know the pointer alignment (could be unaligned SSE store!).
// Have to assume the worst case.
std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
Addr, IRB, Shadow->getType(), /*Alignment*/ 1, /*isStore*/ true);
IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
// FIXME: factor out common code from materializeStores
if (MS.TrackOrigins) IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
return true;
}
/// Handle vector load-like intrinsics.
///
/// Instrument intrinsics that look like a simple SIMD load: reads memory,
/// has 1 pointer argument, returns a vector.
bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *Addr = I.getArgOperand(0);
Type *ShadowTy = getShadowTy(&I);
Value *ShadowPtr, *OriginPtr;
if (PropagateShadow) {
// We don't know the pointer alignment (could be unaligned SSE load!).
// Have to assume the worst case.
unsigned Alignment = 1;
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
if (MS.TrackOrigins) {
if (PropagateShadow)
setOrigin(&I, IRB.CreateLoad(OriginPtr));
else
setOrigin(&I, getCleanOrigin());
}
return true;
}
/// Handle (SIMD arithmetic)-like intrinsics.
///
/// Instrument intrinsics with any number of arguments of the same type,
/// equal to the return type. The type should be simple (no aggregates or
/// pointers; vectors are fine).
/// Caller guarantees that this intrinsic does not access memory.
bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
Type *RetTy = I.getType();
if (!(RetTy->isIntOrIntVectorTy() ||
RetTy->isFPOrFPVectorTy() ||
RetTy->isX86_MMXTy()))
return false;
unsigned NumArgOperands = I.getNumArgOperands();
for (unsigned i = 0; i < NumArgOperands; ++i) {
Type *Ty = I.getArgOperand(i)->getType();
if (Ty != RetTy)
return false;
}
IRBuilder<> IRB(&I);
ShadowAndOriginCombiner SC(this, IRB);
for (unsigned i = 0; i < NumArgOperands; ++i)
SC.Add(I.getArgOperand(i));
SC.Done(&I);
return true;
}
/// Heuristically instrument unknown intrinsics.
///
/// The main purpose of this code is to do something reasonable with all
/// random intrinsics we might encounter, most importantly - SIMD intrinsics.
/// We recognize several classes of intrinsics by their argument types and
/// ModRefBehaviour and apply special instrumentation when we are reasonably
/// sure that we know what the intrinsic does.
///
/// We special-case intrinsics where this approach fails. See llvm.bswap
/// handling as an example of that.
bool handleUnknownIntrinsic(IntrinsicInst &I) {
unsigned NumArgOperands = I.getNumArgOperands();
if (NumArgOperands == 0)
return false;
if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
I.getArgOperand(1)->getType()->isVectorTy() &&
I.getType()->isVoidTy() &&
!I.onlyReadsMemory()) {
// This looks like a vector store.
return handleVectorStoreIntrinsic(I);
}
if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
I.getType()->isVectorTy() &&
I.onlyReadsMemory()) {
// This looks like a vector load.
return handleVectorLoadIntrinsic(I);
}
if (I.doesNotAccessMemory())
if (maybeHandleSimpleNomemIntrinsic(I))
return true;
// FIXME: detect and handle SSE maskstore/maskload
return false;
}
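// For instance (illustrative shapes, not specific intrinsics): a void
// intrinsic taking (pointer, vector) arguments that is not marked as only
// reading memory is treated as a SIMD store, while an intrinsic with a
// single pointer argument that returns a vector and only reads memory is
// treated as a SIMD load.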
void handleBswap(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *Op = I.getArgOperand(0);
Type *OpType = Op->getType();
Function *BswapFunc = Intrinsic::getDeclaration(
F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
setOrigin(&I, getOrigin(Op));
}
// Instrument vector convert intrinsic.
//
// This function instruments intrinsics like cvtsi2ss:
// %Out = int_xxx_cvtyyy(%ConvertOp)
// or
// %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
// Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
// number of \p Out elements, and (if it has 2 arguments) copies the rest of the
// elements from \p CopyOp.
// In most cases the conversion involves a floating-point value, which may trigger a
// hardware exception when not fully initialized. For this reason we require
// \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
// We copy the shadow of \p CopyOp[NumUsedElements:] to \p
// Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
// return a fully initialized value.
void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
IRBuilder<> IRB(&I);
Value *CopyOp, *ConvertOp;
switch (I.getNumArgOperands()) {
case 3:
assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
LLVM_FALLTHROUGH;
case 2:
CopyOp = I.getArgOperand(0);
ConvertOp = I.getArgOperand(1);
break;
case 1:
ConvertOp = I.getArgOperand(0);
CopyOp = nullptr;
break;
default:
llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
}
// The first *NumUsedElements* elements of ConvertOp are converted to the
// same number of output elements. The rest of the output is copied from
// CopyOp, or (if not available) filled with zeroes.
// Combine shadow for elements of ConvertOp that are used in this operation,
// and insert a check.
// FIXME: consider propagating shadow of ConvertOp, at least in the case of
// int->any conversion.
Value *ConvertShadow = getShadow(ConvertOp);
Value *AggShadow = nullptr;
if (ConvertOp->getType()->isVectorTy()) {
AggShadow = IRB.CreateExtractElement(
ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
for (int i = 1; i < NumUsedElements; ++i) {
Value *MoreShadow = IRB.CreateExtractElement(
ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
}
} else {
AggShadow = ConvertShadow;
}
assert(AggShadow->getType()->isIntegerTy());
insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
// Build result shadow by zero-filling parts of CopyOp shadow that come from
// ConvertOp.
if (CopyOp) {
assert(CopyOp->getType() == I.getType());
assert(CopyOp->getType()->isVectorTy());
Value *ResultShadow = getShadow(CopyOp);
Type *EltTy = ResultShadow->getType()->getVectorElementType();
for (int i = 0; i < NumUsedElements; ++i) {
ResultShadow = IRB.CreateInsertElement(
ResultShadow, ConstantInt::getNullValue(EltTy),
ConstantInt::get(IRB.getInt32Ty(), i));
}
setShadow(&I, ResultShadow);
setOrigin(&I, getOrigin(CopyOp));
} else {
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
}
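// Example (illustrative): for %out = cvtss2si(<4 x float> %v), handled with
// NumUsedElements == 1, only element 0 of %v's shadow is checked (a report
// is issued if it is poisoned), and the scalar result is marked as fully
// initialized because there is no CopyOp.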
// Given a scalar or vector, extract lower 64 bits (or less), and return all
// zeroes if it is zero, and all ones otherwise.
Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
if (S->getType()->isVectorTy())
S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
assert(S->getType()->getPrimitiveSizeInBits() <= 64);
Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
return CreateShadowCast(IRB, S2, T, /* Signed */ true);
}
// Given a vector, extract its first element, and return all
// zeroes if it is zero, and all ones otherwise.
Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
return CreateShadowCast(IRB, S2, T, /* Signed */ true);
}
Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
Type *T = S->getType();
assert(T->isVectorTy());
Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
return IRB.CreateSExt(S2, T);
}
// Instrument vector shift intrinsic.
//
// This function instruments intrinsics like int_x86_avx2_psll_w.
// Intrinsic shifts %In by %ShiftSize bits.
// %ShiftSize may be a vector. In that case the lower 64 bits determine shift
// size, and the rest is ignored. Behavior is defined even if shift size is
// greater than register (or field) width.
void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
assert(I.getNumArgOperands() == 2);
IRBuilder<> IRB(&I);
// If any of the S2 bits are poisoned, the whole thing is poisoned.
// Otherwise perform the same shift on S1.
Value *S1 = getShadow(&I, 0);
Value *S2 = getShadow(&I, 1);
Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
: Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
Value *V1 = I.getOperand(0);
Value *V2 = I.getOperand(1);
Value *Shift = IRB.CreateCall(I.getCalledValue(),
{IRB.CreateBitCast(S1, V1->getType()), V2});
Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
setShadow(&I, IRB.CreateOr(Shift, S2Conv));
setOriginForNaryOp(I);
}
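// Sketch (illustrative) of the shadow emitted above for
// %r = @llvm.x86.sse2.psll.w(%in, %count):
//   %sshift = @llvm.x86.sse2.psll.w(%Sin, %count)  ; shift the shadow of %in
//   %scount = all-ones if any relevant bit of %count's shadow is poisoned,
//             all-zeros otherwise (lower 64 bits, or the whole vector for
//             the variable-shift intrinsics)
//   %Sr     = or %sshift, %scount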
// Get an X86_MMX-sized vector type.
Type *getMMXVectorTy(unsigned EltSizeInBits) {
const unsigned X86_MMXSizeInBits = 64;
return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
X86_MMXSizeInBits / EltSizeInBits);
}
// Returns a signed counterpart for an (un)signed-saturate-and-pack
// intrinsic.
Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
switch (id) {
case Intrinsic::x86_sse2_packsswb_128:
case Intrinsic::x86_sse2_packuswb_128:
return Intrinsic::x86_sse2_packsswb_128;
case Intrinsic::x86_sse2_packssdw_128:
case Intrinsic::x86_sse41_packusdw:
return Intrinsic::x86_sse2_packssdw_128;
case Intrinsic::x86_avx2_packsswb:
case Intrinsic::x86_avx2_packuswb:
return Intrinsic::x86_avx2_packsswb;
case Intrinsic::x86_avx2_packssdw:
case Intrinsic::x86_avx2_packusdw:
return Intrinsic::x86_avx2_packssdw;
case Intrinsic::x86_mmx_packsswb:
case Intrinsic::x86_mmx_packuswb:
return Intrinsic::x86_mmx_packsswb;
case Intrinsic::x86_mmx_packssdw:
return Intrinsic::x86_mmx_packssdw;
default:
llvm_unreachable("unexpected intrinsic id");
}
}
// Instrument vector pack intrinsic.
//
// This function instruments intrinsics like x86_mmx_packsswb, which
// pack elements of 2 input vectors into half as many bits with saturation.
// Shadow is propagated with the signed variant of the same intrinsic applied
// to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
// EltSizeInBits is used only for x86mmx arguments.
void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
assert(I.getNumArgOperands() == 2);
bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
IRBuilder<> IRB(&I);
Value *S1 = getShadow(&I, 0);
Value *S2 = getShadow(&I, 1);
assert(isX86_MMX || S1->getType()->isVectorTy());
// SExt and ICmpNE below must apply to individual elements of input vectors.
// In case of x86mmx arguments, cast them to appropriate vector types and
// back.
Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
if (isX86_MMX) {
S1 = IRB.CreateBitCast(S1, T);
S2 = IRB.CreateBitCast(S2, T);
}
Value *S1_ext = IRB.CreateSExt(
IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
Value *S2_ext = IRB.CreateSExt(
IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
if (isX86_MMX) {
Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
}
Function *ShadowFn = Intrinsic::getDeclaration(
F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
Value *S =
IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
setShadow(&I, S);
setOriginForNaryOp(I);
}
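// Rationale (illustrative): each shadow operand is first collapsed into a
// per-element all-zeros/all-ones mask (sext(S != 0)), and the masks are
// packed with the *signed* saturating variant because signed saturation maps
// 0 -> 0 and -1 -> -1, so a poisoned wide element always yields a poisoned
// (all-ones) narrow element, and a clean one stays clean.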
// Instrument sum-of-absolute-differences intrinsic.
void handleVectorSadIntrinsic(IntrinsicInst &I) {
const unsigned SignificantBitsPerResultElement = 16;
bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
unsigned ZeroBitsPerResultElement =
ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
IRBuilder<> IRB(&I);
Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
S = IRB.CreateBitCast(S, ResTy);
S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
ResTy);
S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
S = IRB.CreateBitCast(S, getShadowTy(&I));
setShadow(&I, S);
setOriginForNaryOp(I);
}
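// Rationale (illustrative): each 64-bit result lane of psadbw is a sum of
// eight byte differences and therefore fits in its low 16 bits; the upper
// bits are always zero, so the shift above marks them as initialized in the
// result shadow as well.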
// Instrument multiply-add intrinsic.
void handleVectorPmaddIntrinsic(IntrinsicInst &I,
unsigned EltSizeInBits = 0) {
bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
IRBuilder<> IRB(&I);
Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
S = IRB.CreateBitCast(S, ResTy);
S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
ResTy);
S = IRB.CreateBitCast(S, getShadowTy(&I));
setShadow(&I, S);
setOriginForNaryOp(I);
}
// Instrument compare-packed intrinsic.
// Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
// all-ones shadow.
void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Type *ResTy = getShadowTy(&I);
Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
Value *S = IRB.CreateSExt(
IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
setShadow(&I, S);
setOriginForNaryOp(I);
}
// Instrument compare-scalar intrinsic.
// This handles both cmp* intrinsics which return the result in the first
// element of a vector, and comi* which return the result as i32.
void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
setShadow(&I, S);
setOriginForNaryOp(I);
}
void handleStmxcsr(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value* Addr = I.getArgOperand(0);
Type *Ty = IRB.getInt32Ty();
Value *ShadowPtr =
getShadowOriginPtr(Addr, IRB, Ty, /*Alignment*/ 1, /*isStore*/ true)
.first;
IRB.CreateStore(getCleanShadow(Ty),
IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
}
void handleLdmxcsr(IntrinsicInst &I) {
if (!InsertChecks) return;
IRBuilder<> IRB(&I);
Value *Addr = I.getArgOperand(0);
Type *Ty = IRB.getInt32Ty();
unsigned Alignment = 1;
Value *ShadowPtr, *OriginPtr;
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
Value *Shadow = IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_ldmxcsr");
Value *Origin =
MS.TrackOrigins ? IRB.CreateLoad(OriginPtr) : getCleanOrigin();
insertShadowCheck(Shadow, Origin, &I);
}
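// Note (illustrative): stmxcsr stores the MXCSR register, whose value is
// always defined by the hardware, so the destination shadow is simply
// cleared above; ldmxcsr consumes the 32-bit word at Addr, so its shadow
// (and origin, if tracked) is checked like any other use of that memory.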
void handleMaskedStore(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *V = I.getArgOperand(0);
Value *Addr = I.getArgOperand(1);
unsigned Align = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
Value *Mask = I.getArgOperand(3);
Value *Shadow = getShadow(V);
Value *ShadowPtr;
Value *OriginPtr;
std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
Addr, IRB, Shadow->getType(), Align, /*isStore*/ true);
if (ClCheckAccessAddress) {
insertShadowCheck(Addr, &I);
// Uninitialized mask is kind of like uninitialized address, but not as
// scary.
insertShadowCheck(Mask, &I);
}
IRB.CreateMaskedStore(Shadow, ShadowPtr, Align, Mask);
if (MS.TrackOrigins) {
auto &DL = F.getParent()->getDataLayout();
paintOrigin(IRB, getOrigin(V), OriginPtr,
DL.getTypeStoreSize(Shadow->getType()),
std::max(Align, kMinOriginAlignment));
}
}
bool handleMaskedLoad(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *Addr = I.getArgOperand(0);
unsigned Align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
Value *Mask = I.getArgOperand(2);
Value *PassThru = I.getArgOperand(3);
Type *ShadowTy = getShadowTy(&I);
Value *ShadowPtr, *OriginPtr;
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Align, /*isStore*/ false);
setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Align, Mask,
getShadow(PassThru), "_msmaskedld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
if (ClCheckAccessAddress) {
insertShadowCheck(Addr, &I);
insertShadowCheck(Mask, &I);
}
if (MS.TrackOrigins) {
if (PropagateShadow) {
// Choose between PassThru's and the loaded value's origins.
Value *MaskedPassThruShadow = IRB.CreateAnd(
getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
Value *Acc = IRB.CreateExtractElement(
MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
for (int i = 1, N = PassThru->getType()->getVectorNumElements(); i < N;
++i) {
Value *More = IRB.CreateExtractElement(
MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i));
Acc = IRB.CreateOr(Acc, More);
}
Value *Origin = IRB.CreateSelect(
IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
getOrigin(PassThru), IRB.CreateLoad(OriginPtr));
setOrigin(&I, Origin);
} else {
setOrigin(&I, getCleanOrigin());
}
}
return true;
}
void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case Intrinsic::bswap:
handleBswap(I);
break;
case Intrinsic::masked_store:
handleMaskedStore(I);
break;
case Intrinsic::masked_load:
handleMaskedLoad(I);
break;
case Intrinsic::x86_sse_stmxcsr:
handleStmxcsr(I);
break;
case Intrinsic::x86_sse_ldmxcsr:
handleLdmxcsr(I);
break;
case Intrinsic::x86_avx512_vcvtsd2usi64:
case Intrinsic::x86_avx512_vcvtsd2usi32:
case Intrinsic::x86_avx512_vcvtss2usi64:
case Intrinsic::x86_avx512_vcvtss2usi32:
case Intrinsic::x86_avx512_cvttss2usi64:
case Intrinsic::x86_avx512_cvttss2usi:
case Intrinsic::x86_avx512_cvttsd2usi64:
case Intrinsic::x86_avx512_cvttsd2usi:
case Intrinsic::x86_avx512_cvtusi2ss:
case Intrinsic::x86_avx512_cvtusi642sd:
case Intrinsic::x86_avx512_cvtusi642ss:
case Intrinsic::x86_sse2_cvtsd2si64:
case Intrinsic::x86_sse2_cvtsd2si:
case Intrinsic::x86_sse2_cvtsd2ss:
case Intrinsic::x86_sse2_cvttsd2si64:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvttss2si64:
case Intrinsic::x86_sse_cvttss2si:
handleVectorConvertIntrinsic(I, 1);
break;
case Intrinsic::x86_sse_cvtps2pi:
case Intrinsic::x86_sse_cvttps2pi:
handleVectorConvertIntrinsic(I, 2);
break;
case Intrinsic::x86_avx512_psll_w_512:
case Intrinsic::x86_avx512_psll_d_512:
case Intrinsic::x86_avx512_psll_q_512:
case Intrinsic::x86_avx512_pslli_w_512:
case Intrinsic::x86_avx512_pslli_d_512:
case Intrinsic::x86_avx512_pslli_q_512:
case Intrinsic::x86_avx512_psrl_w_512:
case Intrinsic::x86_avx512_psrl_d_512:
case Intrinsic::x86_avx512_psrl_q_512:
case Intrinsic::x86_avx512_psra_w_512:
case Intrinsic::x86_avx512_psra_d_512:
case Intrinsic::x86_avx512_psra_q_512:
case Intrinsic::x86_avx512_psrli_w_512:
case Intrinsic::x86_avx512_psrli_d_512:
case Intrinsic::x86_avx512_psrli_q_512:
case Intrinsic::x86_avx512_psrai_w_512:
case Intrinsic::x86_avx512_psrai_d_512:
case Intrinsic::x86_avx512_psrai_q_512:
case Intrinsic::x86_avx512_psra_q_256:
case Intrinsic::x86_avx512_psra_q_128:
case Intrinsic::x86_avx512_psrai_q_256:
case Intrinsic::x86_avx512_psrai_q_128:
case Intrinsic::x86_avx2_psll_w:
case Intrinsic::x86_avx2_psll_d:
case Intrinsic::x86_avx2_psll_q:
case Intrinsic::x86_avx2_pslli_w:
case Intrinsic::x86_avx2_pslli_d:
case Intrinsic::x86_avx2_pslli_q:
case Intrinsic::x86_avx2_psrl_w:
case Intrinsic::x86_avx2_psrl_d:
case Intrinsic::x86_avx2_psrl_q:
case Intrinsic::x86_avx2_psra_w:
case Intrinsic::x86_avx2_psra_d:
case Intrinsic::x86_avx2_psrli_w:
case Intrinsic::x86_avx2_psrli_d:
case Intrinsic::x86_avx2_psrli_q:
case Intrinsic::x86_avx2_psrai_w:
case Intrinsic::x86_avx2_psrai_d:
case Intrinsic::x86_sse2_psll_w:
case Intrinsic::x86_sse2_psll_d:
case Intrinsic::x86_sse2_psll_q:
case Intrinsic::x86_sse2_pslli_w:
case Intrinsic::x86_sse2_pslli_d:
case Intrinsic::x86_sse2_pslli_q:
case Intrinsic::x86_sse2_psrl_w:
case Intrinsic::x86_sse2_psrl_d:
case Intrinsic::x86_sse2_psrl_q:
case Intrinsic::x86_sse2_psra_w:
case Intrinsic::x86_sse2_psra_d:
case Intrinsic::x86_sse2_psrli_w:
case Intrinsic::x86_sse2_psrli_d:
case Intrinsic::x86_sse2_psrli_q:
case Intrinsic::x86_sse2_psrai_w:
case Intrinsic::x86_sse2_psrai_d:
case Intrinsic::x86_mmx_psll_w:
case Intrinsic::x86_mmx_psll_d:
case Intrinsic::x86_mmx_psll_q:
case Intrinsic::x86_mmx_pslli_w:
case Intrinsic::x86_mmx_pslli_d:
case Intrinsic::x86_mmx_pslli_q:
case Intrinsic::x86_mmx_psrl_w:
case Intrinsic::x86_mmx_psrl_d:
case Intrinsic::x86_mmx_psrl_q:
case Intrinsic::x86_mmx_psra_w:
case Intrinsic::x86_mmx_psra_d:
case Intrinsic::x86_mmx_psrli_w:
case Intrinsic::x86_mmx_psrli_d:
case Intrinsic::x86_mmx_psrli_q:
case Intrinsic::x86_mmx_psrai_w:
case Intrinsic::x86_mmx_psrai_d:
handleVectorShiftIntrinsic(I, /* Variable */ false);
break;
case Intrinsic::x86_avx2_psllv_d:
case Intrinsic::x86_avx2_psllv_d_256:
case Intrinsic::x86_avx512_psllv_d_512:
case Intrinsic::x86_avx2_psllv_q:
case Intrinsic::x86_avx2_psllv_q_256:
case Intrinsic::x86_avx512_psllv_q_512:
case Intrinsic::x86_avx2_psrlv_d:
case Intrinsic::x86_avx2_psrlv_d_256:
case Intrinsic::x86_avx512_psrlv_d_512:
case Intrinsic::x86_avx2_psrlv_q:
case Intrinsic::x86_avx2_psrlv_q_256:
case Intrinsic::x86_avx512_psrlv_q_512:
case Intrinsic::x86_avx2_psrav_d:
case Intrinsic::x86_avx2_psrav_d_256:
case Intrinsic::x86_avx512_psrav_d_512:
case Intrinsic::x86_avx512_psrav_q_128:
case Intrinsic::x86_avx512_psrav_q_256:
case Intrinsic::x86_avx512_psrav_q_512:
handleVectorShiftIntrinsic(I, /* Variable */ true);
break;
case Intrinsic::x86_sse2_packsswb_128:
case Intrinsic::x86_sse2_packssdw_128:
case Intrinsic::x86_sse2_packuswb_128:
case Intrinsic::x86_sse41_packusdw:
case Intrinsic::x86_avx2_packsswb:
case Intrinsic::x86_avx2_packssdw:
case Intrinsic::x86_avx2_packuswb:
case Intrinsic::x86_avx2_packusdw:
handleVectorPackIntrinsic(I);
break;
case Intrinsic::x86_mmx_packsswb:
case Intrinsic::x86_mmx_packuswb:
handleVectorPackIntrinsic(I, 16);
break;
case Intrinsic::x86_mmx_packssdw:
handleVectorPackIntrinsic(I, 32);
break;
case Intrinsic::x86_mmx_psad_bw:
case Intrinsic::x86_sse2_psad_bw:
case Intrinsic::x86_avx2_psad_bw:
handleVectorSadIntrinsic(I);
break;
case Intrinsic::x86_sse2_pmadd_wd:
case Intrinsic::x86_avx2_pmadd_wd:
case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
case Intrinsic::x86_avx2_pmadd_ub_sw:
handleVectorPmaddIntrinsic(I);
break;
case Intrinsic::x86_ssse3_pmadd_ub_sw:
handleVectorPmaddIntrinsic(I, 8);
break;
case Intrinsic::x86_mmx_pmadd_wd:
handleVectorPmaddIntrinsic(I, 16);
break;
case Intrinsic::x86_sse_cmp_ss:
case Intrinsic::x86_sse2_cmp_sd:
case Intrinsic::x86_sse_comieq_ss:
case Intrinsic::x86_sse_comilt_ss:
case Intrinsic::x86_sse_comile_ss:
case Intrinsic::x86_sse_comigt_ss:
case Intrinsic::x86_sse_comige_ss:
case Intrinsic::x86_sse_comineq_ss:
case Intrinsic::x86_sse_ucomieq_ss:
case Intrinsic::x86_sse_ucomilt_ss:
case Intrinsic::x86_sse_ucomile_ss:
case Intrinsic::x86_sse_ucomigt_ss:
case Intrinsic::x86_sse_ucomige_ss:
case Intrinsic::x86_sse_ucomineq_ss:
case Intrinsic::x86_sse2_comieq_sd:
case Intrinsic::x86_sse2_comilt_sd:
case Intrinsic::x86_sse2_comile_sd:
case Intrinsic::x86_sse2_comigt_sd:
case Intrinsic::x86_sse2_comige_sd:
case Intrinsic::x86_sse2_comineq_sd:
case Intrinsic::x86_sse2_ucomieq_sd:
case Intrinsic::x86_sse2_ucomilt_sd:
case Intrinsic::x86_sse2_ucomile_sd:
case Intrinsic::x86_sse2_ucomigt_sd:
case Intrinsic::x86_sse2_ucomige_sd:
case Intrinsic::x86_sse2_ucomineq_sd:
handleVectorCompareScalarIntrinsic(I);
break;
case Intrinsic::x86_sse_cmp_ps:
case Intrinsic::x86_sse2_cmp_pd:
// FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
// generates reasonably looking IR that fails in the backend with "Do not
// know how to split the result of this operator!".
handleVectorComparePackedIntrinsic(I);
break;
case Intrinsic::is_constant:
// The result of llvm.is.constant() is always defined.
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
break;
default:
if (!handleUnknownIntrinsic(I))
visitInstruction(I);
break;
}
}
void visitCallSite(CallSite CS) {
Instruction &I = *CS.getInstruction();
assert(!I.getMetadata("nosanitize"));
assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
if (CS.isCall()) {
CallInst *Call = cast<CallInst>(&I);
// For inline asm, do the usual thing: check argument shadow and mark all
// outputs as clean. Note that any side effects of the inline asm that are
// not immediately visible in its constraints are not handled.
if (Call->isInlineAsm()) {
if (ClHandleAsmConservative && MS.CompileKernel)
visitAsmInstruction(I);
else
visitInstruction(I);
return;
}
assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
// We are going to insert code that relies on the fact that the callee
// will become a non-readonly function after it is instrumented by us. To
// prevent this code from being optimized out, mark that function
// non-readonly in advance.
if (Function *Func = Call->getCalledFunction()) {
// Clear out readonly/readnone attributes.
AttrBuilder B;
B.addAttribute(Attribute::ReadOnly)
.addAttribute(Attribute::ReadNone);
Func->removeAttributes(AttributeList::FunctionIndex, B);
}
maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
}
IRBuilder<> IRB(&I);
unsigned ArgOffset = 0;
LLVM_DEBUG(dbgs() << " CallSite: " << I << "\n");
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
unsigned i = ArgIt - CS.arg_begin();
if (!A->getType()->isSized()) {
LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
continue;
}
unsigned Size = 0;
Value *Store = nullptr;
// Compute the Shadow for arg even if it is ByVal, because
// in that case getShadow() will copy the actual arg shadow to
// __msan_param_tls.
Value *ArgShadow = getShadow(A);
Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
LLVM_DEBUG(dbgs() << " Arg#" << i << ": " << *A
<< " Shadow: " << *ArgShadow << "\n");
bool ArgIsInitialized = false;
const DataLayout &DL = F.getParent()->getDataLayout();
if (CS.paramHasAttr(i, Attribute::ByVal)) {
assert(A->getType()->isPointerTy() &&
"ByVal argument is not a pointer!");
Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
if (ArgOffset + Size > kParamTLSSize) break;
unsigned ParamAlignment = CS.getParamAlignment(i);
unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
Value *AShadowPtr =
getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
/*isStore*/ false)
.first;
Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
Alignment, Size);
// TODO(glider): need to copy origins.
} else {
Size = DL.getTypeAllocSize(A->getType());
if (ArgOffset + Size > kParamTLSSize) break;
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
kShadowTLSAlignment);
Constant *Cst = dyn_cast<Constant>(ArgShadow);
if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
}
if (MS.TrackOrigins && !ArgIsInitialized)
IRB.CreateStore(getOrigin(A),
getOriginPtrForArgument(A, IRB, ArgOffset));
(void)Store;
assert(Size != 0 && Store != nullptr);
LLVM_DEBUG(dbgs() << " Param:" << *Store << "\n");
ArgOffset += alignTo(Size, 8);
}
LLVM_DEBUG(dbgs() << " done with call args\n");
FunctionType *FT = CS.getFunctionType();
if (FT->isVarArg()) {
VAHelper->visitCallSite(CS, IRB);
}
// Now, get the shadow for the RetVal.
if (!I.getType()->isSized()) return;
// Don't emit the epilogue for musttail call returns.
if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
IRBuilder<> IRBBefore(&I);
// Until we have full dynamic coverage, make sure the retval shadow is 0.
Value *Base = getShadowPtrForRetval(&I, IRBBefore);
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
BasicBlock::iterator NextInsn;
if (CS.isCall()) {
NextInsn = ++I.getIterator();
assert(NextInsn != I.getParent()->end());
} else {
BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
if (!NormalDest->getSinglePredecessor()) {
// FIXME: this case is tricky, so we are just conservative here.
// Perhaps we need to split the edge between this BB and NormalDest,
// but a naive attempt to use SplitEdge leads to a crash.
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
return;
}
// FIXME: NextInsn is likely in a basic block that has not been visited yet.
// Anything inserted there will be instrumented by MSan later!
NextInsn = NormalDest->getFirstInsertionPt();
assert(NextInsn != NormalDest->end() &&
"Could not find insertion point for retval shadow load");
}
IRBuilder<> IRBAfter(&*NextInsn);
Value *RetvalShadow =
IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
kShadowTLSAlignment, "_msret");
setShadow(&I, RetvalShadow);
if (MS.TrackOrigins)
setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
}
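// Summary of the calling convention used above (illustrative): the shadow of
// each call argument is written into the __msan_param_tls array at ArgOffset,
// which advances by the argument size rounded up to 8 bytes; ByVal arguments
// store a copy of the pointee's shadow rather than the pointer's shadow. The
// return-value shadow travels through __msan_retval_tls, which is cleared
// before the call and read back right after it.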
bool isAMustTailRetVal(Value *RetVal) {
if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
RetVal = I->getOperand(0);
}
if (auto *I = dyn_cast<CallInst>(RetVal)) {
return I->isMustTailCall();
}
return false;
}
void visitReturnInst(ReturnInst &I) {
IRBuilder<> IRB(&I);
Value *RetVal = I.getReturnValue();
if (!RetVal) return;
// Don't emit the epilogue for musttail call returns.
if (isAMustTailRetVal(RetVal)) return;
Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
if (CheckReturnValue) {
insertShadowCheck(RetVal, &I);
Value *Shadow = getCleanShadow(RetVal);
IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
} else {
Value *Shadow = getShadow(RetVal);
IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
if (MS.TrackOrigins)
IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
}
}
void visitPHINode(PHINode &I) {
IRBuilder<> IRB(&I);
if (!PropagateShadow) {
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
return;
}
ShadowPHINodes.push_back(&I);
setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
"_msphi_s"));
if (MS.TrackOrigins)
setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
"_msphi_o"));
}
Value *getLocalVarDescription(AllocaInst &I) {
SmallString<2048> StackDescriptionStorage;
raw_svector_ostream StackDescription(StackDescriptionStorage);
// We create a string with a description of the stack allocation and
// pass it into __msan_set_alloca_origin.
// It will be printed by the run-time if stack-originated UMR is found.
// The first 4 bytes of the string are set to '----' and will be replaced
// by __msan_va_arg_overflow_size_tls at the first call.
StackDescription << "----" << I.getName() << "@" << F.getName();
return createPrivateNonConstGlobalForString(*F.getParent(),
StackDescription.str());
}
void instrumentAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
if (PoisonStack && ClPoisonStackWithCall) {
IRB.CreateCall(MS.MsanPoisonStackFn,
{IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
} else {
Value *ShadowBase, *OriginBase;
std::tie(ShadowBase, OriginBase) =
getShadowOriginPtr(&I, IRB, IRB.getInt8Ty(), 1, /*isStore*/ true);
Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlignment());
}
if (PoisonStack && MS.TrackOrigins) {
Value *Descr = getLocalVarDescription(I);
IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
{IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
IRB.CreatePointerCast(&F, MS.IntptrTy)});
}
}
void instrumentAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
Value *Descr = getLocalVarDescription(I);
if (PoisonStack) {
IRB.CreateCall(MS.MsanPoisonAllocaFn,
{IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
} else {
IRB.CreateCall(MS.MsanUnpoisonAllocaFn,
{IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
}
}
void visitAllocaInst(AllocaInst &I) {
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
IRBuilder<> IRB(I.getNextNode());
const DataLayout &DL = F.getParent()->getDataLayout();
uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
if (I.isArrayAllocation())
Len = IRB.CreateMul(Len, I.getArraySize());
if (MS.CompileKernel)
instrumentAllocaKmsan(I, IRB, Len);
else
instrumentAllocaUserspace(I, IRB, Len);
}
void visitSelectInst(SelectInst& I) {
IRBuilder<> IRB(&I);
// a = select b, c, d
Value *B = I.getCondition();
Value *C = I.getTrueValue();
Value *D = I.getFalseValue();
Value *Sb = getShadow(B);
Value *Sc = getShadow(C);
Value *Sd = getShadow(D);
// Result shadow if condition shadow is 0.
Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
Value *Sa1;
if (I.getType()->isAggregateType()) {
// To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
// an extra "select". This results in much more compact IR.
// Sa = select Sb, poisoned, (select b, Sc, Sd)
Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
} else {
// Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
// If Sb (condition is poisoned), look for bits in c and d that are equal
// and both unpoisoned.
// If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
// Cast arguments to shadow-compatible type.
C = CreateAppToShadowCast(IRB, C);
D = CreateAppToShadowCast(IRB, D);
// Result shadow if condition shadow is 1.
Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
}
Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
setShadow(&I, Sa);
if (MS.TrackOrigins) {
// Origins are always i32, so any vector conditions must be flattened.
// FIXME: consider tracking vector origins for app vectors?
if (B->getType()->isVectorTy()) {
Type *FlatTy = getShadowTyNoVec(B->getType());
B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
ConstantInt::getNullValue(FlatTy));
Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
ConstantInt::getNullValue(FlatTy));
}
// a = select b, c, d
// Oa = Sb ? Ob : (b ? Oc : Od)
setOrigin(
&I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
getOrigin(I.getFalseValue()))));
}
}
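// Example (illustrative, non-aggregate case): if the condition is poisoned
// but both arms are fully initialized and equal (c == d, Sc == Sd == 0),
// then Sa1 = (c^d) | Sc | Sd == 0, so the select result is still considered
// initialized; the choice genuinely does not matter.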
void visitLandingPadInst(LandingPadInst &I) {
// Do nothing.
// See https://github.com/google/sanitizers/issues/504
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
void visitCatchSwitchInst(CatchSwitchInst &I) {
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
void visitFuncletPadInst(FuncletPadInst &I) {
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
void visitGetElementPtrInst(GetElementPtrInst &I) {
handleShadowOr(I);
}
void visitExtractValueInst(ExtractValueInst &I) {
IRBuilder<> IRB(&I);
Value *Agg = I.getAggregateOperand();
LLVM_DEBUG(dbgs() << "ExtractValue: " << I << "\n");
Value *AggShadow = getShadow(Agg);
LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
LLVM_DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
setShadow(&I, ResShadow);
setOriginForNaryOp(I);
}
void visitInsertValueInst(InsertValueInst &I) {
IRBuilder<> IRB(&I);
LLVM_DEBUG(dbgs() << "InsertValue: " << I << "\n");
Value *AggShadow = getShadow(I.getAggregateOperand());
Value *InsShadow = getShadow(I.getInsertedValueOperand());
LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
LLVM_DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
LLVM_DEBUG(dbgs() << " Res: " << *Res << "\n");
setShadow(&I, Res);
setOriginForNaryOp(I);
}
void dumpInst(Instruction &I) {
if (CallInst *CI = dyn_cast<CallInst>(&I)) {
errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
} else {
errs() << "ZZZ " << I.getOpcodeName() << "\n";
}
errs() << "QQQ " << I << "\n";
}
void visitResumeInst(ResumeInst &I) {
LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
// Nothing to do here.
}
void visitCleanupReturnInst(CleanupReturnInst &CRI) {
LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
// Nothing to do here.
}
void visitCatchReturnInst(CatchReturnInst &CRI) {
LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
// Nothing to do here.
}
void instrumentAsmArgument(Value *Operand, Instruction &I, IRBuilder<> &IRB,
const DataLayout &DL, bool isOutput) {
// For each assembly argument, we check its value for being initialized.
// If the argument is a pointer, we assume it points to a single element
// of the corresponding type (or to an 8-byte word, if the type is unsized).
// Each such pointer is instrumented with a call to the runtime library.
Type *OpType = Operand->getType();
// Check the operand value itself.
insertShadowCheck(Operand, &I);
if (!OpType->isPointerTy() || !isOutput) {
assert(!isOutput);
return;
}
Type *ElType = OpType->getPointerElementType();
if (!ElType->isSized())
return;
int Size = DL.getTypeStoreSize(ElType);
Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
}
/// Get the number of output arguments returned by pointers.
int getNumOutputArgs(InlineAsm *IA, CallInst *CI) {
int NumRetOutputs = 0;
int NumOutputs = 0;
Type *RetTy = dyn_cast<Value>(CI)->getType();
if (!RetTy->isVoidTy()) {
// Register outputs are returned via the CallInst return value.
StructType *ST = dyn_cast_or_null<StructType>(RetTy);
if (ST)
NumRetOutputs = ST->getNumElements();
else
NumRetOutputs = 1;
}
InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
for (size_t i = 0, n = Constraints.size(); i < n; i++) {
InlineAsm::ConstraintInfo Info = Constraints[i];
switch (Info.Type) {
case InlineAsm::isOutput:
NumOutputs++;
break;
default:
break;
}
}
return NumOutputs - NumRetOutputs;
}
void visitAsmInstruction(Instruction &I) {
// Conservative inline assembly handling: check for poisoned shadow of
// asm() arguments, then unpoison the result and all the memory locations
// pointed to by those arguments.
// An inline asm() statement in C++ contains lists of input and output
// arguments used by the assembly code. These are mapped to operands of the
// CallInst as follows:
// - nR register outputs ("=r") are returned by value in a single structure
// (SSA value of the CallInst);
// - nO other outputs ("=m" and others) are returned by pointer as first
// nO operands of the CallInst;
// - nI inputs ("r", "m" and others) are passed to CallInst as the
// remaining nI operands.
// The total number of asm() arguments in the source is nR+nO+nI, and the
// corresponding CallInst has nO+nI+1 operands (the last operand is the
// function to be called).
const DataLayout &DL = F.getParent()->getDataLayout();
CallInst *CI = dyn_cast<CallInst>(&I);
IRBuilder<> IRB(&I);
InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
int OutputArgs = getNumOutputArgs(IA, CI);
// The last operand of a CallInst is the function itself.
int NumOperands = CI->getNumOperands() - 1;
// Check input arguments. Doing so before unpoisoning output arguments, so
// that we won't overwrite uninit values before checking them.
for (int i = OutputArgs; i < NumOperands; i++) {
Value *Operand = CI->getOperand(i);
instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ false);
}
// Unpoison output arguments. This must happen before the actual InlineAsm
// call, so that the shadow for memory published in the asm() statement
// remains valid.
for (int i = 0; i < OutputArgs; i++) {
Value *Operand = CI->getOperand(i);
instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ true);
}
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
void visitInstruction(Instruction &I) {
// Everything else: stop propagating and check for poisoned shadow.
if (ClDumpStrictInstructions)
dumpInst(I);
LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
Value *Operand = I.getOperand(i);
if (Operand->getType()->isSized())
insertShadowCheck(Operand, &I);
}
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
};
/// AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
// An unfortunate workaround for asymmetric lowering of va_arg stuff.
// See a comment in visitCallSite for more details.
static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
static const unsigned AMD64FpEndOffsetSSE = 176;
// If SSE is disabled, fp_offset in va_list is zero.
static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
unsigned AMD64FpEndOffset;
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
Value *VAArgTLSCopy = nullptr;
Value *VAArgTLSOriginCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
: F(F), MS(MS), MSV(MSV) {
AMD64FpEndOffset = AMD64FpEndOffsetSSE;
for (const auto &Attr : F.getAttributes().getFnAttributes()) {
if (Attr.isStringAttribute() &&
(Attr.getKindAsString() == "target-features")) {
if (Attr.getValueAsString().contains("-sse"))
AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
break;
}
}
}
ArgKind classifyArgument(Value* arg) {
// A very rough approximation of X86_64 argument classification rules.
Type *T = arg->getType();
if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
return AK_FloatingPoint;
if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
return AK_GeneralPurpose;
if (T->isPointerTy())
return AK_GeneralPurpose;
return AK_Memory;
}
// For VarArg functions, store the argument shadow in an ABI-specific format
// that corresponds to va_list layout.
// We do this because Clang lowers va_arg in the frontend, and this pass
// only sees the low level code that deals with va_list internals.
// A much easier alternative (provided that Clang emits va_arg instructions)
// would have been to associate each live instance of va_list with a copy of
// MSanParamTLS, and extract shadow on va_arg() call in the argument list
// order.
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned GpOffset = 0;
unsigned FpOffset = AMD64GpEndOffset;
unsigned OverflowOffset = AMD64FpEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
unsigned ArgNo = CS.getArgumentNo(ArgIt);
bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
if (IsByVal) {
// ByVal arguments always go to the overflow area.
// Fixed arguments passed through the overflow area will be stepped
// over by va_start, so don't count them towards the offset.
if (IsFixed)
continue;
assert(A->getType()->isPointerTy());
Type *RealTy = A->getType()->getPointerElementType();
uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
Value *ShadowBase = getShadowPtrForVAArgument(
RealTy, IRB, OverflowOffset, alignTo(ArgSize, 8));
Value *OriginBase = nullptr;
if (MS.TrackOrigins)
OriginBase = getOriginPtrForVAArgument(RealTy, IRB, OverflowOffset);
OverflowOffset += alignTo(ArgSize, 8);
if (!ShadowBase)
continue;
Value *ShadowPtr, *OriginPtr;
std::tie(ShadowPtr, OriginPtr) =
MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
/*isStore*/ false);
IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
kShadowTLSAlignment, ArgSize);
if (MS.TrackOrigins)
IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
kShadowTLSAlignment, ArgSize);
} else {
ArgKind AK = classifyArgument(A);
if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
AK = AK_Memory;
if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
AK = AK_Memory;
Value *ShadowBase, *OriginBase = nullptr;
switch (AK) {
case AK_GeneralPurpose:
ShadowBase =
getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
if (MS.TrackOrigins)
OriginBase =
getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
GpOffset += 8;
break;
case AK_FloatingPoint:
ShadowBase =
getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
if (MS.TrackOrigins)
OriginBase =
getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
FpOffset += 16;
break;
case AK_Memory:
if (IsFixed)
continue;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
ShadowBase =
getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
if (MS.TrackOrigins)
OriginBase =
getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
OverflowOffset += alignTo(ArgSize, 8);
}
// Take fixed arguments into account for GpOffset and FpOffset,
// but don't actually store shadows for them.
// TODO(glider): don't call get*PtrForVAArgument() for them.
if (IsFixed)
continue;
if (!ShadowBase)
continue;
Value *Shadow = MSV.getShadow(A);
IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
if (MS.TrackOrigins) {
Value *Origin = MSV.getOrigin(A);
unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
std::max(kShadowTLSAlignment, kMinOriginAlignment));
}
}
}
Constant *OverflowSize =
ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
}
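// Resulting __msan_va_arg_tls layout (illustrative): bytes [0, 48) hold the
// shadow of general-purpose register arguments, bytes [48, 176) the shadow
// of FP/SSE register arguments (empty if SSE is disabled), and everything
// past AMD64FpEndOffset mirrors the overflow (stack) area; the overflow size
// is published separately through MS.VAArgOverflowSizeTLS.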
/// Compute the shadow address for a given va_arg.
Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
unsigned ArgOffset, unsigned ArgSize) {
// Make sure we don't overflow __msan_va_arg_tls.
if (ArgOffset + ArgSize > kParamTLSSize)
return nullptr;
Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
"_msarg_va_s");
}
/// Compute the origin address for a given va_arg.
Value *getOriginPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
// getOriginPtrForVAArgument() is always called after
// getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
// overflow.
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
"_msarg_va_o");
}
void unpoisonVAListTagForInst(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr, *OriginPtr;
unsigned Alignment = 8;
std::tie(ShadowPtr, OriginPtr) =
MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
/*isStore*/ true);
// Unpoison the whole __va_list_tag.
// FIXME: magic ABI constants.
IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
/* size */ 24, Alignment, false);
// We shouldn't need to zero out the origins, as they're only checked for
// nonzero shadow.
}
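// For reference (illustrative reading of the x86_64 SysV ABI): __va_list_tag
// is { i32 gp_offset, i32 fp_offset, i8* overflow_arg_area,
// i8* reg_save_area }, i.e. 24 bytes, with overflow_arg_area at offset 8 and
// reg_save_area at offset 16; these are the magic constants used here and in
// finalizeInstrumentation() below.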
void visitVAStartInst(VAStartInst &I) override {
if (F.getCallingConv() == CallingConv::Win64)
return;
VAStartInstrumentationList.push_back(&I);
unpoisonVAListTagForInst(I);
}
void visitVACopyInst(VACopyInst &I) override {
if (F.getCallingConv() == CallingConv::Win64) return;
unpoisonVAListTagForInst(I);
}
void finalizeInstrumentation() override {
assert(!VAArgOverflowSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
if (!VAStartInstrumentationList.empty()) {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
Value *CopySize =
IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
VAArgOverflowSize);
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
if (MS.TrackOrigins) {
VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
IRB.CreateMemCpy(VAArgTLSOriginCopy, 8, MS.VAArgOriginTLS, 8, CopySize);
}
}
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
CallInst *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, 16)),
PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 16;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
AMD64FpEndOffset);
if (MS.TrackOrigins)
IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
Alignment, AMD64FpEndOffset);
Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, 8)),
PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
AMD64FpEndOffset);
IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
VAArgOverflowSize);
if (MS.TrackOrigins) {
SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
AMD64FpEndOffset);
IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
VAArgOverflowSize);
}
}
}
};
/// MIPS64-specific implementation of VarArgHelper.
struct VarArgMIPS64Helper : public VarArgHelper {
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
Value *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
CS.getFunctionType()->getNumParams(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Triple TargetTriple(F.getParent()->getTargetTriple());
Value *A = *ArgIt;
Value *Base;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
if (TargetTriple.getArch() == Triple::mips64) {
// Adjust the shadow for arguments with size < 8 to match the placement
// of bits in a big-endian system.
if (ArgSize < 8)
VAArgOffset += (8 - ArgSize);
}
Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
VAArgOffset += ArgSize;
VAArgOffset = alignTo(VAArgOffset, 8);
if (!Base)
continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
// Here we use VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
// class member, i.e. it holds the total size of all VarArgs.
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
}
/// Compute the shadow address for a given va_arg.
Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
unsigned ArgOffset, unsigned ArgSize) {
// Make sure we don't overflow __msan_va_arg_tls.
if (ArgOffset + ArgSize > kParamTLSSize)
return nullptr;
Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
"_msarg");
}
void visitVAStartInst(VAStartInst &I) override {
IRBuilder<> IRB(&I);
VAStartInstrumentationList.push_back(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr, *OriginPtr;
unsigned Alignment = 8;
std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
/* size */ 8, Alignment, false);
}
void visitVACopyInst(VACopyInst &I) override {
IRBuilder<> IRB(&I);
VAStartInstrumentationList.push_back(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr, *OriginPtr;
unsigned Alignment = 8;
std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
/* size */ 8, Alignment, false);
}
void finalizeInstrumentation() override {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
VAArgSize);
if (!VAStartInstrumentationList.empty()) {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
}
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
CallInst *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 8;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
CopySize);
}
}
};
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelper {
static const unsigned kAArch64GrArgSize = 64;
static const unsigned kAArch64VrArgSize = 128;
static const unsigned AArch64GrBegOffset = 0;
static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
// Make VR space aligned to 16 bytes.
static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
+ kAArch64VrArgSize;
static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
Value *VAArgTLSCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
ArgKind classifyArgument(Value* arg) {
Type *T = arg->getType();
if (T->isFPOrFPVectorTy())
return AK_FloatingPoint;
if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
|| (T->isPointerTy()))
return AK_GeneralPurpose;
return AK_Memory;
}
// The instrumentation stores the argument shadow in a non-ABI-specific
// format because it does not know which arguments are named (since Clang,
// as in the x86_64 case, lowers the va_args in the frontend and this pass
// only sees the low-level code that deals with va_list internals).
// The first seven GR registers are saved in the first 56 bytes of the
// va_arg TLS array, followed by the first 8 FP/SIMD registers, and then
// the remaining arguments.
// Using constant offsets within the va_arg TLS array allows fast copying
// in the finalize instrumentation.
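// As a rough sketch derived from the offset constants above, the va_arg TLS
// shadow is laid out as:
//   [0, 64)    shadow of the GR save area (8-byte slots)
//   [64, 192)  shadow of the FP/SIMD (VR) save area (16-byte slots)
//   [192, ...) shadow of the arguments passed on the stack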
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned GrOffset = AArch64GrBegOffset;
unsigned VrOffset = AArch64VrBegOffset;
unsigned OverflowOffset = AArch64VAEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
unsigned ArgNo = CS.getArgumentNo(ArgIt);
bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
ArgKind AK = classifyArgument(A);
if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
AK = AK_Memory;
if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
AK = AK_Memory;
Value *Base;
switch (AK) {
case AK_GeneralPurpose:
Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset, 8);
GrOffset += 8;
break;
case AK_FloatingPoint:
Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset, 8);
VrOffset += 16;
break;
case AK_Memory:
// Don't count fixed arguments in the overflow area - va_start will
// skip right over them.
if (IsFixed)
continue;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
alignTo(ArgSize, 8));
OverflowOffset += alignTo(ArgSize, 8);
break;
}
// Fixed arguments are counted toward their respective GR/VR offsets above,
// but we don't bother to actually store a shadow for them.
if (IsFixed)
continue;
if (!Base)
continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *OverflowSize =
ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
}
/// Compute the shadow address for a given va_arg.
Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
unsigned ArgOffset, unsigned ArgSize) {
// Make sure we don't overflow __msan_va_arg_tls.
if (ArgOffset + ArgSize > kParamTLSSize)
return nullptr;
Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
"_msarg");
}
void visitVAStartInst(VAStartInst &I) override {
IRBuilder<> IRB(&I);
VAStartInstrumentationList.push_back(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr, *OriginPtr;
unsigned Alignment = 8;
std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
/* size */ 32, Alignment, false);
}
void visitVACopyInst(VACopyInst &I) override {
IRBuilder<> IRB(&I);
VAStartInstrumentationList.push_back(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr, *OriginPtr;
unsigned Alignment = 8;
std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
/* size */ 32, Alignment, false);
}
// Retrieve a va_list field of 'void*' size.
Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
Value *SaveAreaPtrPtr =
IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
Type::getInt64PtrTy(*MS.C));
return IRB.CreateLoad(SaveAreaPtrPtr);
}
// Retrieve a va_list field of 'int' size.
Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
Value *SaveAreaPtr =
IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
Type::getInt32PtrTy(*MS.C));
Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
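// The 32-bit fields read through this helper (__gr_offs and __vr_offs) are
// zero or negative (e.g. -16 with six named GR arguments), so the value must
// be sign-extended before the pointer arithmetic in finalizeInstrumentation.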
return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
}
void finalizeInstrumentation() override {
assert(!VAArgOverflowSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
if (!VAStartInstrumentationList.empty()) {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
Value *CopySize =
IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
VAArgOverflowSize);
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
}
Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
// Instrument va_start, copy va_list shadow from the backup copy of
// the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
CallInst *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
// The variadic ABI for AArch64 creates two areas to save the incoming
// argument registers (one for the 64-bit general registers xn-x7 and
// another for the 128-bit FP/SIMD registers vn-v7).
// We then need to propagate the shadow arguments to both regions
// 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
// The remaining arguments have their shadow saved in the 'va::stack' area.
// One caveat is that only the non-named arguments need to be propagated;
// however, the call site instrumentation saves 'all' the arguments. So to
// copy the shadow values from the va_arg TLS array we need to adjust the
// offset for both the GR and VR fields based on the __{gr,vr}_offs value
// (since these are set based on the incoming named arguments).
// Read the stack pointer from the va_list.
Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
// Read both the __gr_top and __gr_off and add them up.
Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
// Read both the __vr_top and __vr_off and add them up.
Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
// The instrumentation does not know how many named arguments are being
// used and, at the call site, all the arguments were saved. Since
// __gr_offs is defined as '0 - ((8 - named_gr) * 8)', the idea is to
// propagate only the variadic arguments by skipping the bytes of shadow
// that belong to named arguments.
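// For example, with six named GR arguments __gr_offs == -(8 - 6) * 8 == -16;
// GrRegSaveAreaShadowPtrOff below becomes 64 + (-16) == 48 and GrCopySize
// 64 - 48 == 16, so only the shadow of the two remaining (variadic) GR slots
// is copied.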
Value *GrRegSaveAreaShadowPtrOff =
IRB.CreateAdd(GrArgSize, GrOffSaveArea);
Value *GrRegSaveAreaShadowPtr =
MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
/*Alignment*/ 8, /*isStore*/ true)
.first;
Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
GrRegSaveAreaShadowPtrOff);
Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, 8, GrSrcPtr, 8, GrCopySize);
// Again, but for FP/SIMD values.
Value *VrRegSaveAreaShadowPtrOff =
IRB.CreateAdd(VrArgSize, VrOffSaveArea);
Value *VrRegSaveAreaShadowPtr =
MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
/*Alignment*/ 8, /*isStore*/ true)
.first;
Value *VrSrcPtr = IRB.CreateInBoundsGEP(
IRB.getInt8Ty(),
IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
IRB.getInt32(AArch64VrBegOffset)),
VrRegSaveAreaShadowPtrOff);
Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, 8, VrSrcPtr, 8, VrCopySize);
// And finally for remaining arguments.
Value *StackSaveAreaShadowPtr =
MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
/*Alignment*/ 16, /*isStore*/ true)
.first;
Value *StackSrcPtr =
IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
IRB.getInt32(AArch64VAEndOffset));
IRB.CreateMemCpy(StackSaveAreaShadowPtr, 16, StackSrcPtr, 16,
VAArgOverflowSize);
}
}
};
/// PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelper {
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
Value *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
// For PowerPC, we need to deal with the alignment of stack arguments -
// they are mostly aligned to 8 bytes, but vectors and i128 arrays
// are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
// and QPX vectors are aligned to 32 bytes. For that reason, we
// compute the current offset from the stack pointer (which is always
// properly aligned) and the offset of the first vararg, then subtract them.
unsigned VAArgBase;
Triple TargetTriple(F.getParent()->getTargetTriple());
// The parameter save area starts 48 bytes from the frame pointer for
// ABIv1, and 32 bytes for ABIv2. This is usually determined by the target
// endianness, but in theory it could be overridden by a function attribute.
// For simplicity, we ignore that here (it would only matter for QPX vectors).
if (TargetTriple.getArch() == Triple::ppc64)
VAArgBase = 48;
else
VAArgBase = 32;
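// For illustration (a rough sketch of the arithmetic below): starting from
// the ABIv2 base of 32, an 8-byte slot occupies [32, 40); a following
// 16-byte-aligned vector argument is then placed at alignTo(40, 16) == 48.
// The shadow offsets stored below are always taken relative to VAArgBase.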
unsigned VAArgOffset = VAArgBase;
const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
unsigned ArgNo = CS.getArgumentNo(ArgIt);
bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
if (IsByVal) {
assert(A->getType()->isPointerTy());
Type *RealTy = A->getType()->getPointerElementType();
uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
uint64_t ArgAlign = CS.getParamAlignment(ArgNo);
if (ArgAlign < 8)
ArgAlign = 8;
VAArgOffset = alignTo(VAArgOffset, ArgAlign);
if (!IsFixed) {
Value *Base = getShadowPtrForVAArgument(
RealTy, IRB, VAArgOffset - VAArgBase, ArgSize);
if (Base) {
Value *AShadowPtr, *AOriginPtr;
std::tie(AShadowPtr, AOriginPtr) =
MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
kShadowTLSAlignment, /*isStore*/ false);
IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
kShadowTLSAlignment, ArgSize);
}
}
VAArgOffset += alignTo(ArgSize, 8);
} else {
Value *Base;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
uint64_t ArgAlign = 8;
if (A->getType()->isArrayTy()) {
// Arrays are aligned to element size, except for long double
// arrays, which are aligned to 8 bytes.
Type *ElementTy = A->getType()->getArrayElementType();
if (!ElementTy->isPPC_FP128Ty())
ArgAlign = DL.getTypeAllocSize(ElementTy);
} else if (A->getType()->isVectorTy()) {
// Vectors are naturally aligned.
ArgAlign = DL.getTypeAllocSize(A->getType());
}
if (ArgAlign < 8)
ArgAlign = 8;
VAArgOffset = alignTo(VAArgOffset, ArgAlign);
if (DL.isBigEndian()) {
// Adjust the shadow for arguments with size < 8 to match the placement
// of bits on a big-endian system.
if (ArgSize < 8)
VAArgOffset += (8 - ArgSize);
}
if (!IsFixed) {
Base = getShadowPtrForVAArgument(A->getType(), IRB,
VAArgOffset - VAArgBase, ArgSize);
if (Base)
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
VAArgOffset += ArgSize;
VAArgOffset = alignTo(VAArgOffset, 8);
}
if (IsFixed)
VAArgBase = VAArgOffset;
}
Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
VAArgOffset - VAArgBase);
// Here we use VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
// class member, i.e. it holds the total size of all VarArgs.
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
}
/// Compute the shadow address for a given va_arg.
Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
unsigned ArgOffset, unsigned ArgSize) {
// Make sure we don't overflow __msan_va_arg_tls.
if (ArgOffset + ArgSize > kParamTLSSize)
return nullptr;
Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
"_msarg");
}
void visitVAStartInst(VAStartInst &I) override {
IRBuilder<> IRB(&I);
VAStartInstrumentationList.push_back(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr, *OriginPtr;
unsigned Alignment = 8;
std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
/* size */ 8, Alignment, false);
}
void visitVACopyInst(VACopyInst &I) override {
IRBuilder<> IRB(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr, *OriginPtr;
unsigned Alignment = 8;
std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
// Unpoison the whole __va_list_tag.
// FIXME: magic ABI constants.
IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
/* size */ 8, Alignment, false);
}
void finalizeInstrumentation() override {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
VAArgSize);
if (!VAStartInstrumentationList.empty()) {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
}
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
CallInst *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 8;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
CopySize);
}
}
};
/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) {}
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
void visitVAStartInst(VAStartInst &I) override {}
void visitVACopyInst(VACopyInst &I) override {}
void finalizeInstrumentation() override {}
};
} // end anonymous namespace
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
MemorySanitizerVisitor &Visitor) {
// VarArg handling is only implemented on AMD64, MIPS64, AArch64 and
// PowerPC64. False positives are possible on other platforms.
Triple TargetTriple(Func.getParent()->getTargetTriple());
if (TargetTriple.getArch() == Triple::x86_64)
return new VarArgAMD64Helper(Func, Msan, Visitor);
else if (TargetTriple.isMIPS64())
return new VarArgMIPS64Helper(Func, Msan, Visitor);
else if (TargetTriple.getArch() == Triple::aarch64)
return new VarArgAArch64Helper(Func, Msan, Visitor);
else if (TargetTriple.getArch() == Triple::ppc64 ||
TargetTriple.getArch() == Triple::ppc64le)
return new VarArgPowerPC64Helper(Func, Msan, Visitor);
else
return new VarArgNoOpHelper(Func, Msan, Visitor);
}
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
+ if (!CompileKernel && (&F == MsanCtorFunction))
+ return false;
MemorySanitizerVisitor Visitor(F, *this, TLI);
// Clear out readonly/readnone attributes.
AttrBuilder B;
B.addAttribute(Attribute::ReadOnly)
.addAttribute(Attribute::ReadNone);
F.removeAttributes(AttributeList::FunctionIndex, B);
return Visitor.runOnFunction();
}
Index: vendor/llvm/dist-release_80/test/CodeGen/AArch64/cmpxchg-lse-even-regs.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/AArch64/cmpxchg-lse-even-regs.ll (nonexistent)
+++ vendor/llvm/dist-release_80/test/CodeGen/AArch64/cmpxchg-lse-even-regs.ll (revision 344166)
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple arm64-apple-ios -mattr=+lse %s -o - | FileCheck %s
+
+; Only "even,even+1" pairs are valid for CASP instructions. Make sure LLVM
+; doesn't allocate odd ones and that it can copy them around properly. N.b. we
+; don't actually check that they're sequential because FileCheck can't; odd/even
+; will have to be good enough.
+define void @test_atomic_cmpxchg_i128_register_shuffling(i128* %addr, i128 %desired, i128 %new) nounwind {
+; CHECK-LABEL: test_atomic_cmpxchg_i128_register_shuffling:
+; CHECK-DAG: mov [[DESIRED_LO:x[0-9]*[02468]]], x1
+; CHECK-DAG: mov [[DESIRED_HI:x[0-9]*[13579]]], x2
+; CHECK-DAG: mov [[NEW_LO:x[0-9]*[02468]]], x3
+; CHECK-DAG: mov [[NEW_HI:x[0-9]*[13579]]], x4
+; CHECK: caspal [[DESIRED_LO]], [[DESIRED_HI]], [[NEW_LO]], [[NEW_HI]], [x0]
+
+ %res = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ ret void
+}
Index: vendor/llvm/dist-release_80/test/CodeGen/AArch64/seqpaircopy.mir
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/AArch64/seqpaircopy.mir (nonexistent)
+++ vendor/llvm/dist-release_80/test/CodeGen/AArch64/seqpaircopy.mir (revision 344166)
@@ -0,0 +1,23 @@
+# RUN: llc -o - %s -mtriple=aarch64-- -mattr=+v8.1a -run-pass=postrapseudos | FileCheck %s
+---
+# CHECK-LABEL: name: copy_xseqpairs
+name: copy_xseqpairs
+body: |
+ bb.0:
+ ; CHECK: $x4_x5 = CASPALX $x4_x5, $x2_x3, $x0
+ ; CHECK: $x0 = ORRXrs $xzr, $x4, 0
+ ; CHECK: $x1 = ORRXrs $xzr, $x5, 0
+ $x4_x5 = CASPALX $x4_x5, $x2_x3, $x0
+ $x0_x1 = COPY $x4_x5
+...
+---
+# CHECK-LABEL: name: copy_wseqpairs
+name: copy_wseqpairs
+body: |
+ bb.0:
+ ; CHECK: $w4_w5 = CASPALW $w4_w5, $w2_w3, $x0
+ ; CHECK: $w0 = ORRWrs $wzr, $w4, 0
+ ; CHECK: $w1 = ORRWrs $wzr, $w5, 0
+ $w4_w5 = CASPALW $w4_w5, $w2_w3, $x0
+ $w0_w1 = COPY $w4_w5
+...
Index: vendor/llvm/dist-release_80/test/CodeGen/MIR/X86/memory-operands.mir
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/MIR/X86/memory-operands.mir (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/MIR/X86/memory-operands.mir (revision 344166)
@@ -1,563 +1,563 @@
# RUN: llc -march=x86-64 -run-pass none -o - %s | FileCheck %s
# This test ensures that the MIR parser parses the machine memory operands
# correctly.
--- |
define i32 @test(i32* %a) {
entry:
%b = load i32, i32* %a
store i32 42, i32* %a
ret i32 %b
}
define void @test2(i32* %"a value") {
entry2:
%b = load i32, i32* %"a value"
%c = add i32 %b, 1
store i32 %c, i32* %"a value"
ret void
}
define void @test3(i32*) {
entry3:
%1 = alloca i32
%b = load i32, i32* %0
%c = add i32 %b, 1
store i32 %c, i32* %1
ret void
}
define i32 @volatile_inc(i32* %x) {
entry:
%0 = load volatile i32, i32* %x
%1 = add i32 %0, 1
store volatile i32 %1, i32* %x
ret i32 %1
}
define void @non_temporal_store(i32* %a, i32 %b) {
entry:
store i32 %b, i32* %a, align 16, !nontemporal !0
ret void
}
!0 = !{i32 1}
define i32 @invariant_load(i32* %x) {
entry:
%v = load i32, i32* %x, !invariant.load !1
ret i32 %v
}
!1 = !{}
define void @memory_offset(<8 x float>* %vec) {
entry:
%v = load <8 x float>, <8 x float>* %vec
%v2 = insertelement <8 x float> %v, float 0.0, i32 4
store <8 x float> %v2, <8 x float>* %vec
ret void
}
define void @memory_alignment(<8 x float>* %vec) {
entry:
%v = load <8 x float>, <8 x float>* %vec
%v2 = insertelement <8 x float> %v, float 0.0, i32 4
store <8 x float> %v2, <8 x float>* %vec
ret void
}
define double @constant_pool_psv(double %a) {
entry:
%b = fadd double %a, 3.250000e+00
ret double %b
}
declare x86_fp80 @cosl(x86_fp80) #0
define x86_fp80 @stack_psv(x86_fp80 %x) {
entry:
%y = call x86_fp80 @cosl(x86_fp80 %x) #0
ret x86_fp80 %y
}
attributes #0 = { readonly }
@G = external global i32
define i32 @got_psv() {
entry:
%a = load i32, i32* @G
%b = add i32 %a, 1
ret i32 %b
}
@0 = external global i32
define i32 @global_value() {
entry:
%a = load i32, i32* @G
%b = add i32 %a, 1
%c = load i32, i32* @0
%d = add i32 %b, %c
ret i32 %d
}
define i32 @jumptable_psv(i32 %in) {
entry:
switch i32 %in, label %def [
i32 0, label %lbl1
i32 1, label %lbl2
i32 2, label %lbl3
i32 3, label %lbl4
]
def:
ret i32 0
lbl1:
ret i32 1
lbl2:
ret i32 2
lbl3:
ret i32 4
lbl4:
ret i32 8
}
%struct.XXH_state64_t = type { i32, i32, i64, i64, i64 }
@a = common global i32 0, align 4
define i32 @tbaa_metadata() {
entry:
%0 = load i32, i32* @a, align 4, !tbaa !2
%1 = inttoptr i32 %0 to %struct.XXH_state64_t*
%total_len2 = bitcast %struct.XXH_state64_t* %1 to i32*
%2 = load i32, i32* %total_len2, align 4, !tbaa !6
ret i32 %2
}
!2 = !{!3, !3, i64 0}
!3 = !{!"int", !4, i64 0}
!4 = !{!"omnipotent char", !5, i64 0}
!5 = !{!"Simple C/C++ TBAA"}
!6 = !{!7, !3, i64 0}
!7 = !{!"XXH_state64_t", !3, i64 0, !3, i64 4, !8, i64 8, !8, i64 16, !8, i64 24}
!8 = !{!"long long", !4, i64 0}
define void @aa_scope(float* nocapture %a, float* nocapture readonly %c) #1 {
entry:
%0 = load float, float* %c, align 4, !alias.scope !9
%arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !noalias !9
%1 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
}
attributes #1 = { nounwind uwtable }
!9 = distinct !{!9, !10, !"some scope"}
!10 = distinct !{!10, !"some domain"}
define zeroext i1 @range_metadata(i8* %x) {
entry:
%0 = load i8, i8* %x, align 1, !range !11
%tobool = trunc i8 %0 to i1
ret i1 %tobool
}
!11 = !{i8 0, i8 2}
%st = type { i32, i32 }
@values = common global [50 x %st] zeroinitializer, align 16
define void @gep_value(i64 %d) {
entry:
%conv = trunc i64 %d to i32
store i32 %conv, i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0), align 16
ret void
}
define i8* @undef_value() {
entry:
%0 = load i8*, i8** undef, align 8
ret i8* %0
}
define void @dummy0() { ret void }
define void @dummy1() { ret void }
define void @dummy2() { ret void }
define void @dummy3() { ret void }
...
---
name: test
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
body: |
bb.0.entry:
liveins: $rdi
; CHECK: $eax = MOV32rm $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.a)
; CHECK-NEXT: MOV32mi killed $rdi, 1, $noreg, 0, $noreg, 42 :: (store 4 into %ir.a)
$eax = MOV32rm $rdi, 1, _, 0, _ :: (load 4 from %ir.a)
MOV32mi killed $rdi, 1, _, 0, _, 42 :: (store 4 into %ir.a)
RETQ $eax
...
---
name: test2
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
body: |
bb.0.entry2:
liveins: $rdi
; CHECK: INC32m killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
INC32m killed $rdi, 1, _, 0, _, implicit-def dead $eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
RETQ
...
---
name: test3
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
frameInfo:
maxAlignment: 4
stack:
- { id: 0, offset: -12, size: 4, alignment: 4 }
body: |
bb.0.entry3:
liveins: $rdi
; Verify that the unnamed local values can be serialized.
; CHECK-LABEL: name: test3
; CHECK: $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.0)
; CHECK: MOV32mr $rsp, 1, $noreg, -4, $noreg, killed $eax :: (store 4 into %ir.1)
$eax = MOV32rm killed $rdi, 1, _, 0, _ :: (load 4 from %ir.0)
$eax = INC32r killed $eax, implicit-def dead $eflags
MOV32mr $rsp, 1, _, -4, _, killed $eax :: (store 4 into %ir.1)
RETQ
...
---
name: volatile_inc
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
body: |
bb.0.entry:
liveins: $rdi
; CHECK: name: volatile_inc
; CHECK: $eax = MOV32rm $rdi, 1, $noreg, 0, $noreg :: (volatile load 4 from %ir.x)
; CHECK: MOV32mr killed $rdi, 1, $noreg, 0, $noreg, $eax :: (volatile store 4 into %ir.x)
$eax = MOV32rm $rdi, 1, _, 0, _ :: (volatile load 4 from %ir.x)
$eax = INC32r killed $eax, implicit-def dead $eflags
MOV32mr killed $rdi, 1, _, 0, _, $eax :: (volatile store 4 into %ir.x)
RETQ $eax
...
---
name: non_temporal_store
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
- { reg: '$esi' }
body: |
bb.0.entry:
liveins: $esi, $rdi
; CHECK: name: non_temporal_store
; CHECK: MOVNTImr killed $rdi, 1, $noreg, 0, $noreg, killed $esi :: (non-temporal store 4 into %ir.a)
MOVNTImr killed $rdi, 1, _, 0, _, killed $esi :: (non-temporal store 4 into %ir.a)
RETQ
...
---
name: invariant_load
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
body: |
bb.0.entry:
liveins: $rdi
; CHECK: name: invariant_load
; CHECK: $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg :: (invariant load 4 from %ir.x)
$eax = MOV32rm killed $rdi, 1, _, 0, _ :: (invariant load 4 from %ir.x)
RETQ $eax
...
---
name: memory_offset
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
body: |
bb.0.entry:
liveins: $rdi
; CHECK: name: memory_offset
; CHECK: $xmm0 = MOVAPSrm $rdi, 1, $noreg, 0, $noreg :: (load 16 from %ir.vec)
; CHECK-NEXT: $xmm1 = MOVAPSrm $rdi, 1, $noreg, 16, $noreg :: (load 16 from %ir.vec + 16)
; CHECK: MOVAPSmr $rdi, 1, $noreg, 0, $noreg, killed $xmm0 :: (store 16 into %ir.vec)
; CHECK-NEXT: MOVAPSmr killed $rdi, 1, $noreg, 16, $noreg, killed $xmm1 :: (store 16 into %ir.vec + 16)
$xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16)
$xmm2 = FsFLD0SS
$xmm1 = MOVSSrr killed $xmm1, killed $xmm2
MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16)
RETQ
...
---
name: memory_alignment
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
body: |
bb.0.entry:
liveins: $rdi
; CHECK: name: memory_alignment
; CHECK: $xmm0 = MOVAPSrm $rdi, 1, $noreg, 0, $noreg :: (load 16 from %ir.vec, align 32)
; CHECK-NEXT: $xmm1 = MOVAPSrm $rdi, 1, $noreg, 16, $noreg :: (load 16 from %ir.vec + 16, align 32)
; CHECK: MOVAPSmr $rdi, 1, $noreg, 0, $noreg, killed $xmm0 :: (store 16 into %ir.vec, align 32)
; CHECK-NEXT: MOVAPSmr killed $rdi, 1, $noreg, 16, $noreg, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
$xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align 32)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
$xmm2 = FsFLD0SS
$xmm1 = MOVSSrr killed $xmm1, killed $xmm2
MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
RETQ
...
---
name: constant_pool_psv
tracksRegLiveness: true
liveins:
- { reg: '$xmm0' }
constants:
- id: 0
value: 'double 3.250000e+00'
body: |
bb.0.entry:
liveins: $xmm0
; CHECK: name: constant_pool_psv
; CHECK: $xmm0 = ADDSDrm killed $xmm0, $rip, 1, $noreg, %const.0, $noreg :: (load 8 from constant-pool)
; CHECK-NEXT: $xmm0 = ADDSDrm killed $xmm0, $rip, 1, $noreg, %const.0, $noreg :: (load 8 from constant-pool + 8)
$xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.0, _ :: (load 8 from constant-pool)
$xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.0, _ :: (load 8 from constant-pool + 8)
RETQ $xmm0
...
---
name: stack_psv
tracksRegLiveness: true
frameInfo:
stackSize: 24
maxAlignment: 16
adjustsStack: true
hasCalls: true
maxCallFrameSize: 16
fixedStack:
- { id: 0, offset: 0, size: 10, alignment: 16, isImmutable: true, isAliased: false }
body: |
bb.0.entry:
$rsp = frame-setup SUB64ri8 $rsp, 24, implicit-def dead $eflags
CFI_INSTRUCTION def_cfa_offset 32
LD_F80m $rsp, 1, $noreg, 32, $noreg, implicit-def dead $fpsw
; CHECK: name: stack_psv
- ; CHECK: ST_FP80m $rsp, 1, $noreg, 0, $noreg, implicit-def dead $fpsw :: (store 10 into stack, align 16)
- ST_FP80m $rsp, 1, _, 0, _, implicit-def dead $fpsw :: (store 10 into stack, align 16)
+ ; CHECK: ST_FP80m $rsp, 1, $noreg, 0, $noreg, implicit-def dead $fpsw, implicit $fpcw :: (store 10 into stack, align 16)
+ ST_FP80m $rsp, 1, _, 0, _, implicit-def dead $fpsw, implicit $fpcw :: (store 10 into stack, align 16)
CALL64pcrel32 &cosl, csr_64, implicit $rsp, implicit-def $rsp, implicit-def $fp0
$rsp = ADD64ri8 $rsp, 24, implicit-def dead $eflags
RETQ
...
---
name: got_psv
tracksRegLiveness: true
body: |
bb.0.entry:
; CHECK: name: got_psv
; CHECK: $rax = MOV64rm $rip, 1, $noreg, @G, $noreg :: (load 8 from got)
$rax = MOV64rm $rip, 1, _, @G, _ :: (load 8 from got)
$eax = MOV32rm killed $rax, 1, _, 0, _
$eax = INC32r killed $eax, implicit-def dead $eflags
RETQ $eax
...
---
name: global_value
tracksRegLiveness: true
body: |
bb.0.entry:
$rax = MOV64rm $rip, 1, _, @G, _
; CHECK-LABEL: name: global_value
; CHECK: $eax = MOV32rm killed $rax, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from @G)
; CHECK: $ecx = MOV32rm killed $rcx, 1, $noreg, 0, $noreg, implicit-def $rcx :: (load 4 from @0)
$eax = MOV32rm killed $rax, 1, _, 0, _, implicit-def $rax :: (load 4 from @G)
$rcx = MOV64rm $rip, 1, _, @0, _
$ecx = MOV32rm killed $rcx, 1, _, 0, _, implicit-def $rcx :: (load 4 from @0)
$eax = LEA64_32r killed $rax, 1, killed $rcx, 1, _
RETQ $eax
...
---
name: jumptable_psv
tracksRegLiveness: true
liveins:
- { reg: '$edi' }
jumpTable:
kind: label-difference32
entries:
- id: 0
blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ]
body: |
bb.0.entry:
successors: %bb.2.def, %bb.1.entry
liveins: $edi
$eax = MOV32rr $edi, implicit-def $rax
CMP32ri8 killed $edi, 3, implicit-def $eflags
JA_1 %bb.2.def, implicit killed $eflags
bb.1.entry:
successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4
liveins: $rax
$rcx = LEA64r $rip, 1, _, %jump-table.0, _
; CHECK: name: jumptable_psv
; CHECK: $rax = MOVSX64rm32 $rcx, 4, killed $rax, 0, $noreg :: (load 4 from jump-table, align 8)
$rax = MOVSX64rm32 $rcx, 4, killed $rax, 0, _ :: (load 4 from jump-table, align 8)
$rax = ADD64rr killed $rax, killed $rcx, implicit-def dead $eflags
JMP64r killed $rax
bb.2.def:
$eax = MOV32r0 implicit-def dead $eflags
RETQ $eax
bb.3.lbl1:
$eax = MOV32ri 1
RETQ $eax
bb.4.lbl2:
$eax = MOV32ri 2
RETQ $eax
bb.5.lbl3:
$eax = MOV32ri 4
RETQ $eax
bb.6.lbl4:
$eax = MOV32ri 8
RETQ $eax
...
---
name: tbaa_metadata
tracksRegLiveness: true
body: |
bb.0.entry:
$rax = MOV64rm $rip, 1, _, @a, _ :: (load 8 from got)
; CHECK-LABEL: name: tbaa_metadata
; CHECK: $eax = MOV32rm killed $rax, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from @a, !tbaa !2)
; CHECK-NEXT: $eax = MOV32rm killed $rax, 1, $noreg, 0, $noreg :: (load 4 from %ir.total_len2, !tbaa !6)
$eax = MOV32rm killed $rax, 1, _, 0, _, implicit-def $rax :: (load 4 from @a, !tbaa !2)
$eax = MOV32rm killed $rax, 1, _, 0, _ :: (load 4 from %ir.total_len2, !tbaa !6)
RETQ $eax
...
---
name: aa_scope
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
- { reg: '$rsi' }
body: |
bb.0.entry:
liveins: $rdi, $rsi
; CHECK-LABEL: name: aa_scope
; CHECK: $xmm0 = MOVSSrm $rsi, 1, $noreg, 0, $noreg :: (load 4 from %ir.c, !alias.scope !9)
$xmm0 = MOVSSrm $rsi, 1, _, 0, _ :: (load 4 from %ir.c, !alias.scope !9)
; CHECK-NEXT: MOVSSmr $rdi, 1, $noreg, 20, $noreg, killed $xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
MOVSSmr $rdi, 1, _, 20, _, killed $xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
$xmm0 = MOVSSrm killed $rsi, 1, _, 0, _ :: (load 4 from %ir.c)
MOVSSmr killed $rdi, 1, _, 28, _, killed $xmm0 :: (store 4 into %ir.arrayidx)
RETQ
...
---
name: range_metadata
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
body: |
bb.0.entry:
liveins: $rdi
; CHECK-LABEL: name: range_metadata
; CHECK: $al = MOV8rm killed $rdi, 1, $noreg, 0, $noreg :: (load 1 from %ir.x, !range !11)
$al = MOV8rm killed $rdi, 1, _, 0, _ :: (load 1 from %ir.x, !range !11)
RETQ $al
...
---
name: gep_value
tracksRegLiveness: true
liveins:
- { reg: '$rdi' }
body: |
bb.0.entry:
liveins: $rdi
$rax = MOV64rm $rip, 1, _, @values, _ :: (load 8 from got)
; CHECK-LABEL: gep_value
; CHECK: MOV32mr killed $rax, 1, $noreg, 0, $noreg, $edi, implicit killed $rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
MOV32mr killed $rax, 1, _, 0, _, $edi, implicit killed $rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
RETQ
...
---
name: undef_value
tracksRegLiveness: true
body: |
bb.0.entry:
; CHECK-LABEL: name: undef_value
; CHECK: $rax = MOV64rm undef $rax, 1, $noreg, 0, $noreg :: (load 8 from `i8** undef`)
$rax = MOV64rm undef $rax, 1, _, 0, _ :: (load 8 from `i8** undef`)
RETQ $rax
...
---
# Test memory operand without associated value.
# CHECK-LABEL: name: dummy0
# CHECK: $rax = MOV64rm undef $rax, 1, $noreg, 0, $noreg :: (load 8)
name: dummy0
tracksRegLiveness: true
body: |
bb.0:
$rax = MOV64rm undef $rax, 1, _, 0, _ :: (load 8)
RETQ $rax
...
---
# Test parsing of stack references in machine memory operands.
# CHECK-LABEL: name: dummy1
# CHECK: $rax = MOV64rm $rsp, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
name: dummy1
tracksRegLiveness: true
stack:
- { id: 0, size: 4, alignment: 4 }
body: |
bb.0:
$rax = MOV64rm $rsp, 1, _, 0, _ :: (load 8 from %stack.0)
RETQ $rax
...
---
# Test parsing of unknown size in machine memory operands without alignment.
# CHECK-LABEL: name: dummy2
# CHECK: $rax = MOV64rm $rsp, 1, $noreg, 0, $noreg :: (load unknown-size from %stack.0, align 1)
name: dummy2
tracksRegLiveness: true
stack:
- { id: 0, size: 4, alignment: 4 }
body: |
bb.0:
$rax = MOV64rm $rsp, 1, _, 0, _ :: (load unknown-size from %stack.0)
RETQ $rax
...
---
# Test parsing of unknown size in machine memory operands with alignment.
# CHECK-LABEL: name: dummy3
# CHECK: $rax = MOV64rm $rsp, 1, $noreg, 0, $noreg :: (load unknown-size from %stack.0, align 4)
name: dummy3
tracksRegLiveness: true
stack:
- { id: 0, size: 4, alignment: 4 }
body: |
bb.0:
$rax = MOV64rm $rsp, 1, _, 0, _ :: (load unknown-size from %stack.0, align 4)
RETQ $rax
...
Index: vendor/llvm/dist-release_80/test/CodeGen/Mips/micromips-b-range.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/Mips/micromips-b-range.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/Mips/micromips-b-range.ll (revision 344166)
@@ -1,97 +1,97 @@
; RUN: llc -march=mips -relocation-model=pic -mattr=+micromips \
; RUN: -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s
; CHECK-LABEL: foo:
; CHECK-NEXT: 0: 41 a2 00 00 lui $2, 0
; CHECK-NEXT: 4: 30 42 00 00 addiu $2, $2, 0
; CHECK-NEXT: 8: 03 22 11 50 addu $2, $2, $25
; CHECK-NEXT: c: fc 42 00 00 lw $2, 0($2)
; CHECK-NEXT: 10: 69 20 lw16 $2, 0($2)
; CHECK-NEXT: 12: 40 c2 00 14 bgtz $2, 44 <foo+0x3e>
; CHECK-NEXT: 16: 00 00 00 00 nop
; CHECK-NEXT: 1a: 33 bd ff f8 addiu $sp, $sp, -8
; CHECK-NEXT: 1e: fb fd 00 00 sw $ra, 0($sp)
; CHECK-NEXT: 22: 41 a1 00 01 lui $1, 1
; CHECK-NEXT: 26: 40 60 00 02 bal 8 <foo+0x2e>
-; CHECK-NEXT: 2a: 30 21 04 68 addiu $1, $1, 1128
+; CHECK-NEXT: 2a: 30 21 04 69 addiu $1, $1, 1129
; CHECK-NEXT: 2e: 00 3f 09 50 addu $1, $ra, $1
; CHECK-NEXT: 32: ff fd 00 00 lw $ra, 0($sp)
; CHECK-NEXT: 36: 00 01 0f 3c jr $1
; CHECK-NEXT: 3a: 33 bd 00 08 addiu $sp, $sp, 8
; CHECK-NEXT: 3e: 94 00 00 02 b 8 <foo+0x46>
; CHECK-NEXT: 42: 00 00 00 00 nop
; CHECK-NEXT: 46: 30 20 4e 1f addiu $1, $zero, 19999
; CHECK-NEXT: 4a: b4 22 00 14 bne $2, $1, 44 <foo+0x76>
; CHECK-NEXT: 4e: 00 00 00 00 nop
; CHECK-NEXT: 52: 33 bd ff f8 addiu $sp, $sp, -8
; CHECK-NEXT: 56: fb fd 00 00 sw $ra, 0($sp)
; CHECK-NEXT: 5a: 41 a1 00 01 lui $1, 1
; CHECK-NEXT: 5e: 40 60 00 02 bal 8 <foo+0x66>
-; CHECK-NEXT: 62: 30 21 04 5c addiu $1, $1, 1116
+; CHECK-NEXT: 62: 30 21 04 5d addiu $1, $1, 1117
; CHECK-NEXT: 66: 00 3f 09 50 addu $1, $ra, $1
; CHECK-NEXT: 6a: ff fd 00 00 lw $ra, 0($sp)
; CHECK-NEXT: 6e: 00 01 0f 3c jr $1
; CHECK-NEXT: 72: 33 bd 00 08 addiu $sp, $sp, 8
; CHECK-NEXT: 76: 30 20 27 0f addiu $1, $zero, 9999
; CHECK-NEXT: 7a: 94 22 00 14 beq $2, $1, 44 <foo+0xa6>
; CHECK-NEXT: 7e: 00 00 00 00 nop
; CHECK-NEXT: 82: 33 bd ff f8 addiu $sp, $sp, -8
; CHECK-NEXT: 86: fb fd 00 00 sw $ra, 0($sp)
; CHECK-NEXT: 8a: 41 a1 00 01 lui $1, 1
; CHECK-NEXT: 8e: 40 60 00 02 bal 8 <foo+0x96>
-; CHECK-NEXT: 92: 30 21 04 2c addiu $1, $1, 1068
+; CHECK-NEXT: 92: 30 21 04 2d addiu $1, $1, 1069
; CHECK-NEXT: 96: 00 3f 09 50 addu $1, $ra, $1
; CHECK-NEXT: 9a: ff fd 00 00 lw $ra, 0($sp)
; CHECK-NEXT: 9e: 00 01 0f 3c jr $1
; CHECK-NEXT: a2: 33 bd 00 08 addiu $sp, $sp, 8
; CHECK: ...
; CHECK-NEXT: 1046a: 94 00 00 02 b 8 <foo+0x10472>
; CHECK-NEXT: 1046e: 00 00 00 00 nop
; CHECK-NEXT: 10472: 33 bd ff f8 addiu $sp, $sp, -8
; CHECK-NEXT: 10476: fb fd 00 00 sw $ra, 0($sp)
; CHECK-NEXT: 1047a: 41 a1 00 01 lui $1, 1
; CHECK-NEXT: 1047e: 40 60 00 02 bal 8 <foo+0x10486>
-; CHECK-NEXT: 10482: 30 21 04 00 addiu $1, $1, 1024
+; CHECK-NEXT: 10482: 30 21 04 01 addiu $1, $1, 1025
; CHECK-NEXT: 10486: 00 3f 09 50 addu $1, $ra, $1
; CHECK-NEXT: 1048a: ff fd 00 00 lw $ra, 0($sp)
; CHECK-NEXT: 1048e: 00 01 0f 3c jr $1
; CHECK-NEXT: 10492: 33 bd 00 08 addiu $sp, $sp, 8
; CHECK-NEXT: 10496: 94 00 00 02 b 8 <foo+0x1049e>
@x = external global i32, align 4
define void @foo() {
%1 = load i32, i32* @x, align 4
%2 = icmp sgt i32 %1, 0
br i1 %2, label %la, label %lf
la:
switch i32 %1, label %le [
i32 9999, label %lb
i32 19999, label %lc
]
lb:
tail call void asm sideeffect ".space 0", ""()
br label %le
lc:
tail call void asm sideeffect ".space 0", ""()
br label %le
le:
tail call void asm sideeffect ".space 66500", ""()
br label %lg
lf:
tail call void asm sideeffect ".space 0", ""()
br label %lg
lg:
tail call void asm sideeffect ".space 0", ""()
br label %li
li:
tail call void asm sideeffect ".space 0", ""()
ret void
}
Index: vendor/llvm/dist-release_80/test/CodeGen/SystemZ/memcmp-01.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/SystemZ/memcmp-01.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/SystemZ/memcmp-01.ll (revision 344166)
@@ -1,221 +1,221 @@
; Test memcmp using CLC, with i32 results.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
declare signext i32 @memcmp(i8 *%src1, i8 *%src2, i64 %size)
; Zero-length comparisons should be optimized away.
define i32 @f1(i8 *%src1, i8 *%src2) {
; CHECK-LABEL: f1:
; CHECK: lhi %r2, 0
; CHECK: br %r14
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 0)
ret i32 %res
}
; Check a case where the result is used as an integer.
define i32 @f2(i8 *%src1, i8 *%src2) {
; CHECK-LABEL: f2:
-; CHECK: clc 0(2,%r2), 0(%r3)
-; CHECK: ipm [[REG:%r[0-5]]]
-; CHECK: srl [[REG]], 28
-; CHECK: rll %r2, [[REG]], 31
+; CHECK: clc 0(2,%r3), 0(%r2)
+; CHECK: ipm %r2
+; CHECK: sll %r2, 2
+; CHECK: sra %r2, 30
; CHECK: br %r14
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 2)
ret i32 %res
}
; Check a case where the result is tested for equality.
define void @f3(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK-LABEL: f3:
-; CHECK: clc 0(3,%r2), 0(%r3)
+; CHECK: clc 0(3,%r3), 0(%r2)
; CHECK-NEXT: ber %r14
; CHECK: br %r14
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 3)
%cmp = icmp eq i32 %res, 0
br i1 %cmp, label %exit, label %store
store:
store i32 0, i32 *%dest
br label %exit
exit:
ret void
}
; Check a case where the result is tested for inequality.
define void @f4(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK-LABEL: f4:
-; CHECK: clc 0(4,%r2), 0(%r3)
+; CHECK: clc 0(4,%r3), 0(%r2)
; CHECK-NEXT: blhr %r14
; CHECK: br %r14
entry:
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 4)
%cmp = icmp ne i32 %res, 0
br i1 %cmp, label %exit, label %store
store:
store i32 0, i32 *%dest
br label %exit
exit:
ret void
}
; Check a case where the result is tested via slt.
define void @f5(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK-LABEL: f5:
-; CHECK: clc 0(5,%r2), 0(%r3)
-; CHECK-NEXT: blr %r14
+; CHECK: clc 0(5,%r3), 0(%r2)
+; CHECK-NEXT: bhr %r14
; CHECK: br %r14
entry:
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 5)
%cmp = icmp slt i32 %res, 0
br i1 %cmp, label %exit, label %store
store:
store i32 0, i32 *%dest
br label %exit
exit:
ret void
}
; Check a case where the result is tested for sgt.
define void @f6(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK-LABEL: f6:
-; CHECK: clc 0(6,%r2), 0(%r3)
-; CHECK-NEXT: bhr %r14
+; CHECK: clc 0(6,%r3), 0(%r2)
+; CHECK-NEXT: blr %r14
; CHECK: br %r14
entry:
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 6)
%cmp = icmp sgt i32 %res, 0
br i1 %cmp, label %exit, label %store
store:
store i32 0, i32 *%dest
br label %exit
exit:
ret void
}
; Check the upper end of the CLC range. Here the result is used both as
; an integer and for branching.
define i32 @f7(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK-LABEL: f7:
-; CHECK: clc 0(256,%r2), 0(%r3)
-; CHECK: ipm [[REG:%r[0-5]]]
-; CHECK: srl [[REG]], 28
-; CHECK: rll %r2, [[REG]], 31
+; CHECK: clc 0(256,%r3), 0(%r2)
+; CHECK: ipm %r2
+; CHECK: sll %r2, 2
+; CHECK: sra %r2, 30
; CHECK: blr %r14
; CHECK: br %r14
entry:
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 256)
%cmp = icmp slt i32 %res, 0
br i1 %cmp, label %exit, label %store
store:
store i32 0, i32 *%dest
br label %exit
exit:
ret i32 %res
}
; 257 bytes needs two CLCs.
define i32 @f8(i8 *%src1, i8 *%src2) {
; CHECK-LABEL: f8:
-; CHECK: clc 0(256,%r2), 0(%r3)
+; CHECK: clc 0(256,%r3), 0(%r2)
; CHECK: jlh [[LABEL:\..*]]
-; CHECK: clc 256(1,%r2), 256(%r3)
+; CHECK: clc 256(1,%r3), 256(%r2)
; CHECK: [[LABEL]]:
; CHECK: ipm [[REG:%r[0-5]]]
; CHECK: br %r14
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 257)
ret i32 %res
}
; Test a comparison of 258 bytes in which the CC result can be used directly.
define void @f9(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK-LABEL: f9:
-; CHECK: clc 0(256,%r2), 0(%r3)
+; CHECK: clc 0(256,%r3), 0(%r2)
; CHECK: jlh [[LABEL:\..*]]
-; CHECK: clc 256(1,%r2), 256(%r3)
+; CHECK: clc 256(1,%r3), 256(%r2)
; CHECK: [[LABEL]]:
-; CHECK-NEXT: blr %r14
+; CHECK-NEXT: bhr %r14
; CHECK: br %r14
entry:
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 257)
%cmp = icmp slt i32 %res, 0
br i1 %cmp, label %exit, label %store
store:
store i32 0, i32 *%dest
br label %exit
exit:
ret void
}
; Test the largest size that can use two CLCs.
define i32 @f10(i8 *%src1, i8 *%src2) {
; CHECK-LABEL: f10:
-; CHECK: clc 0(256,%r2), 0(%r3)
+; CHECK: clc 0(256,%r3), 0(%r2)
; CHECK: jlh [[LABEL:\..*]]
-; CHECK: clc 256(256,%r2), 256(%r3)
+; CHECK: clc 256(256,%r3), 256(%r2)
; CHECK: [[LABEL]]:
; CHECK: ipm [[REG:%r[0-5]]]
; CHECK: br %r14
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 512)
ret i32 %res
}
; Test the smallest size that needs 3 CLCs.
define i32 @f11(i8 *%src1, i8 *%src2) {
; CHECK-LABEL: f11:
-; CHECK: clc 0(256,%r2), 0(%r3)
+; CHECK: clc 0(256,%r3), 0(%r2)
; CHECK: jlh [[LABEL:\..*]]
-; CHECK: clc 256(256,%r2), 256(%r3)
+; CHECK: clc 256(256,%r3), 256(%r2)
; CHECK: jlh [[LABEL]]
-; CHECK: clc 512(1,%r2), 512(%r3)
+; CHECK: clc 512(1,%r3), 512(%r2)
; CHECK: [[LABEL]]:
; CHECK: ipm [[REG:%r[0-5]]]
; CHECK: br %r14
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 513)
ret i32 %res
}
; Test the largest size that can use 3 CLCs.
define i32 @f12(i8 *%src1, i8 *%src2) {
; CHECK-LABEL: f12:
-; CHECK: clc 0(256,%r2), 0(%r3)
+; CHECK: clc 0(256,%r3), 0(%r2)
; CHECK: jlh [[LABEL:\..*]]
-; CHECK: clc 256(256,%r2), 256(%r3)
+; CHECK: clc 256(256,%r3), 256(%r2)
; CHECK: jlh [[LABEL]]
-; CHECK: clc 512(256,%r2), 512(%r3)
+; CHECK: clc 512(256,%r3), 512(%r2)
; CHECK: [[LABEL]]:
; CHECK: ipm [[REG:%r[0-5]]]
; CHECK: br %r14
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 768)
ret i32 %res
}
; The next size up uses a loop instead. We leave the more complicated
; loop tests to memcpy-01.ll, which shares the same form.
define i32 @f13(i8 *%src1, i8 *%src2) {
; CHECK-LABEL: f13:
; CHECK: lghi [[COUNT:%r[0-5]]], 3
; CHECK: [[LOOP:.L[^:]*]]:
-; CHECK: clc 0(256,%r2), 0(%r3)
+; CHECK: clc 0(256,%r3), 0(%r2)
; CHECK: jlh [[LABEL:\..*]]
; CHECK-DAG: la %r2, 256(%r2)
; CHECK-DAG: la %r3, 256(%r3)
; CHECK: brctg [[COUNT]], [[LOOP]]
-; CHECK: clc 0(1,%r2), 0(%r3)
+; CHECK: clc 0(1,%r3), 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: ipm [[REG:%r[0-5]]]
; CHECK: br %r14
%res = call i32 @memcmp(i8 *%src1, i8 *%src2, i64 769)
ret i32 %res
}
Index: vendor/llvm/dist-release_80/test/CodeGen/SystemZ/strcmp-01.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/SystemZ/strcmp-01.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/SystemZ/strcmp-01.ll (revision 344166)
@@ -1,70 +1,70 @@
; Test strcmp using CLST, i32 version.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
declare signext i32 @strcmp(i8 *%src1, i8 *%src2)
; Check a case where the result is used as an integer.
define i32 @f1(i8 *%src1, i8 *%src2) {
; CHECK-LABEL: f1:
; CHECK: lhi %r0, 0
; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: clst %r2, %r3
+; CHECK: clst %r3, %r2
; CHECK-NEXT: jo [[LABEL]]
; CHECK-NEXT: %bb.{{[0-9]+}}
-; CHECK-NEXT: ipm [[REG:%r[0-5]]]
-; CHECK: srl [[REG]], 28
-; CHECK: rll %r2, [[REG]], 31
+; CHECK-NEXT: ipm %r2
+; CHECK: sll %r2, 2
+; CHECK: sra %r2, 30
; CHECK: br %r14
%res = call i32 @strcmp(i8 *%src1, i8 *%src2)
ret i32 %res
}
; Check a case where the result is tested for equality.
define void @f2(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK-LABEL: f2:
; CHECK: lhi %r0, 0
; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: clst %r2, %r3
+; CHECK: clst %r3, %r2
; CHECK-NEXT: jo [[LABEL]]
; CHECK-NEXT: %bb.{{[0-9]+}}
; CHECK-NEXT: ber %r14
; CHECK: br %r14
%res = call i32 @strcmp(i8 *%src1, i8 *%src2)
%cmp = icmp eq i32 %res, 0
br i1 %cmp, label %exit, label %store
store:
store i32 0, i32 *%dest
br label %exit
exit:
ret void
}
; Test a case where the result is used both as an integer and for
; branching.
define i32 @f3(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK-LABEL: f3:
; CHECK: lhi %r0, 0
; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: clst %r2, %r3
+; CHECK: clst %r3, %r2
; CHECK-NEXT: jo [[LABEL]]
; CHECK-NEXT: %bb.{{[0-9]+}}
-; CHECK-NEXT: ipm [[REG:%r[0-5]]]
-; CHECK: srl [[REG]], 28
-; CHECK: rll %r2, [[REG]], 31
+; CHECK-NEXT: ipm %r2
+; CHECK: sll %r2, 2
+; CHECK: sra %r2, 30
; CHECK: blr %r14
; CHECK: br %r14
entry:
%res = call i32 @strcmp(i8 *%src1, i8 *%src2)
%cmp = icmp slt i32 %res, 0
br i1 %cmp, label %exit, label %store
store:
store i32 0, i32 *%dest
br label %exit
exit:
ret i32 %res
}
Index: vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/call.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/call.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/call.ll (revision 344166)
@@ -1,230 +1,230 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-temporary-workarounds=false -mattr=+sign-ext,+simd128 | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 -wasm-temporary-workarounds=false -mattr=+sign-ext,+simd128 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -mattr=+sign-ext,+simd128 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 -mattr=+sign-ext,+simd128 | FileCheck %s
; Test that basic call operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
declare i32 @i32_nullary()
declare i32 @i32_unary(i32)
declare i32 @i32_binary(i32, i32)
declare i64 @i64_nullary()
declare float @float_nullary()
declare double @double_nullary()
declare <16 x i8> @v128_nullary()
declare void @void_nullary()
; CHECK-LABEL: call_i32_nullary:
; CHECK-NEXT: .functype call_i32_nullary () -> (i32){{$}}
; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_nullary@FUNCTION{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @call_i32_nullary() {
%r = call i32 @i32_nullary()
ret i32 %r
}
; CHECK-LABEL: call_i64_nullary:
; CHECK-NEXT: .functype call_i64_nullary () -> (i64){{$}}
; CHECK-NEXT: {{^}} i64.call $push[[NUM:[0-9]+]]=, i64_nullary@FUNCTION{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @call_i64_nullary() {
%r = call i64 @i64_nullary()
ret i64 %r
}
; CHECK-LABEL: call_float_nullary:
; CHECK-NEXT: .functype call_float_nullary () -> (f32){{$}}
; CHECK-NEXT: {{^}} f32.call $push[[NUM:[0-9]+]]=, float_nullary@FUNCTION{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define float @call_float_nullary() {
%r = call float @float_nullary()
ret float %r
}
; CHECK-LABEL: call_double_nullary:
; CHECK-NEXT: .functype call_double_nullary () -> (f64){{$}}
; CHECK-NEXT: {{^}} f64.call $push[[NUM:[0-9]+]]=, double_nullary@FUNCTION{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define double @call_double_nullary() {
%r = call double @double_nullary()
ret double %r
}
; CHECK-LABEL: call_v128_nullary:
; CHECK-NEXT: .functype call_v128_nullary () -> (v128){{$}}
; CHECK-NEXT: {{^}} v128.call $push[[NUM:[0-9]+]]=, v128_nullary@FUNCTION{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define <16 x i8> @call_v128_nullary() {
%r = call <16 x i8> @v128_nullary()
ret <16 x i8> %r
}
; CHECK-LABEL: call_void_nullary:
; CHECK-NEXT: .functype call_void_nullary () -> (){{$}}
; CHECK-NEXT: {{^}} call void_nullary@FUNCTION{{$}}
; CHECK-NEXT: return{{$}}
define void @call_void_nullary() {
call void @void_nullary()
ret void
}
; CHECK-LABEL: call_i32_unary:
; CHECK-NEXT: .functype call_i32_unary (i32) -> (i32){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_unary@FUNCTION, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @call_i32_unary(i32 %a) {
%r = call i32 @i32_unary(i32 %a)
ret i32 %r
}
; CHECK-LABEL: call_i32_binary:
; CHECK-NEXT: .functype call_i32_binary (i32, i32) -> (i32){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_binary@FUNCTION, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @call_i32_binary(i32 %a, i32 %b) {
%r = call i32 @i32_binary(i32 %a, i32 %b)
ret i32 %r
}
; CHECK-LABEL: call_indirect_void:
; CHECK-NEXT: .functype call_indirect_void (i32) -> (){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} call_indirect $pop[[L0]]{{$}}
; CHECK-NEXT: return{{$}}
define void @call_indirect_void(void ()* %callee) {
call void %callee()
ret void
}
; CHECK-LABEL: call_indirect_i32:
; CHECK-NEXT: .functype call_indirect_i32 (i32) -> (i32){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} i32.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @call_indirect_i32(i32 ()* %callee) {
%t = call i32 %callee()
ret i32 %t
}
; CHECK-LABEL: call_indirect_i64:
; CHECK-NEXT: .functype call_indirect_i64 (i32) -> (i64){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} i64.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @call_indirect_i64(i64 ()* %callee) {
%t = call i64 %callee()
ret i64 %t
}
; CHECK-LABEL: call_indirect_float:
; CHECK-NEXT: .functype call_indirect_float (i32) -> (f32){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} f32.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define float @call_indirect_float(float ()* %callee) {
%t = call float %callee()
ret float %t
}
; CHECK-LABEL: call_indirect_double:
; CHECK-NEXT: .functype call_indirect_double (i32) -> (f64){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} f64.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define double @call_indirect_double(double ()* %callee) {
%t = call double %callee()
ret double %t
}
; CHECK-LABEL: call_indirect_v128:
; CHECK-NEXT: .functype call_indirect_v128 (i32) -> (v128){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} v128.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define <16 x i8> @call_indirect_v128(<16 x i8> ()* %callee) {
%t = call <16 x i8> %callee()
ret <16 x i8> %t
}
; CHECK-LABEL: call_indirect_arg:
; CHECK-NEXT: .functype call_indirect_arg (i32, i32) -> (){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 1{{$}}
; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} call_indirect $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return{{$}}
define void @call_indirect_arg(void (i32)* %callee, i32 %arg) {
call void %callee(i32 %arg)
ret void
}
; CHECK-LABEL: call_indirect_arg_2:
; CHECK-NEXT: .functype call_indirect_arg_2 (i32, i32, i32) -> (){{$}}
; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 1{{$}}
; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 2{{$}}
; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: {{^}} i32.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]], $pop[[L2]]{{$}}
; CHECK-NEXT: drop $pop[[NUM]]{{$}}
; CHECK-NEXT: return{{$}}
define void @call_indirect_arg_2(i32 (i32, i32)* %callee, i32 %arg, i32 %arg2) {
call i32 %callee(i32 %arg, i32 %arg2)
ret void
}
; CHECK-LABEL: tail_call_void_nullary:
; CHECK-NEXT: .functype tail_call_void_nullary () -> (){{$}}
; CHECK-NEXT: {{^}} call void_nullary@FUNCTION{{$}}
; CHECK-NEXT: return{{$}}
define void @tail_call_void_nullary() {
tail call void @void_nullary()
ret void
}
; CHECK-LABEL: fastcc_tail_call_void_nullary:
; CHECK-NEXT: .functype fastcc_tail_call_void_nullary () -> (){{$}}
; CHECK-NEXT: {{^}} call void_nullary@FUNCTION{{$}}
; CHECK-NEXT: return{{$}}
define void @fastcc_tail_call_void_nullary() {
tail call fastcc void @void_nullary()
ret void
}
; CHECK-LABEL: coldcc_tail_call_void_nullary:
; CHECK-NEXT: .functype coldcc_tail_call_void_nullary () -> (){{$}}
; CHECK-NEXT: {{^}} call void_nullary@FUNCTION{{$}}
; CHECK-NEXT: return{{$}}
define void @coldcc_tail_call_void_nullary() {
tail call coldcc void @void_nullary()
ret void
}
; CHECK-LABEL: call_constexpr:
; CHECK-NEXT: .functype call_constexpr () -> (){{$}}
; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 2{{$}}
; CHECK-NEXT: i32.const $push[[L1:[0-9]+]]=, 3{{$}}
; CHECK-NEXT: call .Lvararg_func_bitcast@FUNCTION, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: call other_void_nullary@FUNCTION{{$}}
; CHECK-NEXT: call void_nullary@FUNCTION{{$}}
; CHECK-NEXT: return{{$}}
declare void @vararg_func(...)
declare void @other_void_nullary()
define void @call_constexpr() {
bb0:
call void bitcast (void (...)* @vararg_func to void (i32, i32)*)(i32 2, i32 3)
br label %bb1
bb1:
call void select (i1 0, void ()* @void_nullary, void ()* @other_void_nullary)()
br label %bb2
bb2:
call void inttoptr (i32 ptrtoint (void ()* @void_nullary to i32) to void ()*)()
ret void
}
; TODO: test the following:
; - More argument combinations.
; - Tail call.
; - Interesting returns (struct, multiple).
; - Vararg.
Index: vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll (revision 344166)
@@ -1,31 +1,31 @@
-; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false -wasm-keep-registers | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -wasm-keep-registers | FileCheck %s
; Test that function pointer casts that cast away varargs are replaced with
; wrappers.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
define void @callWithArgs() {
entry:
call void bitcast (void (...)* @underspecified to void (i32, i32)*)(i32 0, i32 1)
call void(...) bitcast (void (i32, i32)* @specified to void (...)*)(i32 0, i32 1)
ret void
}
declare void @underspecified(...)
declare void @specified(i32, i32)
; CHECK: callWithArgs:
; CHECK: i32.const $push1=, 0
; CHECK-NEXT: i32.const $push0=, 1
; CHECK-NEXT: call .Lunderspecified_bitcast@FUNCTION, $pop1, $pop0
; CHECK: call .Lspecified_bitcast@FUNCTION, $pop{{[0-9]+$}}
; CHECK: .Lunderspecified_bitcast:
; CHECK-NEXT: .functype .Lunderspecified_bitcast (i32, i32) -> (){{$}}
; CHECK: call underspecified@FUNCTION, $pop{{[0-9]+$}}
; CHECK: .Lspecified_bitcast:
; CHECK-NEXT: .functype .Lspecified_bitcast (i32) -> (){{$}}
; CHECK: call specified@FUNCTION, $pop{{[0-9]+}}, $pop{{[0-9]+$}}
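; A minimal sketch (hypothetical, not taken from the vendor tree) of what the
; .Lunderspecified_bitcast wrapper checked above is conceptually equivalent to:
; a fixed (i32, i32) adapter that simply forwards its arguments to the
; variadic callee.
;   define internal void @underspecified_bitcast.sketch(i32 %a, i32 %b) {
;     call void (...) @underspecified(i32 %a, i32 %b)
;     ret void
;   }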
Index: vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/function-bitcasts.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/function-bitcasts.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/function-bitcasts.ll (revision 344166)
@@ -1,203 +1,203 @@
-; RUN: llc < %s -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers -enable-emscripten-cxx-exceptions -wasm-temporary-workarounds=false | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers -enable-emscripten-cxx-exceptions | FileCheck %s
; Test that function pointer casts are replaced with wrappers.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
declare void @has_i32_arg(i32)
declare void @has_struct_arg({i32})
declare i32 @has_i32_ret()
declare void @vararg(...)
declare void @plain(i32)
declare void @foo0()
declare void @foo1()
declare void @foo2()
declare void @foo3()
; CHECK-LABEL: test:
; CHECK: call .Lhas_i32_arg_bitcast.2@FUNCTION{{$}}
; CHECK-NEXT: call .Lhas_i32_arg_bitcast.2@FUNCTION{{$}}
; CHECK-NEXT: call .Lhas_i32_ret_bitcast@FUNCTION{{$}}
; CHECK-NEXT: i32.call $drop=, has_i32_ret@FUNCTION
; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0
; CHECK-NEXT: call .Lfoo0_bitcast@FUNCTION, $pop[[L0]]{{$}}
; CHECK-NEXT: i32.const $push[[L1:[0-9]+]]=, 0
; CHECK-NEXT: call .Lfoo0_bitcast@FUNCTION, $pop[[L1]]{{$}}
; CHECK-NEXT: i32.const $push[[L2:[0-9]+]]=, 0
; CHECK-NEXT: call .Lfoo0_bitcast@FUNCTION, $pop[[L2]]{{$}}
; CHECK-NEXT: call foo0@FUNCTION
; CHECK-NEXT: i32.call $drop=, .Lfoo1_bitcast@FUNCTION{{$}}
; CHECK-NEXT: call foo2@FUNCTION{{$}}
; CHECK-NEXT: call foo1@FUNCTION{{$}}
; CHECK-NEXT: call foo3@FUNCTION{{$}}
; CHECK-NEXT: end_function
define void @test() {
entry:
call void bitcast (void (i32)* @has_i32_arg to void ()*)()
call void bitcast (void (i32)* @has_i32_arg to void ()*)()
call void bitcast (i32 ()* @has_i32_ret to void ()*)()
call i32 bitcast (i32 ()* @has_i32_ret to i32 ()*)()
call void bitcast (void ()* @foo0 to void (i32)*)(i32 0)
%p = bitcast void ()* @foo0 to void (i32)*
call void %p(i32 0)
%q = bitcast void ()* @foo0 to void (i32)*
call void %q(i32 0)
%r = bitcast void (i32)* %q to void ()*
call void %r()
%t = call i32 bitcast (void ()* @foo1 to i32 ()*)()
call void bitcast (void ()* @foo2 to void ()*)()
call void @foo1()
call void @foo3()
ret void
}
; CHECK-LABEL: test_structs:
; CHECK: call .Lhas_i32_arg_bitcast.1@FUNCTION, $pop{{[0-9]+}}, $pop{{[0-9]+$}}
; CHECK: call .Lhas_i32_arg_bitcast@FUNCTION, $0, $pop2
; CHECK: call .Lhas_struct_arg_bitcast@FUNCTION{{$}}
define void @test_structs() {
entry:
call void bitcast (void (i32)* @has_i32_arg to void (i32, {i32})*)(i32 5, {i32} {i32 6})
call {i32, i64} bitcast (void (i32)* @has_i32_arg to {i32, i64} (i32)*)(i32 7)
call void bitcast (void ({i32})* @has_struct_arg to void ()*)()
ret void
}
; CHECK-LABEL: test_structs_unhandled:
; CHECK: call has_struct_arg@FUNCTION, $pop{{[0-9]+$}}
; CHECK: call has_struct_arg@FUNCTION, $pop{{[0-9]+$}}
; CHECK: call has_i32_ret@FUNCTION, $pop{{[0-9]+$}}
define void @test_structs_unhandled() {
entry:
call void @has_struct_arg({i32} {i32 3})
call void bitcast (void ({i32})* @has_struct_arg to void ({i64})*)({i64} {i64 4})
call {i32, i32} bitcast (i32 ()* @has_i32_ret to {i32, i32} ()*)()
ret void
}
; CHECK-LABEL: test_varargs:
; CHECK: global.set
; CHECK: i32.const $push[[L3:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: call .Lvararg_bitcast@FUNCTION, $pop[[L3]]{{$}}
; CHECK-NEXT: i32.const $push[[L4:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: i32.store 0($[[L5:[0-9]+]]), $pop[[L4]]{{$}}
; CHECK-NEXT: call .Lplain_bitcast@FUNCTION, $[[L5]]{{$}}
define void @test_varargs() {
call void bitcast (void (...)* @vararg to void (i32)*)(i32 0)
call void (...) bitcast (void (i32)* @plain to void (...)*)(i32 0)
ret void
}
; Don't use wrappers when the value is stored in memory
@global_func = hidden local_unnamed_addr global void ()* null
; CHECK-LABEL: test_store:
; CHECK: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L1:[0-9]+]]=, has_i32_ret@FUNCTION{{$}}
; CHECK-NEXT: i32.store global_func($pop[[L0]]), $pop[[L1]]{{$}}
define void @test_store() {
%1 = bitcast i32 ()* @has_i32_ret to void ()*
store void ()* %1, void ()** @global_func
ret void
}
; CHECK-LABEL: test_load:
; CHECK-NEXT: .functype test_load () -> (i32){{$}}
; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: i32.load $push[[L1:[0-9]+]]=, global_func($pop[[L0]]){{$}}
; CHECK-NEXT: i32.call_indirect $push{{[0-9]+}}=, $pop[[L1]]{{$}}
define i32 @test_load() {
%1 = load i32 ()*, i32 ()** bitcast (void ()** @global_func to i32 ()**)
%2 = call i32 %1()
ret i32 %2
}
; Don't use wrappers when the value is passed to a function call
declare void @call_func(i32 ()*)
; CHECK-LABEL: test_argument:
; CHECK: i32.const $push[[L0:[0-9]+]]=, has_i32_ret@FUNCTION{{$}}
; CHECK-NEXT: call call_func@FUNCTION, $pop[[L0]]{{$}}
; CHECK-NEXT: i32.const $push[[L1:[0-9]+]]=, has_i32_arg@FUNCTION{{$}}
; CHECK-NEXT: call call_func@FUNCTION, $pop[[L1]]{{$}}
define void @test_argument() {
call void @call_func(i32 ()* @has_i32_ret)
call void @call_func(i32 ()* bitcast (void (i32)* @has_i32_arg to i32 ()*))
ret void
}
; Invokes should be treated like calls
; CHECK-LABEL: test_invoke:
; CHECK: i32.const $push[[L1:[0-9]+]]=, call_func@FUNCTION{{$}}
; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, has_i32_ret@FUNCTION{{$}}
; CHECK-NEXT: call "__invoke_void_i32()*"@FUNCTION, $pop[[L1]], $pop[[L0]]{{$}}
; CHECK: i32.const $push[[L3:[0-9]+]]=, call_func@FUNCTION{{$}}
; CHECK-NEXT: i32.const $push[[L2:[0-9]+]]=, has_i32_arg@FUNCTION{{$}}
; CHECK-NEXT: call "__invoke_void_i32()*"@FUNCTION, $pop[[L3]], $pop[[L2]]{{$}}
; CHECK: i32.const $push[[L4:[0-9]+]]=, .Lhas_i32_arg_bitcast.2@FUNCTION{{$}}
; CHECK-NEXT: call __invoke_void@FUNCTION, $pop[[L4]]{{$}}
declare i32 @personality(...)
define void @test_invoke() personality i32 (...)* @personality {
entry:
invoke void @call_func(i32 ()* @has_i32_ret)
to label %cont unwind label %lpad
cont:
invoke void @call_func(i32 ()* bitcast (void (i32)* @has_i32_arg to i32 ()*))
to label %cont2 unwind label %lpad
cont2:
invoke void bitcast (void (i32)* @has_i32_arg to void ()*)()
to label %end unwind label %lpad
lpad:
%0 = landingpad { i8*, i32 }
catch i8* null
br label %end
end:
ret void
}
; CHECK-LABEL: .Lhas_i32_arg_bitcast:
; CHECK-NEXT: .functype .Lhas_i32_arg_bitcast (i32, i32) -> ()
; CHECK-NEXT: call has_i32_arg@FUNCTION, $1{{$}}
; CHECK-NEXT: end_function
; CHECK-LABEL: .Lhas_i32_arg_bitcast.1:
; CHECK-NEXT: .functype .Lhas_i32_arg_bitcast.1 (i32, i32) -> ()
; CHECK-NEXT: call has_i32_arg@FUNCTION, $0{{$}}
; CHECK-NEXT: end_function
; CHECK-LABEL: .Lhas_i32_arg_bitcast.2:
; CHECK: call has_i32_arg@FUNCTION, $0{{$}}
; CHECK-NEXT: end_function
; CHECK-LABEL: .Lhas_i32_ret_bitcast:
; CHECK: call $drop=, has_i32_ret@FUNCTION{{$}}
; CHECK-NEXT: end_function
; CHECK-LABEL: .Lvararg_bitcast:
; CHECK: call vararg@FUNCTION, $1{{$}}
; CHECK: end_function
; CHECK-LABEL: .Lplain_bitcast:
; CHECK: call plain@FUNCTION, $1{{$}}
; CHECK: end_function
; CHECK-LABEL: .Lfoo0_bitcast:
; CHECK-NEXT: .functype .Lfoo0_bitcast (i32) -> ()
; CHECK-NEXT: call foo0@FUNCTION{{$}}
; CHECK-NEXT: end_function
; CHECK-LABEL: .Lfoo1_bitcast:
; CHECK-NEXT: .functype .Lfoo1_bitcast () -> (i32)
; CHECK-NEXT: call foo1@FUNCTION{{$}}
; CHECK-NEXT: local.copy $push0=, $0
; CHECK-NEXT: end_function
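; A minimal sketch (hypothetical, not taken from the vendor tree) of the kind
; of adapter the .Lfoo0_bitcast label above stands for: it accepts the extra
; i32 that the bitcast call site passes and drops it before calling foo0.
;   define internal void @foo0_bitcast.sketch(i32 %ignored) {
;     call void @foo0()
;     ret void
;   }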
Index: vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/import-module.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/import-module.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/import-module.ll (revision 344166)
@@ -1,19 +1,20 @@
; RUN: llc < %s -asm-verbose=false -wasm-keep-registers | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
define void @test() {
call void @foo()
call void @plain()
ret void
}
declare void @foo() #0
declare void @plain()
-attributes #0 = { "wasm-import-module"="bar" }
+attributes #0 = { "wasm-import-module"="bar" "wasm-import-name"="qux" }
; CHECK-NOT: .import_module plain
; CHECK: .import_module foo, bar
+; CHECK: .import_name foo, qux
; CHECK-NOT: .import_module plain
Index: vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-declaration.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-declaration.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-declaration.ll (revision 344166)
@@ -1,20 +1,18 @@
-; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
; Test main functions with alternate signatures.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
-declare void @main()
+declare i32 @main()
-define void @foo() {
- call void @main()
- ret void
+define i32 @foo() {
+ %t = call i32 @main()
+ ret i32 %t
}
-; CHECK-NOT: __original_main
; CHECK-LABEL: foo:
-; CHECK-NEXT: .functype foo () -> ()
-; CHECK-NEXT: call main@FUNCTION
+; CHECK-NEXT: .functype foo () -> (i32)
+; CHECK-NEXT: call __original_main@FUNCTION
; CHECK-NEXT: end_function
-; CHECK-NOT: __original_main
Index: vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-no-args.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-no-args.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-no-args.ll (revision 344166)
@@ -1,18 +1,19 @@
-; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
; Test main functions with alternate signatures.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
-define void @main() {
- ret void
+define i32 @main() {
+ ret i32 0
}
-; CHECK-LABEL: .L__original_main:
-; CHECK-NEXT: .functype .L__original_main () -> ()
+; CHECK-LABEL: __original_main:
+; CHECK-NEXT: .functype __original_main () -> (i32)
+; CHECK-NEXT: i32.const 0
; CHECK-NEXT: end_function
; CHECK-LABEL: main:
; CHECK-NEXT: .functype main (i32, i32) -> (i32)
-; CHECK: call .L__original_main@FUNCTION
+; CHECK: call __original_main@FUNCTION
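; A minimal sketch (hypothetical, not taken from the vendor tree) of the
; synthetic entry point the CHECK lines above describe: the user's main is
; emitted as __original_main, and a main with the standard (i32, i32) -> i32
; signature calls it while ignoring the argc/argv slots.
;   define i32 @main.sketch(i32 %argc, i32 %argv) {
;     %r = call i32 @__original_main()
;     ret i32 %r
;   }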
Index: vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-three-args.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-three-args.ll (nonexistent)
+++ vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-three-args.ll (revision 344166)
@@ -0,0 +1,16 @@
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
+
+; Test that a main function with a non-standard third argument is
+; not wrapped.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+define i32 @main(i32 %a, i8** %b, i8** %c) {
+ ret i32 0
+}
+
+; CHECK-LABEL: main:
+; CHECK-NEXT: .functype main (i32, i32, i32) -> (i32)
+
+; CHECK-NOT: __original_main:
Index: vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-with-args.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-with-args.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/WebAssembly/main-with-args.ll (revision 344166)
@@ -1,15 +1,15 @@
-; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
; Test that a main function with the expected signature is not wrapped
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
define i32 @main(i32 %a, i8** %b) {
ret i32 0
}
; CHECK-LABEL: main:
; CHECK-NEXT: .functype main (i32, i32) -> (i32)
; CHECK-NOT: __original_main:
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/and-su.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/and-su.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/and-su.ll (revision 344166)
@@ -1,84 +1,84 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- | FileCheck %s
; Don't duplicate the load.
define fastcc i32 @foo(i32* %p) nounwind {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: movl (%ecx), %eax
; CHECK-NEXT: andl $10, %eax
; CHECK-NEXT: je .LBB0_2
; CHECK-NEXT: # %bb.1: # %bb63
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB0_2: # %bb76
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: retl
%t0 = load i32, i32* %p
%t2 = and i32 %t0, 10
%t3 = icmp ne i32 %t2, 0
br i1 %t3, label %bb63, label %bb76
bb63:
ret i32 %t2
bb76:
ret i32 0
}
define fastcc double @bar(i32 %hash, double %x, double %y) nounwind {
; CHECK-LABEL: bar:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: andl $-8, %esp
; CHECK-NEXT: fldl 16(%ebp)
; CHECK-NEXT: fldl 8(%ebp)
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: andl $15, %eax
; CHECK-NEXT: cmpl $8, %eax
; CHECK-NEXT: jb .LBB1_2
; CHECK-NEXT: # %bb.1: # %bb10
; CHECK-NEXT: testb $1, %cl
; CHECK-NEXT: je .LBB1_3
; CHECK-NEXT: .LBB1_2: # %bb11
; CHECK-NEXT: fchs
; CHECK-NEXT: .LBB1_3: # %bb13
; CHECK-NEXT: testb $2, %cl
; CHECK-NEXT: je .LBB1_5
; CHECK-NEXT: # %bb.4: # %bb14
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: fchs
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: .LBB1_5: # %bb16
-; CHECK-NEXT: faddp %st(1)
+; CHECK-NEXT: faddp %st, %st(1)
; CHECK-NEXT: movl %ebp, %esp
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
entry:
%0 = and i32 %hash, 15
%1 = icmp ult i32 %0, 8
br i1 %1, label %bb11, label %bb10
bb10:
%2 = and i32 %hash, 1
%3 = icmp eq i32 %2, 0
br i1 %3, label %bb13, label %bb11
bb11:
%4 = fsub double -0.000000e+00, %x
br label %bb13
bb13:
%iftmp.9.0 = phi double [ %4, %bb11 ], [ %x, %bb10 ]
%5 = and i32 %hash, 2
%6 = icmp eq i32 %5, 0
br i1 %6, label %bb16, label %bb14
bb14:
%7 = fsub double -0.000000e+00, %y
br label %bb16
bb16:
%iftmp.10.0 = phi double [ %7, %bb14 ], [ %y, %bb13 ]
%8 = fadd double %iftmp.9.0, %iftmp.10.0
ret double %8
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/avx512-regcall-NoMask.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/avx512-regcall-NoMask.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/avx512-regcall-NoMask.ll (revision 344166)
@@ -1,1319 +1,1319 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; FIXME: Fix machine verifier issues and remove -verify-machineinstrs=0. PR39437.
; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq -verify-machineinstrs=0 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq -verify-machineinstrs=0 | FileCheck %s --check-prefix=WIN64
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq -verify-machineinstrs=0 | FileCheck %s --check-prefix=LINUXOSX64
; Test regcall when receiving/returning i1
define x86_regcallcc i1 @test_argReti1(i1 %a) {
; X32-LABEL: test_argReti1:
; X32: # %bb.0:
; X32-NEXT: incb %al
; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti1:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
; WIN64-NEXT: # kill: def $al killed $al killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti1:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
; LINUXOSX64-NEXT: # kill: def $al killed $al killed $eax
; LINUXOSX64-NEXT: retq
%add = add i1 %a, 1
ret i1 %add
}
; Test regcall when passing/retrieving i1
define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
; X32-LABEL: test_CallargReti1:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incb %al
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: calll _test_argReti1
; X32-NEXT: incb %al
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti1:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incb %al
; WIN64-NEXT: movzbl %al, %eax
; WIN64-NEXT: callq test_argReti1
; WIN64-NEXT: incb %al
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti1:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incb %al
; LINUXOSX64-NEXT: movzbl %al, %eax
; LINUXOSX64-NEXT: callq test_argReti1
; LINUXOSX64-NEXT: incb %al
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = add i1 %a, 1
%c = call x86_regcallcc i1 @test_argReti1(i1 %b)
%d = add i1 %c, 1
ret i1 %d
}
; Test regcall when receiving/returning i8
define x86_regcallcc i8 @test_argReti8(i8 %a) {
; X32-LABEL: test_argReti8:
; X32: # %bb.0:
; X32-NEXT: incb %al
; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti8:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
; WIN64-NEXT: # kill: def $al killed $al killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti8:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
; LINUXOSX64-NEXT: # kill: def $al killed $al killed $eax
; LINUXOSX64-NEXT: retq
%add = add i8 %a, 1
ret i8 %add
}
; Test regcall when passing/retrieving i8
define x86_regcallcc i8 @test_CallargReti8(i8 %a) {
; X32-LABEL: test_CallargReti8:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incb %al
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: calll _test_argReti8
; X32-NEXT: incb %al
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti8:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incb %al
; WIN64-NEXT: movzbl %al, %eax
; WIN64-NEXT: callq test_argReti8
; WIN64-NEXT: incb %al
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti8:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incb %al
; LINUXOSX64-NEXT: movzbl %al, %eax
; LINUXOSX64-NEXT: callq test_argReti8
; LINUXOSX64-NEXT: incb %al
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = add i8 %a, 1
%c = call x86_regcallcc i8 @test_argReti8(i8 %b)
%d = add i8 %c, 1
ret i8 %d
}
; Test regcall when receiving/returning i16
define x86_regcallcc i16 @test_argReti16(i16 %a) {
; X32-LABEL: test_argReti16:
; X32: # %bb.0:
; X32-NEXT: incl %eax
; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti16:
; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti16:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: retq
%add = add i16 %a, 1
ret i16 %add
}
; Test regcall when passing/retrieving i16
define x86_regcallcc i16 @test_CallargReti16(i16 %a) {
; X32-LABEL: test_CallargReti16:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti16
; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: incl %eax
; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti16:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incl %eax
; WIN64-NEXT: callq test_argReti16
; WIN64-NEXT: # kill: def $ax killed $ax def $eax
; WIN64-NEXT: incl %eax
; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti16:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: callq test_argReti16
; LINUXOSX64-NEXT: # kill: def $ax killed $ax def $eax
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = add i16 %a, 1
%c = call x86_regcallcc i16 @test_argReti16(i16 %b)
%d = add i16 %c, 1
ret i16 %d
}
; Test regcall when receiving/returning i32
define x86_regcallcc i32 @test_argReti32(i32 %a) {
; X32-LABEL: test_argReti32:
; X32: # %bb.0:
; X32-NEXT: incl %eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti32:
; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti32:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: retq
%add = add i32 %a, 1
ret i32 %add
}
; Test regcall when passing/retrieving i32
define x86_regcallcc i32 @test_CallargReti32(i32 %a) {
; X32-LABEL: test_CallargReti32:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti32
; X32-NEXT: incl %eax
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti32:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incl %eax
; WIN64-NEXT: callq test_argReti32
; WIN64-NEXT: incl %eax
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti32:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: callq test_argReti32
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = add i32 %a, 1
%c = call x86_regcallcc i32 @test_argReti32(i32 %b)
%d = add i32 %c, 1
ret i32 %d
}
; Test regcall when receiving/returning i64
define x86_regcallcc i64 @test_argReti64(i64 %a) {
; X32-LABEL: test_argReti64:
; X32: # %bb.0:
; X32-NEXT: addl $3, %eax
; X32-NEXT: adcl $1, %ecx
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti64:
; WIN64: # %bb.0:
; WIN64-NEXT: movabsq $4294967299, %rcx # imm = 0x100000003
; WIN64-NEXT: addq %rcx, %rax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti64:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: movabsq $4294967299, %rcx # imm = 0x100000003
; LINUXOSX64-NEXT: addq %rcx, %rax
; LINUXOSX64-NEXT: retq
%add = add i64 %a, 4294967299
ret i64 %add
}
; Test regcall when passing/retrieving i64
define x86_regcallcc i64 @test_CallargReti64(i64 %a) {
; X32-LABEL: test_CallargReti64:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: addl $1, %eax
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: calll _test_argReti64
; X32-NEXT: addl $1, %eax
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti64:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incq %rax
; WIN64-NEXT: callq test_argReti64
; WIN64-NEXT: incq %rax
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti64:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incq %rax
; LINUXOSX64-NEXT: callq test_argReti64
; LINUXOSX64-NEXT: incq %rax
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = add i64 %a, 1
%c = call x86_regcallcc i64 @test_argReti64(i64 %b)
%d = add i64 %c, 1
ret i64 %d
}
; Test regcall when receiving/returning float
define x86_regcallcc float @test_argRetFloat(float %a) {
; X32-LABEL: test_argRetFloat:
; X32: # %bb.0:
; X32-NEXT: vaddss __real@3f800000, %xmm0, %xmm0
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetFloat:
; WIN64: # %bb.0:
; WIN64-NEXT: vaddss __real@{{.*}}(%rip), %xmm0, %xmm0
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetFloat:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; LINUXOSX64-NEXT: retq
%add = fadd float 1.0, %a
ret float %add
}
; Test regcall when passing/retrieving float
define x86_regcallcc float @test_CallargRetFloat(float %a) {
; X32-LABEL: test_CallargRetFloat:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $24, %esp
; X32-NEXT: vmovups %xmm4, (%esp) # 16-byte Spill
; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X32-NEXT: vaddss %xmm4, %xmm0, %xmm0
; X32-NEXT: calll _test_argRetFloat
; X32-NEXT: vaddss %xmm4, %xmm0, %xmm0
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; X32-NEXT: addl $24, %esp
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRetFloat:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $16, %rsp
; WIN64-NEXT: .seh_stackalloc 16
; WIN64-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; WIN64-NEXT: .seh_savexmm 8, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
; WIN64-NEXT: vaddss %xmm8, %xmm0, %xmm0
; WIN64-NEXT: callq test_argRetFloat
; WIN64-NEXT: vaddss %xmm8, %xmm0, %xmm0
; WIN64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; WIN64-NEXT: addq $16, %rsp
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRetFloat:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $16, %rsp
; LINUXOSX64-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 32
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: .cfi_offset %xmm8, -32
; LINUXOSX64-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
; LINUXOSX64-NEXT: vaddss %xmm8, %xmm0, %xmm0
; LINUXOSX64-NEXT: callq test_argRetFloat
; LINUXOSX64-NEXT: vaddss %xmm8, %xmm0, %xmm0
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; LINUXOSX64-NEXT: addq $16, %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = fadd float 1.0, %a
%c = call x86_regcallcc float @test_argRetFloat(float %b)
%d = fadd float 1.0, %c
ret float %d
}
; Test regcall when receiving/returning double
define x86_regcallcc double @test_argRetDouble(double %a) {
; X32-LABEL: test_argRetDouble:
; X32: # %bb.0:
; X32-NEXT: vaddsd __real@3ff0000000000000, %xmm0, %xmm0
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetDouble:
; WIN64: # %bb.0:
; WIN64-NEXT: vaddsd __real@{{.*}}(%rip), %xmm0, %xmm0
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetDouble:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; LINUXOSX64-NEXT: retq
%add = fadd double %a, 1.0
ret double %add
}
; Test regcall when passing/retrieving double
define x86_regcallcc double @test_CallargRetDouble(double %a) {
; X32-LABEL: test_CallargRetDouble:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $24, %esp
; X32-NEXT: vmovups %xmm4, (%esp) # 16-byte Spill
; X32-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
; X32-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; X32-NEXT: calll _test_argRetDouble
; X32-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; X32-NEXT: addl $24, %esp
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRetDouble:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $16, %rsp
; WIN64-NEXT: .seh_stackalloc 16
; WIN64-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; WIN64-NEXT: .seh_savexmm 8, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
; WIN64-NEXT: vaddsd %xmm8, %xmm0, %xmm0
; WIN64-NEXT: callq test_argRetDouble
; WIN64-NEXT: vaddsd %xmm8, %xmm0, %xmm0
; WIN64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; WIN64-NEXT: addq $16, %rsp
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRetDouble:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $16, %rsp
; LINUXOSX64-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 32
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: .cfi_offset %xmm8, -32
; LINUXOSX64-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
; LINUXOSX64-NEXT: vaddsd %xmm8, %xmm0, %xmm0
; LINUXOSX64-NEXT: callq test_argRetDouble
; LINUXOSX64-NEXT: vaddsd %xmm8, %xmm0, %xmm0
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; LINUXOSX64-NEXT: addq $16, %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = fadd double 1.0, %a
%c = call x86_regcallcc double @test_argRetDouble(double %b)
%d = fadd double 1.0, %c
ret double %d
}
; Test regcall when receiving/returning long double
define x86_regcallcc x86_fp80 @test_argRetf80(x86_fp80 %a0) nounwind {
; X32-LABEL: test_argRetf80:
; X32: # %bb.0:
-; X32-NEXT: fadd %st(0), %st(0)
+; X32-NEXT: fadd %st, %st(0)
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetf80:
; WIN64: # %bb.0:
-; WIN64-NEXT: fadd %st(0), %st(0)
+; WIN64-NEXT: fadd %st, %st(0)
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetf80:
; LINUXOSX64: # %bb.0:
-; LINUXOSX64-NEXT: fadd %st(0), %st(0)
+; LINUXOSX64-NEXT: fadd %st, %st(0)
; LINUXOSX64-NEXT: retq
%r0 = fadd x86_fp80 %a0, %a0
ret x86_fp80 %r0
}
; Test regcall when passing/retrieving long double
define x86_regcallcc x86_fp80 @test_CallargRetf80(x86_fp80 %a) {
; X32-LABEL: test_CallargRetf80:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
-; X32-NEXT: fadd %st(0), %st(0)
+; X32-NEXT: fadd %st, %st(0)
; X32-NEXT: calll _test_argRetf80
-; X32-NEXT: fadd %st(0), %st(0)
+; X32-NEXT: fadd %st, %st(0)
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRetf80:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
-; WIN64-NEXT: fadd %st(0), %st(0)
+; WIN64-NEXT: fadd %st, %st(0)
; WIN64-NEXT: callq test_argRetf80
-; WIN64-NEXT: fadd %st(0), %st(0)
+; WIN64-NEXT: fadd %st, %st(0)
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRetf80:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
-; LINUXOSX64-NEXT: fadd %st(0), %st(0)
+; LINUXOSX64-NEXT: fadd %st, %st(0)
; LINUXOSX64-NEXT: callq test_argRetf80
-; LINUXOSX64-NEXT: fadd %st(0), %st(0)
+; LINUXOSX64-NEXT: fadd %st, %st(0)
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = fadd x86_fp80 %a, %a
%c = call x86_regcallcc x86_fp80 @test_argRetf80(x86_fp80 %b)
%d = fadd x86_fp80 %c, %c
ret x86_fp80 %d
}
; Test regcall when receiving/returning pointer
define x86_regcallcc [4 x i32]* @test_argRetPointer([4 x i32]* %a) {
; X32-LABEL: test_argRetPointer:
; X32: # %bb.0:
; X32-NEXT: incl %eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetPointer:
; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetPointer:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: retq
%b = ptrtoint [4 x i32]* %a to i32
%c = add i32 %b, 1
%d = inttoptr i32 %c to [4 x i32]*
ret [4 x i32]* %d
}
; Test regcall when passing/retrieving pointer
define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) {
; X32-LABEL: test_CallargRetPointer:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argRetPointer
; X32-NEXT: incl %eax
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRetPointer:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incl %eax
; WIN64-NEXT: callq test_argRetPointer
; WIN64-NEXT: incl %eax
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRetPointer:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: callq test_argRetPointer
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = ptrtoint [4 x i32]* %a to i32
%c = add i32 %b, 1
%d = inttoptr i32 %c to [4 x i32]*
%e = call x86_regcallcc [4 x i32]* @test_argRetPointer([4 x i32]* %d)
%f = ptrtoint [4 x i32]* %e to i32
%g = add i32 %f, 1
%h = inttoptr i32 %g to [4 x i32]*
ret [4 x i32]* %h
}
; Test regcall when receiving/returning 128 bit vector
define x86_regcallcc <4 x i32> @test_argRet128Vector(<4 x i1> %x, <4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: test_argRet128Vector:
; X32: # %bb.0:
; X32-NEXT: vpslld $31, %xmm0, %xmm0
; X32-NEXT: vpmovd2m %xmm0, %k1
; X32-NEXT: vpblendmd %xmm1, %xmm2, %xmm0 {%k1}
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRet128Vector:
; WIN64: # %bb.0:
; WIN64-NEXT: vpslld $31, %xmm0, %xmm0
; WIN64-NEXT: vpmovd2m %xmm0, %k1
; WIN64-NEXT: vpblendmd %xmm1, %xmm2, %xmm0 {%k1}
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRet128Vector:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vpslld $31, %xmm0, %xmm0
; LINUXOSX64-NEXT: vpmovd2m %xmm0, %k1
; LINUXOSX64-NEXT: vpblendmd %xmm1, %xmm2, %xmm0 {%k1}
; LINUXOSX64-NEXT: retq
%d = select <4 x i1> %x, <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %d
}
; Test regcall when passing/retrieving 128 bit vector
define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i1> %x, <4 x i32> %a) {
; X32-LABEL: test_CallargRet128Vector:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $40, %esp
; X32-NEXT: vmovups %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32-NEXT: vmovdqa %xmm1, %xmm4
; X32-NEXT: vpslld $31, %xmm0, %xmm1
; X32-NEXT: vpmovd2m %xmm1, %k1
; X32-NEXT: kmovw %k1, {{[-0-9]+}}(%e{{[sb]}}p) # 2-byte Spill
; X32-NEXT: vmovdqa %xmm4, %xmm1
; X32-NEXT: vmovdqa %xmm4, %xmm2
; X32-NEXT: calll _test_argRet128Vector
; X32-NEXT: kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k1 # 2-byte Reload
; X32-NEXT: vmovdqa32 %xmm4, %xmm0 {%k1}
; X32-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm4 # 16-byte Reload
; X32-NEXT: addl $40, %esp
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRet128Vector:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $32, %rsp
; WIN64-NEXT: .seh_stackalloc 32
; WIN64-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT: .seh_savexmm 8, 16
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: vmovdqa %xmm1, %xmm8
; WIN64-NEXT: vpslld $31, %xmm0, %xmm1
; WIN64-NEXT: vpmovd2m %xmm1, %k1
; WIN64-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; WIN64-NEXT: vmovdqa %xmm8, %xmm1
; WIN64-NEXT: vmovdqa %xmm8, %xmm2
; WIN64-NEXT: callq test_argRet128Vector
; WIN64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
; WIN64-NEXT: vmovdqa32 %xmm8, %xmm0 {%k1}
; WIN64-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; WIN64-NEXT: addq $32, %rsp
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRet128Vector:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $32, %rsp
; LINUXOSX64-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 48
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: .cfi_offset %xmm8, -32
; LINUXOSX64-NEXT: vmovdqa %xmm1, %xmm8
; LINUXOSX64-NEXT: vpslld $31, %xmm0, %xmm1
; LINUXOSX64-NEXT: vpmovd2m %xmm1, %k1
; LINUXOSX64-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; LINUXOSX64-NEXT: vmovdqa %xmm8, %xmm1
; LINUXOSX64-NEXT: vmovdqa %xmm8, %xmm2
; LINUXOSX64-NEXT: callq test_argRet128Vector
; LINUXOSX64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
; LINUXOSX64-NEXT: vmovdqa32 %xmm8, %xmm0 {%k1}
; LINUXOSX64-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; LINUXOSX64-NEXT: addq $32, %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = call x86_regcallcc <4 x i32> @test_argRet128Vector(<4 x i1> %x, <4 x i32> %a, <4 x i32> %a)
%c = select <4 x i1> %x, <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %c
}
; Test regcall when receiving/returning 256 bit vector
define x86_regcallcc <8 x i32> @test_argRet256Vector(<8 x i1> %x, <8 x i32> %a, <8 x i32> %b) {
; X32-LABEL: test_argRet256Vector:
; X32: # %bb.0:
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRet256Vector:
; WIN64: # %bb.0:
; WIN64-NEXT: kmovd %eax, %k1
; WIN64-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRet256Vector:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: kmovd %eax, %k1
; LINUXOSX64-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; LINUXOSX64-NEXT: retq
%d = select <8 x i1> %x, <8 x i32> %a, <8 x i32> %b
ret <8 x i32> %d
}
; Test regcall when passing/retrieving 256 bit vector
define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i1> %x, <8 x i32> %a) {
; X32-LABEL: test_CallargRet256Vector:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $88, %esp
; X32-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: kmovw %k1, {{[-0-9]+}}(%e{{[sb]}}p) # 2-byte Spill
; X32-NEXT: vmovdqa %ymm0, %ymm1
; X32-NEXT: calll _test_argRet256Vector
; X32-NEXT: kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k1 # 2-byte Reload
; X32-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %ymm1 # 32-byte Reload
; X32-NEXT: vmovdqa32 %ymm1, %ymm0 {%k1}
; X32-NEXT: addl $88, %esp
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRet256Vector:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $80, %rsp
; WIN64-NEXT: .seh_stackalloc 80
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; WIN64-NEXT: kmovd %eax, %k1
; WIN64-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; WIN64-NEXT: vmovdqa %ymm0, %ymm1
; WIN64-NEXT: callq test_argRet256Vector
; WIN64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
; WIN64-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; WIN64-NEXT: vmovdqa32 %ymm1, %ymm0 {%k1}
; WIN64-NEXT: addq $80, %rsp
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRet256Vector:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $80, %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 96
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; LINUXOSX64-NEXT: kmovd %eax, %k1
; LINUXOSX64-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; LINUXOSX64-NEXT: vmovdqa %ymm0, %ymm1
; LINUXOSX64-NEXT: callq test_argRet256Vector
; LINUXOSX64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
; LINUXOSX64-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; LINUXOSX64-NEXT: vmovdqa32 %ymm1, %ymm0 {%k1}
; LINUXOSX64-NEXT: addq $80, %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = call x86_regcallcc <8 x i32> @test_argRet256Vector(<8 x i1> %x, <8 x i32> %a, <8 x i32> %a)
%c = select <8 x i1> %x, <8 x i32> %a, <8 x i32> %b
ret <8 x i32> %c
}
; Test regcall when receiving/returning 512 bit vector
define x86_regcallcc <16 x i32> @test_argRet512Vector(<16 x i1> %x, <16 x i32> %a, <16 x i32> %b) {
; X32-LABEL: test_argRet512Vector:
; X32: # %bb.0:
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRet512Vector:
; WIN64: # %bb.0:
; WIN64-NEXT: kmovd %eax, %k1
; WIN64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRet512Vector:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: kmovd %eax, %k1
; LINUXOSX64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; LINUXOSX64-NEXT: retq
%d = select <16 x i1> %x, <16 x i32> %a, <16 x i32> %b
ret <16 x i32> %d
}
; Test regcall when passing/retrieving 512 bit vector
define x86_regcallcc <16 x i32> @test_CallargRet512Vector(<16 x i1> %x, <16 x i32> %a) {
; X32-LABEL: test_CallargRet512Vector:
; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $184, %esp
; X32-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: kmovw %k1, {{[-0-9]+}}(%e{{[sb]}}p) # 2-byte Spill
; X32-NEXT: vmovdqa64 %zmm0, %zmm1
; X32-NEXT: calll _test_argRet512Vector
; X32-NEXT: kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k1 # 2-byte Reload
; X32-NEXT: vmovdqu64 {{[-0-9]+}}(%e{{[sb]}}p), %zmm1 # 64-byte Reload
; X32-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; X32-NEXT: addl $184, %esp
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRet512Vector:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $176, %rsp
; WIN64-NEXT: .seh_stackalloc 176
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; WIN64-NEXT: kmovd %eax, %k1
; WIN64-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; WIN64-NEXT: vmovdqa64 %zmm0, %zmm1
; WIN64-NEXT: callq test_argRet512Vector
; WIN64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
; WIN64-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; WIN64-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; WIN64-NEXT: addq $176, %rsp
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; WIN64-NEXT: .text
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRet512Vector:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $176, %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 192
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; LINUXOSX64-NEXT: kmovd %eax, %k1
; LINUXOSX64-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; LINUXOSX64-NEXT: vmovdqa64 %zmm0, %zmm1
; LINUXOSX64-NEXT: callq test_argRet512Vector
; LINUXOSX64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
; LINUXOSX64-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; LINUXOSX64-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; LINUXOSX64-NEXT: addq $176, %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8
; LINUXOSX64-NEXT: retq
%b = call x86_regcallcc <16 x i32> @test_argRet512Vector(<16 x i1> %x, <16 x i32> %a, <16 x i32> %a)
%c = select <16 x i1> %x, <16 x i32> %a, <16 x i32> %b
ret <16 x i32> %c
}
; Test regcall with multiple input parameters - callee-saved xmms
define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b, <32 x float> %c) nounwind {
; X32-LABEL: testf32_inp:
; X32: # %bb.0:
; X32-NEXT: subl $44, %esp
; X32-NEXT: vmovups %xmm7, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32-NEXT: vmovups %xmm6, (%esp) # 16-byte Spill
; X32-NEXT: vaddps %zmm2, %zmm0, %zmm6
; X32-NEXT: vaddps %zmm3, %zmm1, %zmm7
; X32-NEXT: vmulps %zmm2, %zmm0, %zmm0
; X32-NEXT: vsubps %zmm0, %zmm6, %zmm0
; X32-NEXT: vmulps %zmm3, %zmm1, %zmm1
; X32-NEXT: vsubps %zmm1, %zmm7, %zmm1
; X32-NEXT: vaddps %zmm4, %zmm0, %zmm0
; X32-NEXT: vaddps %zmm5, %zmm1, %zmm1
; X32-NEXT: vmovups (%esp), %xmm6 # 16-byte Reload
; X32-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm7 # 16-byte Reload
; X32-NEXT: addl $44, %esp
; X32-NEXT: retl
;
; WIN64-LABEL: testf32_inp:
; WIN64: # %bb.0:
; WIN64-NEXT: vaddps %zmm2, %zmm0, %zmm6
; WIN64-NEXT: vaddps %zmm3, %zmm1, %zmm7
; WIN64-NEXT: vmulps %zmm2, %zmm0, %zmm0
; WIN64-NEXT: vsubps %zmm0, %zmm6, %zmm0
; WIN64-NEXT: vmulps %zmm3, %zmm1, %zmm1
; WIN64-NEXT: vsubps %zmm1, %zmm7, %zmm1
; WIN64-NEXT: vaddps %zmm4, %zmm0, %zmm0
; WIN64-NEXT: vaddps %zmm5, %zmm1, %zmm1
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: testf32_inp:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vaddps %zmm2, %zmm0, %zmm6
; LINUXOSX64-NEXT: vaddps %zmm3, %zmm1, %zmm7
; LINUXOSX64-NEXT: vmulps %zmm2, %zmm0, %zmm0
; LINUXOSX64-NEXT: vsubps %zmm0, %zmm6, %zmm0
; LINUXOSX64-NEXT: vmulps %zmm3, %zmm1, %zmm1
; LINUXOSX64-NEXT: vsubps %zmm1, %zmm7, %zmm1
; LINUXOSX64-NEXT: vaddps %zmm4, %zmm0, %zmm0
; LINUXOSX64-NEXT: vaddps %zmm5, %zmm1, %zmm1
; LINUXOSX64-NEXT: retq
%x1 = fadd <32 x float> %a, %b
%x2 = fmul <32 x float> %a, %b
%x3 = fsub <32 x float> %x1, %x2
%x4 = fadd <32 x float> %x3, %c
ret <32 x float> %x4
}
; Test regcall with multiple input parameters - callee-saved GPRs
define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %b1, i32 %b2, i32 %b3, i32 %b4, i32 %b5, i32 %b6) nounwind {
; X32-LABEL: testi32_inp:
; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: subl $20, %esp
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %eax, %ebx
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: subl %ecx, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, %ebp
; X32-NEXT: subl {{[0-9]+}}(%esp), %ebp
; X32-NEXT: imull %ebp, %ebx
; X32-NEXT: movl %edx, %ebp
; X32-NEXT: subl %edi, %ebp
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: imull %ebp, %ecx
; X32-NEXT: addl %ecx, %ebx
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: movl %edi, %ebp
; X32-NEXT: subl {{[0-9]+}}(%esp), %ebp
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull %ebp, %eax
; X32-NEXT: addl %eax, %ebx
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X32-NEXT: movl (%esp), %ebp # 4-byte Reload
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
; X32-NEXT: imull %eax, %esi
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: imull %ebp, %edx
; X32-NEXT: addl %esi, %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: imull %edi, %ecx
; X32-NEXT: addl %edx, %ecx
; X32-NEXT: addl %ecx, %ebx
; X32-NEXT: movl %ebx, %eax
; X32-NEXT: addl $20, %esp
; X32-NEXT: popl %ebx
; X32-NEXT: popl %ebp
; X32-NEXT: retl
;
; WIN64-LABEL: testi32_inp:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %r13
; WIN64-NEXT: pushq %rbp
; WIN64-NEXT: pushq %rbx
; WIN64-NEXT: movl %eax, %r13d
; WIN64-NEXT: subl %ecx, %eax
; WIN64-NEXT: movl %edx, %ebp
; WIN64-NEXT: subl %edi, %ebp
; WIN64-NEXT: movl %r9d, %ebx
; WIN64-NEXT: subl %r10d, %ebx
; WIN64-NEXT: imull %ebx, %eax
; WIN64-NEXT: movl %r11d, %ebx
; WIN64-NEXT: subl %r12d, %ebx
; WIN64-NEXT: imull %ebp, %ebx
; WIN64-NEXT: movl %esi, %ebp
; WIN64-NEXT: subl %r8d, %ebp
; WIN64-NEXT: addl %ebx, %eax
; WIN64-NEXT: movl %r14d, %ebx
; WIN64-NEXT: subl %r15d, %ebx
; WIN64-NEXT: imull %ebp, %ebx
; WIN64-NEXT: addl %ebx, %eax
; WIN64-NEXT: addl %ecx, %r13d
; WIN64-NEXT: addl %edi, %edx
; WIN64-NEXT: addl %r8d, %esi
; WIN64-NEXT: addl %r10d, %r9d
; WIN64-NEXT: imull %r13d, %r9d
; WIN64-NEXT: addl %r12d, %r11d
; WIN64-NEXT: imull %edx, %r11d
; WIN64-NEXT: addl %r9d, %r11d
; WIN64-NEXT: addl %r15d, %r14d
; WIN64-NEXT: imull %esi, %r14d
; WIN64-NEXT: addl %r11d, %r14d
; WIN64-NEXT: addl %r14d, %eax
; WIN64-NEXT: popq %rbx
; WIN64-NEXT: popq %rbp
; WIN64-NEXT: popq %r13
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: testi32_inp:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rbp
; LINUXOSX64-NEXT: pushq %rbx
; LINUXOSX64-NEXT: movl %eax, %r10d
; LINUXOSX64-NEXT: movl {{[0-9]+}}(%rsp), %r11d
; LINUXOSX64-NEXT: subl %ecx, %eax
; LINUXOSX64-NEXT: movl %edx, %ebx
; LINUXOSX64-NEXT: subl %edi, %ebx
; LINUXOSX64-NEXT: movl %r9d, %ebp
; LINUXOSX64-NEXT: subl %r12d, %ebp
; LINUXOSX64-NEXT: imull %ebp, %eax
; LINUXOSX64-NEXT: movl %r13d, %ebp
; LINUXOSX64-NEXT: subl %r14d, %ebp
; LINUXOSX64-NEXT: imull %ebx, %ebp
; LINUXOSX64-NEXT: movl %esi, %ebx
; LINUXOSX64-NEXT: subl %r8d, %ebx
; LINUXOSX64-NEXT: addl %ebp, %eax
; LINUXOSX64-NEXT: movl %r15d, %ebp
; LINUXOSX64-NEXT: subl %r11d, %ebp
; LINUXOSX64-NEXT: imull %ebx, %ebp
; LINUXOSX64-NEXT: addl %ebp, %eax
; LINUXOSX64-NEXT: addl %ecx, %r10d
; LINUXOSX64-NEXT: addl %edi, %edx
; LINUXOSX64-NEXT: addl %r8d, %esi
; LINUXOSX64-NEXT: addl %r12d, %r9d
; LINUXOSX64-NEXT: imull %r10d, %r9d
; LINUXOSX64-NEXT: addl %r14d, %r13d
; LINUXOSX64-NEXT: imull %edx, %r13d
; LINUXOSX64-NEXT: addl %r9d, %r13d
; LINUXOSX64-NEXT: addl %r11d, %r15d
; LINUXOSX64-NEXT: imull %esi, %r15d
; LINUXOSX64-NEXT: addl %r13d, %r15d
; LINUXOSX64-NEXT: addl %r15d, %eax
; LINUXOSX64-NEXT: popq %rbx
; LINUXOSX64-NEXT: popq %rbp
; LINUXOSX64-NEXT: retq
%x1 = sub i32 %a1, %a2
%x2 = sub i32 %a3, %a4
%x3 = sub i32 %a5, %a6
%y1 = sub i32 %b1, %b2
%y2 = sub i32 %b3, %b4
%y3 = sub i32 %b5, %b6
%v1 = add i32 %a1, %a2
%v2 = add i32 %a3, %a4
%v3 = add i32 %a5, %a6
%w1 = add i32 %b1, %b2
%w2 = add i32 %b3, %b4
%w3 = add i32 %b5, %b6
%s1 = mul i32 %x1, %y1
%s2 = mul i32 %x2, %y2
%s3 = mul i32 %x3, %y3
%t1 = mul i32 %v1, %w1
%t2 = mul i32 %v2, %w2
%t3 = mul i32 %v3, %w3
%m1 = add i32 %s1, %s2
%m2 = add i32 %m1, %s3
%n1 = add i32 %t1, %t2
%n2 = add i32 %n1, %t3
%r1 = add i32 %m2, %n2
ret i32 %r1
}
; Test that parameters that overflow register capacity are passed on the stack
define x86_regcallcc <32 x float> @testf32_stack(<32 x float> %a0, <32 x float> %b0, <32 x float> %c0, <32 x float> %a1, <32 x float> %b1, <32 x float> %c1, <32 x float> %a2, <32 x float> %b2, <32 x float> %c2) nounwind {
; X32-LABEL: testf32_stack:
; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-64, %esp
; X32-NEXT: subl $64, %esp
; X32-NEXT: vaddps %zmm3, %zmm1, %zmm1
; X32-NEXT: vaddps %zmm2, %zmm0, %zmm0
; X32-NEXT: vaddps %zmm0, %zmm4, %zmm0
; X32-NEXT: vaddps %zmm1, %zmm5, %zmm1
; X32-NEXT: vaddps %zmm1, %zmm7, %zmm1
; X32-NEXT: vaddps %zmm0, %zmm6, %zmm0
; X32-NEXT: vaddps 8(%ebp), %zmm0, %zmm0
; X32-NEXT: vaddps 72(%ebp), %zmm1, %zmm1
; X32-NEXT: vaddps 200(%ebp), %zmm1, %zmm1
; X32-NEXT: vaddps 136(%ebp), %zmm0, %zmm0
; X32-NEXT: vaddps 264(%ebp), %zmm0, %zmm0
; X32-NEXT: vaddps 328(%ebp), %zmm1, %zmm1
; X32-NEXT: vaddps 456(%ebp), %zmm1, %zmm1
; X32-NEXT: vaddps 392(%ebp), %zmm0, %zmm0
; X32-NEXT: vaddps 520(%ebp), %zmm0, %zmm0
; X32-NEXT: vaddps 584(%ebp), %zmm1, %zmm1
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
; X32-NEXT: retl
;
; WIN64-LABEL: testf32_stack:
; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rbp
; WIN64-NEXT: subq $48, %rsp
; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
; WIN64-NEXT: andq $-64, %rsp
; WIN64-NEXT: vaddps %zmm3, %zmm1, %zmm1
; WIN64-NEXT: vaddps %zmm2, %zmm0, %zmm0
; WIN64-NEXT: vaddps %zmm0, %zmm4, %zmm0
; WIN64-NEXT: vaddps %zmm1, %zmm5, %zmm1
; WIN64-NEXT: vaddps %zmm1, %zmm7, %zmm1
; WIN64-NEXT: vaddps %zmm0, %zmm6, %zmm0
; WIN64-NEXT: vaddps %zmm0, %zmm8, %zmm0
; WIN64-NEXT: vaddps %zmm1, %zmm9, %zmm1
; WIN64-NEXT: vaddps %zmm1, %zmm11, %zmm1
; WIN64-NEXT: vaddps %zmm0, %zmm10, %zmm0
; WIN64-NEXT: vaddps %zmm0, %zmm12, %zmm0
; WIN64-NEXT: vaddps %zmm1, %zmm13, %zmm1
; WIN64-NEXT: vaddps %zmm1, %zmm15, %zmm1
; WIN64-NEXT: vaddps %zmm0, %zmm14, %zmm0
; WIN64-NEXT: vaddps 16(%rbp), %zmm0, %zmm0
; WIN64-NEXT: vaddps 80(%rbp), %zmm1, %zmm1
; WIN64-NEXT: movq %rbp, %rsp
; WIN64-NEXT: popq %rbp
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: testf32_stack:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rbp
; LINUXOSX64-NEXT: movq %rsp, %rbp
; LINUXOSX64-NEXT: andq $-64, %rsp
; LINUXOSX64-NEXT: subq $64, %rsp
; LINUXOSX64-NEXT: vaddps %zmm3, %zmm1, %zmm1
; LINUXOSX64-NEXT: vaddps %zmm2, %zmm0, %zmm0
; LINUXOSX64-NEXT: vaddps %zmm0, %zmm4, %zmm0
; LINUXOSX64-NEXT: vaddps %zmm1, %zmm5, %zmm1
; LINUXOSX64-NEXT: vaddps %zmm1, %zmm7, %zmm1
; LINUXOSX64-NEXT: vaddps %zmm0, %zmm6, %zmm0
; LINUXOSX64-NEXT: vaddps %zmm0, %zmm8, %zmm0
; LINUXOSX64-NEXT: vaddps %zmm1, %zmm9, %zmm1
; LINUXOSX64-NEXT: vaddps %zmm1, %zmm11, %zmm1
; LINUXOSX64-NEXT: vaddps %zmm0, %zmm10, %zmm0
; LINUXOSX64-NEXT: vaddps %zmm0, %zmm12, %zmm0
; LINUXOSX64-NEXT: vaddps %zmm1, %zmm13, %zmm1
; LINUXOSX64-NEXT: vaddps %zmm1, %zmm15, %zmm1
; LINUXOSX64-NEXT: vaddps %zmm0, %zmm14, %zmm0
; LINUXOSX64-NEXT: vaddps 16(%rbp), %zmm0, %zmm0
; LINUXOSX64-NEXT: vaddps 80(%rbp), %zmm1, %zmm1
; LINUXOSX64-NEXT: movq %rbp, %rsp
; LINUXOSX64-NEXT: popq %rbp
; LINUXOSX64-NEXT: retq
%x1 = fadd <32 x float> %a0, %b0
%x2 = fadd <32 x float> %c0, %x1
%x3 = fadd <32 x float> %a1, %x2
%x4 = fadd <32 x float> %b1, %x3
%x5 = fadd <32 x float> %c1, %x4
%x6 = fadd <32 x float> %a2, %x5
%x7 = fadd <32 x float> %b2, %x6
%x8 = fadd <32 x float> %c2, %x7
ret <32 x float> %x8
}
; Test regcall when passing/retrieving mixed types
define x86_regcallcc i32 @test_argRetMixTypes(double, float, i8 signext, i32, i64, i16 signext, i32*) #0 {
; X32-LABEL: test_argRetMixTypes:
; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; X32-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; X32-NEXT: vcvtsi2sdl %eax, %xmm2, %xmm1
; X32-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; X32-NEXT: vcvtsi2sdl %ecx, %xmm2, %xmm1
; X32-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; X32-NEXT: vmovd %edx, %xmm1
; X32-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
; X32-NEXT: vcvtqq2pd %ymm1, %ymm1
; X32-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; X32-NEXT: vcvtsi2sdl %esi, %xmm2, %xmm1
; X32-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; X32-NEXT: vcvtsi2sdl (%ebx), %xmm2, %xmm1
; X32-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; X32-NEXT: vcvttsd2si %xmm0, %eax
; X32-NEXT: popl %ebx
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetMixTypes:
; WIN64: # %bb.0:
; WIN64-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; WIN64-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; WIN64-NEXT: vcvtsi2sdl %eax, %xmm2, %xmm1
; WIN64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; WIN64-NEXT: vcvtsi2sdl %ecx, %xmm2, %xmm1
; WIN64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; WIN64-NEXT: vcvtsi2sdq %rdx, %xmm2, %xmm1
; WIN64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; WIN64-NEXT: vcvtsi2sdl %edi, %xmm2, %xmm1
; WIN64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; WIN64-NEXT: vcvtsi2sdl (%rsi), %xmm2, %xmm1
; WIN64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; WIN64-NEXT: vcvttsd2si %xmm0, %eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetMixTypes:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; LINUXOSX64-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; LINUXOSX64-NEXT: vcvtsi2sdl %eax, %xmm2, %xmm1
; LINUXOSX64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; LINUXOSX64-NEXT: vcvtsi2sdl %ecx, %xmm2, %xmm1
; LINUXOSX64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; LINUXOSX64-NEXT: vcvtsi2sdq %rdx, %xmm2, %xmm1
; LINUXOSX64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; LINUXOSX64-NEXT: vcvtsi2sdl %edi, %xmm2, %xmm1
; LINUXOSX64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; LINUXOSX64-NEXT: vcvtsi2sdl (%rsi), %xmm2, %xmm1
; LINUXOSX64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; LINUXOSX64-NEXT: vcvttsd2si %xmm0, %eax
; LINUXOSX64-NEXT: retq
%8 = fpext float %1 to double
%9 = fadd double %8, %0
%10 = sitofp i8 %2 to double
%11 = fadd double %9, %10
%12 = sitofp i32 %3 to double
%13 = fadd double %11, %12
%14 = sitofp i64 %4 to double
%15 = fadd double %13, %14
%16 = sitofp i16 %5 to double
%17 = fadd double %15, %16
%18 = load i32, i32* %6, align 4
%19 = sitofp i32 %18 to double
%20 = fadd double %17, %19
%21 = fptosi double %20 to i32
ret i32 %21
}
%struct.complex = type { float, double, i32, i8, i64}
define x86_regcallcc %struct.complex @test_argMultiRet(float, double, i32, i8, i64) local_unnamed_addr #0 {
; X32-LABEL: test_argMultiRet:
; X32: # %bb.0:
; X32-NEXT: vaddsd __real@4014000000000000, %xmm1, %xmm1
; X32-NEXT: movl $4, %eax
; X32-NEXT: movb $7, %cl
; X32-NEXT: movl $999, %edx # imm = 0x3E7
; X32-NEXT: xorl %edi, %edi
; X32-NEXT: retl
;
; WIN64-LABEL: test_argMultiRet:
; WIN64: # %bb.0:
; WIN64-NEXT: vaddsd __real@{{.*}}(%rip), %xmm1, %xmm1
; WIN64-NEXT: movl $999, %edx # imm = 0x3E7
; WIN64-NEXT: movl $4, %eax
; WIN64-NEXT: movb $7, %cl
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argMultiRet:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; LINUXOSX64-NEXT: movl $999, %edx # imm = 0x3E7
; LINUXOSX64-NEXT: movl $4, %eax
; LINUXOSX64-NEXT: movb $7, %cl
; LINUXOSX64-NEXT: retq
%6 = fadd double %1, 5.000000e+00
%7 = insertvalue %struct.complex undef, float %0, 0
%8 = insertvalue %struct.complex %7, double %6, 1
%9 = insertvalue %struct.complex %8, i32 4, 2
%10 = insertvalue %struct.complex %9, i8 7, 3
%11 = insertvalue %struct.complex %10, i64 999, 4
ret %struct.complex %11
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/fcmove.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/fcmove.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/fcmove.ll (revision 344166)
@@ -1,15 +1,15 @@
; RUN: llc %s -o - -verify-machineinstrs | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
; Test that we can generate an fcmove, and also that it passes verification.
; CHECK-LABEL: cmove_f
-; CHECK: fcmove %st({{[0-7]}}), %st(0)
+; CHECK: fcmove %st({{[0-7]}}), %st
define x86_fp80 @cmove_f(x86_fp80 %a, x86_fp80 %b, i32 %c) {
%test = icmp eq i32 %c, 0
%add = fadd x86_fp80 %a, %b
%ret = select i1 %test, x86_fp80 %add, x86_fp80 %b
ret x86_fp80 %ret
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/fmf-flags.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/fmf-flags.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/fmf-flags.ll (revision 344166)
@@ -1,124 +1,124 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
declare float @llvm.sqrt.f32(float %x);
define float @fast_recip_sqrt(float %x) {
; X64-LABEL: fast_recip_sqrt:
; X64: # %bb.0:
; X64-NEXT: rsqrtss %xmm0, %xmm1
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: addss {{.*}}(%rip), %xmm0
; X64-NEXT: mulss {{.*}}(%rip), %xmm1
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: fast_recip_sqrt:
; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fsqrt
; X86-NEXT: fld1
-; X86-NEXT: fdivp %st(1)
+; X86-NEXT: fdivp %st, %st(1)
; X86-NEXT: retl
%y = call fast float @llvm.sqrt.f32(float %x)
%z = fdiv fast float 1.0, %y
ret float %z
}
declare float @llvm.fmuladd.f32(float %a, float %b, float %c);
define float @fast_fmuladd_opts(float %a , float %b , float %c) {
; X64-LABEL: fast_fmuladd_opts:
; X64: # %bb.0:
; X64-NEXT: mulss {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X86-LABEL: fast_fmuladd_opts:
; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fmuls {{.*}}
; X86-NEXT: retl
%res = call fast float @llvm.fmuladd.f32(float %a, float 2.0, float %a)
ret float %res
}
; The multiply is strict.
@mul1 = common global double 0.000000e+00, align 4
define double @not_so_fast_mul_add(double %x) {
; X64-LABEL: not_so_fast_mul_add:
; X64: # %bb.0:
; X64-NEXT: movsd {{.*}}(%rip), %xmm1
; X64-NEXT: mulsd %xmm0, %xmm1
; X64-NEXT: mulsd {{.*}}(%rip), %xmm0
; X64-NEXT: movsd %xmm1, {{.*}}(%rip)
; X64-NEXT: retq
;
; X86-LABEL: not_so_fast_mul_add:
; X86: # %bb.0:
; X86-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NEXT: fld %st(0)
; X86-NEXT: fmull {{\.LCPI.*}}
; X86-NEXT: fxch %st(1)
; X86-NEXT: fmull {{\.LCPI.*}}
; X86-NEXT: fxch %st(1)
; X86-NEXT: fstpl mul1
; X86-NEXT: retl
%m = fmul double %x, 4.2
%a = fadd fast double %m, %x
store double %m, double* @mul1, align 4
ret double %a
}
; The sqrt is strict.
@sqrt1 = common global float 0.000000e+00, align 4
define float @not_so_fast_recip_sqrt(float %x) {
; X64-LABEL: not_so_fast_recip_sqrt:
; X64: # %bb.0:
; X64-NEXT: rsqrtss %xmm0, %xmm1
; X64-NEXT: sqrtss %xmm0, %xmm2
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: addss {{.*}}(%rip), %xmm0
; X64-NEXT: mulss {{.*}}(%rip), %xmm1
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: movss %xmm2, sqrt1(%rip)
; X64-NEXT: retq
;
; X86-LABEL: not_so_fast_recip_sqrt:
; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fsqrt
; X86-NEXT: fld1
-; X86-NEXT: fdiv %st(1)
+; X86-NEXT: fdiv %st(1), %st
; X86-NEXT: fxch %st(1)
; X86-NEXT: fstps sqrt1
; X86-NEXT: retl
%y = call float @llvm.sqrt.f32(float %x)
%z = fdiv fast float 1.0, %y
store float %y, float* @sqrt1, align 4
%ret = fadd float %z , 14.5
ret float %z
}
define float @div_arcp_by_const(half %x) {
; X64-LABEL: .LCPI4_0:
; X64-NEXT: .long 1036828672
; X64-LABEL: div_arcp_by_const:
; X64: movzwl %ax, %edi
; X64: mulss .LCPI4_0(%rip), %xmm0
;
; X86-LABEL: .LCPI4_0:
; X86-NEXT: .long 1036828672
; X86-LABEL: div_arcp_by_const:
; X86: movzwl %ax, %eax
; X86: fmuls .LCPI4_0
%rcp = fdiv arcp half %x, 10.0
%z = fpext half %rcp to float
ret float %z
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/fp-cvt.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/fp-cvt.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/fp-cvt.ll (revision 344166)
@@ -1,1099 +1,1099 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefixes=X86,X86-X87
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefixes=X64,X64-X87
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
;
; fptosi
;
define i16 @fptosi_i16_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: fptosi_i16_fp80:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fnstcw (%esp)
; X86-NEXT: movzwl (%esp), %eax
; X86-NEXT: movw $3199, (%esp) # imm = 0xC7F
; X86-NEXT: fldcw (%esp)
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: fistps {{[0-9]+}}(%esp)
; X86-NEXT: fldcw (%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: popl %ecx
; X86-NEXT: retl
;
; X64-X87-LABEL: fptosi_i16_fp80:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpl -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: # kill: def $ax killed $ax killed $eax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptosi_i16_fp80:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: fisttpl -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; X64-SSSE3-NEXT: retq
%1 = fptosi x86_fp80 %a0 to i16
ret i16 %1
}
define i16 @fptosi_i16_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: fptosi_i16_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fnstcw (%esp)
; X86-NEXT: movzwl (%esp), %eax
; X86-NEXT: movw $3199, (%esp) # imm = 0xC7F
; X86-NEXT: fldcw (%esp)
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: fistps {{[0-9]+}}(%esp)
; X86-NEXT: fldcw (%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: popl %ecx
; X86-NEXT: retl
;
; X64-X87-LABEL: fptosi_i16_fp80_ld:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt (%rdi)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpl -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: # kill: def $ax killed $ax killed $eax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptosi_i16_fp80_ld:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt (%rdi)
; X64-SSSE3-NEXT: fisttpl -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; X64-SSSE3-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = fptosi x86_fp80 %1 to i16
ret i16 %2
}
define i32 @fptosi_i32_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: fptosi_i32_fp80:
; X86: # %bb.0:
; X86-NEXT: subl $8, %esp
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpl {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl $8, %esp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptosi_i32_fp80:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpl -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptosi_i32_fp80:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: fisttpl -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-SSSE3-NEXT: retq
%1 = fptosi x86_fp80 %a0 to i32
ret i32 %1
}
define i32 @fptosi_i32_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: fptosi_i32_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpl {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl $8, %esp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptosi_i32_fp80_ld:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt (%rdi)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpl -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptosi_i32_fp80_ld:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt (%rdi)
; X64-SSSE3-NEXT: fisttpl -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-SSSE3-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = fptosi x86_fp80 %1 to i32
ret i32 %2
}
define i64 @fptosi_i64_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: fptosi_i64_fp80:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptosi_i64_fp80:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpll -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptosi_i64_fp80:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; X64-SSSE3-NEXT: retq
%1 = fptosi x86_fp80 %a0 to i64
ret i64 %1
}
define i64 @fptosi_i64_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: fptosi_i64_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptosi_i64_fp80_ld:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt (%rdi)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpll -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptosi_i64_fp80_ld:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt (%rdi)
; X64-SSSE3-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; X64-SSSE3-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = fptosi x86_fp80 %1 to i64
ret i64 %2
}
;
; fptoui
;
define i16 @fptoui_i16_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: fptoui_i16_fp80:
; X86: # %bb.0:
; X86-NEXT: subl $8, %esp
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpl {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: addl $8, %esp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptoui_i16_fp80:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpl -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: # kill: def $ax killed $ax killed $eax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptoui_i16_fp80:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: fisttpl -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; X64-SSSE3-NEXT: retq
%1 = fptoui x86_fp80 %a0 to i16
ret i16 %1
}
define i16 @fptoui_i16_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: fptoui_i16_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpl {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: addl $8, %esp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptoui_i16_fp80_ld:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt (%rdi)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpl -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: # kill: def $ax killed $ax killed $eax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptoui_i16_fp80_ld:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt (%rdi)
; X64-SSSE3-NEXT: fisttpl -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; X64-SSSE3-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = fptoui x86_fp80 %1 to i16
ret i16 %2
}
define i32 @fptoui_i32_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: fptoui_i32_fp80:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptoui_i32_fp80:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpll -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptoui_i32_fp80:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-SSSE3-NEXT: retq
%1 = fptoui x86_fp80 %a0 to i32
ret i32 %1
}
define i32 @fptoui_i32_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: fptoui_i32_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptoui_i32_fp80_ld:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt (%rdi)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpll -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptoui_i32_fp80_ld:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt (%rdi)
; X64-SSSE3-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-SSSE3-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = fptoui x86_fp80 %1 to i32
ret i32 %2
}
define i64 @fptoui_i64_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: fptoui_i64_fp80:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp)
; X86-NEXT: flds {{\.LCPI.*}}
; X86-NEXT: fld %st(1)
; X86-NEXT: fsub %st(1)
; X86-NEXT: fxch %st(1)
; X86-NEXT: fucomp %st(2)
; X86-NEXT: fnstsw %ax
; X86-NEXT: # kill: def $ah killed $ah killed $ax
; X86-NEXT: sahf
; X86-NEXT: ja .LBB10_2
; X86-NEXT: # %bb.1:
; X86-NEXT: fstp %st(1)
; X86-NEXT: fldz
; X86-NEXT: .LBB10_2:
; X86-NEXT: fstp %st(0)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: setbe %al
; X86-NEXT: movzbl %al, %edx
; X86-NEXT: shll $31, %edx
; X86-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptoui_i64_fp80:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-X87-NEXT: flds {{.*}}(%rip)
; X64-X87-NEXT: fld %st(1)
; X64-X87-NEXT: fsub %st(1)
; X64-X87-NEXT: xorl %eax, %eax
; X64-X87-NEXT: fxch %st(1)
; X64-X87-NEXT: fucompi %st(2)
-; X64-X87-NEXT: fcmovnbe %st(1), %st(0)
+; X64-X87-NEXT: fcmovnbe %st(1), %st
; X64-X87-NEXT: fstp %st(1)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %ecx
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %cx, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpll -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: setbe %al
; X64-X87-NEXT: shlq $63, %rax
; X64-X87-NEXT: xorq -{{[0-9]+}}(%rsp), %rax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptoui_i64_fp80:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: flds {{.*}}(%rip)
; X64-SSSE3-NEXT: fld %st(1)
; X64-SSSE3-NEXT: fsub %st(1)
; X64-SSSE3-NEXT: xorl %eax, %eax
; X64-SSSE3-NEXT: fxch %st(1)
; X64-SSSE3-NEXT: fucompi %st(2)
-; X64-SSSE3-NEXT: fcmovnbe %st(1), %st(0)
+; X64-SSSE3-NEXT: fcmovnbe %st(1), %st
; X64-SSSE3-NEXT: fstp %st(1)
; X64-SSSE3-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: setbe %al
; X64-SSSE3-NEXT: shlq $63, %rax
; X64-SSSE3-NEXT: xorq -{{[0-9]+}}(%rsp), %rax
; X64-SSSE3-NEXT: retq
%1 = fptoui x86_fp80 %a0 to i64
ret i64 %1
}
define i64 @fptoui_i64_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: fptoui_i64_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: flds {{\.LCPI.*}}
; X86-NEXT: fld %st(1)
; X86-NEXT: fsub %st(1)
; X86-NEXT: fxch %st(1)
; X86-NEXT: fucomp %st(2)
; X86-NEXT: fnstsw %ax
; X86-NEXT: # kill: def $ah killed $ah killed $ax
; X86-NEXT: sahf
; X86-NEXT: ja .LBB11_2
; X86-NEXT: # %bb.1:
; X86-NEXT: fstp %st(1)
; X86-NEXT: fldz
; X86-NEXT: .LBB11_2:
; X86-NEXT: fstp %st(0)
; X86-NEXT: fnstcw {{[0-9]+}}(%esp)
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-NEXT: setbe %al
; X86-NEXT: movzbl %al, %edx
; X86-NEXT: shll $31, %edx
; X86-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-X87-LABEL: fptoui_i64_fp80_ld:
; X64-X87: # %bb.0:
; X64-X87-NEXT: fldt (%rdi)
; X64-X87-NEXT: flds {{.*}}(%rip)
; X64-X87-NEXT: fld %st(1)
; X64-X87-NEXT: fsub %st(1)
; X64-X87-NEXT: xorl %eax, %eax
; X64-X87-NEXT: fxch %st(1)
; X64-X87-NEXT: fucompi %st(2)
-; X64-X87-NEXT: fcmovnbe %st(1), %st(0)
+; X64-X87-NEXT: fcmovnbe %st(1), %st
; X64-X87-NEXT: fstp %st(1)
; X64-X87-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movzwl -{{[0-9]+}}(%rsp), %ecx
; X64-X87-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: movw %cx, -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fistpll -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: fldcw -{{[0-9]+}}(%rsp)
; X64-X87-NEXT: setbe %al
; X64-X87-NEXT: shlq $63, %rax
; X64-X87-NEXT: xorq -{{[0-9]+}}(%rsp), %rax
; X64-X87-NEXT: retq
;
; X64-SSSE3-LABEL: fptoui_i64_fp80_ld:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: fldt (%rdi)
; X64-SSSE3-NEXT: flds {{.*}}(%rip)
; X64-SSSE3-NEXT: fld %st(1)
; X64-SSSE3-NEXT: fsub %st(1)
; X64-SSSE3-NEXT: xorl %eax, %eax
; X64-SSSE3-NEXT: fxch %st(1)
; X64-SSSE3-NEXT: fucompi %st(2)
-; X64-SSSE3-NEXT: fcmovnbe %st(1), %st(0)
+; X64-SSSE3-NEXT: fcmovnbe %st(1), %st
; X64-SSSE3-NEXT: fstp %st(1)
; X64-SSSE3-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: setbe %al
; X64-SSSE3-NEXT: shlq $63, %rax
; X64-SSSE3-NEXT: xorq -{{[0-9]+}}(%rsp), %rax
; X64-SSSE3-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = fptoui x86_fp80 %1 to i64
ret i64 %2
}
;
; sitofp
;
define x86_fp80 @sitofp_fp80_i16(i16 %a0) nounwind {
; X86-LABEL: sitofp_fp80_i16:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: filds {{[0-9]+}}(%esp)
; X86-NEXT: popl %eax
; X86-NEXT: retl
;
; X64-LABEL: sitofp_fp80_i16:
; X64: # %bb.0:
; X64-NEXT: movswl %di, %eax
; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildl -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = sitofp i16 %a0 to x86_fp80
ret x86_fp80 %1
}
define x86_fp80 @sitofp_fp80_i16_ld(i16 *%a0) nounwind {
; X86-LABEL: sitofp_fp80_i16_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzwl (%eax), %eax
; X86-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X86-NEXT: filds {{[0-9]+}}(%esp)
; X86-NEXT: popl %eax
; X86-NEXT: retl
;
; X64-LABEL: sitofp_fp80_i16_ld:
; X64: # %bb.0:
; X64-NEXT: movswl (%rdi), %eax
; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildl -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = load i16, i16 *%a0
%2 = sitofp i16 %1 to x86_fp80
ret x86_fp80 %2
}
define x86_fp80 @sitofp_fp80_i32(i32 %a0) nounwind {
; X86-LABEL: sitofp_fp80_i32:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: fildl (%esp)
; X86-NEXT: popl %eax
; X86-NEXT: retl
;
; X64-LABEL: sitofp_fp80_i32:
; X64: # %bb.0:
; X64-NEXT: movl %edi, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildl -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = sitofp i32 %a0 to x86_fp80
ret x86_fp80 %1
}
define x86_fp80 @sitofp_fp80_i32_ld(i32 *%a0) nounwind {
; X86-LABEL: sitofp_fp80_i32_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl (%eax), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: fildl (%esp)
; X86-NEXT: popl %eax
; X86-NEXT: retl
;
; X64-LABEL: sitofp_fp80_i32_ld:
; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildl -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = load i32, i32 *%a0
%2 = sitofp i32 %1 to x86_fp80
ret x86_fp80 %2
}
define x86_fp80 @sitofp_fp80_i64(i64 %a0) nounwind {
; X86-LABEL: sitofp_fp80_i64:
; X86: # %bb.0:
; X86-NEXT: fildll {{[0-9]+}}(%esp)
; X86-NEXT: retl
;
; X64-LABEL: sitofp_fp80_i64:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = sitofp i64 %a0 to x86_fp80
ret x86_fp80 %1
}
define x86_fp80 @sitofp_fp80_i64_ld(i64 *%a0) nounwind {
; X86-LABEL: sitofp_fp80_i64_ld:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: fildll (%eax)
; X86-NEXT: retl
;
; X64-LABEL: sitofp_fp80_i64_ld:
; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = load i64, i64 *%a0
%2 = sitofp i64 %1 to x86_fp80
ret x86_fp80 %2
}
;
; uitofp
;
define x86_fp80 @uitofp_fp80_i16(i16 %a0) nounwind {
; X86-LABEL: uitofp_fp80_i16:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: fildl (%esp)
; X86-NEXT: popl %eax
; X86-NEXT: retl
;
; X64-LABEL: uitofp_fp80_i16:
; X64: # %bb.0:
; X64-NEXT: movzwl %di, %eax
; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildl -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = uitofp i16 %a0 to x86_fp80
ret x86_fp80 %1
}
define x86_fp80 @uitofp_fp80_i16_ld(i16 *%a0) nounwind {
; X86-LABEL: uitofp_fp80_i16_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzwl (%eax), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: fildl (%esp)
; X86-NEXT: popl %eax
; X86-NEXT: retl
;
; X64-LABEL: uitofp_fp80_i16_ld:
; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildl -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = load i16, i16 *%a0
%2 = uitofp i16 %1 to x86_fp80
ret x86_fp80 %2
}
define x86_fp80 @uitofp_fp80_i32(i32 %a0) nounwind {
; X86-LABEL: uitofp_fp80_i32:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: fildll (%esp)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: uitofp_fp80_i32:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = uitofp i32 %a0 to x86_fp80
ret x86_fp80 %1
}
define x86_fp80 @uitofp_fp80_i32_ld(i32 *%a0) nounwind {
; X86-LABEL: uitofp_fp80_i32_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl (%eax), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: fildll (%esp)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: uitofp_fp80_i32_ld:
; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = load i32, i32 *%a0
%2 = uitofp i32 %1 to x86_fp80
ret x86_fp80 %2
}
define x86_fp80 @uitofp_fp80_i64(i64 %a0) nounwind {
; X86-LABEL: uitofp_fp80_i64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl 12(%ebp), %ecx
; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: setns %al
; X86-NEXT: fildll (%esp)
; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: uitofp_fp80_i64:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: setns %al
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: fadds {{\.LCPI.*}}(,%rax,4)
; X64-NEXT: retq
%1 = uitofp i64 %a0 to x86_fp80
ret x86_fp80 %1
}
define x86_fp80 @uitofp_fp80_i64_ld(i64 *%a0) nounwind {
; X86-LABEL: uitofp_fp80_i64_ld:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl (%eax), %ecx
; X86-NEXT: movl 4(%eax), %eax
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl %ecx, (%esp)
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: testl %eax, %eax
; X86-NEXT: setns %cl
; X86-NEXT: fildll (%esp)
; X86-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: uitofp_fp80_i64_ld:
; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: testq %rax, %rax
; X64-NEXT: setns %cl
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: fadds {{\.LCPI.*}}(,%rcx,4)
; X64-NEXT: retq
%1 = load i64, i64 *%a0
%2 = uitofp i64 %1 to x86_fp80
ret x86_fp80 %2
}
;
; floor
;
define x86_fp80 @floor_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: floor_fp80:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll floorl
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
; X64-LABEL: floor_fp80:
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
; X64-NEXT: callq floorl
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.floor.f80(x86_fp80 %a0)
ret x86_fp80 %1
}
define x86_fp80 @floor_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: floor_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll floorl
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
; X64-LABEL: floor_fp80_ld:
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
; X64-NEXT: callq floorl
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = call x86_fp80 @llvm.floor.f80(x86_fp80 %1)
ret x86_fp80 %2
}
declare x86_fp80 @llvm.floor.f80(x86_fp80 %p)
;
; ceil
;
define x86_fp80 @ceil_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: ceil_fp80:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll ceill
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
; X64-LABEL: ceil_fp80:
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
; X64-NEXT: callq ceill
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.ceil.f80(x86_fp80 %a0)
ret x86_fp80 %1
}
define x86_fp80 @ceil_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: ceil_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll ceill
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
; X64-LABEL: ceil_fp80_ld:
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
; X64-NEXT: callq ceill
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = call x86_fp80 @llvm.ceil.f80(x86_fp80 %1)
ret x86_fp80 %2
}
declare x86_fp80 @llvm.ceil.f80(x86_fp80 %p)
;
; trunc
;
define x86_fp80 @trunc_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: trunc_fp80:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll truncl
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
; X64-LABEL: trunc_fp80:
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
; X64-NEXT: callq truncl
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.trunc.f80(x86_fp80 %a0)
ret x86_fp80 %1
}
define x86_fp80 @trunc_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: trunc_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll truncl
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
; X64-LABEL: trunc_fp80_ld:
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
; X64-NEXT: callq truncl
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = call x86_fp80 @llvm.trunc.f80(x86_fp80 %1)
ret x86_fp80 %2
}
declare x86_fp80 @llvm.trunc.f80(x86_fp80 %p)
;
; rint
;
define x86_fp80 @rint_fp80(x86_fp80 %a0) nounwind {
; X86-LABEL: rint_fp80:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll rintl
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
; X64-LABEL: rint_fp80:
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
; X64-NEXT: callq rintl
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.rint.f80(x86_fp80 %a0)
ret x86_fp80 %1
}
define x86_fp80 @rint_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-LABEL: rint_fp80_ld:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: fldt (%eax)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll rintl
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
; X64-LABEL: rint_fp80_ld:
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
; X64-NEXT: callq rintl
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
%2 = call x86_fp80 @llvm.rint.f80(x86_fp80 %1)
ret x86_fp80 %2
}
declare x86_fp80 @llvm.rint.f80(x86_fp80 %p)
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/inline-asm-default-clobbers.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/inline-asm-default-clobbers.ll (nonexistent)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/inline-asm-default-clobbers.ll (revision 344166)
@@ -0,0 +1,8 @@
+; RUN: llc < %s -mtriple=i686 -stop-after=expand-isel-pseudos | FileCheck %s
+
+; CHECK: INLINEASM &"", 1, 12, implicit-def early-clobber $df, 12, implicit-def early-clobber $fpsw, 12, implicit-def early-clobber $eflags
+define void @foo() {
+entry:
+ call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
+ ret void
+}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/inline-asm-fpstack.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/inline-asm-fpstack.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/inline-asm-fpstack.ll (revision 344166)
@@ -1,518 +1,518 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=generic -mtriple=i386-apple-darwin -verify-machineinstrs -no-integrated-as | FileCheck %s
; There should be no stack manipulations between the inline asm and ret.
define x86_fp80 @test1() {
; CHECK-LABEL: test1:
; CHECK: ## %bb.0:
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fld0
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
%tmp85 = call x86_fp80 asm sideeffect "fld0", "={st(0)}"()
ret x86_fp80 %tmp85
}
define double @test2() {
; CHECK-LABEL: test2:
; CHECK: ## %bb.0:
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fld0
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
%tmp85 = call double asm sideeffect "fld0", "={st(0)}"()
ret double %tmp85
}
; Setting up argument in st(0) should be a single fld.
; Asm consumes stack, nothing should be popped.
define void @test3(x86_fp80 %X) {
; CHECK-LABEL: test3:
; CHECK: ## %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: frob
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
call void asm sideeffect "frob ", "{st(0)},~{st},~{dirflag},~{fpsr},~{flags}"( x86_fp80 %X)
ret void
}
define void @test4(double %X) {
; CHECK-LABEL: test4:
; CHECK: ## %bb.0:
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: frob
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
call void asm sideeffect "frob ", "{st(0)},~{st},~{dirflag},~{fpsr},~{flags}"( double %X)
ret void
}
; Same as test3/4, but using value from fadd.
; The fadd can be done in xmm or x87 regs - we don't test that.
define void @test5(double %X) {
; CHECK-LABEL: test5:
; CHECK: ## %bb.0:
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fadds LCPI4_0
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: frob
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
%Y = fadd double %X, 123.0
call void asm sideeffect "frob ", "{st(0)},~{st},~{dirflag},~{fpsr},~{flags}"( double %Y)
ret void
}
define void @test6(double %A, double %B, double %C, double %D, double %E) nounwind {
; CHECK-LABEL: test6:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: foo %st(0) %st(0)
+; CHECK-NEXT: foo %st %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: bar %st(1) %st(0)
+; CHECK-NEXT: bar %st(1) %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(1)
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: baz %st(1) %st(0)
+; CHECK-NEXT: baz %st(1) %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: baz %st(0)
+; CHECK-NEXT: baz %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
; Uses the same value twice, should have one fstp after the asm.
tail call void asm sideeffect "foo $0 $1", "f,f,~{dirflag},~{fpsr},~{flags}"( double %A, double %A ) nounwind
; Uses two different values, should be in st(0)/st(1) and both be popped.
tail call void asm sideeffect "bar $0 $1", "f,f,~{dirflag},~{fpsr},~{flags}"( double %B, double %C ) nounwind
; Uses two different values, one of which isn't killed in this asm, so it should not be popped after the asm.
tail call void asm sideeffect "baz $0 $1", "f,f,~{dirflag},~{fpsr},~{flags}"( double %D, double %E ) nounwind
; This is the last use of %D, so it should be popped after.
tail call void asm sideeffect "baz $0", "f,~{dirflag},~{fpsr},~{flags}"( double %D ) nounwind
ret void
}
; PR4185
; Passing a non-killed value to asm in {st}.
; Make sure it is duped before.
; asm kills st(0), so we shouldn't pop anything
; A valid alternative would be to remat the constant pool load before each
; inline asm.
define void @testPR4185() {
; CHECK-LABEL: testPR4185:
; CHECK: ## %bb.0: ## %return
; CHECK-NEXT: flds LCPI6_0
; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fistpl %st(0)
+; CHECK-NEXT: fistpl %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fistpl %st(0)
+; CHECK-NEXT: fistpl %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
return:
call void asm sideeffect "fistpl $0", "{st},~{st}"(double 1.000000e+06)
call void asm sideeffect "fistpl $0", "{st},~{st}"(double 1.000000e+06)
ret void
}
; Passing a non-killed value through asm in {st}.
; Make sure it is not duped before.
; Second asm kills st(0), so we shouldn't pop anything
; A valid alternative would be to remat the constant pool load before each inline asm.
define void @testPR4185b() {
; CHECK-LABEL: testPR4185b:
; CHECK: ## %bb.0: ## %return
; CHECK-NEXT: flds LCPI7_0
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fistl %st(0)
+; CHECK-NEXT: fistl %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fistpl %st(0)
+; CHECK-NEXT: fistpl %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
return:
call void asm sideeffect "fistl $0", "{st}"(double 1.000000e+06)
call void asm sideeffect "fistpl $0", "{st},~{st}"(double 1.000000e+06)
ret void
}
; PR4459
; The return value from ceil must be duped before being consumed by asm.
define void @testPR4459(x86_fp80 %a) {
; CHECK-LABEL: testPR4459:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fstpt (%esp)
; CHECK-NEXT: calll _ceil
; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fistpl %st(0)
+; CHECK-NEXT: fistpl %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstpt (%esp)
; CHECK-NEXT: calll _test3
; CHECK-NEXT: addl $28, %esp
; CHECK-NEXT: retl
entry:
%0 = call x86_fp80 @ceil(x86_fp80 %a)
call void asm sideeffect "fistpl $0", "{st},~{st}"( x86_fp80 %0)
call void @test3(x86_fp80 %0 )
ret void
}
declare x86_fp80 @ceil(x86_fp80)
; PR4484
; test1 leaves a value on the stack that is needed after the asm.
; Load %a from stack after ceil
; Set up call to test.
define void @testPR4484(x86_fp80 %a) {
; CHECK-LABEL: testPR4484:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
; CHECK-NEXT: calll _test1
; CHECK-NEXT: fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fistpl %st(0)
+; CHECK-NEXT: fistpl %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstpt (%esp)
; CHECK-NEXT: calll _test3
; CHECK-NEXT: addl $28, %esp
; CHECK-NEXT: retl
entry:
%0 = call x86_fp80 @test1()
call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %a)
call void @test3(x86_fp80 %0)
ret void
}
; PR4485
define void @testPR4485(x86_fp80* %a) {
; CHECK-LABEL: testPR4485:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: fldt (%eax)
; CHECK-NEXT: flds LCPI10_0
-; CHECK-NEXT: fmul %st(0), %st(1)
+; CHECK-NEXT: fmul %st, %st(1)
; CHECK-NEXT: flds LCPI10_1
-; CHECK-NEXT: fmul %st(0), %st(2)
+; CHECK-NEXT: fmul %st, %st(2)
; CHECK-NEXT: fxch %st(2)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fistpl %st(0)
+; CHECK-NEXT: fistpl %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fldt (%eax)
-; CHECK-NEXT: fmulp %st(1)
-; CHECK-NEXT: fmulp %st(1)
+; CHECK-NEXT: fmulp %st, %st(1)
+; CHECK-NEXT: fmulp %st, %st(1)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fistpl %st(0)
+; CHECK-NEXT: fistpl %st
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
entry:
%0 = load x86_fp80, x86_fp80* %a, align 16
%1 = fmul x86_fp80 %0, 0xK4006B400000000000000
%2 = fmul x86_fp80 %1, 0xK4012F424000000000000
tail call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %2)
%3 = load x86_fp80, x86_fp80* %a, align 16
%4 = fmul x86_fp80 %3, 0xK4006B400000000000000
%5 = fmul x86_fp80 %4, 0xK4012F424000000000000
tail call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %5)
ret void
}
; An input argument in a fixed position is implicitly popped by the asm only if
; the input argument is tied to an output register, or it is in the clobber list.
; The clobber list case is tested above.
;
; This doesn't implicitly pop the stack:
;
; void fist1(long double x, int *p) {
; asm volatile ("fistl %1" : : "t"(x), "m"(*p));
; }
define void @fist1(x86_fp80 %x, i32* %p) nounwind ssp {
; CHECK-LABEL: fist1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fistl (%eax)
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
tail call void asm sideeffect "fistl $1", "{st},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, i32* %p) nounwind
ret void
}
; Here, the input operand is tied to an output, which means that it is
; implicitly popped (and then the output is implicitly pushed).
;
; long double fist2(long double x, int *p) {
; long double y;
; asm ("fistl %1" : "=&t"(y) : "0"(x), "m"(*p) : "memory");
; return y;
; }
define x86_fp80 @fist2(x86_fp80 %x, i32* %p) nounwind ssp {
; CHECK-LABEL: fist2:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fistl (%eax)
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
%0 = tail call x86_fp80 asm "fistl $2", "=&{st},0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, i32* %p) nounwind
ret x86_fp80 %0
}
; An 'f' constraint is never implicitly popped:
;
; void fucomp1(long double x, long double y) {
; asm volatile ("fucomp %1" : : "t"(x), "f"(y) : "st");
; }
define void @fucomp1(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
; CHECK-LABEL: fucomp1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fucomp %st(1)
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
tail call void asm sideeffect "fucomp $1", "{st},f,~{st},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, x86_fp80 %y) nounwind
ret void
}
; The 'u' constraint is only popped implicitly when clobbered:
;
; void fucomp2(long double x, long double y) {
; asm volatile ("fucomp %1" : : "t"(x), "u"(y) : "st");
; }
;
; void fucomp3(long double x, long double y) {
; asm volatile ("fucompp %1" : : "t"(x), "u"(y) : "st", "st(1)");
; }
;
define void @fucomp2(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
; CHECK-LABEL: fucomp2:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fucomp %st(1)
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
tail call void asm sideeffect "fucomp $1", "{st},{st(1)},~{st},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, x86_fp80 %y) nounwind
ret void
}
define void @fucomp3(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
; CHECK-LABEL: fucomp3:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fucompp %st(1)
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
tail call void asm sideeffect "fucompp $1", "{st},{st(1)},~{st},~{st(1)},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, x86_fp80 %y) nounwind
ret void
}
; One input, two outputs, one dead output.
%complex = type { float, float }
define float @sincos1(float %x) nounwind ssp {
; CHECK-LABEL: sincos1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: sincos
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(1)
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
%0 = tail call %complex asm "sincos", "={st},={st(1)},0,~{dirflag},~{fpsr},~{flags}"(float %x) nounwind
%asmresult = extractvalue %complex %0, 0
ret float %asmresult
}
; Same thing, swapped output operands.
define float @sincos2(float %x) nounwind ssp {
; CHECK-LABEL: sincos2:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: sincos
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(1)
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
%0 = tail call %complex asm "sincos", "={st(1)},={st},1,~{dirflag},~{fpsr},~{flags}"(float %x) nounwind
%asmresult = extractvalue %complex %0, 1
ret float %asmresult
}
; Clobber st(0) after it was live-out/dead from the previous asm.
; Load x, make a copy for the second asm.
; Discard dead result in st(0), bring x to the top.
; x is now in st(0) for the second asm
; Discard both results.
define float @sincos3(float %x) nounwind ssp {
; CHECK-LABEL: sincos3:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: sincos
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: sincos
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(1)
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
%0 = tail call %complex asm sideeffect "sincos", "={st(1)},={st},1,~{dirflag},~{fpsr},~{flags}"(float %x) nounwind
%1 = tail call %complex asm sideeffect "sincos", "={st(1)},={st},1,~{dirflag},~{fpsr},~{flags}"(float %x) nounwind
%asmresult = extractvalue %complex %0, 0
ret float %asmresult
}
; Pass the same value in two fixed stack slots.
define i32 @PR10602() nounwind ssp {
; CHECK-LABEL: PR10602:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: flds LCPI19_0
; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: fcomi %st(1), %st(0); pushf; pop %eax
+; CHECK-NEXT: fcomi %st(1), %st; pushf; pop %eax
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: fstp %st(0)
; CHECK-NEXT: retl
; CHECK-NEXT: ## -- End function
entry:
%0 = tail call i32 asm "fcomi $2, $1; pushf; pop $0", "=r,{st},{st(1)},~{dirflag},~{fpsr},~{flags}"(double 2.000000e+00, double 2.000000e+00) nounwind
ret i32 %0
}
; <rdar://problem/16952634>
; X87 stackifier asserted when there was an ST register defined by an
; inline-asm instruction and the ST register was live across another
; inline-asm instruction.
;
; INLINEASM $frndint [sideeffect] [attdialect], $0:[regdef], %st0<imp-def,tied5>, $1:[reguse tiedto:$0], %st0<tied3>, $2:[clobber], early-clobber implicit dead %eflags
; INLINEASM $fldcw $0 [sideeffect] [mayload] [attdialect], $0:[mem], undef %eax, 1, %noreg, 0, %noreg, $1:[clobber], early-clobber implicit dead %eflags
; %fp0 = COPY %st0
%struct.fpu_t = type { [8 x x86_fp80], x86_fp80, %struct.anon1, %struct.anon2, i32, i8, [15 x i8] }
%struct.anon1 = type { i32, i32, i32 }
%struct.anon2 = type { i32, i32, i32, i32 }
@fpu = external global %struct.fpu_t, align 16
; Function Attrs: ssp
define void @test_live_st(i32 %a1) {
; CHECK-LABEL: test_live_st:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: fldt (%eax)
; CHECK-NEXT: cmpl $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: jne LBB20_2
; CHECK-NEXT: ## %bb.1: ## %sw.bb4.i
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: frndint
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fldcw (%eax)
; CHECK-NEXT: ## InlineAsm End
; CHECK-NEXT: LBB20_2: ## %_Z5tointRKe.exit
; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp)
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movw $3199, {{[0-9]+}}(%esp) ## imm = 0xC7F
; CHECK-NEXT: fldcw {{[0-9]+}}(%esp)
; CHECK-NEXT: movw %ax, {{[0-9]+}}(%esp)
; CHECK-NEXT: fistpl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldcw {{[0-9]+}}(%esp)
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: fildl {{[0-9]+}}(%esp)
; CHECK-NEXT: movl L_fpu$non_lazy_ptr, %eax
; CHECK-NEXT: fstpt 128(%eax)
; CHECK-NEXT: addl $12, %esp
; CHECK-NEXT: retl
entry:
%0 = load x86_fp80, x86_fp80* undef, align 16
%cond = icmp eq i32 %a1, 1
br i1 %cond, label %sw.bb4.i, label %_Z5tointRKe.exit
sw.bb4.i:
%1 = call x86_fp80 asm sideeffect "frndint", "={st},0,~{dirflag},~{fpsr},~{flags}"(x86_fp80 %0)
call void asm sideeffect "fldcw $0", "*m,~{dirflag},~{fpsr},~{flags}"(i32* undef)
br label %_Z5tointRKe.exit
_Z5tointRKe.exit:
%result.0.i = phi x86_fp80 [ %1, %sw.bb4.i ], [ %0, %entry ]
%conv.i1814 = fptosi x86_fp80 %result.0.i to i32
%conv626 = sitofp i32 %conv.i1814 to x86_fp80
store x86_fp80 %conv626, x86_fp80* getelementptr inbounds (%struct.fpu_t, %struct.fpu_t* @fpu, i32 0, i32 1)
br label %return
return:
ret void
}
; Check that x87 stackifier is correctly rewriting FP registers to ST registers.
define double @test_operand_rewrite() {
; CHECK-LABEL: test_operand_rewrite:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: foo %st(0), %st(1)
+; CHECK-NEXT: foo %st, %st(1)
; CHECK-NEXT: ## InlineAsm End
-; CHECK-NEXT: fsubp %st(1)
+; CHECK-NEXT: fsubp %st, %st(1)
; CHECK-NEXT: retl
entry:
%0 = tail call { double, double } asm sideeffect "foo $0, $1", "={st},={st(1)},~{dirflag},~{fpsr},~{flags}"()
%asmresult = extractvalue { double, double } %0, 0
%asmresult1 = extractvalue { double, double } %0, 1
%sub = fsub double %asmresult, %asmresult1
ret double %sub
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/ipra-reg-usage.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/ipra-reg-usage.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/ipra-reg-usage.ll (revision 344166)
@@ -1,12 +1,12 @@
; RUN: llc -enable-ipra -print-regusage -o /dev/null 2>&1 < %s | FileCheck %s
target triple = "x86_64-unknown-unknown"
declare void @bar1()
define preserve_allcc void @foo()#0 {
-; CHECK: foo Clobbered Registers: $cs $df $ds $eflags $eip $eiz $es $fpsw $fs $gs $hip $ip $rip $riz $ss $ssp $bnd0 $bnd1 $bnd2 $bnd3 $cr0 $cr1 $cr2 $cr3 $cr4 $cr5 $cr6 $cr7 $cr8 $cr9 $cr10 $cr11 $cr12 $cr13 $cr14 $cr15 $dr0 $dr1 $dr2 $dr3 $dr4 $dr5 $dr6 $dr7 $dr8 $dr9 $dr10 $dr11 $dr12 $dr13 $dr14 $dr15 $fp0 $fp1 $fp2 $fp3 $fp4 $fp5 $fp6 $fp7 $k0 $k1 $k2 $k3 $k4 $k5 $k6 $k7 $mm0 $mm1 $mm2 $mm3 $mm4 $mm5 $mm6 $mm7 $r11 $st0 $st1 $st2 $st3 $st4 $st5 $st6 $st7 $xmm16 $xmm17 $xmm18 $xmm19 $xmm20 $xmm21 $xmm22 $xmm23 $xmm24 $xmm25 $xmm26 $xmm27 $xmm28 $xmm29 $xmm30 $xmm31 $ymm0 $ymm1 $ymm2 $ymm3 $ymm4 $ymm5 $ymm6 $ymm7 $ymm8 $ymm9 $ymm10 $ymm11 $ymm12 $ymm13 $ymm14 $ymm15 $ymm16 $ymm17 $ymm18 $ymm19 $ymm20 $ymm21 $ymm22 $ymm23 $ymm24 $ymm25 $ymm26 $ymm27 $ymm28 $ymm29 $ymm30 $ymm31 $zmm0 $zmm1 $zmm2 $zmm3 $zmm4 $zmm5 $zmm6 $zmm7 $zmm8 $zmm9 $zmm10 $zmm11 $zmm12 $zmm13 $zmm14 $zmm15 $zmm16 $zmm17 $zmm18 $zmm19 $zmm20 $zmm21 $zmm22 $zmm23 $zmm24 $zmm25 $zmm26 $zmm27 $zmm28 $zmm29 $zmm30 $zmm31 $r11b $r11bh $r11d $r11w $r11wh
+; CHECK: foo Clobbered Registers: $cs $df $ds $eflags $eip $eiz $es $fpcw $fpsw $fs $gs $hip $ip $rip $riz $ss $ssp $bnd0 $bnd1 $bnd2 $bnd3 $cr0 $cr1 $cr2 $cr3 $cr4 $cr5 $cr6 $cr7 $cr8 $cr9 $cr10 $cr11 $cr12 $cr13 $cr14 $cr15 $dr0 $dr1 $dr2 $dr3 $dr4 $dr5 $dr6 $dr7 $dr8 $dr9 $dr10 $dr11 $dr12 $dr13 $dr14 $dr15 $fp0 $fp1 $fp2 $fp3 $fp4 $fp5 $fp6 $fp7 $k0 $k1 $k2 $k3 $k4 $k5 $k6 $k7 $mm0 $mm1 $mm2 $mm3 $mm4 $mm5 $mm6 $mm7 $r11 $st0 $st1 $st2 $st3 $st4 $st5 $st6 $st7 $xmm16 $xmm17 $xmm18 $xmm19 $xmm20 $xmm21 $xmm22 $xmm23 $xmm24 $xmm25 $xmm26 $xmm27 $xmm28 $xmm29 $xmm30 $xmm31 $ymm0 $ymm1 $ymm2 $ymm3 $ymm4 $ymm5 $ymm6 $ymm7 $ymm8 $ymm9 $ymm10 $ymm11 $ymm12 $ymm13 $ymm14 $ymm15 $ymm16 $ymm17 $ymm18 $ymm19 $ymm20 $ymm21 $ymm22 $ymm23 $ymm24 $ymm25 $ymm26 $ymm27 $ymm28 $ymm29 $ymm30 $ymm31 $zmm0 $zmm1 $zmm2 $zmm3 $zmm4 $zmm5 $zmm6 $zmm7 $zmm8 $zmm9 $zmm10 $zmm11 $zmm12 $zmm13 $zmm14 $zmm15 $zmm16 $zmm17 $zmm18 $zmm19 $zmm20 $zmm21 $zmm22 $zmm23 $zmm24 $zmm25 $zmm26 $zmm27 $zmm28 $zmm29 $zmm30 $zmm31 $r11b $r11bh $r11d $r11w $r11wh
call void @bar1()
call void @bar2()
ret void
}
declare void @bar2()
attributes #0 = {nounwind}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/pr13577.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/pr13577.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/pr13577.ll (revision 344166)
@@ -1,39 +1,39 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-darwin | FileCheck %s
; CHECK-LABEL: LCPI0_0:
; CHECK-NEXT: .long 4286578688
; CHECK-LABEL: LCPI0_1:
; CHECK-NEXT: .long 2139095040
define x86_fp80 @foo(x86_fp80 %a) {
; CHECK-LABEL: foo:
; CHECK: ## %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt -{{[0-9]+}}(%rsp)
; CHECK-NEXT: testb $-128, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: flds {{.*}}(%rip)
; CHECK-NEXT: flds {{.*}}(%rip)
-; CHECK-NEXT: fcmovne %st(1), %st(0)
+; CHECK-NEXT: fcmovne %st(1), %st
; CHECK-NEXT: fstp %st(1)
; CHECK-NEXT: retq
%1 = tail call x86_fp80 @copysignl(x86_fp80 0xK7FFF8000000000000000, x86_fp80 %a) nounwind readnone
ret x86_fp80 %1
}
declare x86_fp80 @copysignl(x86_fp80, x86_fp80) nounwind readnone
; This would crash:
; https://llvm.org/bugs/show_bug.cgi?id=26070
define float @pr26070() {
; CHECK-LABEL: pr26070:
; CHECK: ## %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq
%c = call float @copysignf(float 1.0, float undef) readnone
ret float %c
}
declare float @copysignf(float, float)
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/pr33349.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/pr33349.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/pr33349.ll (revision 344166)
@@ -1,84 +1,84 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+avx512f | FileCheck %s --check-prefix=KNL
; RUN: llc < %s -mattr=+avx512f,+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefix=SKX
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
define void @test(<4 x i1> %m, <4 x x86_fp80> %v, <4 x x86_fp80>*%p) local_unnamed_addr {
; KNL-LABEL: test:
; KNL: # %bb.0: # %bb
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftrw $1, %k0, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: kshiftrw $2, %k0, %k1
; KNL-NEXT: kshiftrw $1, %k1, %k2
; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: testb $1, %al
; KNL-NEXT: fld1
; KNL-NEXT: fldz
; KNL-NEXT: fld %st(0)
-; KNL-NEXT: fcmovne %st(2), %st(0)
+; KNL-NEXT: fcmovne %st(2), %st
; KNL-NEXT: testb $1, %cl
; KNL-NEXT: fld %st(1)
-; KNL-NEXT: fcmovne %st(3), %st(0)
+; KNL-NEXT: fcmovne %st(3), %st
; KNL-NEXT: kmovw %k2, %eax
; KNL-NEXT: testb $1, %al
; KNL-NEXT: fld %st(2)
-; KNL-NEXT: fcmovne %st(4), %st(0)
+; KNL-NEXT: fcmovne %st(4), %st
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $1, %al
; KNL-NEXT: fxch %st(3)
-; KNL-NEXT: fcmovne %st(4), %st(0)
+; KNL-NEXT: fcmovne %st(4), %st
; KNL-NEXT: fstp %st(4)
; KNL-NEXT: fxch %st(3)
; KNL-NEXT: fstpt (%rdi)
; KNL-NEXT: fxch %st(1)
; KNL-NEXT: fstpt 30(%rdi)
; KNL-NEXT: fxch %st(1)
; KNL-NEXT: fstpt 20(%rdi)
; KNL-NEXT: fstpt 10(%rdi)
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: test:
; SKX: # %bb.0: # %bb
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vpmovd2m %xmm0, %k0
; SKX-NEXT: kshiftrb $1, %k0, %k1
; SKX-NEXT: kmovd %k1, %eax
; SKX-NEXT: kshiftrb $2, %k0, %k1
; SKX-NEXT: kshiftrb $1, %k1, %k2
; SKX-NEXT: kmovd %k1, %ecx
; SKX-NEXT: testb $1, %al
; SKX-NEXT: fld1
; SKX-NEXT: fldz
; SKX-NEXT: fld %st(0)
-; SKX-NEXT: fcmovne %st(2), %st(0)
+; SKX-NEXT: fcmovne %st(2), %st
; SKX-NEXT: testb $1, %cl
; SKX-NEXT: fld %st(1)
-; SKX-NEXT: fcmovne %st(3), %st(0)
+; SKX-NEXT: fcmovne %st(3), %st
; SKX-NEXT: kmovd %k2, %eax
; SKX-NEXT: testb $1, %al
; SKX-NEXT: fld %st(2)
-; SKX-NEXT: fcmovne %st(4), %st(0)
+; SKX-NEXT: fcmovne %st(4), %st
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: testb $1, %al
; SKX-NEXT: fxch %st(3)
-; SKX-NEXT: fcmovne %st(4), %st(0)
+; SKX-NEXT: fcmovne %st(4), %st
; SKX-NEXT: fstp %st(4)
; SKX-NEXT: fxch %st(3)
; SKX-NEXT: fstpt (%rdi)
; SKX-NEXT: fxch %st(1)
; SKX-NEXT: fstpt 30(%rdi)
; SKX-NEXT: fxch %st(1)
; SKX-NEXT: fstpt 20(%rdi)
; SKX-NEXT: fstpt 10(%rdi)
; SKX-NEXT: retq
bb:
%tmp = select <4 x i1> %m, <4 x x86_fp80> <x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000>, <4 x x86_fp80> zeroinitializer
store <4 x x86_fp80> %tmp, <4 x x86_fp80>* %p, align 16
ret void
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/pr34080.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/pr34080.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/pr34080.ll (revision 344166)
@@ -1,167 +1,167 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 -mcpu=x86-64 | FileCheck %s --check-prefix=SSE2-SCHEDULE
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse3 | FileCheck %s --check-prefix=SSE3
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse3 -mcpu=nocona | FileCheck %s --check-prefix=SSE3
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx -mcpu=sandybridge | FileCheck %s --check-prefix=AVX
define void @_Z1fe(x86_fp80 %z) local_unnamed_addr #0 {
; SSE2-LABEL: _Z1fe:
; SSE2: ## %bb.0: ## %entry
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: .cfi_def_cfa_offset 16
; SSE2-NEXT: .cfi_offset %rbp, -16
; SSE2-NEXT: movq %rsp, %rbp
; SSE2-NEXT: .cfi_def_cfa_register %rbp
; SSE2-NEXT: fldt 16(%rbp)
; SSE2-NEXT: fnstcw -4(%rbp)
; SSE2-NEXT: movzwl -4(%rbp), %eax
; SSE2-NEXT: movw $3199, -4(%rbp) ## imm = 0xC7F
; SSE2-NEXT: fldcw -4(%rbp)
; SSE2-NEXT: movw %ax, -4(%rbp)
; SSE2-NEXT: fistl -8(%rbp)
; SSE2-NEXT: fldcw -4(%rbp)
; SSE2-NEXT: cvtsi2sdl -8(%rbp), %xmm0
; SSE2-NEXT: movsd %xmm0, -64(%rbp)
; SSE2-NEXT: movsd %xmm0, -32(%rbp)
; SSE2-NEXT: fsubl -32(%rbp)
; SSE2-NEXT: flds {{.*}}(%rip)
-; SSE2-NEXT: fmul %st(0), %st(1)
+; SSE2-NEXT: fmul %st, %st(1)
; SSE2-NEXT: fnstcw -2(%rbp)
; SSE2-NEXT: movzwl -2(%rbp), %eax
; SSE2-NEXT: movw $3199, -2(%rbp) ## imm = 0xC7F
; SSE2-NEXT: fldcw -2(%rbp)
; SSE2-NEXT: movw %ax, -2(%rbp)
; SSE2-NEXT: fxch %st(1)
; SSE2-NEXT: fistl -12(%rbp)
; SSE2-NEXT: fldcw -2(%rbp)
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2sdl -12(%rbp), %xmm0
; SSE2-NEXT: movsd %xmm0, -56(%rbp)
; SSE2-NEXT: movsd %xmm0, -24(%rbp)
; SSE2-NEXT: fsubl -24(%rbp)
-; SSE2-NEXT: fmulp %st(1)
+; SSE2-NEXT: fmulp %st, %st(1)
; SSE2-NEXT: fstpl -48(%rbp)
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; SSE2-SCHEDULE-LABEL: _Z1fe:
; SSE2-SCHEDULE: ## %bb.0: ## %entry
; SSE2-SCHEDULE-NEXT: pushq %rbp
; SSE2-SCHEDULE-NEXT: .cfi_def_cfa_offset 16
; SSE2-SCHEDULE-NEXT: .cfi_offset %rbp, -16
; SSE2-SCHEDULE-NEXT: movq %rsp, %rbp
; SSE2-SCHEDULE-NEXT: .cfi_def_cfa_register %rbp
; SSE2-SCHEDULE-NEXT: fnstcw -4(%rbp)
; SSE2-SCHEDULE-NEXT: movzwl -4(%rbp), %eax
; SSE2-SCHEDULE-NEXT: movw $3199, -4(%rbp) ## imm = 0xC7F
; SSE2-SCHEDULE-NEXT: fldcw -4(%rbp)
; SSE2-SCHEDULE-NEXT: fldt 16(%rbp)
; SSE2-SCHEDULE-NEXT: movw %ax, -4(%rbp)
; SSE2-SCHEDULE-NEXT: fistl -8(%rbp)
; SSE2-SCHEDULE-NEXT: fldcw -4(%rbp)
; SSE2-SCHEDULE-NEXT: cvtsi2sdl -8(%rbp), %xmm0
; SSE2-SCHEDULE-NEXT: movsd %xmm0, -64(%rbp)
; SSE2-SCHEDULE-NEXT: movsd %xmm0, -32(%rbp)
; SSE2-SCHEDULE-NEXT: fsubl -32(%rbp)
-; SSE2-SCHEDULE-NEXT: fnstcw -2(%rbp)
; SSE2-SCHEDULE-NEXT: flds {{.*}}(%rip)
+; SSE2-SCHEDULE-NEXT: fnstcw -2(%rbp)
+; SSE2-SCHEDULE-NEXT: fmul %st, %st(1)
; SSE2-SCHEDULE-NEXT: movzwl -2(%rbp), %eax
; SSE2-SCHEDULE-NEXT: movw $3199, -2(%rbp) ## imm = 0xC7F
; SSE2-SCHEDULE-NEXT: fldcw -2(%rbp)
-; SSE2-SCHEDULE-NEXT: fmul %st(0), %st(1)
; SSE2-SCHEDULE-NEXT: movw %ax, -2(%rbp)
; SSE2-SCHEDULE-NEXT: fxch %st(1)
; SSE2-SCHEDULE-NEXT: fistl -12(%rbp)
; SSE2-SCHEDULE-NEXT: fldcw -2(%rbp)
; SSE2-SCHEDULE-NEXT: xorps %xmm0, %xmm0
; SSE2-SCHEDULE-NEXT: cvtsi2sdl -12(%rbp), %xmm0
; SSE2-SCHEDULE-NEXT: movsd %xmm0, -56(%rbp)
; SSE2-SCHEDULE-NEXT: movsd %xmm0, -24(%rbp)
; SSE2-SCHEDULE-NEXT: fsubl -24(%rbp)
-; SSE2-SCHEDULE-NEXT: fmulp %st(1)
+; SSE2-SCHEDULE-NEXT: fmulp %st, %st(1)
; SSE2-SCHEDULE-NEXT: fstpl -48(%rbp)
; SSE2-SCHEDULE-NEXT: popq %rbp
; SSE2-SCHEDULE-NEXT: retq
;
; SSE3-LABEL: _Z1fe:
; SSE3: ## %bb.0: ## %entry
; SSE3-NEXT: pushq %rbp
; SSE3-NEXT: .cfi_def_cfa_offset 16
; SSE3-NEXT: .cfi_offset %rbp, -16
; SSE3-NEXT: movq %rsp, %rbp
; SSE3-NEXT: .cfi_def_cfa_register %rbp
; SSE3-NEXT: fldt 16(%rbp)
; SSE3-NEXT: fld %st(0)
; SSE3-NEXT: fisttpl -4(%rbp)
; SSE3-NEXT: cvtsi2sdl -4(%rbp), %xmm0
; SSE3-NEXT: movsd %xmm0, -48(%rbp)
; SSE3-NEXT: movsd %xmm0, -24(%rbp)
; SSE3-NEXT: fsubl -24(%rbp)
; SSE3-NEXT: flds {{.*}}(%rip)
-; SSE3-NEXT: fmul %st(0), %st(1)
+; SSE3-NEXT: fmul %st, %st(1)
; SSE3-NEXT: fld %st(1)
; SSE3-NEXT: fisttpl -8(%rbp)
; SSE3-NEXT: xorps %xmm0, %xmm0
; SSE3-NEXT: cvtsi2sdl -8(%rbp), %xmm0
; SSE3-NEXT: movsd %xmm0, -40(%rbp)
; SSE3-NEXT: movsd %xmm0, -16(%rbp)
; SSE3-NEXT: fxch %st(1)
; SSE3-NEXT: fsubl -16(%rbp)
-; SSE3-NEXT: fmulp %st(1)
+; SSE3-NEXT: fmulp %st, %st(1)
; SSE3-NEXT: fstpl -32(%rbp)
; SSE3-NEXT: popq %rbp
; SSE3-NEXT: retq
;
; AVX-LABEL: _Z1fe:
; AVX: ## %bb.0: ## %entry
; AVX-NEXT: pushq %rbp
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: .cfi_offset %rbp, -16
; AVX-NEXT: movq %rsp, %rbp
; AVX-NEXT: .cfi_def_cfa_register %rbp
; AVX-NEXT: fldt 16(%rbp)
; AVX-NEXT: fld %st(0)
; AVX-NEXT: fisttpl -4(%rbp)
; AVX-NEXT: vcvtsi2sdl -4(%rbp), %xmm0, %xmm0
; AVX-NEXT: vmovsd %xmm0, -48(%rbp)
; AVX-NEXT: vmovsd %xmm0, -24(%rbp)
; AVX-NEXT: fsubl -24(%rbp)
; AVX-NEXT: flds {{.*}}(%rip)
-; AVX-NEXT: fmul %st(0), %st(1)
+; AVX-NEXT: fmul %st, %st(1)
; AVX-NEXT: fld %st(1)
; AVX-NEXT: fisttpl -8(%rbp)
; AVX-NEXT: vcvtsi2sdl -8(%rbp), %xmm1, %xmm0
; AVX-NEXT: vmovsd %xmm0, -40(%rbp)
; AVX-NEXT: vmovsd %xmm0, -16(%rbp)
; AVX-NEXT: fxch %st(1)
; AVX-NEXT: fsubl -16(%rbp)
-; AVX-NEXT: fmulp %st(1)
+; AVX-NEXT: fmulp %st, %st(1)
; AVX-NEXT: fstpl -32(%rbp)
; AVX-NEXT: popq %rbp
; AVX-NEXT: retq
entry:
%tx = alloca [3 x double], align 16
%0 = bitcast [3 x double]* %tx to i8*
%conv = fptosi x86_fp80 %z to i32
%conv1 = sitofp i32 %conv to double
%arrayidx = getelementptr inbounds [3 x double], [3 x double]* %tx, i64 0, i64 0
store double %conv1, double* %arrayidx, align 16
%conv4 = fpext double %conv1 to x86_fp80
%sub = fsub x86_fp80 %z, %conv4
%mul = fmul x86_fp80 %sub, 0xK40178000000000000000
%conv.1 = fptosi x86_fp80 %mul to i32
%conv1.1 = sitofp i32 %conv.1 to double
%arrayidx.1 = getelementptr inbounds [3 x double], [3 x double]* %tx, i64 0, i64 1
store double %conv1.1, double* %arrayidx.1, align 8
%conv4.1 = fpext double %conv1.1 to x86_fp80
%sub.1 = fsub x86_fp80 %mul, %conv4.1
%mul.1 = fmul x86_fp80 %sub.1, 0xK40178000000000000000
%conv5 = fptrunc x86_fp80 %mul.1 to double
%arrayidx6 = getelementptr inbounds [3 x double], [3 x double]* %tx, i64 0, i64 2
store double %conv5, double* %arrayidx6, align 16
ret void
}
attributes #0 = { noinline uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/pr34177.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/pr34177.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/pr34177.ll (revision 344166)
@@ -1,59 +1,59 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+avx512f | FileCheck %s
; RUN: llc < %s -mattr=+avx512f,+avx512vl,+avx512bw,+avx512dq | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
define void @test(<4x i64> %a, <4 x x86_fp80> %b, <8 x x86_fp80>* %c) local_unnamed_addr {
; CHECK-LABEL: test:
; CHECK: # %bb.0:
; CHECK-NEXT: vpextrq $1, %xmm0, %r8
; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3]
; CHECK-NEXT: vmovq %xmm1, %r9
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vmovq %xmm2, %rdx
; CHECK-NEXT: vpextrq $1, %xmm1, %rsi
; CHECK-NEXT: vpextrq $1, %xmm2, %rax
; CHECK-NEXT: vmovq %xmm0, %rcx
; CHECK-NEXT: negq %rcx
; CHECK-NEXT: fld1
; CHECK-NEXT: fldz
; CHECK-NEXT: fld %st(0)
-; CHECK-NEXT: fcmove %st(2), %st(0)
+; CHECK-NEXT: fcmove %st(2), %st
; CHECK-NEXT: cmpq %rax, %rsi
; CHECK-NEXT: fld %st(1)
-; CHECK-NEXT: fcmove %st(3), %st(0)
+; CHECK-NEXT: fcmove %st(3), %st
; CHECK-NEXT: cmpq %rdx, %r9
; CHECK-NEXT: fld %st(2)
-; CHECK-NEXT: fcmove %st(4), %st(0)
+; CHECK-NEXT: fcmove %st(4), %st
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmpq %r8, %rax
; CHECK-NEXT: fxch %st(3)
-; CHECK-NEXT: fcmove %st(4), %st(0)
+; CHECK-NEXT: fcmove %st(4), %st
; CHECK-NEXT: fstp %st(4)
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt 70(%rdi)
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt 50(%rdi)
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt 30(%rdi)
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt 10(%rdi)
-; CHECK-NEXT: fadd %st(0), %st(0)
+; CHECK-NEXT: fadd %st, %st(0)
; CHECK-NEXT: fstpt 60(%rdi)
; CHECK-NEXT: fxch %st(1)
-; CHECK-NEXT: fadd %st(0), %st(0)
+; CHECK-NEXT: fadd %st, %st(0)
; CHECK-NEXT: fstpt 40(%rdi)
; CHECK-NEXT: fxch %st(1)
-; CHECK-NEXT: fadd %st(0), %st(0)
+; CHECK-NEXT: fadd %st, %st(0)
; CHECK-NEXT: fstpt 20(%rdi)
-; CHECK-NEXT: fadd %st(0), %st(0)
+; CHECK-NEXT: fadd %st, %st(0)
; CHECK-NEXT: fstpt (%rdi)
%1 = icmp eq <4 x i64> <i64 0, i64 1, i64 2, i64 3>, %a
%2 = select <4 x i1> %1, <4 x x86_fp80> <x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000>, <4 x x86_fp80> zeroinitializer
%3 = fadd <4 x x86_fp80> %2, %2
%4 = shufflevector <4 x x86_fp80> %3, <4 x x86_fp80> %b, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x x86_fp80> %4, <8 x x86_fp80>* %c, align 16
unreachable
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/pr40529.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/pr40529.ll (nonexistent)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/pr40529.ll (revision 344166)
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux -mcpu=x86-64 | FileCheck %s
+
+define x86_fp80 @rem_pio2l_min(x86_fp80 %z) {
+; CHECK-LABEL: rem_pio2l_min:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fnstcw -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
+; CHECK-NEXT: fldcw -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fistl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fldcw -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fisubl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: flds {{.*}}(%rip)
+; CHECK-NEXT: fnstcw -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fmul %st, %st(1)
+; CHECK-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
+; CHECK-NEXT: fldcw -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fxch %st(1)
+; CHECK-NEXT: fistl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fldcw -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fisubl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fmulp %st, %st(1)
+; CHECK-NEXT: retq
+entry:
+ %conv = fptosi x86_fp80 %z to i32
+ %conv1 = sitofp i32 %conv to x86_fp80
+ %sub = fsub x86_fp80 %z, %conv1
+ %mul = fmul x86_fp80 %sub, 0xK40178000000000000000
+ %conv2 = fptosi x86_fp80 %mul to i32
+ %conv3 = sitofp i32 %conv2 to x86_fp80
+ %sub4 = fsub x86_fp80 %mul, %conv3
+ %mul5 = fmul x86_fp80 %sub4, 0xK40178000000000000000
+ ret x86_fp80 %mul5
+}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/scalar-fp-to-i64.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/scalar-fp-to-i64.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/scalar-fp-to-i64.ll (revision 344166)
@@ -1,1797 +1,1797 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X32,AVX512_32,AVX512_32_WIN,AVX512DQVL_32_WIN
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X32,AVX512_32,AVX512_32_LIN,AVX512DQVL_32_LIN
; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX512_64,AVX512_64_WIN,AVX512DQVL_64_WIN
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X32,AVX512_64,AVX512_64_LIN,AVX512DQVL_64_LIN
; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,X32,AVX512_32,AVX512_32_WIN,AVX512DQ_32_WIN
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,X32,AVX512_32,AVX512_32_LIN,AVX512DQ_32_LIN
; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,X64,AVX512_64,AVX512_64_WIN,AVX512DQ_64_WIN
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,X32,AVX512_64,AVX512_64_LIN,AVX512DQ_64_LIN
; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X32,AVX512_32,AVX512_32_WIN,AVX512F_32_WIN
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X32,AVX512_32,AVX512_32_LIN,AVX512F_32_LIN
; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX512_64,AVX512_64_WIN,AVX512F_64_WIN
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X32,AVX512_64,AVX512_64_LIN,AVX512F_64_LIN
; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+sse3 | FileCheck %s --check-prefixes=CHECK,X32,SSE3_32,SSE3_32_WIN
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=CHECK,X32,SSE3_32,SSE3_32_LIN
; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+sse3 | FileCheck %s --check-prefixes=CHECK,X64,SSE3_64,SSE3_64_WIN
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=CHECK,X64,SSE3_64,SSE3_64_LIN
; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X32,SSE2_32,SSE2_32_WIN
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X32,SSE2_32,SSE2_32_LIN
; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,SSE2_64,SSE2_64_WIN
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,SSE2_64,SSE2_64_LIN
; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=-sse | FileCheck %s --check-prefixes=CHECK,X32,X87,X87_WIN
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=-sse | FileCheck %s --check-prefixes=CHECK,X32,X87,X87_LIN
; Check that scalar FP conversions to signed and unsigned int64 are using
; reasonable sequences, across platforms and target switches.
;
; The signed case is straightforward, and the tests here basically
; ensure successful compilation (f80 with avx512 was broken at one point).
;
; For the unsigned case there are many possible sequences, so to avoid
; a fragile test we just check for the presence of a few key instructions.
; AVX512 on Intel64 can use vcvtts[ds]2usi directly for float and double.
; Otherwise the sequence will involve an FP subtract (fsub, subss or subsd),
; and a truncating conversion (cvtts[ds]2si, fisttp, or fnstcw+fist). When
; both a subtract and fnstcw are needed, they can occur in either order.
;
; The interesting subtargets are AVX512F (vcvtts[ds]2usi), SSE3 (fisttp),
; SSE2 (cvtts[ds]2si) and vanilla X87 (fnstcw+fist, 32-bit only).
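;
; A hand-written IR sketch of the expansion the unsigned cases are expected to
; follow (an illustrative assumption, not compiler output and not something the
; existing CHECK lines test): once the input reaches 2^63, subtract 2^63, perform
; the signed conversion, and restore the sign bit afterwards; smaller inputs take
; a plain signed conversion. This is the pattern behind the subtract + truncating
; conversion + xor/cmov sequences checked below.
define i64 @f_to_u64_model(float %a) {
entry:
  %big = fcmp oge float %a, 0x43E0000000000000   ; 2^63
  %adj = fsub float %a, 0x43E0000000000000
  %hi0 = fptosi float %adj to i64
  %hi = xor i64 %hi0, -9223372036854775808       ; set the sign bit back
  %lo = fptosi float %a to i64
  %r = select i1 %big, i64 %hi, i64 %lo
  ret i64 %r
}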
define i64 @f_to_u64(float %a) nounwind {
; AVX512DQVL_32_WIN-LABEL: f_to_u64:
; AVX512DQVL_32_WIN: # %bb.0:
; AVX512DQVL_32_WIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512DQVL_32_WIN-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512DQVL_32_WIN-NEXT: vmovd %xmm0, %eax
; AVX512DQVL_32_WIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQVL_32_WIN-NEXT: vzeroupper
; AVX512DQVL_32_WIN-NEXT: retl
;
; AVX512DQVL_32_LIN-LABEL: f_to_u64:
; AVX512DQVL_32_LIN: # %bb.0:
; AVX512DQVL_32_LIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512DQVL_32_LIN-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512DQVL_32_LIN-NEXT: vmovd %xmm0, %eax
; AVX512DQVL_32_LIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQVL_32_LIN-NEXT: vzeroupper
; AVX512DQVL_32_LIN-NEXT: retl
;
; AVX512_64-LABEL: f_to_u64:
; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvttss2usi %xmm0, %rax
; AVX512_64-NEXT: retq
;
; AVX512DQ_32_WIN-LABEL: f_to_u64:
; AVX512DQ_32_WIN: # %bb.0:
; AVX512DQ_32_WIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512DQ_32_WIN-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ_32_WIN-NEXT: vmovd %xmm0, %eax
; AVX512DQ_32_WIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQ_32_WIN-NEXT: vzeroupper
; AVX512DQ_32_WIN-NEXT: retl
;
; AVX512DQ_32_LIN-LABEL: f_to_u64:
; AVX512DQ_32_LIN: # %bb.0:
; AVX512DQ_32_LIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512DQ_32_LIN-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ_32_LIN-NEXT: vmovd %xmm0, %eax
; AVX512DQ_32_LIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQ_32_LIN-NEXT: vzeroupper
; AVX512DQ_32_LIN-NEXT: retl
;
; AVX512F_32_WIN-LABEL: f_to_u64:
; AVX512F_32_WIN: # %bb.0:
; AVX512F_32_WIN-NEXT: pushl %ebp
; AVX512F_32_WIN-NEXT: movl %esp, %ebp
; AVX512F_32_WIN-NEXT: andl $-8, %esp
; AVX512F_32_WIN-NEXT: subl $16, %esp
; AVX512F_32_WIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F_32_WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX512F_32_WIN-NEXT: vcmpltss %xmm1, %xmm0, %k1
; AVX512F_32_WIN-NEXT: vsubss %xmm1, %xmm0, %xmm2
; AVX512F_32_WIN-NEXT: vmovss %xmm0, %xmm0, %xmm2 {%k1}
; AVX512F_32_WIN-NEXT: vmovss %xmm2, {{[0-9]+}}(%esp)
; AVX512F_32_WIN-NEXT: flds {{[0-9]+}}(%esp)
; AVX512F_32_WIN-NEXT: fisttpll (%esp)
; AVX512F_32_WIN-NEXT: xorl %edx, %edx
; AVX512F_32_WIN-NEXT: vucomiss %xmm0, %xmm1
; AVX512F_32_WIN-NEXT: setbe %dl
; AVX512F_32_WIN-NEXT: shll $31, %edx
; AVX512F_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; AVX512F_32_WIN-NEXT: movl (%esp), %eax
; AVX512F_32_WIN-NEXT: movl %ebp, %esp
; AVX512F_32_WIN-NEXT: popl %ebp
; AVX512F_32_WIN-NEXT: retl
;
; AVX512F_32_LIN-LABEL: f_to_u64:
; AVX512F_32_LIN: # %bb.0:
; AVX512F_32_LIN-NEXT: subl $20, %esp
; AVX512F_32_LIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F_32_LIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX512F_32_LIN-NEXT: vcmpltss %xmm1, %xmm0, %k1
; AVX512F_32_LIN-NEXT: vsubss %xmm1, %xmm0, %xmm2
; AVX512F_32_LIN-NEXT: vmovss %xmm0, %xmm0, %xmm2 {%k1}
; AVX512F_32_LIN-NEXT: vmovss %xmm2, {{[0-9]+}}(%esp)
; AVX512F_32_LIN-NEXT: flds {{[0-9]+}}(%esp)
; AVX512F_32_LIN-NEXT: fisttpll (%esp)
; AVX512F_32_LIN-NEXT: xorl %edx, %edx
; AVX512F_32_LIN-NEXT: vucomiss %xmm0, %xmm1
; AVX512F_32_LIN-NEXT: setbe %dl
; AVX512F_32_LIN-NEXT: shll $31, %edx
; AVX512F_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; AVX512F_32_LIN-NEXT: movl (%esp), %eax
; AVX512F_32_LIN-NEXT: addl $20, %esp
; AVX512F_32_LIN-NEXT: retl
;
; SSE3_32_WIN-LABEL: f_to_u64:
; SSE3_32_WIN: # %bb.0:
; SSE3_32_WIN-NEXT: pushl %ebp
; SSE3_32_WIN-NEXT: movl %esp, %ebp
; SSE3_32_WIN-NEXT: andl $-8, %esp
; SSE3_32_WIN-NEXT: subl $16, %esp
; SSE3_32_WIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3_32_WIN-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE3_32_WIN-NEXT: movaps %xmm0, %xmm2
; SSE3_32_WIN-NEXT: cmpltss %xmm1, %xmm2
; SSE3_32_WIN-NEXT: movaps %xmm2, %xmm3
; SSE3_32_WIN-NEXT: andps %xmm0, %xmm2
; SSE3_32_WIN-NEXT: xorl %edx, %edx
; SSE3_32_WIN-NEXT: ucomiss %xmm0, %xmm1
; SSE3_32_WIN-NEXT: subss %xmm1, %xmm0
; SSE3_32_WIN-NEXT: andnps %xmm0, %xmm3
; SSE3_32_WIN-NEXT: orps %xmm3, %xmm2
; SSE3_32_WIN-NEXT: movss %xmm2, {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: flds {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: fisttpll (%esp)
; SSE3_32_WIN-NEXT: setbe %dl
; SSE3_32_WIN-NEXT: shll $31, %edx
; SSE3_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE3_32_WIN-NEXT: movl (%esp), %eax
; SSE3_32_WIN-NEXT: movl %ebp, %esp
; SSE3_32_WIN-NEXT: popl %ebp
; SSE3_32_WIN-NEXT: retl
;
; SSE3_32_LIN-LABEL: f_to_u64:
; SSE3_32_LIN: # %bb.0:
; SSE3_32_LIN-NEXT: subl $20, %esp
; SSE3_32_LIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3_32_LIN-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE3_32_LIN-NEXT: movaps %xmm0, %xmm2
; SSE3_32_LIN-NEXT: cmpltss %xmm1, %xmm2
; SSE3_32_LIN-NEXT: movaps %xmm2, %xmm3
; SSE3_32_LIN-NEXT: andps %xmm0, %xmm2
; SSE3_32_LIN-NEXT: xorl %edx, %edx
; SSE3_32_LIN-NEXT: ucomiss %xmm0, %xmm1
; SSE3_32_LIN-NEXT: subss %xmm1, %xmm0
; SSE3_32_LIN-NEXT: andnps %xmm0, %xmm3
; SSE3_32_LIN-NEXT: orps %xmm3, %xmm2
; SSE3_32_LIN-NEXT: movss %xmm2, {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: flds {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: fisttpll (%esp)
; SSE3_32_LIN-NEXT: setbe %dl
; SSE3_32_LIN-NEXT: shll $31, %edx
; SSE3_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE3_32_LIN-NEXT: movl (%esp), %eax
; SSE3_32_LIN-NEXT: addl $20, %esp
; SSE3_32_LIN-NEXT: retl
;
; SSE3_64-LABEL: f_to_u64:
; SSE3_64: # %bb.0:
; SSE3_64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE3_64-NEXT: movaps %xmm0, %xmm2
; SSE3_64-NEXT: subss %xmm1, %xmm2
; SSE3_64-NEXT: cvttss2si %xmm2, %rax
; SSE3_64-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; SSE3_64-NEXT: xorq %rax, %rcx
; SSE3_64-NEXT: cvttss2si %xmm0, %rax
; SSE3_64-NEXT: ucomiss %xmm1, %xmm0
; SSE3_64-NEXT: cmovaeq %rcx, %rax
; SSE3_64-NEXT: retq
;
; SSE2_32_WIN-LABEL: f_to_u64:
; SSE2_32_WIN: # %bb.0:
; SSE2_32_WIN-NEXT: pushl %ebp
; SSE2_32_WIN-NEXT: movl %esp, %ebp
; SSE2_32_WIN-NEXT: andl $-8, %esp
; SSE2_32_WIN-NEXT: subl $24, %esp
; SSE2_32_WIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32_WIN-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2_32_WIN-NEXT: movaps %xmm0, %xmm2
; SSE2_32_WIN-NEXT: cmpltss %xmm1, %xmm2
; SSE2_32_WIN-NEXT: movaps %xmm2, %xmm3
; SSE2_32_WIN-NEXT: andps %xmm0, %xmm2
; SSE2_32_WIN-NEXT: xorl %edx, %edx
; SSE2_32_WIN-NEXT: ucomiss %xmm0, %xmm1
; SSE2_32_WIN-NEXT: subss %xmm1, %xmm0
; SSE2_32_WIN-NEXT: andnps %xmm0, %xmm3
; SSE2_32_WIN-NEXT: orps %xmm3, %xmm2
; SSE2_32_WIN-NEXT: movss %xmm2, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: flds {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: setbe %dl
; SSE2_32_WIN-NEXT: shll $31, %edx
; SSE2_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movl %ebp, %esp
; SSE2_32_WIN-NEXT: popl %ebp
; SSE2_32_WIN-NEXT: retl
;
; SSE2_32_LIN-LABEL: f_to_u64:
; SSE2_32_LIN: # %bb.0:
; SSE2_32_LIN-NEXT: subl $28, %esp
; SSE2_32_LIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32_LIN-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2_32_LIN-NEXT: movaps %xmm0, %xmm2
; SSE2_32_LIN-NEXT: cmpltss %xmm1, %xmm2
; SSE2_32_LIN-NEXT: movaps %xmm2, %xmm3
; SSE2_32_LIN-NEXT: andps %xmm0, %xmm2
; SSE2_32_LIN-NEXT: xorl %edx, %edx
; SSE2_32_LIN-NEXT: ucomiss %xmm0, %xmm1
; SSE2_32_LIN-NEXT: subss %xmm1, %xmm0
; SSE2_32_LIN-NEXT: andnps %xmm0, %xmm3
; SSE2_32_LIN-NEXT: orps %xmm3, %xmm2
; SSE2_32_LIN-NEXT: movss %xmm2, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: flds {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: setbe %dl
; SSE2_32_LIN-NEXT: shll $31, %edx
; SSE2_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: addl $28, %esp
; SSE2_32_LIN-NEXT: retl
;
; SSE2_64-LABEL: f_to_u64:
; SSE2_64: # %bb.0:
; SSE2_64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2_64-NEXT: movaps %xmm0, %xmm2
; SSE2_64-NEXT: subss %xmm1, %xmm2
; SSE2_64-NEXT: cvttss2si %xmm2, %rax
; SSE2_64-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; SSE2_64-NEXT: xorq %rax, %rcx
; SSE2_64-NEXT: cvttss2si %xmm0, %rax
; SSE2_64-NEXT: ucomiss %xmm1, %xmm0
; SSE2_64-NEXT: cmovaeq %rcx, %rax
; SSE2_64-NEXT: retq
;
; X87_WIN-LABEL: f_to_u64:
; X87_WIN: # %bb.0:
; X87_WIN-NEXT: pushl %ebp
; X87_WIN-NEXT: movl %esp, %ebp
; X87_WIN-NEXT: andl $-8, %esp
; X87_WIN-NEXT: subl $16, %esp
; X87_WIN-NEXT: flds 8(%ebp)
; X87_WIN-NEXT: flds __real@5f000000
; X87_WIN-NEXT: fld %st(1)
; X87_WIN-NEXT: fsub %st(1)
; X87_WIN-NEXT: fxch %st(1)
; X87_WIN-NEXT: fucomp %st(2)
; X87_WIN-NEXT: fnstsw %ax
; X87_WIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_WIN-NEXT: sahf
; X87_WIN-NEXT: ja LBB0_2
; X87_WIN-NEXT: # %bb.1:
; X87_WIN-NEXT: fstp %st(1)
; X87_WIN-NEXT: fldz
; X87_WIN-NEXT: LBB0_2:
; X87_WIN-NEXT: fstp %st(0)
; X87_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: setbe %al
; X87_WIN-NEXT: movzbl %al, %edx
; X87_WIN-NEXT: shll $31, %edx
; X87_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movl %ebp, %esp
; X87_WIN-NEXT: popl %ebp
; X87_WIN-NEXT: retl
;
; X87_LIN-LABEL: f_to_u64:
; X87_LIN: # %bb.0:
; X87_LIN-NEXT: subl $20, %esp
; X87_LIN-NEXT: flds {{[0-9]+}}(%esp)
; X87_LIN-NEXT: flds {{\.LCPI.*}}
; X87_LIN-NEXT: fld %st(1)
; X87_LIN-NEXT: fsub %st(1)
; X87_LIN-NEXT: fxch %st(1)
; X87_LIN-NEXT: fucomp %st(2)
; X87_LIN-NEXT: fnstsw %ax
; X87_LIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_LIN-NEXT: sahf
; X87_LIN-NEXT: ja .LBB0_2
; X87_LIN-NEXT: # %bb.1:
; X87_LIN-NEXT: fstp %st(1)
; X87_LIN-NEXT: fldz
; X87_LIN-NEXT: .LBB0_2:
; X87_LIN-NEXT: fstp %st(0)
; X87_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: setbe %al
; X87_LIN-NEXT: movzbl %al, %edx
; X87_LIN-NEXT: shll $31, %edx
; X87_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: addl $20, %esp
; X87_LIN-NEXT: retl
%r = fptoui float %a to i64
ret i64 %r
}
define i64 @f_to_s64(float %a) nounwind {
; AVX512DQVL_32_WIN-LABEL: f_to_s64:
; AVX512DQVL_32_WIN: # %bb.0:
; AVX512DQVL_32_WIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512DQVL_32_WIN-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512DQVL_32_WIN-NEXT: vmovd %xmm0, %eax
; AVX512DQVL_32_WIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQVL_32_WIN-NEXT: vzeroupper
; AVX512DQVL_32_WIN-NEXT: retl
;
; AVX512DQVL_32_LIN-LABEL: f_to_s64:
; AVX512DQVL_32_LIN: # %bb.0:
; AVX512DQVL_32_LIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512DQVL_32_LIN-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512DQVL_32_LIN-NEXT: vmovd %xmm0, %eax
; AVX512DQVL_32_LIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQVL_32_LIN-NEXT: vzeroupper
; AVX512DQVL_32_LIN-NEXT: retl
;
; AVX512_64-LABEL: f_to_s64:
; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvttss2si %xmm0, %rax
; AVX512_64-NEXT: retq
;
; AVX512DQ_32_WIN-LABEL: f_to_s64:
; AVX512DQ_32_WIN: # %bb.0:
; AVX512DQ_32_WIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512DQ_32_WIN-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ_32_WIN-NEXT: vmovd %xmm0, %eax
; AVX512DQ_32_WIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQ_32_WIN-NEXT: vzeroupper
; AVX512DQ_32_WIN-NEXT: retl
;
; AVX512DQ_32_LIN-LABEL: f_to_s64:
; AVX512DQ_32_LIN: # %bb.0:
; AVX512DQ_32_LIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512DQ_32_LIN-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ_32_LIN-NEXT: vmovd %xmm0, %eax
; AVX512DQ_32_LIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQ_32_LIN-NEXT: vzeroupper
; AVX512DQ_32_LIN-NEXT: retl
;
; AVX512F_32_WIN-LABEL: f_to_s64:
; AVX512F_32_WIN: # %bb.0:
; AVX512F_32_WIN-NEXT: pushl %ebp
; AVX512F_32_WIN-NEXT: movl %esp, %ebp
; AVX512F_32_WIN-NEXT: andl $-8, %esp
; AVX512F_32_WIN-NEXT: subl $16, %esp
; AVX512F_32_WIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F_32_WIN-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32_WIN-NEXT: flds {{[0-9]+}}(%esp)
; AVX512F_32_WIN-NEXT: fisttpll (%esp)
; AVX512F_32_WIN-NEXT: movl (%esp), %eax
; AVX512F_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F_32_WIN-NEXT: movl %ebp, %esp
; AVX512F_32_WIN-NEXT: popl %ebp
; AVX512F_32_WIN-NEXT: retl
;
; AVX512F_32_LIN-LABEL: f_to_s64:
; AVX512F_32_LIN: # %bb.0:
; AVX512F_32_LIN-NEXT: subl $20, %esp
; AVX512F_32_LIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F_32_LIN-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32_LIN-NEXT: flds {{[0-9]+}}(%esp)
; AVX512F_32_LIN-NEXT: fisttpll (%esp)
; AVX512F_32_LIN-NEXT: movl (%esp), %eax
; AVX512F_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F_32_LIN-NEXT: addl $20, %esp
; AVX512F_32_LIN-NEXT: retl
;
; SSE3_32_WIN-LABEL: f_to_s64:
; SSE3_32_WIN: # %bb.0:
; SSE3_32_WIN-NEXT: pushl %ebp
; SSE3_32_WIN-NEXT: movl %esp, %ebp
; SSE3_32_WIN-NEXT: andl $-8, %esp
; SSE3_32_WIN-NEXT: subl $16, %esp
; SSE3_32_WIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3_32_WIN-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: flds {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: fisttpll (%esp)
; SSE3_32_WIN-NEXT: movl (%esp), %eax
; SSE3_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE3_32_WIN-NEXT: movl %ebp, %esp
; SSE3_32_WIN-NEXT: popl %ebp
; SSE3_32_WIN-NEXT: retl
;
; SSE3_32_LIN-LABEL: f_to_s64:
; SSE3_32_LIN: # %bb.0:
; SSE3_32_LIN-NEXT: subl $20, %esp
; SSE3_32_LIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3_32_LIN-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: flds {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: fisttpll (%esp)
; SSE3_32_LIN-NEXT: movl (%esp), %eax
; SSE3_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE3_32_LIN-NEXT: addl $20, %esp
; SSE3_32_LIN-NEXT: retl
;
; SSE3_64-LABEL: f_to_s64:
; SSE3_64: # %bb.0:
; SSE3_64-NEXT: cvttss2si %xmm0, %rax
; SSE3_64-NEXT: retq
;
; SSE2_32_WIN-LABEL: f_to_s64:
; SSE2_32_WIN: # %bb.0:
; SSE2_32_WIN-NEXT: pushl %ebp
; SSE2_32_WIN-NEXT: movl %esp, %ebp
; SSE2_32_WIN-NEXT: andl $-8, %esp
; SSE2_32_WIN-NEXT: subl $24, %esp
; SSE2_32_WIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32_WIN-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: flds {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE2_32_WIN-NEXT: movl %ebp, %esp
; SSE2_32_WIN-NEXT: popl %ebp
; SSE2_32_WIN-NEXT: retl
;
; SSE2_32_LIN-LABEL: f_to_s64:
; SSE2_32_LIN: # %bb.0:
; SSE2_32_LIN-NEXT: subl $28, %esp
; SSE2_32_LIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32_LIN-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: flds {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE2_32_LIN-NEXT: addl $28, %esp
; SSE2_32_LIN-NEXT: retl
;
; SSE2_64-LABEL: f_to_s64:
; SSE2_64: # %bb.0:
; SSE2_64-NEXT: cvttss2si %xmm0, %rax
; SSE2_64-NEXT: retq
;
; X87_WIN-LABEL: f_to_s64:
; X87_WIN: # %bb.0:
; X87_WIN-NEXT: pushl %ebp
; X87_WIN-NEXT: movl %esp, %ebp
; X87_WIN-NEXT: andl $-8, %esp
; X87_WIN-NEXT: subl $16, %esp
; X87_WIN-NEXT: flds 8(%ebp)
; X87_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87_WIN-NEXT: movl %ebp, %esp
; X87_WIN-NEXT: popl %ebp
; X87_WIN-NEXT: retl
;
; X87_LIN-LABEL: f_to_s64:
; X87_LIN: # %bb.0:
; X87_LIN-NEXT: subl $20, %esp
; X87_LIN-NEXT: flds {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87_LIN-NEXT: addl $20, %esp
; X87_LIN-NEXT: retl
%r = fptosi float %a to i64
ret i64 %r
}
define i64 @d_to_u64(double %a) nounwind {
; AVX512DQVL_32_WIN-LABEL: d_to_u64:
; AVX512DQVL_32_WIN: # %bb.0:
; AVX512DQVL_32_WIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQVL_32_WIN-NEXT: vcvttpd2uqq %ymm0, %ymm0
; AVX512DQVL_32_WIN-NEXT: vmovd %xmm0, %eax
; AVX512DQVL_32_WIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQVL_32_WIN-NEXT: vzeroupper
; AVX512DQVL_32_WIN-NEXT: retl
;
; AVX512DQVL_32_LIN-LABEL: d_to_u64:
; AVX512DQVL_32_LIN: # %bb.0:
; AVX512DQVL_32_LIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQVL_32_LIN-NEXT: vcvttpd2uqq %ymm0, %ymm0
; AVX512DQVL_32_LIN-NEXT: vmovd %xmm0, %eax
; AVX512DQVL_32_LIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQVL_32_LIN-NEXT: vzeroupper
; AVX512DQVL_32_LIN-NEXT: retl
;
; AVX512_64-LABEL: d_to_u64:
; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvttsd2usi %xmm0, %rax
; AVX512_64-NEXT: retq
;
; AVX512DQ_32_WIN-LABEL: d_to_u64:
; AVX512DQ_32_WIN: # %bb.0:
; AVX512DQ_32_WIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ_32_WIN-NEXT: vcvttpd2uqq %zmm0, %zmm0
; AVX512DQ_32_WIN-NEXT: vmovd %xmm0, %eax
; AVX512DQ_32_WIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQ_32_WIN-NEXT: vzeroupper
; AVX512DQ_32_WIN-NEXT: retl
;
; AVX512DQ_32_LIN-LABEL: d_to_u64:
; AVX512DQ_32_LIN: # %bb.0:
; AVX512DQ_32_LIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ_32_LIN-NEXT: vcvttpd2uqq %zmm0, %zmm0
; AVX512DQ_32_LIN-NEXT: vmovd %xmm0, %eax
; AVX512DQ_32_LIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQ_32_LIN-NEXT: vzeroupper
; AVX512DQ_32_LIN-NEXT: retl
;
; AVX512F_32_WIN-LABEL: d_to_u64:
; AVX512F_32_WIN: # %bb.0:
; AVX512F_32_WIN-NEXT: pushl %ebp
; AVX512F_32_WIN-NEXT: movl %esp, %ebp
; AVX512F_32_WIN-NEXT: andl $-8, %esp
; AVX512F_32_WIN-NEXT: subl $16, %esp
; AVX512F_32_WIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32_WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512F_32_WIN-NEXT: vcmpltsd %xmm1, %xmm0, %k1
; AVX512F_32_WIN-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; AVX512F_32_WIN-NEXT: vmovsd %xmm0, %xmm0, %xmm2 {%k1}
; AVX512F_32_WIN-NEXT: vmovsd %xmm2, {{[0-9]+}}(%esp)
; AVX512F_32_WIN-NEXT: fldl {{[0-9]+}}(%esp)
; AVX512F_32_WIN-NEXT: fisttpll (%esp)
; AVX512F_32_WIN-NEXT: xorl %edx, %edx
; AVX512F_32_WIN-NEXT: vucomisd %xmm0, %xmm1
; AVX512F_32_WIN-NEXT: setbe %dl
; AVX512F_32_WIN-NEXT: shll $31, %edx
; AVX512F_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; AVX512F_32_WIN-NEXT: movl (%esp), %eax
; AVX512F_32_WIN-NEXT: movl %ebp, %esp
; AVX512F_32_WIN-NEXT: popl %ebp
; AVX512F_32_WIN-NEXT: retl
;
; AVX512F_32_LIN-LABEL: d_to_u64:
; AVX512F_32_LIN: # %bb.0:
; AVX512F_32_LIN-NEXT: subl $20, %esp
; AVX512F_32_LIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32_LIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512F_32_LIN-NEXT: vcmpltsd %xmm1, %xmm0, %k1
; AVX512F_32_LIN-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; AVX512F_32_LIN-NEXT: vmovsd %xmm0, %xmm0, %xmm2 {%k1}
; AVX512F_32_LIN-NEXT: vmovsd %xmm2, {{[0-9]+}}(%esp)
; AVX512F_32_LIN-NEXT: fldl {{[0-9]+}}(%esp)
; AVX512F_32_LIN-NEXT: fisttpll (%esp)
; AVX512F_32_LIN-NEXT: xorl %edx, %edx
; AVX512F_32_LIN-NEXT: vucomisd %xmm0, %xmm1
; AVX512F_32_LIN-NEXT: setbe %dl
; AVX512F_32_LIN-NEXT: shll $31, %edx
; AVX512F_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; AVX512F_32_LIN-NEXT: movl (%esp), %eax
; AVX512F_32_LIN-NEXT: addl $20, %esp
; AVX512F_32_LIN-NEXT: retl
;
; SSE3_32_WIN-LABEL: d_to_u64:
; SSE3_32_WIN: # %bb.0:
; SSE3_32_WIN-NEXT: pushl %ebp
; SSE3_32_WIN-NEXT: movl %esp, %ebp
; SSE3_32_WIN-NEXT: andl $-8, %esp
; SSE3_32_WIN-NEXT: subl $16, %esp
; SSE3_32_WIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3_32_WIN-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE3_32_WIN-NEXT: movapd %xmm0, %xmm2
; SSE3_32_WIN-NEXT: cmpltsd %xmm1, %xmm2
; SSE3_32_WIN-NEXT: movapd %xmm2, %xmm3
; SSE3_32_WIN-NEXT: andpd %xmm0, %xmm2
; SSE3_32_WIN-NEXT: xorl %edx, %edx
; SSE3_32_WIN-NEXT: ucomisd %xmm0, %xmm1
; SSE3_32_WIN-NEXT: subsd %xmm1, %xmm0
; SSE3_32_WIN-NEXT: andnpd %xmm0, %xmm3
; SSE3_32_WIN-NEXT: orpd %xmm3, %xmm2
; SSE3_32_WIN-NEXT: movsd %xmm2, {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: fldl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: fisttpll (%esp)
; SSE3_32_WIN-NEXT: setbe %dl
; SSE3_32_WIN-NEXT: shll $31, %edx
; SSE3_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE3_32_WIN-NEXT: movl (%esp), %eax
; SSE3_32_WIN-NEXT: movl %ebp, %esp
; SSE3_32_WIN-NEXT: popl %ebp
; SSE3_32_WIN-NEXT: retl
;
; SSE3_32_LIN-LABEL: d_to_u64:
; SSE3_32_LIN: # %bb.0:
; SSE3_32_LIN-NEXT: subl $20, %esp
; SSE3_32_LIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3_32_LIN-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE3_32_LIN-NEXT: movapd %xmm0, %xmm2
; SSE3_32_LIN-NEXT: cmpltsd %xmm1, %xmm2
; SSE3_32_LIN-NEXT: movapd %xmm2, %xmm3
; SSE3_32_LIN-NEXT: andpd %xmm0, %xmm2
; SSE3_32_LIN-NEXT: xorl %edx, %edx
; SSE3_32_LIN-NEXT: ucomisd %xmm0, %xmm1
; SSE3_32_LIN-NEXT: subsd %xmm1, %xmm0
; SSE3_32_LIN-NEXT: andnpd %xmm0, %xmm3
; SSE3_32_LIN-NEXT: orpd %xmm3, %xmm2
; SSE3_32_LIN-NEXT: movsd %xmm2, {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: fldl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: fisttpll (%esp)
; SSE3_32_LIN-NEXT: setbe %dl
; SSE3_32_LIN-NEXT: shll $31, %edx
; SSE3_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE3_32_LIN-NEXT: movl (%esp), %eax
; SSE3_32_LIN-NEXT: addl $20, %esp
; SSE3_32_LIN-NEXT: retl
;
; SSE3_64-LABEL: d_to_u64:
; SSE3_64: # %bb.0:
; SSE3_64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE3_64-NEXT: movapd %xmm0, %xmm2
; SSE3_64-NEXT: subsd %xmm1, %xmm2
; SSE3_64-NEXT: cvttsd2si %xmm2, %rax
; SSE3_64-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; SSE3_64-NEXT: xorq %rax, %rcx
; SSE3_64-NEXT: cvttsd2si %xmm0, %rax
; SSE3_64-NEXT: ucomisd %xmm1, %xmm0
; SSE3_64-NEXT: cmovaeq %rcx, %rax
; SSE3_64-NEXT: retq
;
; SSE2_32_WIN-LABEL: d_to_u64:
; SSE2_32_WIN: # %bb.0:
; SSE2_32_WIN-NEXT: pushl %ebp
; SSE2_32_WIN-NEXT: movl %esp, %ebp
; SSE2_32_WIN-NEXT: andl $-8, %esp
; SSE2_32_WIN-NEXT: subl $24, %esp
; SSE2_32_WIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32_WIN-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE2_32_WIN-NEXT: movapd %xmm0, %xmm2
; SSE2_32_WIN-NEXT: cmpltsd %xmm1, %xmm2
; SSE2_32_WIN-NEXT: movapd %xmm2, %xmm3
; SSE2_32_WIN-NEXT: andpd %xmm0, %xmm2
; SSE2_32_WIN-NEXT: xorl %edx, %edx
; SSE2_32_WIN-NEXT: ucomisd %xmm0, %xmm1
; SSE2_32_WIN-NEXT: subsd %xmm1, %xmm0
; SSE2_32_WIN-NEXT: andnpd %xmm0, %xmm3
; SSE2_32_WIN-NEXT: orpd %xmm3, %xmm2
; SSE2_32_WIN-NEXT: movsd %xmm2, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fldl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: setbe %dl
; SSE2_32_WIN-NEXT: shll $31, %edx
; SSE2_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movl %ebp, %esp
; SSE2_32_WIN-NEXT: popl %ebp
; SSE2_32_WIN-NEXT: retl
;
; SSE2_32_LIN-LABEL: d_to_u64:
; SSE2_32_LIN: # %bb.0:
; SSE2_32_LIN-NEXT: subl $28, %esp
; SSE2_32_LIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32_LIN-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE2_32_LIN-NEXT: movapd %xmm0, %xmm2
; SSE2_32_LIN-NEXT: cmpltsd %xmm1, %xmm2
; SSE2_32_LIN-NEXT: movapd %xmm2, %xmm3
; SSE2_32_LIN-NEXT: andpd %xmm0, %xmm2
; SSE2_32_LIN-NEXT: xorl %edx, %edx
; SSE2_32_LIN-NEXT: ucomisd %xmm0, %xmm1
; SSE2_32_LIN-NEXT: subsd %xmm1, %xmm0
; SSE2_32_LIN-NEXT: andnpd %xmm0, %xmm3
; SSE2_32_LIN-NEXT: orpd %xmm3, %xmm2
; SSE2_32_LIN-NEXT: movsd %xmm2, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fldl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: setbe %dl
; SSE2_32_LIN-NEXT: shll $31, %edx
; SSE2_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: addl $28, %esp
; SSE2_32_LIN-NEXT: retl
;
; SSE2_64-LABEL: d_to_u64:
; SSE2_64: # %bb.0:
; SSE2_64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE2_64-NEXT: movapd %xmm0, %xmm2
; SSE2_64-NEXT: subsd %xmm1, %xmm2
; SSE2_64-NEXT: cvttsd2si %xmm2, %rax
; SSE2_64-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; SSE2_64-NEXT: xorq %rax, %rcx
; SSE2_64-NEXT: cvttsd2si %xmm0, %rax
; SSE2_64-NEXT: ucomisd %xmm1, %xmm0
; SSE2_64-NEXT: cmovaeq %rcx, %rax
; SSE2_64-NEXT: retq
;
; X87_WIN-LABEL: d_to_u64:
; X87_WIN: # %bb.0:
; X87_WIN-NEXT: pushl %ebp
; X87_WIN-NEXT: movl %esp, %ebp
; X87_WIN-NEXT: andl $-8, %esp
; X87_WIN-NEXT: subl $16, %esp
; X87_WIN-NEXT: fldl 8(%ebp)
; X87_WIN-NEXT: flds __real@5f000000
; X87_WIN-NEXT: fld %st(1)
; X87_WIN-NEXT: fsub %st(1)
; X87_WIN-NEXT: fxch %st(1)
; X87_WIN-NEXT: fucomp %st(2)
; X87_WIN-NEXT: fnstsw %ax
; X87_WIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_WIN-NEXT: sahf
; X87_WIN-NEXT: ja LBB2_2
; X87_WIN-NEXT: # %bb.1:
; X87_WIN-NEXT: fstp %st(1)
; X87_WIN-NEXT: fldz
; X87_WIN-NEXT: LBB2_2:
; X87_WIN-NEXT: fstp %st(0)
; X87_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: setbe %al
; X87_WIN-NEXT: movzbl %al, %edx
; X87_WIN-NEXT: shll $31, %edx
; X87_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movl %ebp, %esp
; X87_WIN-NEXT: popl %ebp
; X87_WIN-NEXT: retl
;
; X87_LIN-LABEL: d_to_u64:
; X87_LIN: # %bb.0:
; X87_LIN-NEXT: subl $20, %esp
; X87_LIN-NEXT: fldl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: flds {{\.LCPI.*}}
; X87_LIN-NEXT: fld %st(1)
; X87_LIN-NEXT: fsub %st(1)
; X87_LIN-NEXT: fxch %st(1)
; X87_LIN-NEXT: fucomp %st(2)
; X87_LIN-NEXT: fnstsw %ax
; X87_LIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_LIN-NEXT: sahf
; X87_LIN-NEXT: ja .LBB2_2
; X87_LIN-NEXT: # %bb.1:
; X87_LIN-NEXT: fstp %st(1)
; X87_LIN-NEXT: fldz
; X87_LIN-NEXT: .LBB2_2:
; X87_LIN-NEXT: fstp %st(0)
; X87_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: setbe %al
; X87_LIN-NEXT: movzbl %al, %edx
; X87_LIN-NEXT: shll $31, %edx
; X87_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: addl $20, %esp
; X87_LIN-NEXT: retl
%r = fptoui double %a to i64
ret i64 %r
}
define i64 @d_to_s64(double %a) nounwind {
; AVX512DQVL_32_WIN-LABEL: d_to_s64:
; AVX512DQVL_32_WIN: # %bb.0:
; AVX512DQVL_32_WIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQVL_32_WIN-NEXT: vcvttpd2qq %ymm0, %ymm0
; AVX512DQVL_32_WIN-NEXT: vmovd %xmm0, %eax
; AVX512DQVL_32_WIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQVL_32_WIN-NEXT: vzeroupper
; AVX512DQVL_32_WIN-NEXT: retl
;
; AVX512DQVL_32_LIN-LABEL: d_to_s64:
; AVX512DQVL_32_LIN: # %bb.0:
; AVX512DQVL_32_LIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQVL_32_LIN-NEXT: vcvttpd2qq %ymm0, %ymm0
; AVX512DQVL_32_LIN-NEXT: vmovd %xmm0, %eax
; AVX512DQVL_32_LIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQVL_32_LIN-NEXT: vzeroupper
; AVX512DQVL_32_LIN-NEXT: retl
;
; AVX512_64-LABEL: d_to_s64:
; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvttsd2si %xmm0, %rax
; AVX512_64-NEXT: retq
;
; AVX512DQ_32_WIN-LABEL: d_to_s64:
; AVX512DQ_32_WIN: # %bb.0:
; AVX512DQ_32_WIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ_32_WIN-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ_32_WIN-NEXT: vmovd %xmm0, %eax
; AVX512DQ_32_WIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQ_32_WIN-NEXT: vzeroupper
; AVX512DQ_32_WIN-NEXT: retl
;
; AVX512DQ_32_LIN-LABEL: d_to_s64:
; AVX512DQ_32_LIN: # %bb.0:
; AVX512DQ_32_LIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ_32_LIN-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ_32_LIN-NEXT: vmovd %xmm0, %eax
; AVX512DQ_32_LIN-NEXT: vpextrd $1, %xmm0, %edx
; AVX512DQ_32_LIN-NEXT: vzeroupper
; AVX512DQ_32_LIN-NEXT: retl
;
; AVX512F_32_WIN-LABEL: d_to_s64:
; AVX512F_32_WIN: # %bb.0:
; AVX512F_32_WIN-NEXT: pushl %ebp
; AVX512F_32_WIN-NEXT: movl %esp, %ebp
; AVX512F_32_WIN-NEXT: andl $-8, %esp
; AVX512F_32_WIN-NEXT: subl $16, %esp
; AVX512F_32_WIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32_WIN-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32_WIN-NEXT: fldl {{[0-9]+}}(%esp)
; AVX512F_32_WIN-NEXT: fisttpll (%esp)
; AVX512F_32_WIN-NEXT: movl (%esp), %eax
; AVX512F_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F_32_WIN-NEXT: movl %ebp, %esp
; AVX512F_32_WIN-NEXT: popl %ebp
; AVX512F_32_WIN-NEXT: retl
;
; AVX512F_32_LIN-LABEL: d_to_s64:
; AVX512F_32_LIN: # %bb.0:
; AVX512F_32_LIN-NEXT: subl $20, %esp
; AVX512F_32_LIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32_LIN-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32_LIN-NEXT: fldl {{[0-9]+}}(%esp)
; AVX512F_32_LIN-NEXT: fisttpll (%esp)
; AVX512F_32_LIN-NEXT: movl (%esp), %eax
; AVX512F_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F_32_LIN-NEXT: addl $20, %esp
; AVX512F_32_LIN-NEXT: retl
;
; SSE3_32_WIN-LABEL: d_to_s64:
; SSE3_32_WIN: # %bb.0:
; SSE3_32_WIN-NEXT: pushl %ebp
; SSE3_32_WIN-NEXT: movl %esp, %ebp
; SSE3_32_WIN-NEXT: andl $-8, %esp
; SSE3_32_WIN-NEXT: subl $16, %esp
; SSE3_32_WIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3_32_WIN-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: fldl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: fisttpll (%esp)
; SSE3_32_WIN-NEXT: movl (%esp), %eax
; SSE3_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE3_32_WIN-NEXT: movl %ebp, %esp
; SSE3_32_WIN-NEXT: popl %ebp
; SSE3_32_WIN-NEXT: retl
;
; SSE3_32_LIN-LABEL: d_to_s64:
; SSE3_32_LIN: # %bb.0:
; SSE3_32_LIN-NEXT: subl $20, %esp
; SSE3_32_LIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3_32_LIN-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: fldl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: fisttpll (%esp)
; SSE3_32_LIN-NEXT: movl (%esp), %eax
; SSE3_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE3_32_LIN-NEXT: addl $20, %esp
; SSE3_32_LIN-NEXT: retl
;
; SSE3_64-LABEL: d_to_s64:
; SSE3_64: # %bb.0:
; SSE3_64-NEXT: cvttsd2si %xmm0, %rax
; SSE3_64-NEXT: retq
;
; SSE2_32_WIN-LABEL: d_to_s64:
; SSE2_32_WIN: # %bb.0:
; SSE2_32_WIN-NEXT: pushl %ebp
; SSE2_32_WIN-NEXT: movl %esp, %ebp
; SSE2_32_WIN-NEXT: andl $-8, %esp
; SSE2_32_WIN-NEXT: subl $24, %esp
; SSE2_32_WIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32_WIN-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fldl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE2_32_WIN-NEXT: movl %ebp, %esp
; SSE2_32_WIN-NEXT: popl %ebp
; SSE2_32_WIN-NEXT: retl
;
; SSE2_32_LIN-LABEL: d_to_s64:
; SSE2_32_LIN: # %bb.0:
; SSE2_32_LIN-NEXT: subl $28, %esp
; SSE2_32_LIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32_LIN-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fldl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE2_32_LIN-NEXT: addl $28, %esp
; SSE2_32_LIN-NEXT: retl
;
; SSE2_64-LABEL: d_to_s64:
; SSE2_64: # %bb.0:
; SSE2_64-NEXT: cvttsd2si %xmm0, %rax
; SSE2_64-NEXT: retq
;
; X87_WIN-LABEL: d_to_s64:
; X87_WIN: # %bb.0:
; X87_WIN-NEXT: pushl %ebp
; X87_WIN-NEXT: movl %esp, %ebp
; X87_WIN-NEXT: andl $-8, %esp
; X87_WIN-NEXT: subl $16, %esp
; X87_WIN-NEXT: fldl 8(%ebp)
; X87_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87_WIN-NEXT: movl %ebp, %esp
; X87_WIN-NEXT: popl %ebp
; X87_WIN-NEXT: retl
;
; X87_LIN-LABEL: d_to_s64:
; X87_LIN: # %bb.0:
; X87_LIN-NEXT: subl $20, %esp
; X87_LIN-NEXT: fldl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87_LIN-NEXT: addl $20, %esp
; X87_LIN-NEXT: retl
%r = fptosi double %a to i64
ret i64 %r
}
define i64 @x_to_u64(x86_fp80 %a) nounwind {
; AVX512_32_WIN-LABEL: x_to_u64:
; AVX512_32_WIN: # %bb.0:
; AVX512_32_WIN-NEXT: pushl %ebp
; AVX512_32_WIN-NEXT: movl %esp, %ebp
; AVX512_32_WIN-NEXT: andl $-8, %esp
; AVX512_32_WIN-NEXT: subl $8, %esp
; AVX512_32_WIN-NEXT: fldt 8(%ebp)
; AVX512_32_WIN-NEXT: flds __real@5f000000
; AVX512_32_WIN-NEXT: fld %st(1)
; AVX512_32_WIN-NEXT: fsub %st(1)
; AVX512_32_WIN-NEXT: xorl %edx, %edx
; AVX512_32_WIN-NEXT: fxch %st(1)
; AVX512_32_WIN-NEXT: fucompi %st(2)
-; AVX512_32_WIN-NEXT: fcmovnbe %st(1), %st(0)
+; AVX512_32_WIN-NEXT: fcmovnbe %st(1), %st
; AVX512_32_WIN-NEXT: fstp %st(1)
; AVX512_32_WIN-NEXT: fisttpll (%esp)
; AVX512_32_WIN-NEXT: setbe %dl
; AVX512_32_WIN-NEXT: shll $31, %edx
; AVX512_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; AVX512_32_WIN-NEXT: movl (%esp), %eax
; AVX512_32_WIN-NEXT: movl %ebp, %esp
; AVX512_32_WIN-NEXT: popl %ebp
; AVX512_32_WIN-NEXT: retl
;
; AVX512_32_LIN-LABEL: x_to_u64:
; AVX512_32_LIN: # %bb.0:
; AVX512_32_LIN-NEXT: subl $12, %esp
; AVX512_32_LIN-NEXT: fldt {{[0-9]+}}(%esp)
; AVX512_32_LIN-NEXT: flds {{\.LCPI.*}}
; AVX512_32_LIN-NEXT: fld %st(1)
; AVX512_32_LIN-NEXT: fsub %st(1)
; AVX512_32_LIN-NEXT: xorl %edx, %edx
; AVX512_32_LIN-NEXT: fxch %st(1)
; AVX512_32_LIN-NEXT: fucompi %st(2)
-; AVX512_32_LIN-NEXT: fcmovnbe %st(1), %st(0)
+; AVX512_32_LIN-NEXT: fcmovnbe %st(1), %st
; AVX512_32_LIN-NEXT: fstp %st(1)
; AVX512_32_LIN-NEXT: fisttpll (%esp)
; AVX512_32_LIN-NEXT: setbe %dl
; AVX512_32_LIN-NEXT: shll $31, %edx
; AVX512_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; AVX512_32_LIN-NEXT: movl (%esp), %eax
; AVX512_32_LIN-NEXT: addl $12, %esp
; AVX512_32_LIN-NEXT: retl
;
; AVX512_64_WIN-LABEL: x_to_u64:
; AVX512_64_WIN: # %bb.0:
; AVX512_64_WIN-NEXT: pushq %rax
; AVX512_64_WIN-NEXT: fldt (%rcx)
; AVX512_64_WIN-NEXT: flds __real@{{.*}}(%rip)
; AVX512_64_WIN-NEXT: fld %st(1)
; AVX512_64_WIN-NEXT: fsub %st(1)
; AVX512_64_WIN-NEXT: xorl %ecx, %ecx
; AVX512_64_WIN-NEXT: fxch %st(1)
; AVX512_64_WIN-NEXT: fucompi %st(2)
-; AVX512_64_WIN-NEXT: fcmovnbe %st(1), %st(0)
+; AVX512_64_WIN-NEXT: fcmovnbe %st(1), %st
; AVX512_64_WIN-NEXT: fstp %st(1)
; AVX512_64_WIN-NEXT: fisttpll (%rsp)
; AVX512_64_WIN-NEXT: setbe %cl
; AVX512_64_WIN-NEXT: shll $31, %ecx
; AVX512_64_WIN-NEXT: xorl {{[0-9]+}}(%rsp), %ecx
; AVX512_64_WIN-NEXT: shlq $32, %rcx
; AVX512_64_WIN-NEXT: movl (%rsp), %eax
; AVX512_64_WIN-NEXT: orq %rcx, %rax
; AVX512_64_WIN-NEXT: popq %rcx
; AVX512_64_WIN-NEXT: retq
;
; AVX512_64_LIN-LABEL: x_to_u64:
; AVX512_64_LIN: # %bb.0:
; AVX512_64_LIN-NEXT: fldt {{[0-9]+}}(%rsp)
; AVX512_64_LIN-NEXT: flds {{.*}}(%rip)
; AVX512_64_LIN-NEXT: fld %st(1)
; AVX512_64_LIN-NEXT: fsub %st(1)
; AVX512_64_LIN-NEXT: xorl %ecx, %ecx
; AVX512_64_LIN-NEXT: fxch %st(1)
; AVX512_64_LIN-NEXT: fucompi %st(2)
-; AVX512_64_LIN-NEXT: fcmovnbe %st(1), %st(0)
+; AVX512_64_LIN-NEXT: fcmovnbe %st(1), %st
; AVX512_64_LIN-NEXT: fstp %st(1)
; AVX512_64_LIN-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; AVX512_64_LIN-NEXT: setbe %cl
; AVX512_64_LIN-NEXT: shll $31, %ecx
; AVX512_64_LIN-NEXT: xorl -{{[0-9]+}}(%rsp), %ecx
; AVX512_64_LIN-NEXT: shlq $32, %rcx
; AVX512_64_LIN-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; AVX512_64_LIN-NEXT: orq %rcx, %rax
; AVX512_64_LIN-NEXT: retq
;
; SSE3_32_WIN-LABEL: x_to_u64:
; SSE3_32_WIN: # %bb.0:
; SSE3_32_WIN-NEXT: pushl %ebp
; SSE3_32_WIN-NEXT: movl %esp, %ebp
; SSE3_32_WIN-NEXT: andl $-8, %esp
; SSE3_32_WIN-NEXT: subl $8, %esp
; SSE3_32_WIN-NEXT: fldt 8(%ebp)
; SSE3_32_WIN-NEXT: flds __real@5f000000
; SSE3_32_WIN-NEXT: fld %st(1)
; SSE3_32_WIN-NEXT: fsub %st(1)
; SSE3_32_WIN-NEXT: xorl %edx, %edx
; SSE3_32_WIN-NEXT: fxch %st(1)
; SSE3_32_WIN-NEXT: fucompi %st(2)
-; SSE3_32_WIN-NEXT: fcmovnbe %st(1), %st(0)
+; SSE3_32_WIN-NEXT: fcmovnbe %st(1), %st
; SSE3_32_WIN-NEXT: fstp %st(1)
; SSE3_32_WIN-NEXT: fisttpll (%esp)
; SSE3_32_WIN-NEXT: setbe %dl
; SSE3_32_WIN-NEXT: shll $31, %edx
; SSE3_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE3_32_WIN-NEXT: movl (%esp), %eax
; SSE3_32_WIN-NEXT: movl %ebp, %esp
; SSE3_32_WIN-NEXT: popl %ebp
; SSE3_32_WIN-NEXT: retl
;
; SSE3_32_LIN-LABEL: x_to_u64:
; SSE3_32_LIN: # %bb.0:
; SSE3_32_LIN-NEXT: subl $12, %esp
; SSE3_32_LIN-NEXT: fldt {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: flds {{\.LCPI.*}}
; SSE3_32_LIN-NEXT: fld %st(1)
; SSE3_32_LIN-NEXT: fsub %st(1)
; SSE3_32_LIN-NEXT: xorl %edx, %edx
; SSE3_32_LIN-NEXT: fxch %st(1)
; SSE3_32_LIN-NEXT: fucompi %st(2)
-; SSE3_32_LIN-NEXT: fcmovnbe %st(1), %st(0)
+; SSE3_32_LIN-NEXT: fcmovnbe %st(1), %st
; SSE3_32_LIN-NEXT: fstp %st(1)
; SSE3_32_LIN-NEXT: fisttpll (%esp)
; SSE3_32_LIN-NEXT: setbe %dl
; SSE3_32_LIN-NEXT: shll $31, %edx
; SSE3_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE3_32_LIN-NEXT: movl (%esp), %eax
; SSE3_32_LIN-NEXT: addl $12, %esp
; SSE3_32_LIN-NEXT: retl
;
; SSE3_64_WIN-LABEL: x_to_u64:
; SSE3_64_WIN: # %bb.0:
; SSE3_64_WIN-NEXT: pushq %rax
; SSE3_64_WIN-NEXT: fldt (%rcx)
; SSE3_64_WIN-NEXT: flds __real@{{.*}}(%rip)
; SSE3_64_WIN-NEXT: fld %st(1)
; SSE3_64_WIN-NEXT: fsub %st(1)
; SSE3_64_WIN-NEXT: xorl %eax, %eax
; SSE3_64_WIN-NEXT: fxch %st(1)
; SSE3_64_WIN-NEXT: fucompi %st(2)
-; SSE3_64_WIN-NEXT: fcmovnbe %st(1), %st(0)
+; SSE3_64_WIN-NEXT: fcmovnbe %st(1), %st
; SSE3_64_WIN-NEXT: fstp %st(1)
; SSE3_64_WIN-NEXT: fisttpll (%rsp)
; SSE3_64_WIN-NEXT: setbe %al
; SSE3_64_WIN-NEXT: shlq $63, %rax
; SSE3_64_WIN-NEXT: xorq (%rsp), %rax
; SSE3_64_WIN-NEXT: popq %rcx
; SSE3_64_WIN-NEXT: retq
;
; SSE3_64_LIN-LABEL: x_to_u64:
; SSE3_64_LIN: # %bb.0:
; SSE3_64_LIN-NEXT: fldt {{[0-9]+}}(%rsp)
; SSE3_64_LIN-NEXT: flds {{.*}}(%rip)
; SSE3_64_LIN-NEXT: fld %st(1)
; SSE3_64_LIN-NEXT: fsub %st(1)
; SSE3_64_LIN-NEXT: xorl %eax, %eax
; SSE3_64_LIN-NEXT: fxch %st(1)
; SSE3_64_LIN-NEXT: fucompi %st(2)
-; SSE3_64_LIN-NEXT: fcmovnbe %st(1), %st(0)
+; SSE3_64_LIN-NEXT: fcmovnbe %st(1), %st
; SSE3_64_LIN-NEXT: fstp %st(1)
; SSE3_64_LIN-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; SSE3_64_LIN-NEXT: setbe %al
; SSE3_64_LIN-NEXT: shlq $63, %rax
; SSE3_64_LIN-NEXT: xorq -{{[0-9]+}}(%rsp), %rax
; SSE3_64_LIN-NEXT: retq
;
; SSE2_32_WIN-LABEL: x_to_u64:
; SSE2_32_WIN: # %bb.0:
; SSE2_32_WIN-NEXT: pushl %ebp
; SSE2_32_WIN-NEXT: movl %esp, %ebp
; SSE2_32_WIN-NEXT: andl $-8, %esp
; SSE2_32_WIN-NEXT: subl $16, %esp
; SSE2_32_WIN-NEXT: fldt 8(%ebp)
; SSE2_32_WIN-NEXT: flds __real@5f000000
; SSE2_32_WIN-NEXT: fld %st(1)
; SSE2_32_WIN-NEXT: fsub %st(1)
; SSE2_32_WIN-NEXT: xorl %edx, %edx
; SSE2_32_WIN-NEXT: fxch %st(1)
; SSE2_32_WIN-NEXT: fucompi %st(2)
-; SSE2_32_WIN-NEXT: fcmovnbe %st(1), %st(0)
+; SSE2_32_WIN-NEXT: fcmovnbe %st(1), %st
; SSE2_32_WIN-NEXT: fstp %st(1)
; SSE2_32_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: setbe %dl
; SSE2_32_WIN-NEXT: shll $31, %edx
; SSE2_32_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movl %ebp, %esp
; SSE2_32_WIN-NEXT: popl %ebp
; SSE2_32_WIN-NEXT: retl
;
; SSE2_32_LIN-LABEL: x_to_u64:
; SSE2_32_LIN: # %bb.0:
; SSE2_32_LIN-NEXT: subl $20, %esp
; SSE2_32_LIN-NEXT: fldt {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: flds {{\.LCPI.*}}
; SSE2_32_LIN-NEXT: fld %st(1)
; SSE2_32_LIN-NEXT: fsub %st(1)
; SSE2_32_LIN-NEXT: xorl %edx, %edx
; SSE2_32_LIN-NEXT: fxch %st(1)
; SSE2_32_LIN-NEXT: fucompi %st(2)
-; SSE2_32_LIN-NEXT: fcmovnbe %st(1), %st(0)
+; SSE2_32_LIN-NEXT: fcmovnbe %st(1), %st
; SSE2_32_LIN-NEXT: fstp %st(1)
; SSE2_32_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: setbe %dl
; SSE2_32_LIN-NEXT: shll $31, %edx
; SSE2_32_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: addl $20, %esp
; SSE2_32_LIN-NEXT: retl
;
; SSE2_64_WIN-LABEL: x_to_u64:
; SSE2_64_WIN: # %bb.0:
; SSE2_64_WIN-NEXT: subq $16, %rsp
; SSE2_64_WIN-NEXT: fldt (%rcx)
; SSE2_64_WIN-NEXT: flds __real@{{.*}}(%rip)
; SSE2_64_WIN-NEXT: fld %st(1)
; SSE2_64_WIN-NEXT: fsub %st(1)
; SSE2_64_WIN-NEXT: xorl %eax, %eax
; SSE2_64_WIN-NEXT: fxch %st(1)
; SSE2_64_WIN-NEXT: fucompi %st(2)
-; SSE2_64_WIN-NEXT: fcmovnbe %st(1), %st(0)
+; SSE2_64_WIN-NEXT: fcmovnbe %st(1), %st
; SSE2_64_WIN-NEXT: fstp %st(1)
; SSE2_64_WIN-NEXT: fnstcw {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; SSE2_64_WIN-NEXT: movw $3199, {{[0-9]+}}(%rsp) # imm = 0xC7F
; SSE2_64_WIN-NEXT: fldcw {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: fistpll {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: fldcw {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: setbe %al
; SSE2_64_WIN-NEXT: shlq $63, %rax
; SSE2_64_WIN-NEXT: xorq {{[0-9]+}}(%rsp), %rax
; SSE2_64_WIN-NEXT: addq $16, %rsp
; SSE2_64_WIN-NEXT: retq
;
; SSE2_64_LIN-LABEL: x_to_u64:
; SSE2_64_LIN: # %bb.0:
; SSE2_64_LIN-NEXT: fldt {{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: flds {{.*}}(%rip)
; SSE2_64_LIN-NEXT: fld %st(1)
; SSE2_64_LIN-NEXT: fsub %st(1)
; SSE2_64_LIN-NEXT: xorl %eax, %eax
; SSE2_64_LIN-NEXT: fxch %st(1)
; SSE2_64_LIN-NEXT: fucompi %st(2)
-; SSE2_64_LIN-NEXT: fcmovnbe %st(1), %st(0)
+; SSE2_64_LIN-NEXT: fcmovnbe %st(1), %st
; SSE2_64_LIN-NEXT: fstp %st(1)
; SSE2_64_LIN-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: movzwl -{{[0-9]+}}(%rsp), %ecx
; SSE2_64_LIN-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; SSE2_64_LIN-NEXT: fldcw -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: movw %cx, -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: fistpll -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: fldcw -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: setbe %al
; SSE2_64_LIN-NEXT: shlq $63, %rax
; SSE2_64_LIN-NEXT: xorq -{{[0-9]+}}(%rsp), %rax
; SSE2_64_LIN-NEXT: retq
;
; X87_WIN-LABEL: x_to_u64:
; X87_WIN: # %bb.0:
; X87_WIN-NEXT: pushl %ebp
; X87_WIN-NEXT: movl %esp, %ebp
; X87_WIN-NEXT: andl $-8, %esp
; X87_WIN-NEXT: subl $16, %esp
; X87_WIN-NEXT: fldt 8(%ebp)
; X87_WIN-NEXT: flds __real@5f000000
; X87_WIN-NEXT: fld %st(1)
; X87_WIN-NEXT: fsub %st(1)
; X87_WIN-NEXT: fxch %st(1)
; X87_WIN-NEXT: fucomp %st(2)
; X87_WIN-NEXT: fnstsw %ax
; X87_WIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_WIN-NEXT: sahf
; X87_WIN-NEXT: ja LBB4_2
; X87_WIN-NEXT: # %bb.1:
; X87_WIN-NEXT: fstp %st(1)
; X87_WIN-NEXT: fldz
; X87_WIN-NEXT: LBB4_2:
; X87_WIN-NEXT: fstp %st(0)
; X87_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: setbe %al
; X87_WIN-NEXT: movzbl %al, %edx
; X87_WIN-NEXT: shll $31, %edx
; X87_WIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movl %ebp, %esp
; X87_WIN-NEXT: popl %ebp
; X87_WIN-NEXT: retl
;
; X87_LIN-LABEL: x_to_u64:
; X87_LIN: # %bb.0:
; X87_LIN-NEXT: subl $20, %esp
; X87_LIN-NEXT: fldt {{[0-9]+}}(%esp)
; X87_LIN-NEXT: flds {{\.LCPI.*}}
; X87_LIN-NEXT: fld %st(1)
; X87_LIN-NEXT: fsub %st(1)
; X87_LIN-NEXT: fxch %st(1)
; X87_LIN-NEXT: fucomp %st(2)
; X87_LIN-NEXT: fnstsw %ax
; X87_LIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_LIN-NEXT: sahf
; X87_LIN-NEXT: ja .LBB4_2
; X87_LIN-NEXT: # %bb.1:
; X87_LIN-NEXT: fstp %st(1)
; X87_LIN-NEXT: fldz
; X87_LIN-NEXT: .LBB4_2:
; X87_LIN-NEXT: fstp %st(0)
; X87_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: setbe %al
; X87_LIN-NEXT: movzbl %al, %edx
; X87_LIN-NEXT: shll $31, %edx
; X87_LIN-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: addl $20, %esp
; X87_LIN-NEXT: retl
%r = fptoui x86_fp80 %a to i64
ret i64 %r
}
define i64 @x_to_s64(x86_fp80 %a) nounwind {
; AVX512_32_WIN-LABEL: x_to_s64:
; AVX512_32_WIN: # %bb.0:
; AVX512_32_WIN-NEXT: pushl %ebp
; AVX512_32_WIN-NEXT: movl %esp, %ebp
; AVX512_32_WIN-NEXT: andl $-8, %esp
; AVX512_32_WIN-NEXT: subl $8, %esp
; AVX512_32_WIN-NEXT: fldt 8(%ebp)
; AVX512_32_WIN-NEXT: fisttpll (%esp)
; AVX512_32_WIN-NEXT: movl (%esp), %eax
; AVX512_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512_32_WIN-NEXT: movl %ebp, %esp
; AVX512_32_WIN-NEXT: popl %ebp
; AVX512_32_WIN-NEXT: retl
;
; AVX512_32_LIN-LABEL: x_to_s64:
; AVX512_32_LIN: # %bb.0:
; AVX512_32_LIN-NEXT: subl $12, %esp
; AVX512_32_LIN-NEXT: fldt {{[0-9]+}}(%esp)
; AVX512_32_LIN-NEXT: fisttpll (%esp)
; AVX512_32_LIN-NEXT: movl (%esp), %eax
; AVX512_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512_32_LIN-NEXT: addl $12, %esp
; AVX512_32_LIN-NEXT: retl
;
; AVX512_64_WIN-LABEL: x_to_s64:
; AVX512_64_WIN: # %bb.0:
; AVX512_64_WIN-NEXT: pushq %rax
; AVX512_64_WIN-NEXT: fldt (%rcx)
; AVX512_64_WIN-NEXT: fisttpll (%rsp)
; AVX512_64_WIN-NEXT: movq (%rsp), %rax
; AVX512_64_WIN-NEXT: popq %rcx
; AVX512_64_WIN-NEXT: retq
;
; AVX512_64_LIN-LABEL: x_to_s64:
; AVX512_64_LIN: # %bb.0:
; AVX512_64_LIN-NEXT: fldt {{[0-9]+}}(%rsp)
; AVX512_64_LIN-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; AVX512_64_LIN-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512_64_LIN-NEXT: retq
;
; SSE3_32_WIN-LABEL: x_to_s64:
; SSE3_32_WIN: # %bb.0:
; SSE3_32_WIN-NEXT: pushl %ebp
; SSE3_32_WIN-NEXT: movl %esp, %ebp
; SSE3_32_WIN-NEXT: andl $-8, %esp
; SSE3_32_WIN-NEXT: subl $8, %esp
; SSE3_32_WIN-NEXT: fldt 8(%ebp)
; SSE3_32_WIN-NEXT: fisttpll (%esp)
; SSE3_32_WIN-NEXT: movl (%esp), %eax
; SSE3_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE3_32_WIN-NEXT: movl %ebp, %esp
; SSE3_32_WIN-NEXT: popl %ebp
; SSE3_32_WIN-NEXT: retl
;
; SSE3_32_LIN-LABEL: x_to_s64:
; SSE3_32_LIN: # %bb.0:
; SSE3_32_LIN-NEXT: subl $12, %esp
; SSE3_32_LIN-NEXT: fldt {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: fisttpll (%esp)
; SSE3_32_LIN-NEXT: movl (%esp), %eax
; SSE3_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE3_32_LIN-NEXT: addl $12, %esp
; SSE3_32_LIN-NEXT: retl
;
; SSE3_64_WIN-LABEL: x_to_s64:
; SSE3_64_WIN: # %bb.0:
; SSE3_64_WIN-NEXT: pushq %rax
; SSE3_64_WIN-NEXT: fldt (%rcx)
; SSE3_64_WIN-NEXT: fisttpll (%rsp)
; SSE3_64_WIN-NEXT: movq (%rsp), %rax
; SSE3_64_WIN-NEXT: popq %rcx
; SSE3_64_WIN-NEXT: retq
;
; SSE3_64_LIN-LABEL: x_to_s64:
; SSE3_64_LIN: # %bb.0:
; SSE3_64_LIN-NEXT: fldt {{[0-9]+}}(%rsp)
; SSE3_64_LIN-NEXT: fisttpll -{{[0-9]+}}(%rsp)
; SSE3_64_LIN-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; SSE3_64_LIN-NEXT: retq
;
; SSE2_32_WIN-LABEL: x_to_s64:
; SSE2_32_WIN: # %bb.0:
; SSE2_32_WIN-NEXT: pushl %ebp
; SSE2_32_WIN-NEXT: movl %esp, %ebp
; SSE2_32_WIN-NEXT: andl $-8, %esp
; SSE2_32_WIN-NEXT: subl $16, %esp
; SSE2_32_WIN-NEXT: fldt 8(%ebp)
; SSE2_32_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE2_32_WIN-NEXT: movl %ebp, %esp
; SSE2_32_WIN-NEXT: popl %ebp
; SSE2_32_WIN-NEXT: retl
;
; SSE2_32_LIN-LABEL: x_to_s64:
; SSE2_32_LIN: # %bb.0:
; SSE2_32_LIN-NEXT: subl $20, %esp
; SSE2_32_LIN-NEXT: fldt {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2_32_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE2_32_LIN-NEXT: addl $20, %esp
; SSE2_32_LIN-NEXT: retl
;
; SSE2_64_WIN-LABEL: x_to_s64:
; SSE2_64_WIN: # %bb.0:
; SSE2_64_WIN-NEXT: subq $16, %rsp
; SSE2_64_WIN-NEXT: fldt (%rcx)
; SSE2_64_WIN-NEXT: fnstcw {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; SSE2_64_WIN-NEXT: movw $3199, {{[0-9]+}}(%rsp) # imm = 0xC7F
; SSE2_64_WIN-NEXT: fldcw {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: movw %ax, {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: fistpll {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: fldcw {{[0-9]+}}(%rsp)
; SSE2_64_WIN-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE2_64_WIN-NEXT: addq $16, %rsp
; SSE2_64_WIN-NEXT: retq
;
; SSE2_64_LIN-LABEL: x_to_s64:
; SSE2_64_LIN: # %bb.0:
; SSE2_64_LIN-NEXT: fldt {{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: fnstcw -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; SSE2_64_LIN-NEXT: movw $3199, -{{[0-9]+}}(%rsp) # imm = 0xC7F
; SSE2_64_LIN-NEXT: fldcw -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: fistpll -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: fldcw -{{[0-9]+}}(%rsp)
; SSE2_64_LIN-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; SSE2_64_LIN-NEXT: retq
;
; X87_WIN-LABEL: x_to_s64:
; X87_WIN: # %bb.0:
; X87_WIN-NEXT: pushl %ebp
; X87_WIN-NEXT: movl %esp, %ebp
; X87_WIN-NEXT: andl $-8, %esp
; X87_WIN-NEXT: subl $16, %esp
; X87_WIN-NEXT: fldt 8(%ebp)
; X87_WIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_WIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_WIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87_WIN-NEXT: movl %ebp, %esp
; X87_WIN-NEXT: popl %ebp
; X87_WIN-NEXT: retl
;
; X87_LIN-LABEL: x_to_s64:
; X87_LIN: # %bb.0:
; X87_LIN-NEXT: subl $20, %esp
; X87_LIN-NEXT: fldt {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fnstcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movw %ax, {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fistpll {{[0-9]+}}(%esp)
; X87_LIN-NEXT: fldcw {{[0-9]+}}(%esp)
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87_LIN-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87_LIN-NEXT: addl $20, %esp
; X87_LIN-NEXT: retl
%r = fptosi x86_fp80 %a to i64
ret i64 %r
}
define i64 @t_to_u64(fp128 %a) nounwind {
; AVX512_32_WIN-LABEL: t_to_u64:
; AVX512_32_WIN: # %bb.0:
; AVX512_32_WIN-NEXT: subl $16, %esp
; AVX512_32_WIN-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX512_32_WIN-NEXT: vmovups %xmm0, (%esp)
; AVX512_32_WIN-NEXT: calll ___fixunstfdi
; AVX512_32_WIN-NEXT: addl $16, %esp
; AVX512_32_WIN-NEXT: retl
;
; AVX512_32_LIN-LABEL: t_to_u64:
; AVX512_32_LIN: # %bb.0:
; AVX512_32_LIN-NEXT: subl $28, %esp
; AVX512_32_LIN-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
; AVX512_32_LIN-NEXT: vmovups %xmm0, (%esp)
; AVX512_32_LIN-NEXT: calll __fixunstfdi
; AVX512_32_LIN-NEXT: addl $28, %esp
; AVX512_32_LIN-NEXT: retl
;
; AVX512_64_WIN-LABEL: t_to_u64:
; AVX512_64_WIN: # %bb.0:
; AVX512_64_WIN-NEXT: subq $40, %rsp
; AVX512_64_WIN-NEXT: callq __fixunstfdi
; AVX512_64_WIN-NEXT: addq $40, %rsp
; AVX512_64_WIN-NEXT: retq
;
; AVX512_64_LIN-LABEL: t_to_u64:
; AVX512_64_LIN: # %bb.0:
; AVX512_64_LIN-NEXT: pushq %rax
; AVX512_64_LIN-NEXT: callq __fixunstfdi
; AVX512_64_LIN-NEXT: popq %rcx
; AVX512_64_LIN-NEXT: retq
;
; SSE3_32_WIN-LABEL: t_to_u64:
; SSE3_32_WIN: # %bb.0:
; SSE3_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: calll ___fixunstfdi
; SSE3_32_WIN-NEXT: addl $16, %esp
; SSE3_32_WIN-NEXT: retl
;
; SSE3_32_LIN-LABEL: t_to_u64:
; SSE3_32_LIN: # %bb.0:
; SSE3_32_LIN-NEXT: subl $12, %esp
; SSE3_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: calll __fixunstfdi
; SSE3_32_LIN-NEXT: addl $28, %esp
; SSE3_32_LIN-NEXT: retl
;
; SSE3_64_WIN-LABEL: t_to_u64:
; SSE3_64_WIN: # %bb.0:
; SSE3_64_WIN-NEXT: subq $40, %rsp
; SSE3_64_WIN-NEXT: callq __fixunstfdi
; SSE3_64_WIN-NEXT: addq $40, %rsp
; SSE3_64_WIN-NEXT: retq
;
; SSE3_64_LIN-LABEL: t_to_u64:
; SSE3_64_LIN: # %bb.0:
; SSE3_64_LIN-NEXT: pushq %rax
; SSE3_64_LIN-NEXT: callq __fixunstfdi
; SSE3_64_LIN-NEXT: popq %rcx
; SSE3_64_LIN-NEXT: retq
;
; SSE2_32_WIN-LABEL: t_to_u64:
; SSE2_32_WIN: # %bb.0:
; SSE2_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: calll ___fixunstfdi
; SSE2_32_WIN-NEXT: addl $16, %esp
; SSE2_32_WIN-NEXT: retl
;
; SSE2_32_LIN-LABEL: t_to_u64:
; SSE2_32_LIN: # %bb.0:
; SSE2_32_LIN-NEXT: subl $12, %esp
; SSE2_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: calll __fixunstfdi
; SSE2_32_LIN-NEXT: addl $28, %esp
; SSE2_32_LIN-NEXT: retl
;
; SSE2_64_WIN-LABEL: t_to_u64:
; SSE2_64_WIN: # %bb.0:
; SSE2_64_WIN-NEXT: subq $40, %rsp
; SSE2_64_WIN-NEXT: callq __fixunstfdi
; SSE2_64_WIN-NEXT: addq $40, %rsp
; SSE2_64_WIN-NEXT: retq
;
; SSE2_64_LIN-LABEL: t_to_u64:
; SSE2_64_LIN: # %bb.0:
; SSE2_64_LIN-NEXT: pushq %rax
; SSE2_64_LIN-NEXT: callq __fixunstfdi
; SSE2_64_LIN-NEXT: popq %rcx
; SSE2_64_LIN-NEXT: retq
;
; X87_WIN-LABEL: t_to_u64:
; X87_WIN: # %bb.0:
; X87_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_WIN-NEXT: calll ___fixunstfdi
; X87_WIN-NEXT: addl $16, %esp
; X87_WIN-NEXT: retl
;
; X87_LIN-LABEL: t_to_u64:
; X87_LIN: # %bb.0:
; X87_LIN-NEXT: subl $12, %esp
; X87_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: calll __fixunstfdi
; X87_LIN-NEXT: addl $28, %esp
; X87_LIN-NEXT: retl
%r = fptoui fp128 %a to i64
ret i64 %r
}
define i64 @t_to_s64(fp128 %a) nounwind {
; AVX512_32_WIN-LABEL: t_to_s64:
; AVX512_32_WIN: # %bb.0:
; AVX512_32_WIN-NEXT: subl $16, %esp
; AVX512_32_WIN-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX512_32_WIN-NEXT: vmovups %xmm0, (%esp)
; AVX512_32_WIN-NEXT: calll ___fixtfdi
; AVX512_32_WIN-NEXT: addl $16, %esp
; AVX512_32_WIN-NEXT: retl
;
; AVX512_32_LIN-LABEL: t_to_s64:
; AVX512_32_LIN: # %bb.0:
; AVX512_32_LIN-NEXT: subl $28, %esp
; AVX512_32_LIN-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
; AVX512_32_LIN-NEXT: vmovups %xmm0, (%esp)
; AVX512_32_LIN-NEXT: calll __fixtfdi
; AVX512_32_LIN-NEXT: addl $28, %esp
; AVX512_32_LIN-NEXT: retl
;
; AVX512_64_WIN-LABEL: t_to_s64:
; AVX512_64_WIN: # %bb.0:
; AVX512_64_WIN-NEXT: subq $40, %rsp
; AVX512_64_WIN-NEXT: callq __fixtfdi
; AVX512_64_WIN-NEXT: addq $40, %rsp
; AVX512_64_WIN-NEXT: retq
;
; AVX512_64_LIN-LABEL: t_to_s64:
; AVX512_64_LIN: # %bb.0:
; AVX512_64_LIN-NEXT: pushq %rax
; AVX512_64_LIN-NEXT: callq __fixtfdi
; AVX512_64_LIN-NEXT: popq %rcx
; AVX512_64_LIN-NEXT: retq
;
; SSE3_32_WIN-LABEL: t_to_s64:
; SSE3_32_WIN: # %bb.0:
; SSE3_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_WIN-NEXT: calll ___fixtfdi
; SSE3_32_WIN-NEXT: addl $16, %esp
; SSE3_32_WIN-NEXT: retl
;
; SSE3_32_LIN-LABEL: t_to_s64:
; SSE3_32_LIN: # %bb.0:
; SSE3_32_LIN-NEXT: subl $12, %esp
; SSE3_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE3_32_LIN-NEXT: calll __fixtfdi
; SSE3_32_LIN-NEXT: addl $28, %esp
; SSE3_32_LIN-NEXT: retl
;
; SSE3_64_WIN-LABEL: t_to_s64:
; SSE3_64_WIN: # %bb.0:
; SSE3_64_WIN-NEXT: subq $40, %rsp
; SSE3_64_WIN-NEXT: callq __fixtfdi
; SSE3_64_WIN-NEXT: addq $40, %rsp
; SSE3_64_WIN-NEXT: retq
;
; SSE3_64_LIN-LABEL: t_to_s64:
; SSE3_64_LIN: # %bb.0:
; SSE3_64_LIN-NEXT: pushq %rax
; SSE3_64_LIN-NEXT: callq __fixtfdi
; SSE3_64_LIN-NEXT: popq %rcx
; SSE3_64_LIN-NEXT: retq
;
; SSE2_32_WIN-LABEL: t_to_s64:
; SSE2_32_WIN: # %bb.0:
; SSE2_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_WIN-NEXT: calll ___fixtfdi
; SSE2_32_WIN-NEXT: addl $16, %esp
; SSE2_32_WIN-NEXT: retl
;
; SSE2_32_LIN-LABEL: t_to_s64:
; SSE2_32_LIN: # %bb.0:
; SSE2_32_LIN-NEXT: subl $12, %esp
; SSE2_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; SSE2_32_LIN-NEXT: calll __fixtfdi
; SSE2_32_LIN-NEXT: addl $28, %esp
; SSE2_32_LIN-NEXT: retl
;
; SSE2_64_WIN-LABEL: t_to_s64:
; SSE2_64_WIN: # %bb.0:
; SSE2_64_WIN-NEXT: subq $40, %rsp
; SSE2_64_WIN-NEXT: callq __fixtfdi
; SSE2_64_WIN-NEXT: addq $40, %rsp
; SSE2_64_WIN-NEXT: retq
;
; SSE2_64_LIN-LABEL: t_to_s64:
; SSE2_64_LIN: # %bb.0:
; SSE2_64_LIN-NEXT: pushq %rax
; SSE2_64_LIN-NEXT: callq __fixtfdi
; SSE2_64_LIN-NEXT: popq %rcx
; SSE2_64_LIN-NEXT: retq
;
; X87_WIN-LABEL: t_to_s64:
; X87_WIN: # %bb.0:
; X87_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_WIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_WIN-NEXT: calll ___fixtfdi
; X87_WIN-NEXT: addl $16, %esp
; X87_WIN-NEXT: retl
;
; X87_LIN-LABEL: t_to_s64:
; X87_LIN: # %bb.0:
; X87_LIN-NEXT: subl $12, %esp
; X87_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: pushl {{[0-9]+}}(%esp)
; X87_LIN-NEXT: calll __fixtfdi
; X87_LIN-NEXT: addl $28, %esp
; X87_LIN-NEXT: retl
%r = fptosi fp128 %a to i64
ret i64 %r
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/select.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/select.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/select.ll (revision 344166)
@@ -1,1633 +1,1633 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=athlon | FileCheck %s --check-prefix=ATHLON
; RUN: llc < %s -mtriple=i386-intel-elfiamcu | FileCheck %s --check-prefix=MCU
; PR5757
%0 = type { i64, i32 }
define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## %bb.0:
; CHECK-NEXT: addq $8, %rdi
; CHECK-NEXT: addq $8, %rsi
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovneq %rdi, %rsi
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test1:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: addl $8, %ecx
; ATHLON-NEXT: addl $8, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovnel %ecx, %eax
; ATHLON-NEXT: movl (%eax), %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test1:
; MCU: # %bb.0:
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: jne .LBB0_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: addl $8, %edx
; MCU-NEXT: movl (%edx), %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB0_1:
; MCU-NEXT: addl $8, %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: retl
%t0 = load %0, %0* %p
%t1 = load %0, %0* %q
%t4 = select i1 %r, %0 %t0, %0 %t1
%t5 = extractvalue %0 %t4, 1
ret i32 %t5
}
; PR2139
define i32 @test2() nounwind {
; GENERIC-LABEL: test2:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: pushq %rax
; GENERIC-NEXT: callq _return_false
; GENERIC-NEXT: xorl %ecx, %ecx
; GENERIC-NEXT: testb $1, %al
; GENERIC-NEXT: movl $-480, %eax ## imm = 0xFE20
; GENERIC-NEXT: cmovnel %ecx, %eax
; GENERIC-NEXT: shll $3, %eax
; GENERIC-NEXT: cmpl $32768, %eax ## imm = 0x8000
; GENERIC-NEXT: jge LBB1_1
; GENERIC-NEXT: ## %bb.2: ## %bb91
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: popq %rcx
; GENERIC-NEXT: retq
; GENERIC-NEXT: LBB1_1: ## %bb90
; GENERIC-NEXT: ud2
;
; ATOM-LABEL: test2:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: pushq %rax
; ATOM-NEXT: callq _return_false
; ATOM-NEXT: xorl %ecx, %ecx
; ATOM-NEXT: movl $-480, %edx ## imm = 0xFE20
; ATOM-NEXT: testb $1, %al
; ATOM-NEXT: cmovnel %ecx, %edx
; ATOM-NEXT: shll $3, %edx
; ATOM-NEXT: cmpl $32768, %edx ## imm = 0x8000
; ATOM-NEXT: jge LBB1_1
; ATOM-NEXT: ## %bb.2: ## %bb91
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: popq %rcx
; ATOM-NEXT: retq
; ATOM-NEXT: LBB1_1: ## %bb90
; ATOM-NEXT: ud2
;
; ATHLON-LABEL: test2:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: subl $12, %esp
; ATHLON-NEXT: calll _return_false
; ATHLON-NEXT: xorl %ecx, %ecx
; ATHLON-NEXT: testb $1, %al
; ATHLON-NEXT: movl $-480, %eax ## imm = 0xFE20
; ATHLON-NEXT: cmovnel %ecx, %eax
; ATHLON-NEXT: shll $3, %eax
; ATHLON-NEXT: cmpl $32768, %eax ## imm = 0x8000
; ATHLON-NEXT: jge LBB1_1
; ATHLON-NEXT: ## %bb.2: ## %bb91
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: addl $12, %esp
; ATHLON-NEXT: retl
; ATHLON-NEXT: LBB1_1: ## %bb90
; ATHLON-NEXT: ud2
;
; MCU-LABEL: test2:
; MCU: # %bb.0: # %entry
; MCU-NEXT: calll return_false
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB1_2
; MCU-NEXT: # %bb.1: # %entry
; MCU-NEXT: movl $-480, %ecx # imm = 0xFE20
; MCU-NEXT: .LBB1_2: # %entry
; MCU-NEXT: shll $3, %ecx
; MCU-NEXT: cmpl $32768, %ecx # imm = 0x8000
; MCU-NEXT: jge .LBB1_3
; MCU-NEXT: # %bb.4: # %bb91
; MCU-NEXT: xorl %eax, %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB1_3: # %bb90
entry:
%tmp73 = tail call i1 @return_false()
%g.0 = select i1 %tmp73, i16 0, i16 -480
%tmp7778 = sext i16 %g.0 to i32
%tmp80 = shl i32 %tmp7778, 3
%tmp87 = icmp sgt i32 %tmp80, 32767
br i1 %tmp87, label %bb90, label %bb91
bb90:
unreachable
bb91:
ret i32 0
}
declare i1 @return_false()
;; Select between two floating point constants.
define float @test3(i32 %x) nounwind readnone {
; GENERIC-LABEL: test3:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: testl %edi, %edi
; GENERIC-NEXT: sete %al
; GENERIC-NEXT: leaq {{.*}}(%rip), %rcx
; GENERIC-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test3:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: leaq {{.*}}(%rip), %rcx
; ATOM-NEXT: testl %edi, %edi
; ATOM-NEXT: sete %al
; ATOM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test3:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; ATHLON-NEXT: sete %al
; ATHLON-NEXT: flds LCPI2_0(,%eax,4)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test3:
; MCU: # %bb.0: # %entry
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: sete %cl
; MCU-NEXT: flds {{\.LCPI.*}}(,%ecx,4)
; MCU-NEXT: retl
entry:
%0 = icmp eq i32 %x, 0
%iftmp.0.0 = select i1 %0, float 4.200000e+01, float 2.300000e+01
ret float %iftmp.0.0
}
define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; CHECK-LABEL: test4:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ucomisd %xmm0, %xmm1
; CHECK-NEXT: seta %al
; CHECK-NEXT: movsbl (%rdi,%rax,4), %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test4:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: fldl {{[0-9]+}}(%esp)
; ATHLON-NEXT: flds LCPI3_0
; ATHLON-NEXT: xorl %ecx, %ecx
; ATHLON-NEXT: fucompi %st(1)
; ATHLON-NEXT: fstp %st(0)
; ATHLON-NEXT: seta %cl
; ATHLON-NEXT: movsbl (%eax,%ecx,4), %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test4:
; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: fldl {{[0-9]+}}(%esp)
; MCU-NEXT: flds {{\.LCPI.*}}
; MCU-NEXT: fucompp
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: # kill: def $ah killed $ah killed $ax
; MCU-NEXT: sahf
; MCU-NEXT: seta %dl
; MCU-NEXT: movb (%ecx,%edx,4), %al
; MCU-NEXT: retl
entry:
%0 = fcmp olt double %F, 4.200000e+01
%iftmp.0.0 = select i1 %0, i32 4, i32 0
%1 = getelementptr i8, i8* %P, i32 %iftmp.0.0
%2 = load i8, i8* %1, align 1
ret i8 %2
}
define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
; CHECK-LABEL: test5:
; CHECK: ## %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne LBB4_2
; CHECK-NEXT: ## %bb.1:
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: LBB4_2:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; CHECK-NEXT: movd %xmm0, (%rsi)
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test5:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: pushl %esi
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: cmovnel %ecx, %edx
; ATHLON-NEXT: movzwl (%edx), %ecx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %esi
; ATHLON-NEXT: cmovnel %edx, %esi
; ATHLON-NEXT: movzwl (%esi), %edx
; ATHLON-NEXT: movw %dx, 2(%eax)
; ATHLON-NEXT: movw %cx, (%eax)
; ATHLON-NEXT: popl %esi
; ATHLON-NEXT: retl
;
; MCU-LABEL: test5:
; MCU: # %bb.0:
; MCU-NEXT: pushl %esi
; MCU-NEXT: movl {{[0-9]+}}(%esp), %esi
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB4_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; MCU-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB4_2:
; MCU-NEXT: movw %cx, 2(%esi)
; MCU-NEXT: movw %dx, (%esi)
; MCU-NEXT: popl %esi
; MCU-NEXT: retl
%x = select i1 %c, <2 x i16> %a, <2 x i16> %b
store <2 x i16> %x, <2 x i16>* %p
ret void
}
; Verify that the fmul gets sunk into the one part of the diamond where it is needed.
define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: test6:
; CHECK: ## %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: je LBB5_1
; CHECK-NEXT: ## %bb.2:
; CHECK-NEXT: movaps (%rsi), %xmm0
; CHECK-NEXT: movaps %xmm0, (%rsi)
; CHECK-NEXT: retq
; CHECK-NEXT: LBB5_1:
; CHECK-NEXT: movaps (%rdx), %xmm0
; CHECK-NEXT: mulps %xmm0, %xmm0
; CHECK-NEXT: movaps %xmm0, (%rsi)
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test6:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: flds 12(%ecx)
; ATHLON-NEXT: flds 8(%ecx)
; ATHLON-NEXT: flds 4(%ecx)
; ATHLON-NEXT: flds (%ecx)
; ATHLON-NEXT: flds (%eax)
-; ATHLON-NEXT: fmul %st(0), %st(0)
+; ATHLON-NEXT: fmul %st, %st(0)
; ATHLON-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; ATHLON-NEXT: fxch %st(1)
-; ATHLON-NEXT: fcmove %st(1), %st(0)
+; ATHLON-NEXT: fcmove %st(1), %st
; ATHLON-NEXT: fstp %st(1)
; ATHLON-NEXT: flds 4(%eax)
-; ATHLON-NEXT: fmul %st(0), %st(0)
+; ATHLON-NEXT: fmul %st, %st(0)
; ATHLON-NEXT: fxch %st(2)
-; ATHLON-NEXT: fcmove %st(2), %st(0)
+; ATHLON-NEXT: fcmove %st(2), %st
; ATHLON-NEXT: fstp %st(2)
; ATHLON-NEXT: flds 8(%eax)
-; ATHLON-NEXT: fmul %st(0), %st(0)
+; ATHLON-NEXT: fmul %st, %st(0)
; ATHLON-NEXT: fxch %st(3)
-; ATHLON-NEXT: fcmove %st(3), %st(0)
+; ATHLON-NEXT: fcmove %st(3), %st
; ATHLON-NEXT: fstp %st(3)
; ATHLON-NEXT: flds 12(%eax)
-; ATHLON-NEXT: fmul %st(0), %st(0)
+; ATHLON-NEXT: fmul %st, %st(0)
; ATHLON-NEXT: fxch %st(4)
-; ATHLON-NEXT: fcmove %st(4), %st(0)
+; ATHLON-NEXT: fcmove %st(4), %st
; ATHLON-NEXT: fstp %st(4)
; ATHLON-NEXT: fxch %st(3)
; ATHLON-NEXT: fstps 12(%ecx)
; ATHLON-NEXT: fxch %st(1)
; ATHLON-NEXT: fstps 8(%ecx)
; ATHLON-NEXT: fstps 4(%ecx)
; ATHLON-NEXT: fstps (%ecx)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test6:
; MCU: # %bb.0:
; MCU-NEXT: pushl %eax
; MCU-NEXT: flds 12(%edx)
; MCU-NEXT: fstps (%esp) # 4-byte Folded Spill
; MCU-NEXT: flds 8(%edx)
; MCU-NEXT: flds 4(%edx)
; MCU-NEXT: flds (%ecx)
; MCU-NEXT: flds 4(%ecx)
; MCU-NEXT: flds 8(%ecx)
; MCU-NEXT: flds 12(%ecx)
-; MCU-NEXT: fmul %st(0), %st(0)
+; MCU-NEXT: fmul %st, %st(0)
; MCU-NEXT: fxch %st(1)
-; MCU-NEXT: fmul %st(0), %st(0)
+; MCU-NEXT: fmul %st, %st(0)
; MCU-NEXT: fxch %st(2)
-; MCU-NEXT: fmul %st(0), %st(0)
+; MCU-NEXT: fmul %st, %st(0)
; MCU-NEXT: fxch %st(3)
-; MCU-NEXT: fmul %st(0), %st(0)
+; MCU-NEXT: fmul %st, %st(0)
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: flds (%edx)
; MCU-NEXT: je .LBB5_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: fstp %st(1)
; MCU-NEXT: fstp %st(3)
; MCU-NEXT: fstp %st(1)
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: flds (%esp) # 4-byte Folded Reload
; MCU-NEXT: fldz
; MCU-NEXT: fldz
; MCU-NEXT: fldz
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(6)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(5)
; MCU-NEXT: fxch %st(4)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(3)
; MCU-NEXT: fxch %st(2)
; MCU-NEXT: .LBB5_2:
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: fstp %st(5)
; MCU-NEXT: fstp %st(3)
; MCU-NEXT: fxch %st(2)
; MCU-NEXT: fstps 12(%edx)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fstps 8(%edx)
; MCU-NEXT: fstps 4(%edx)
; MCU-NEXT: fstps (%edx)
; MCU-NEXT: popl %eax
; MCU-NEXT: retl
%tmp = load <4 x float>, <4 x float>* %A
%tmp3 = load <4 x float>, <4 x float>* %B
%tmp9 = fmul <4 x float> %tmp3, %tmp3
%tmp.upgrd.1 = icmp eq i32 %C, 0
%iftmp.38.0 = select i1 %tmp.upgrd.1, <4 x float> %tmp9, <4 x float> %tmp
store <4 x float> %iftmp.38.0, <4 x float>* %A
ret void
}
; Select with fp80's
define x86_fp80 @test7(i32 %tmp8) nounwind {
; GENERIC-LABEL: test7:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: testl %edi, %edi
; GENERIC-NEXT: setns %al
; GENERIC-NEXT: shlq $4, %rax
; GENERIC-NEXT: leaq {{.*}}(%rip), %rcx
; GENERIC-NEXT: fldt (%rax,%rcx)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test7:
; ATOM: ## %bb.0:
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: leaq {{.*}}(%rip), %rcx
; ATOM-NEXT: testl %edi, %edi
; ATOM-NEXT: setns %al
; ATOM-NEXT: shlq $4, %rax
; ATOM-NEXT: fldt (%rax,%rcx)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test7:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: notl %eax
; ATHLON-NEXT: shrl $27, %eax
; ATHLON-NEXT: andl $-16, %eax
; ATHLON-NEXT: fldt LCPI6_0(%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test7:
; MCU: # %bb.0:
; MCU-NEXT: notl %eax
; MCU-NEXT: shrl $27, %eax
; MCU-NEXT: andl $-16, %eax
; MCU-NEXT: fldt {{\.LCPI.*}}(%eax)
; MCU-NEXT: retl
%tmp9 = icmp sgt i32 %tmp8, -1
%retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000
ret x86_fp80 %retval
}
; widening select v6i32 and then a sub
define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2) nounwind {
; GENERIC-LABEL: test8:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: testb $1, %dil
; GENERIC-NEXT: jne LBB7_1
; GENERIC-NEXT: ## %bb.2:
; GENERIC-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: jmp LBB7_3
; GENERIC-NEXT: LBB7_1:
; GENERIC-NEXT: movd %r9d, %xmm0
; GENERIC-NEXT: movd %r8d, %xmm1
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; GENERIC-NEXT: movd %ecx, %xmm2
; GENERIC-NEXT: movd %edx, %xmm0
; GENERIC-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: LBB7_3:
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; GENERIC-NEXT: pcmpeqd %xmm2, %xmm2
; GENERIC-NEXT: paddd %xmm2, %xmm0
; GENERIC-NEXT: paddd %xmm2, %xmm1
; GENERIC-NEXT: movq %xmm1, 16(%rsi)
; GENERIC-NEXT: movdqa %xmm0, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test8:
; ATOM: ## %bb.0:
; ATOM-NEXT: testb $1, %dil
; ATOM-NEXT: jne LBB7_1
; ATOM-NEXT: ## %bb.2:
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; ATOM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: jmp LBB7_3
; ATOM-NEXT: LBB7_1:
; ATOM-NEXT: movd %r9d, %xmm1
; ATOM-NEXT: movd %r8d, %xmm2
; ATOM-NEXT: movd %ecx, %xmm3
; ATOM-NEXT: movd %edx, %xmm0
; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; ATOM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: LBB7_3:
; ATOM-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; ATOM-NEXT: pcmpeqd %xmm2, %xmm2
; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; ATOM-NEXT: paddd %xmm2, %xmm0
; ATOM-NEXT: paddd %xmm2, %xmm1
; ATOM-NEXT: movq %xmm1, 16(%rsi)
; ATOM-NEXT: movdqa %xmm0, (%rsi)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test8:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: pushl %ebp
; ATHLON-NEXT: pushl %ebx
; ATHLON-NEXT: pushl %edi
; ATHLON-NEXT: pushl %esi
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: cmovnel %eax, %ecx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: cmovnel %eax, %edx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %esi
; ATHLON-NEXT: cmovnel %eax, %esi
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edi
; ATHLON-NEXT: cmovnel %eax, %edi
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ebx
; ATHLON-NEXT: cmovnel %eax, %ebx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ebp
; ATHLON-NEXT: cmovnel %eax, %ebp
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl (%ecx), %ecx
; ATHLON-NEXT: movl (%edx), %edx
; ATHLON-NEXT: movl (%esi), %esi
; ATHLON-NEXT: movl (%edi), %edi
; ATHLON-NEXT: movl (%ebx), %ebx
; ATHLON-NEXT: movl (%ebp), %ebp
; ATHLON-NEXT: decl %ecx
; ATHLON-NEXT: movl %ecx, 20(%eax)
; ATHLON-NEXT: decl %edx
; ATHLON-NEXT: movl %edx, 16(%eax)
; ATHLON-NEXT: decl %esi
; ATHLON-NEXT: movl %esi, 12(%eax)
; ATHLON-NEXT: decl %edi
; ATHLON-NEXT: movl %edi, 8(%eax)
; ATHLON-NEXT: decl %ebx
; ATHLON-NEXT: movl %ebx, 4(%eax)
; ATHLON-NEXT: decl %ebp
; ATHLON-NEXT: movl %ebp, (%eax)
; ATHLON-NEXT: popl %esi
; ATHLON-NEXT: popl %edi
; ATHLON-NEXT: popl %ebx
; ATHLON-NEXT: popl %ebp
; ATHLON-NEXT: retl
;
; MCU-LABEL: test8:
; MCU: # %bb.0:
; MCU-NEXT: pushl %ebp
; MCU-NEXT: pushl %ebx
; MCU-NEXT: pushl %edi
; MCU-NEXT: pushl %esi
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB7_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: je .LBB7_5
; MCU-NEXT: .LBB7_4:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ecx
; MCU-NEXT: movl (%ecx), %ecx
; MCU-NEXT: je .LBB7_8
; MCU-NEXT: .LBB7_7:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %esi
; MCU-NEXT: movl (%esi), %esi
; MCU-NEXT: je .LBB7_11
; MCU-NEXT: .LBB7_10:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %edi
; MCU-NEXT: movl (%edi), %edi
; MCU-NEXT: je .LBB7_14
; MCU-NEXT: .LBB7_13:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebx
; MCU-NEXT: movl (%ebx), %ebx
; MCU-NEXT: je .LBB7_17
; MCU-NEXT: .LBB7_16:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebp
; MCU-NEXT: jmp .LBB7_18
; MCU-NEXT: .LBB7_1:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: jne .LBB7_4
; MCU-NEXT: .LBB7_5:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ecx
; MCU-NEXT: movl (%ecx), %ecx
; MCU-NEXT: jne .LBB7_7
; MCU-NEXT: .LBB7_8:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %esi
; MCU-NEXT: movl (%esi), %esi
; MCU-NEXT: jne .LBB7_10
; MCU-NEXT: .LBB7_11:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %edi
; MCU-NEXT: movl (%edi), %edi
; MCU-NEXT: jne .LBB7_13
; MCU-NEXT: .LBB7_14:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebx
; MCU-NEXT: movl (%ebx), %ebx
; MCU-NEXT: jne .LBB7_16
; MCU-NEXT: .LBB7_17:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebp
; MCU-NEXT: .LBB7_18:
; MCU-NEXT: movl (%ebp), %ebp
; MCU-NEXT: decl %ebp
; MCU-NEXT: decl %ebx
; MCU-NEXT: decl %edi
; MCU-NEXT: decl %esi
; MCU-NEXT: decl %ecx
; MCU-NEXT: decl %eax
; MCU-NEXT: movl %eax, 20(%edx)
; MCU-NEXT: movl %ecx, 16(%edx)
; MCU-NEXT: movl %esi, 12(%edx)
; MCU-NEXT: movl %edi, 8(%edx)
; MCU-NEXT: movl %ebx, 4(%edx)
; MCU-NEXT: movl %ebp, (%edx)
; MCU-NEXT: popl %esi
; MCU-NEXT: popl %edi
; MCU-NEXT: popl %ebx
; MCU-NEXT: popl %ebp
; MCU-NEXT: retl
%x = select i1 %c, <6 x i32> %src1, <6 x i32> %src2
%val = sub <6 x i32> %x, < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
store <6 x i32> %val, <6 x i32>* %dst.addr
ret void
}
;; Test integer select between values and constants.
define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test9:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test9:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %eax
; ATHLON-NEXT: movl $-1, %edx
; ATHLON-NEXT: je LBB8_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: LBB8_2:
; ATHLON-NEXT: retl
;
; MCU-LABEL: test9:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: jne .LBB8_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: retl
; MCU-NEXT: .LBB8_1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: retl
%cmp = icmp ne i64 %x, 0
%cond = select i1 %cmp, i64 %y, i64 -1
ret i64 %cond
}
;; Same as test9
define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test9a:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test9a:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %eax
; ATHLON-NEXT: movl $-1, %edx
; ATHLON-NEXT: je LBB9_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: LBB9_2:
; ATHLON-NEXT: retl
;
; MCU-LABEL: test9a:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: je .LBB9_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB9_2:
; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 %y
ret i64 %cond
}
define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; GENERIC-LABEL: test9b:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpq $1, %rdi
; GENERIC-NEXT: sbbq %rax, %rax
; GENERIC-NEXT: orq %rsi, %rax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test9b:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpq $1, %rdi
; ATOM-NEXT: sbbq %rax, %rax
; ATOM-NEXT: orq %rsi, %rax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test9b:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %edx, %edx
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: sete %dl
; ATHLON-NEXT: negl %edx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %edx, %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: retl
;
; MCU-LABEL: test9b:
; MCU: # %bb.0:
; MCU-NEXT: movl %edx, %ecx
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: orl %ecx, %eax
; MCU-NEXT: sete %dl
; MCU-NEXT: negl %edx
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: orl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%A = sext i1 %cmp to i64
%cond = or i64 %y, %A
ret i64 %cond
}
;; Select between -1 and 1.
define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test10:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testq %rdi, %rdi
; CHECK-NEXT: setne %al
; CHECK-NEXT: leaq -1(%rax,%rax), %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test10:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %edx, %edx
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %ecx
; ATHLON-NEXT: movl $1, %eax
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: cmovel %ecx, %edx
; ATHLON-NEXT: retl
;
; MCU-LABEL: test10:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: je .LBB11_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: movl $1, %eax
; MCU-NEXT: .LBB11_2:
; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 1
ret i64 %cond
}
define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test11:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test11:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %eax
; ATHLON-NEXT: movl $-1, %edx
; ATHLON-NEXT: jne LBB12_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: LBB12_2:
; ATHLON-NEXT: retl
;
; MCU-LABEL: test11:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: je .LBB12_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: retl
; MCU-NEXT: .LBB12_1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 %y, i64 -1
ret i64 %cond
}
define i64 @test11a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test11a:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test11a:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %eax
; ATHLON-NEXT: movl $-1, %edx
; ATHLON-NEXT: jne LBB13_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: LBB13_2:
; ATHLON-NEXT: retl
;
; MCU-LABEL: test11a:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: jne .LBB13_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB13_2:
; MCU-NEXT: retl
%cmp = icmp ne i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 %y
ret i64 %cond
}
define i32 @test13(i32 %a, i32 %b) nounwind {
; GENERIC-LABEL: test13:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl %esi, %edi
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test13:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl %esi, %edi
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test13:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: sbbl %eax, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test13:
; MCU: # %bb.0:
; MCU-NEXT: cmpl %edx, %eax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: retl
%c = icmp ult i32 %a, %b
%d = sext i1 %c to i32
ret i32 %d
}
define i32 @test14(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test14:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: setae %al
; CHECK-NEXT: negl %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test14:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: setae %al
; ATHLON-NEXT: negl %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test14:
; MCU: # %bb.0:
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: cmpl %edx, %eax
; MCU-NEXT: setae %cl
; MCU-NEXT: negl %ecx
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: retl
%c = icmp uge i32 %a, %b
%d = sext i1 %c to i32
ret i32 %d
}
; rdar://10961709
define i32 @test15(i32 %x) nounwind {
; GENERIC-LABEL: test15:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negl %edi
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test15:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negl %edi
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test15:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: sbbl %eax, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test15:
; MCU: # %bb.0: # %entry
; MCU-NEXT: negl %eax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: retl
entry:
%cmp = icmp ne i32 %x, 0
%sub = sext i1 %cmp to i32
ret i32 %sub
}
define i64 @test16(i64 %x) nounwind uwtable readnone ssp {
; GENERIC-LABEL: test16:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negq %rdi
; GENERIC-NEXT: sbbq %rax, %rax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test16:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negq %rdi
; ATOM-NEXT: sbbq %rax, %rax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test16:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: setne %al
; ATHLON-NEXT: negl %eax
; ATHLON-NEXT: movl %eax, %edx
; ATHLON-NEXT: retl
;
; MCU-LABEL: test16:
; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: xorl %eax, %eax
; MCU-NEXT: orl %edx, %ecx
; MCU-NEXT: setne %al
; MCU-NEXT: negl %eax
; MCU-NEXT: movl %eax, %edx
; MCU-NEXT: retl
entry:
%cmp = icmp ne i64 %x, 0
%conv1 = sext i1 %cmp to i64
ret i64 %conv1
}
define i16 @test17(i16 %x) nounwind {
; GENERIC-LABEL: test17:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negw %di
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: ## kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test17:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negw %di
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: ## kill: def $ax killed $ax killed $eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test17:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpw {{[0-9]+}}(%esp), %ax
; ATHLON-NEXT: sbbl %eax, %eax
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test17:
; MCU: # %bb.0: # %entry
; MCU-NEXT: negw %ax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
%cmp = icmp ne i16 %x, 0
%sub = sext i1 %cmp to i16
ret i16 %sub
}
define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
; GENERIC-LABEL: test18:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: movl %esi, %eax
; GENERIC-NEXT: cmpl $15, %edi
; GENERIC-NEXT: cmovgel %edx, %eax
; GENERIC-NEXT: ## kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test18:
; ATOM: ## %bb.0:
; ATOM-NEXT: movl %esi, %eax
; ATOM-NEXT: cmpl $15, %edi
; ATOM-NEXT: cmovgel %edx, %eax
; ATOM-NEXT: ## kill: def $al killed $al killed $eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test18:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: cmpl $15, {{[0-9]+}}(%esp)
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: cmovll %eax, %ecx
; ATHLON-NEXT: movb (%ecx), %al
; ATHLON-NEXT: retl
;
; MCU-LABEL: test18:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $15, %eax
; MCU-NEXT: jl .LBB19_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %ecx, %edx
; MCU-NEXT: .LBB19_2:
; MCU-NEXT: movl %edx, %eax
; MCU-NEXT: retl
%cmp = icmp slt i32 %x, 15
%sel = select i1 %cmp, i8 %a, i8 %b
ret i8 %sel
}
define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
; CHECK-LABEL: trunc_select_miscompile:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: orb $2, %cl
; CHECK-NEXT: ## kill: def $cl killed $cl killed $ecx
; CHECK-NEXT: shll %cl, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: trunc_select_miscompile:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movb {{[0-9]+}}(%esp), %cl
; ATHLON-NEXT: orb $2, %cl
; ATHLON-NEXT: shll %cl, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: trunc_select_miscompile:
; MCU: # %bb.0:
; MCU-NEXT: movl %edx, %ecx
; MCU-NEXT: orb $2, %cl
; MCU-NEXT: # kill: def $cl killed $cl killed $ecx
; MCU-NEXT: shll %cl, %eax
; MCU-NEXT: retl
%tmp1 = select i1 %cc, i32 3, i32 2
%tmp2 = shl i32 %a, %tmp1
ret i32 %tmp2
}
; reproducer for pr29002
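; The IR below clamps a signed i32 value to the i8 range [-128, 127] before the
; truncating store, i.e. a standard signed-saturation pattern.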
define void @clamp_i8(i32 %src, i8* %dst) {
; GENERIC-LABEL: clamp_i8:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $127, %edi
; GENERIC-NEXT: movl $127, %eax
; GENERIC-NEXT: cmovlel %edi, %eax
; GENERIC-NEXT: cmpl $-128, %eax
; GENERIC-NEXT: movb $-128, %cl
; GENERIC-NEXT: jl LBB21_2
; GENERIC-NEXT: ## %bb.1:
; GENERIC-NEXT: movl %eax, %ecx
; GENERIC-NEXT: LBB21_2:
; GENERIC-NEXT: movb %cl, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: clamp_i8:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $127, %edi
; ATOM-NEXT: movl $127, %eax
; ATOM-NEXT: movb $-128, %cl
; ATOM-NEXT: cmovlel %edi, %eax
; ATOM-NEXT: cmpl $-128, %eax
; ATOM-NEXT: jl LBB21_2
; ATOM-NEXT: ## %bb.1:
; ATOM-NEXT: movl %eax, %ecx
; ATOM-NEXT: LBB21_2:
; ATOM-NEXT: movb %cl, (%rsi)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: clamp_i8:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: cmpl $127, %edx
; ATHLON-NEXT: movl $127, %ecx
; ATHLON-NEXT: cmovlel %edx, %ecx
; ATHLON-NEXT: cmpl $-128, %ecx
; ATHLON-NEXT: movb $-128, %dl
; ATHLON-NEXT: jl LBB21_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl %ecx, %edx
; ATHLON-NEXT: LBB21_2:
; ATHLON-NEXT: movb %dl, (%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: clamp_i8:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $127, %eax
; MCU-NEXT: movl $127, %ecx
; MCU-NEXT: jg .LBB21_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: .LBB21_2:
; MCU-NEXT: cmpl $-128, %ecx
; MCU-NEXT: movb $-128, %al
; MCU-NEXT: jl .LBB21_4
; MCU-NEXT: # %bb.3:
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: .LBB21_4:
; MCU-NEXT: movb %al, (%edx)
; MCU-NEXT: retl
%cmp = icmp sgt i32 %src, 127
%sel1 = select i1 %cmp, i32 127, i32 %src
%cmp1 = icmp slt i32 %sel1, -128
%sel2 = select i1 %cmp1, i32 -128, i32 %sel1
%conv = trunc i32 %sel2 to i8
store i8 %conv, i8* %dst, align 2
ret void
}
; reproducer for pr29002
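; Same saturation pattern as above, clamping to the i16 range [-32768, 32767].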
define void @clamp(i32 %src, i16* %dst) {
; GENERIC-LABEL: clamp:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $32767, %edi ## imm = 0x7FFF
; GENERIC-NEXT: movl $32767, %eax ## imm = 0x7FFF
; GENERIC-NEXT: cmovlel %edi, %eax
; GENERIC-NEXT: cmpl $-32768, %eax ## imm = 0x8000
; GENERIC-NEXT: movl $32768, %ecx ## imm = 0x8000
; GENERIC-NEXT: cmovgel %eax, %ecx
; GENERIC-NEXT: movw %cx, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: clamp:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $32767, %edi ## imm = 0x7FFF
; ATOM-NEXT: movl $32767, %eax ## imm = 0x7FFF
; ATOM-NEXT: movl $32768, %ecx ## imm = 0x8000
; ATOM-NEXT: cmovlel %edi, %eax
; ATOM-NEXT: cmpl $-32768, %eax ## imm = 0x8000
; ATOM-NEXT: cmovgel %eax, %ecx
; ATOM-NEXT: movw %cx, (%rsi)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: clamp:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: cmpl $32767, %ecx ## imm = 0x7FFF
; ATHLON-NEXT: movl $32767, %edx ## imm = 0x7FFF
; ATHLON-NEXT: cmovlel %ecx, %edx
; ATHLON-NEXT: cmpl $-32768, %edx ## imm = 0x8000
; ATHLON-NEXT: movl $32768, %ecx ## imm = 0x8000
; ATHLON-NEXT: cmovgel %edx, %ecx
; ATHLON-NEXT: movw %cx, (%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: clamp:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $32767, %eax # imm = 0x7FFF
; MCU-NEXT: movl $32767, %ecx # imm = 0x7FFF
; MCU-NEXT: jg .LBB22_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: .LBB22_2:
; MCU-NEXT: cmpl $-32768, %ecx # imm = 0x8000
; MCU-NEXT: movl $32768, %eax # imm = 0x8000
; MCU-NEXT: jl .LBB22_4
; MCU-NEXT: # %bb.3:
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: .LBB22_4:
; MCU-NEXT: movw %ax, (%edx)
; MCU-NEXT: retl
%cmp = icmp sgt i32 %src, 32767
%sel1 = select i1 %cmp, i32 32767, i32 %src
%cmp1 = icmp slt i32 %sel1, -32768
%sel2 = select i1 %cmp1, i32 -32768, i32 %sel1
%conv = trunc i32 %sel2 to i16
store i16 %conv, i16* %dst, align 2
ret void
}
define void @test19() {
; This is a massive reduction of an llvm-stress test case that generates
; interesting chains feeding setcc and eventually an f32 select operation. This
; is intended to exercise the SELECT formation in the DAG combine simplifying
; a simplified select_cc node. If it regresses and is no longer triggering
; that code path, it can be deleted.
;
; CHECK-LABEL: test19:
; CHECK: ## %bb.0: ## %BB
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: movb $1, %cl
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB23_1: ## %CF
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: jne LBB23_1
; CHECK-NEXT: ## %bb.2: ## %CF250
; CHECK-NEXT: ## in Loop: Header=BB23_1 Depth=1
; CHECK-NEXT: jne LBB23_1
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB23_3: ## %CF242
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: cmpl %eax, %eax
; CHECK-NEXT: ucomiss %xmm0, %xmm0
; CHECK-NEXT: jp LBB23_3
; CHECK-NEXT: ## %bb.4: ## %CF244
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test19:
; ATHLON: ## %bb.0: ## %BB
; ATHLON-NEXT: movb $1, %al
; ATHLON-NEXT: .p2align 4, 0x90
; ATHLON-NEXT: LBB23_1: ## %CF
; ATHLON-NEXT: ## =>This Inner Loop Header: Depth=1
; ATHLON-NEXT: testb %al, %al
; ATHLON-NEXT: jne LBB23_1
; ATHLON-NEXT: ## %bb.2: ## %CF250
; ATHLON-NEXT: ## in Loop: Header=BB23_1 Depth=1
; ATHLON-NEXT: jne LBB23_1
; ATHLON-NEXT: ## %bb.3: ## %CF242.preheader
; ATHLON-NEXT: fldz
; ATHLON-NEXT: .p2align 4, 0x90
; ATHLON-NEXT: LBB23_4: ## %CF242
; ATHLON-NEXT: ## =>This Inner Loop Header: Depth=1
; ATHLON-NEXT: fucomi %st(0)
; ATHLON-NEXT: jp LBB23_4
; ATHLON-NEXT: ## %bb.5: ## %CF244
; ATHLON-NEXT: fstp %st(0)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test19:
; MCU: # %bb.0: # %BB
; MCU-NEXT: movl $-1, %ecx
; MCU-NEXT: movb $1, %al
; MCU-NEXT: .p2align 4, 0x90
; MCU-NEXT: .LBB23_1: # %CF
; MCU-NEXT: # =>This Inner Loop Header: Depth=1
; MCU-NEXT: testb %al, %al
; MCU-NEXT: jne .LBB23_1
; MCU-NEXT: # %bb.2: # %CF250
; MCU-NEXT: # in Loop: Header=BB23_1 Depth=1
; MCU-NEXT: jne .LBB23_1
; MCU-NEXT: # %bb.3: # %CF242.preheader
; MCU-NEXT: fldz
; MCU-NEXT: .p2align 4, 0x90
; MCU-NEXT: .LBB23_4: # %CF242
; MCU-NEXT: # =>This Inner Loop Header: Depth=1
; MCU-NEXT: cmpl %eax, %ecx
; MCU-NEXT: fucom %st(0)
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: # kill: def $ah killed $ah killed $ax
; MCU-NEXT: sahf
; MCU-NEXT: jp .LBB23_4
; MCU-NEXT: # %bb.5: # %CF244
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: retl
BB:
br label %CF
CF:
%Cmp10 = icmp ule i8 undef, undef
br i1 %Cmp10, label %CF, label %CF250
CF250:
%E12 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2
%Cmp32 = icmp ugt i1 %Cmp10, false
br i1 %Cmp32, label %CF, label %CF242
CF242:
%Cmp38 = icmp uge i32 %E12, undef
%FC = uitofp i1 %Cmp38 to float
%Sl59 = select i1 %Cmp32, float %FC, float undef
%Cmp60 = fcmp ugt float undef, undef
br i1 %Cmp60, label %CF242, label %CF244
CF244:
%B122 = fadd float %Sl59, undef
ret void
}
define i16 @select_xor_1(i16 %A, i8 %cond) {
; CHECK-LABEL: select_xor_1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl $43, %eax
; CHECK-NEXT: testb $1, %sil
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_1:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl %ecx, %eax
; ATHLON-NEXT: xorl $43, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_1:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %edx
; MCU-NEXT: negl %edx
; MCU-NEXT: andl $43, %edx
; MCU-NEXT: xorl %edx, %eax
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
%0 = xor i16 %A, 43
%1 = select i1 %cmp10, i16 %A, i16 %0
ret i16 %1
}
; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i16 @select_xor_1b(i16 %A, i8 %cond) {
; CHECK-LABEL: select_xor_1b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl $43, %eax
; CHECK-NEXT: testb $1, %sil
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_1b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl %ecx, %eax
; ATHLON-NEXT: xorl $43, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_1b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %dl
; MCU-NEXT: je .LBB25_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl $43, %eax
; MCU-NEXT: .LBB25_2: # %entry
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
%0 = xor i16 %A, 43
%1 = select i1 %cmp10, i16 %A, i16 %0
ret i16 %1
}
define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_xor_2:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_2:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_2:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: xorl %ecx, %eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
%0 = xor i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_xor_2b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_2b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_2b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB27_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl %edx, %eax
; MCU-NEXT: .LBB27_2: # %entry
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
%0 = xor i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_or:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: orl %ecx, %eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
%0 = or i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_or_b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB29_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: .LBB29_2: # %entry
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
%0 = or i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
; CHECK-LABEL: select_or_1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_1:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_1:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: orl %ecx, %eax
; MCU-NEXT: retl
entry:
%and = and i32 %cond, 1
%cmp10 = icmp eq i32 %and, 0
%0 = or i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
; CHECK-LABEL: select_or_1b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_1b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_1b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB31_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: .LBB31_2: # %entry
; MCU-NEXT: retl
entry:
%and = and i32 %cond, 1
%cmp10 = icmp ne i32 %and, 1
%0 = or i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/sincos-opt.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/sincos-opt.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/sincos-opt.ll (revision 344166)
@@ -1,151 +1,151 @@
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9.0 -mcpu=core2 | FileCheck %s --check-prefix=OSX_SINCOS
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core2 | FileCheck %s --check-prefix=OSX_NOOPT
; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS
; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS
; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
; Combine sin / cos into a single call unless they may write errno (as
; captured by the readnone attribute, controlled by the clang -fmath-errno
; setting).
; rdar://13087969
; rdar://13599493
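; A minimal sketch (for illustration only, not part of the checked-in tests) of
; the IR shape the combine keys on: both libm calls must be marked readnone,
;   %s = tail call float @sinf(float %x) readnone
;   %c = tail call float @cosf(float %x) readnone
;   %add = fadd float %s, %c
; With readnone the backend may emit a single sincosf call; without it the
; calls are assumed to modify errno and stay separate (see test1_errno below).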
define float @test1(float %x) nounwind {
entry:
; GNU_SINCOS-LABEL: test1:
; GNU_SINCOS: callq sincosf
; GNU_SINCOS: movss 4(%rsp), %xmm0
; GNU_SINCOS: addss (%rsp), %xmm0
; GNU_SINCOS_FASTMATH-LABEL: test1:
; GNU_SINCOS_FASTMATH: callq sincosf
; GNU_SINCOS_FASTMATH: movss 4(%{{[re]}}sp), %xmm0
; GNU_SINCOS_FASTMATH: addss (%{{[re]}}sp), %xmm0
; OSX_SINCOS-LABEL: test1:
; OSX_SINCOS: callq ___sincosf_stret
; OSX_SINCOS: movshdup {{.*}} xmm1 = xmm0[1,1,3,3]
; OSX_SINCOS: addss %xmm1, %xmm0
; OSX_NOOPT-LABEL: test1:
; OSX_NOOPT: callq _sinf
; OSX_NOOPT: callq _cosf
%call = tail call float @sinf(float %x) readnone
%call1 = tail call float @cosf(float %x) readnone
%add = fadd float %call, %call1
ret float %add
}
define float @test1_errno(float %x) nounwind {
entry:
; GNU_SINCOS-LABEL: test1_errno:
; GNU_SINCOS: callq sinf
; GNU_SINCOS: callq cosf
; GNU_SINCOS_FASTMATH-LABEL: test1_errno:
; GNU_SINCOS_FASTMATH: callq sinf
; GNU_SINCOS_FASTMATH: callq cosf
; OSX_SINCOS-LABEL: test1_errno:
; OSX_SINCOS: callq _sinf
; OSX_SINCOS: callq _cosf
; OSX_NOOPT-LABEL: test1_errno:
; OSX_NOOPT: callq _sinf
; OSX_NOOPT: callq _cosf
%call = tail call float @sinf(float %x)
%call1 = tail call float @cosf(float %x)
%add = fadd float %call, %call1
ret float %add
}
define double @test2(double %x) nounwind {
entry:
; GNU_SINCOS-LABEL: test2:
; GNU_SINCOS: callq sincos
; GNU_SINCOS: movsd 16(%rsp), %xmm0
; GNU_SINCOS: addsd 8(%rsp), %xmm0
; GNU_SINCOS_FASTMATH-LABEL: test2:
; GNU_SINCOS_FASTMATH: callq sincos
; GNU_SINCOS_FASTMATH: movsd 16(%{{[re]}}sp), %xmm0
; GNU_SINCOS_FASTMATH: addsd 8(%{{[re]}}sp), %xmm0
; OSX_SINCOS-LABEL: test2:
; OSX_SINCOS: callq ___sincos_stret
; OSX_SINCOS: addsd %xmm1, %xmm0
; OSX_NOOPT-LABEL: test2:
; OSX_NOOPT: callq _sin
; OSX_NOOPT: callq _cos
%call = tail call double @sin(double %x) readnone
%call1 = tail call double @cos(double %x) readnone
%add = fadd double %call, %call1
ret double %add
}
define double @test2_errno(double %x) nounwind {
entry:
; GNU_SINCOS-LABEL: test2_errno:
; GNU_SINCOS: callq sin
; GNU_SINCOS: callq cos
; GNU_SINCOS_FASTMATH-LABEL: test2_errno:
; GNU_SINCOS_FASTMATH: callq sin
; GNU_SINCOS_FASTMATH: callq cos
; OSX_SINCOS-LABEL: test2_errno:
; OSX_SINCOS: callq _sin
; OSX_SINCOS: callq _cos
; OSX_NOOPT-LABEL: test2_errno:
; OSX_NOOPT: callq _sin
; OSX_NOOPT: callq _cos
%call = tail call double @sin(double %x)
%call1 = tail call double @cos(double %x)
%add = fadd double %call, %call1
ret double %add
}
define x86_fp80 @test3(x86_fp80 %x) nounwind {
entry:
; GNU_SINCOS-LABEL: test3:
; GNU_SINCOS: callq sincosl
; GNU_SINCOS: fldt 16(%rsp)
; GNU_SINCOS: fldt 32(%rsp)
-; GNU_SINCOS: faddp %st(1)
+; GNU_SINCOS: faddp %st, %st(1)
; GNU_SINCOS_FASTMATH-LABEL: test3:
; GNU_SINCOS_FASTMATH: callq sincosl
; GNU_SINCOS_FASTMATH: fldt 16(%{{[re]}}sp)
; GNU_SINCOS_FASTMATH: fldt 32(%{{[re]}}sp)
-; GNU_SINCOS_FASTMATH: faddp %st(1)
+; GNU_SINCOS_FASTMATH: faddp %st, %st(1)
%call = tail call x86_fp80 @sinl(x86_fp80 %x) readnone
%call1 = tail call x86_fp80 @cosl(x86_fp80 %x) readnone
%add = fadd x86_fp80 %call, %call1
ret x86_fp80 %add
}
define x86_fp80 @test3_errno(x86_fp80 %x) nounwind {
entry:
; GNU_SINCOS-LABEL: test3_errno:
; GNU_SINCOS: callq sinl
; GNU_SINCOS: callq cosl
; GNU_SINCOS_FASTMATH-LABEL: test3_errno:
; GNU_SINCOS_FASTMATH: callq sinl
; GNU_SINCOS_FASTMATH: callq cosl
%call = tail call x86_fp80 @sinl(x86_fp80 %x)
%call1 = tail call x86_fp80 @cosl(x86_fp80 %x)
%add = fadd x86_fp80 %call, %call1
ret x86_fp80 %add
}
declare float @sinf(float)
declare double @sin(double)
declare float @cosf(float)
declare double @cos(double)
declare x86_fp80 @sinl(x86_fp80)
declare x86_fp80 @cosl(x86_fp80)
Index: vendor/llvm/dist-release_80/test/CodeGen/X86/x87-schedule.ll
===================================================================
--- vendor/llvm/dist-release_80/test/CodeGen/X86/x87-schedule.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/CodeGen/X86/x87-schedule.ll (revision 344166)
@@ -1,6420 +1,6420 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=i686 | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=slm | FileCheck %s --check-prefix=CHECK --check-prefix=SLM
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=sandybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=ivybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=broadwell | FileCheck %s --check-prefix=CHECK --check-prefix=BROADWELL
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=SKYLAKE
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=SKX
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=bdver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BDVER2
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
; RUN: llc < %s -mtriple=i686-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
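; Each test below wraps the instruction(s) under test in a side-effecting inline
; asm block, and the autogenerated checks record the per-CPU '# sched:'
; latency/throughput annotations produced by -print-schedule.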
define void @test_f2xm1() optsize {
; GENERIC-LABEL: test_f2xm1:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: f2xm1
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_f2xm1:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: f2xm1 # sched: [99:49.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_f2xm1:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: f2xm1 # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_f2xm1:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: f2xm1 # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_f2xm1:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: f2xm1 # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_f2xm1:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: f2xm1 # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_f2xm1:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: f2xm1 # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_f2xm1:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: f2xm1 # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_f2xm1:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: f2xm1 # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_f2xm1:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: f2xm1 # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_f2xm1:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: f2xm1 # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "f2xm1", ""() nounwind
ret void
}
define void @test_fabs() optsize {
; GENERIC-LABEL: test_fabs:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fabs
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fabs:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fabs # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fabs:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fabs # sched: [1:0.50]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fabs:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fabs # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fabs:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fabs # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fabs:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fabs # sched: [1:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fabs:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fabs # sched: [1:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fabs:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fabs # sched: [1:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fabs:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fabs # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fabs:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fabs # sched: [2:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fabs:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fabs # sched: [2:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fabs", ""() nounwind
ret void
}
define void @test_fadd(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fadd:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fadd %st(0), %st(1)
-; GENERIC-NEXT: fadd %st(2)
+; GENERIC-NEXT: fadd %st, %st(1)
+; GENERIC-NEXT: fadd %st(2), %st
; GENERIC-NEXT: fadds (%ecx)
; GENERIC-NEXT: faddl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fadd:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fadd %st(0), %st(1) # sched: [5:5.00]
-; ATOM-NEXT: fadd %st(2) # sched: [5:5.00]
+; ATOM-NEXT: fadd %st, %st(1) # sched: [5:5.00]
+; ATOM-NEXT: fadd %st(2), %st # sched: [5:5.00]
; ATOM-NEXT: fadds (%ecx) # sched: [5:5.00]
; ATOM-NEXT: faddl (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fadd:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fadd %st(0), %st(1) # sched: [3:1.00]
-; SLM-NEXT: fadd %st(2) # sched: [3:1.00]
+; SLM-NEXT: fadd %st, %st(1) # sched: [3:1.00]
+; SLM-NEXT: fadd %st(2), %st # sched: [3:1.00]
; SLM-NEXT: fadds (%ecx) # sched: [6:1.00]
; SLM-NEXT: faddl (%eax) # sched: [6:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fadd:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fadd %st(0), %st(1) # sched: [3:1.00]
-; SANDY-NEXT: fadd %st(2) # sched: [3:1.00]
+; SANDY-NEXT: fadd %st, %st(1) # sched: [3:1.00]
+; SANDY-NEXT: fadd %st(2), %st # sched: [3:1.00]
; SANDY-NEXT: fadds (%ecx) # sched: [10:1.00]
; SANDY-NEXT: faddl (%eax) # sched: [10:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fadd:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fadd %st(0), %st(1) # sched: [3:1.00]
-; HASWELL-NEXT: fadd %st(2) # sched: [3:1.00]
+; HASWELL-NEXT: fadd %st, %st(1) # sched: [3:1.00]
+; HASWELL-NEXT: fadd %st(2), %st # sched: [3:1.00]
; HASWELL-NEXT: fadds (%ecx) # sched: [10:1.00]
; HASWELL-NEXT: faddl (%eax) # sched: [10:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fadd:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fadd %st(0), %st(1) # sched: [3:1.00]
-; BROADWELL-NEXT: fadd %st(2) # sched: [3:1.00]
+; BROADWELL-NEXT: fadd %st, %st(1) # sched: [3:1.00]
+; BROADWELL-NEXT: fadd %st(2), %st # sched: [3:1.00]
; BROADWELL-NEXT: fadds (%ecx) # sched: [9:1.00]
; BROADWELL-NEXT: faddl (%eax) # sched: [9:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fadd:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fadd %st(0), %st(1) # sched: [3:1.00]
-; SKYLAKE-NEXT: fadd %st(2) # sched: [3:1.00]
+; SKYLAKE-NEXT: fadd %st, %st(1) # sched: [3:1.00]
+; SKYLAKE-NEXT: fadd %st(2), %st # sched: [3:1.00]
; SKYLAKE-NEXT: fadds (%ecx) # sched: [10:1.00]
; SKYLAKE-NEXT: faddl (%eax) # sched: [10:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fadd:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fadd %st(0), %st(1) # sched: [3:1.00]
-; SKX-NEXT: fadd %st(2) # sched: [3:1.00]
+; SKX-NEXT: fadd %st, %st(1) # sched: [3:1.00]
+; SKX-NEXT: fadd %st(2), %st # sched: [3:1.00]
; SKX-NEXT: fadds (%ecx) # sched: [10:1.00]
; SKX-NEXT: faddl (%eax) # sched: [10:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fadd:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fadd %st(0), %st(1) # sched: [5:1.00]
-; BDVER2-NEXT: fadd %st(2) # sched: [5:1.00]
+; BDVER2-NEXT: fadd %st, %st(1) # sched: [5:1.00]
+; BDVER2-NEXT: fadd %st(2), %st # sched: [5:1.00]
; BDVER2-NEXT: fadds (%ecx) # sched: [10:1.00]
; BDVER2-NEXT: faddl (%eax) # sched: [10:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fadd:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fadd %st(0), %st(1) # sched: [3:1.00]
-; BTVER2-NEXT: fadd %st(2) # sched: [3:1.00]
+; BTVER2-NEXT: fadd %st, %st(1) # sched: [3:1.00]
+; BTVER2-NEXT: fadd %st(2), %st # sched: [3:1.00]
; BTVER2-NEXT: fadds (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: faddl (%eax) # sched: [8:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fadd:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fadd %st(0), %st(1) # sched: [3:1.00]
-; ZNVER1-NEXT: fadd %st(2) # sched: [3:1.00]
+; ZNVER1-NEXT: fadd %st, %st(1) # sched: [3:1.00]
+; ZNVER1-NEXT: fadd %st(2), %st # sched: [3:1.00]
; ZNVER1-NEXT: fadds (%ecx) # sched: [10:1.00]
; ZNVER1-NEXT: faddl (%eax) # sched: [10:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fadd %st(0), %st(1) \0A\09 fadd %st(2), %st(0) \0A\09 fadds $0 \0A\09 faddl $1", "*m,*m"(float *%a0, double *%a1) nounwind
ret void
}
define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_faddp_fiadd:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: faddp %st(1)
-; GENERIC-NEXT: faddp %st(2)
+; GENERIC-NEXT: faddp %st, %st(1)
+; GENERIC-NEXT: faddp %st, %st(2)
; GENERIC-NEXT: fiadds (%ecx)
; GENERIC-NEXT: fiaddl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_faddp_fiadd:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: faddp %st(1) # sched: [5:5.00]
-; ATOM-NEXT: faddp %st(2) # sched: [5:5.00]
+; ATOM-NEXT: faddp %st, %st(1) # sched: [5:5.00]
+; ATOM-NEXT: faddp %st, %st(2) # sched: [5:5.00]
; ATOM-NEXT: fiadds (%ecx) # sched: [5:5.00]
; ATOM-NEXT: fiaddl (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_faddp_fiadd:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: faddp %st(1) # sched: [3:1.00]
-; SLM-NEXT: faddp %st(2) # sched: [3:1.00]
+; SLM-NEXT: faddp %st, %st(1) # sched: [3:1.00]
+; SLM-NEXT: faddp %st, %st(2) # sched: [3:1.00]
; SLM-NEXT: fiadds (%ecx) # sched: [6:1.00]
; SLM-NEXT: fiaddl (%eax) # sched: [6:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_faddp_fiadd:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: faddp %st(1) # sched: [3:1.00]
-; SANDY-NEXT: faddp %st(2) # sched: [3:1.00]
+; SANDY-NEXT: faddp %st, %st(1) # sched: [3:1.00]
+; SANDY-NEXT: faddp %st, %st(2) # sched: [3:1.00]
; SANDY-NEXT: fiadds (%ecx) # sched: [13:2.00]
; SANDY-NEXT: fiaddl (%eax) # sched: [13:2.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_faddp_fiadd:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: faddp %st(1) # sched: [3:1.00]
-; HASWELL-NEXT: faddp %st(2) # sched: [3:1.00]
+; HASWELL-NEXT: faddp %st, %st(1) # sched: [3:1.00]
+; HASWELL-NEXT: faddp %st, %st(2) # sched: [3:1.00]
; HASWELL-NEXT: fiadds (%ecx) # sched: [13:2.00]
; HASWELL-NEXT: fiaddl (%eax) # sched: [13:2.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_faddp_fiadd:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: faddp %st(1) # sched: [3:1.00]
-; BROADWELL-NEXT: faddp %st(2) # sched: [3:1.00]
+; BROADWELL-NEXT: faddp %st, %st(1) # sched: [3:1.00]
+; BROADWELL-NEXT: faddp %st, %st(2) # sched: [3:1.00]
; BROADWELL-NEXT: fiadds (%ecx) # sched: [12:2.00]
; BROADWELL-NEXT: fiaddl (%eax) # sched: [12:2.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_faddp_fiadd:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: faddp %st(1) # sched: [3:1.00]
-; SKYLAKE-NEXT: faddp %st(2) # sched: [3:1.00]
+; SKYLAKE-NEXT: faddp %st, %st(1) # sched: [3:1.00]
+; SKYLAKE-NEXT: faddp %st, %st(2) # sched: [3:1.00]
; SKYLAKE-NEXT: fiadds (%ecx) # sched: [13:2.00]
; SKYLAKE-NEXT: fiaddl (%eax) # sched: [13:2.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_faddp_fiadd:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: faddp %st(1) # sched: [3:1.00]
-; SKX-NEXT: faddp %st(2) # sched: [3:1.00]
+; SKX-NEXT: faddp %st, %st(1) # sched: [3:1.00]
+; SKX-NEXT: faddp %st, %st(2) # sched: [3:1.00]
; SKX-NEXT: fiadds (%ecx) # sched: [13:2.00]
; SKX-NEXT: fiaddl (%eax) # sched: [13:2.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_faddp_fiadd:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: faddp %st(1) # sched: [5:1.00]
-; BDVER2-NEXT: faddp %st(2) # sched: [5:1.00]
+; BDVER2-NEXT: faddp %st, %st(1) # sched: [5:1.00]
+; BDVER2-NEXT: faddp %st, %st(2) # sched: [5:1.00]
; BDVER2-NEXT: fiadds (%ecx) # sched: [10:1.00]
; BDVER2-NEXT: fiaddl (%eax) # sched: [10:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_faddp_fiadd:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: faddp %st(1) # sched: [3:1.00]
-; BTVER2-NEXT: faddp %st(2) # sched: [3:1.00]
+; BTVER2-NEXT: faddp %st, %st(1) # sched: [3:1.00]
+; BTVER2-NEXT: faddp %st, %st(2) # sched: [3:1.00]
; BTVER2-NEXT: fiadds (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: fiaddl (%eax) # sched: [8:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_faddp_fiadd:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: faddp %st(1) # sched: [3:1.00]
-; ZNVER1-NEXT: faddp %st(2) # sched: [3:1.00]
+; ZNVER1-NEXT: faddp %st, %st(1) # sched: [3:1.00]
+; ZNVER1-NEXT: faddp %st, %st(2) # sched: [3:1.00]
; ZNVER1-NEXT: fiadds (%ecx) # sched: [10:1.00]
; ZNVER1-NEXT: fiaddl (%eax) # sched: [10:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "faddp \0A\09 faddp %st(2), %st(0) \0A\09 fiadds $0 \0A\09 fiaddl $1", "*m,*m"(i16 *%a0, i32 *%a1) nounwind
ret void
}
define void @test_fbld_fbstp(i8* %a0) optsize {
; GENERIC-LABEL: test_fbld_fbstp:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fbld (%eax)
; GENERIC-NEXT: fbstp (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fbld_fbstp:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fbld (%eax) # sched: [100:0.50]
; ATOM-NEXT: fbstp (%eax) # sched: [100:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fbld_fbstp:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fbld (%eax) # sched: [100:1.00]
; SLM-NEXT: fbstp (%eax) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fbld_fbstp:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fbld (%eax) # sched: [100:0.33]
; SANDY-NEXT: fbstp (%eax) # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fbld_fbstp:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fbld (%eax) # sched: [47:10.75]
; HASWELL-NEXT: fbstp (%eax) # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fbld_fbstp:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fbld (%eax) # sched: [100:0.25]
; BROADWELL-NEXT: fbstp (%eax) # sched: [1:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fbld_fbstp:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fbld (%eax) # sched: [100:0.25]
; SKYLAKE-NEXT: fbstp (%eax) # sched: [1:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fbld_fbstp:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fbld (%eax) # sched: [100:0.25]
; SKX-NEXT: fbstp (%eax) # sched: [1:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fbld_fbstp:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fbld (%eax) # sched: [100:0.50]
; BDVER2-NEXT: fbstp (%eax) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fbld_fbstp:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fbld (%eax) # sched: [100:0.50]
; BTVER2-NEXT: fbstp (%eax) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fbld_fbstp:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fbld (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: fbstp (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fbld $0 \0A\09 fbstp $0", "*m"(i8 *%a0) nounwind
ret void
}
define void @test_fchs() optsize {
; GENERIC-LABEL: test_fchs:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fchs
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fchs:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fchs # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fchs:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fchs # sched: [1:0.50]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fchs:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fchs # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fchs:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fchs # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fchs:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fchs # sched: [1:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fchs:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fchs # sched: [1:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fchs:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fchs # sched: [1:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fchs:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fchs # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fchs:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fchs # sched: [2:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fchs:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fchs # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fchs", ""() nounwind
ret void
}
define void @test_fclex() optsize {
; GENERIC-LABEL: test_fclex:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: wait
; GENERIC-NEXT: fnclex
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fclex:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: fnclex # sched: [25:12.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fclex:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: fnclex # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fclex:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: fnclex # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fclex:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: wait # sched: [2:0.50]
; HASWELL-NEXT: fnclex # sched: [4:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fclex:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: fnclex # sched: [4:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fclex:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: fnclex # sched: [4:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fclex:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: fnclex # sched: [4:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fclex:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: wait # sched: [100:0.50]
; BDVER2-NEXT: fnclex # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fclex:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: wait # sched: [100:0.50]
; BTVER2-NEXT: fnclex # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fclex:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: fnclex # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fclex", ""() nounwind
ret void
}
define void @test_fnclex() optsize {
; GENERIC-LABEL: test_fnclex:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fnclex
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fnclex:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fnclex # sched: [25:12.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fnclex:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fnclex # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fnclex:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fnclex # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fnclex:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fnclex # sched: [4:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fnclex:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fnclex # sched: [4:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fnclex:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fnclex # sched: [4:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fnclex:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fnclex # sched: [4:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fnclex:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fnclex # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fnclex:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fnclex # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fnclex:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fnclex # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fnclex", ""() nounwind
ret void
}
define void @test_fcmov() optsize {
; GENERIC-LABEL: test_fcmov:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fcmovb %st(1), %st(0)
-; GENERIC-NEXT: fcmovbe %st(1), %st(0)
-; GENERIC-NEXT: fcmove %st(1), %st(0)
-; GENERIC-NEXT: fcmovnb %st(1), %st(0)
-; GENERIC-NEXT: fcmovnbe %st(1), %st(0)
-; GENERIC-NEXT: fcmovne %st(1), %st(0)
-; GENERIC-NEXT: fcmovnu %st(1), %st(0)
-; GENERIC-NEXT: fcmovu %st(1), %st(0)
+; GENERIC-NEXT: fcmovb %st(1), %st
+; GENERIC-NEXT: fcmovbe %st(1), %st
+; GENERIC-NEXT: fcmove %st(1), %st
+; GENERIC-NEXT: fcmovnb %st(1), %st
+; GENERIC-NEXT: fcmovnbe %st(1), %st
+; GENERIC-NEXT: fcmovne %st(1), %st
+; GENERIC-NEXT: fcmovnu %st(1), %st
+; GENERIC-NEXT: fcmovu %st(1), %st
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcmov:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
-; ATOM-NEXT: fcmovb %st(1), %st(0) # sched: [9:4.50]
-; ATOM-NEXT: fcmovbe %st(1), %st(0) # sched: [9:4.50]
-; ATOM-NEXT: fcmove %st(1), %st(0) # sched: [9:4.50]
-; ATOM-NEXT: fcmovnb %st(1), %st(0) # sched: [9:4.50]
-; ATOM-NEXT: fcmovnbe %st(1), %st(0) # sched: [9:4.50]
-; ATOM-NEXT: fcmovne %st(1), %st(0) # sched: [9:4.50]
-; ATOM-NEXT: fcmovnu %st(1), %st(0) # sched: [9:4.50]
-; ATOM-NEXT: fcmovu %st(1), %st(0) # sched: [9:4.50]
+; ATOM-NEXT: fcmovb %st(1), %st # sched: [9:4.50]
+; ATOM-NEXT: fcmovbe %st(1), %st # sched: [9:4.50]
+; ATOM-NEXT: fcmove %st(1), %st # sched: [9:4.50]
+; ATOM-NEXT: fcmovnb %st(1), %st # sched: [9:4.50]
+; ATOM-NEXT: fcmovnbe %st(1), %st # sched: [9:4.50]
+; ATOM-NEXT: fcmovne %st(1), %st # sched: [9:4.50]
+; ATOM-NEXT: fcmovnu %st(1), %st # sched: [9:4.50]
+; ATOM-NEXT: fcmovu %st(1), %st # sched: [9:4.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcmov:
; SLM: # %bb.0:
; SLM-NEXT: #APP
-; SLM-NEXT: fcmovb %st(1), %st(0) # sched: [3:1.00]
-; SLM-NEXT: fcmovbe %st(1), %st(0) # sched: [3:1.00]
-; SLM-NEXT: fcmove %st(1), %st(0) # sched: [3:1.00]
-; SLM-NEXT: fcmovnb %st(1), %st(0) # sched: [3:1.00]
-; SLM-NEXT: fcmovnbe %st(1), %st(0) # sched: [3:1.00]
-; SLM-NEXT: fcmovne %st(1), %st(0) # sched: [3:1.00]
-; SLM-NEXT: fcmovnu %st(1), %st(0) # sched: [3:1.00]
-; SLM-NEXT: fcmovu %st(1), %st(0) # sched: [3:1.00]
+; SLM-NEXT: fcmovb %st(1), %st # sched: [3:1.00]
+; SLM-NEXT: fcmovbe %st(1), %st # sched: [3:1.00]
+; SLM-NEXT: fcmove %st(1), %st # sched: [3:1.00]
+; SLM-NEXT: fcmovnb %st(1), %st # sched: [3:1.00]
+; SLM-NEXT: fcmovnbe %st(1), %st # sched: [3:1.00]
+; SLM-NEXT: fcmovne %st(1), %st # sched: [3:1.00]
+; SLM-NEXT: fcmovnu %st(1), %st # sched: [3:1.00]
+; SLM-NEXT: fcmovu %st(1), %st # sched: [3:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcmov:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
-; SANDY-NEXT: fcmovb %st(1), %st(0) # sched: [3:2.00]
-; SANDY-NEXT: fcmovbe %st(1), %st(0) # sched: [3:2.00]
-; SANDY-NEXT: fcmove %st(1), %st(0) # sched: [3:2.00]
-; SANDY-NEXT: fcmovnb %st(1), %st(0) # sched: [3:2.00]
-; SANDY-NEXT: fcmovnbe %st(1), %st(0) # sched: [3:2.00]
-; SANDY-NEXT: fcmovne %st(1), %st(0) # sched: [3:2.00]
-; SANDY-NEXT: fcmovnu %st(1), %st(0) # sched: [3:2.00]
-; SANDY-NEXT: fcmovu %st(1), %st(0) # sched: [3:2.00]
+; SANDY-NEXT: fcmovb %st(1), %st # sched: [3:2.00]
+; SANDY-NEXT: fcmovbe %st(1), %st # sched: [3:2.00]
+; SANDY-NEXT: fcmove %st(1), %st # sched: [3:2.00]
+; SANDY-NEXT: fcmovnb %st(1), %st # sched: [3:2.00]
+; SANDY-NEXT: fcmovnbe %st(1), %st # sched: [3:2.00]
+; SANDY-NEXT: fcmovne %st(1), %st # sched: [3:2.00]
+; SANDY-NEXT: fcmovnu %st(1), %st # sched: [3:2.00]
+; SANDY-NEXT: fcmovu %st(1), %st # sched: [3:2.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fcmov:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fcmovb %st(1), %st(0) # sched: [3:1.00]
-; HASWELL-NEXT: fcmovbe %st(1), %st(0) # sched: [3:1.00]
-; HASWELL-NEXT: fcmove %st(1), %st(0) # sched: [3:1.00]
-; HASWELL-NEXT: fcmovnb %st(1), %st(0) # sched: [3:1.00]
-; HASWELL-NEXT: fcmovnbe %st(1), %st(0) # sched: [3:1.00]
-; HASWELL-NEXT: fcmovne %st(1), %st(0) # sched: [3:1.00]
-; HASWELL-NEXT: fcmovnu %st(1), %st(0) # sched: [3:1.00]
-; HASWELL-NEXT: fcmovu %st(1), %st(0) # sched: [3:1.00]
+; HASWELL-NEXT: fcmovb %st(1), %st # sched: [3:1.00]
+; HASWELL-NEXT: fcmovbe %st(1), %st # sched: [3:1.00]
+; HASWELL-NEXT: fcmove %st(1), %st # sched: [3:1.00]
+; HASWELL-NEXT: fcmovnb %st(1), %st # sched: [3:1.00]
+; HASWELL-NEXT: fcmovnbe %st(1), %st # sched: [3:1.00]
+; HASWELL-NEXT: fcmovne %st(1), %st # sched: [3:1.00]
+; HASWELL-NEXT: fcmovnu %st(1), %st # sched: [3:1.00]
+; HASWELL-NEXT: fcmovu %st(1), %st # sched: [3:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fcmov:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fcmovb %st(1), %st(0) # sched: [3:1.00]
-; BROADWELL-NEXT: fcmovbe %st(1), %st(0) # sched: [3:1.00]
-; BROADWELL-NEXT: fcmove %st(1), %st(0) # sched: [3:1.00]
-; BROADWELL-NEXT: fcmovnb %st(1), %st(0) # sched: [3:1.00]
-; BROADWELL-NEXT: fcmovnbe %st(1), %st(0) # sched: [3:1.00]
-; BROADWELL-NEXT: fcmovne %st(1), %st(0) # sched: [3:1.00]
-; BROADWELL-NEXT: fcmovnu %st(1), %st(0) # sched: [3:1.00]
-; BROADWELL-NEXT: fcmovu %st(1), %st(0) # sched: [3:1.00]
+; BROADWELL-NEXT: fcmovb %st(1), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fcmovbe %st(1), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fcmove %st(1), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fcmovnb %st(1), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fcmovnbe %st(1), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fcmovne %st(1), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fcmovnu %st(1), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fcmovu %st(1), %st # sched: [3:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcmov:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fcmovb %st(1), %st(0) # sched: [3:1.00]
-; SKYLAKE-NEXT: fcmovbe %st(1), %st(0) # sched: [3:1.00]
-; SKYLAKE-NEXT: fcmove %st(1), %st(0) # sched: [3:1.00]
-; SKYLAKE-NEXT: fcmovnb %st(1), %st(0) # sched: [3:1.00]
-; SKYLAKE-NEXT: fcmovnbe %st(1), %st(0) # sched: [3:1.00]
-; SKYLAKE-NEXT: fcmovne %st(1), %st(0) # sched: [3:1.00]
-; SKYLAKE-NEXT: fcmovnu %st(1), %st(0) # sched: [3:1.00]
-; SKYLAKE-NEXT: fcmovu %st(1), %st(0) # sched: [3:1.00]
+; SKYLAKE-NEXT: fcmovb %st(1), %st # sched: [3:1.00]
+; SKYLAKE-NEXT: fcmovbe %st(1), %st # sched: [3:1.00]
+; SKYLAKE-NEXT: fcmove %st(1), %st # sched: [3:1.00]
+; SKYLAKE-NEXT: fcmovnb %st(1), %st # sched: [3:1.00]
+; SKYLAKE-NEXT: fcmovnbe %st(1), %st # sched: [3:1.00]
+; SKYLAKE-NEXT: fcmovne %st(1), %st # sched: [3:1.00]
+; SKYLAKE-NEXT: fcmovnu %st(1), %st # sched: [3:1.00]
+; SKYLAKE-NEXT: fcmovu %st(1), %st # sched: [3:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcmov:
; SKX: # %bb.0:
; SKX-NEXT: #APP
-; SKX-NEXT: fcmovb %st(1), %st(0) # sched: [3:1.00]
-; SKX-NEXT: fcmovbe %st(1), %st(0) # sched: [3:1.00]
-; SKX-NEXT: fcmove %st(1), %st(0) # sched: [3:1.00]
-; SKX-NEXT: fcmovnb %st(1), %st(0) # sched: [3:1.00]
-; SKX-NEXT: fcmovnbe %st(1), %st(0) # sched: [3:1.00]
-; SKX-NEXT: fcmovne %st(1), %st(0) # sched: [3:1.00]
-; SKX-NEXT: fcmovnu %st(1), %st(0) # sched: [3:1.00]
-; SKX-NEXT: fcmovu %st(1), %st(0) # sched: [3:1.00]
+; SKX-NEXT: fcmovb %st(1), %st # sched: [3:1.00]
+; SKX-NEXT: fcmovbe %st(1), %st # sched: [3:1.00]
+; SKX-NEXT: fcmove %st(1), %st # sched: [3:1.00]
+; SKX-NEXT: fcmovnb %st(1), %st # sched: [3:1.00]
+; SKX-NEXT: fcmovnbe %st(1), %st # sched: [3:1.00]
+; SKX-NEXT: fcmovne %st(1), %st # sched: [3:1.00]
+; SKX-NEXT: fcmovnu %st(1), %st # sched: [3:1.00]
+; SKX-NEXT: fcmovu %st(1), %st # sched: [3:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fcmov:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fcmovb %st(1), %st(0) # sched: [1:1.00]
-; BDVER2-NEXT: fcmovbe %st(1), %st(0) # sched: [1:1.00]
-; BDVER2-NEXT: fcmove %st(1), %st(0) # sched: [1:1.00]
-; BDVER2-NEXT: fcmovnb %st(1), %st(0) # sched: [1:1.00]
-; BDVER2-NEXT: fcmovnbe %st(1), %st(0) # sched: [1:1.00]
-; BDVER2-NEXT: fcmovne %st(1), %st(0) # sched: [1:1.00]
-; BDVER2-NEXT: fcmovnu %st(1), %st(0) # sched: [1:1.00]
-; BDVER2-NEXT: fcmovu %st(1), %st(0) # sched: [1:1.00]
+; BDVER2-NEXT: fcmovb %st(1), %st # sched: [1:1.00]
+; BDVER2-NEXT: fcmovbe %st(1), %st # sched: [1:1.00]
+; BDVER2-NEXT: fcmove %st(1), %st # sched: [1:1.00]
+; BDVER2-NEXT: fcmovnb %st(1), %st # sched: [1:1.00]
+; BDVER2-NEXT: fcmovnbe %st(1), %st # sched: [1:1.00]
+; BDVER2-NEXT: fcmovne %st(1), %st # sched: [1:1.00]
+; BDVER2-NEXT: fcmovnu %st(1), %st # sched: [1:1.00]
+; BDVER2-NEXT: fcmovu %st(1), %st # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fcmov:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fcmovb %st(1), %st(0) # sched: [3:1.00]
-; BTVER2-NEXT: fcmovbe %st(1), %st(0) # sched: [3:1.00]
-; BTVER2-NEXT: fcmove %st(1), %st(0) # sched: [3:1.00]
-; BTVER2-NEXT: fcmovnb %st(1), %st(0) # sched: [3:1.00]
-; BTVER2-NEXT: fcmovnbe %st(1), %st(0) # sched: [3:1.00]
-; BTVER2-NEXT: fcmovne %st(1), %st(0) # sched: [3:1.00]
-; BTVER2-NEXT: fcmovnu %st(1), %st(0) # sched: [3:1.00]
-; BTVER2-NEXT: fcmovu %st(1), %st(0) # sched: [3:1.00]
+; BTVER2-NEXT: fcmovb %st(1), %st # sched: [3:1.00]
+; BTVER2-NEXT: fcmovbe %st(1), %st # sched: [3:1.00]
+; BTVER2-NEXT: fcmove %st(1), %st # sched: [3:1.00]
+; BTVER2-NEXT: fcmovnb %st(1), %st # sched: [3:1.00]
+; BTVER2-NEXT: fcmovnbe %st(1), %st # sched: [3:1.00]
+; BTVER2-NEXT: fcmovne %st(1), %st # sched: [3:1.00]
+; BTVER2-NEXT: fcmovnu %st(1), %st # sched: [3:1.00]
+; BTVER2-NEXT: fcmovu %st(1), %st # sched: [3:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcmov:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fcmovb %st(1), %st(0) # sched: [100:0.25]
-; ZNVER1-NEXT: fcmovbe %st(1), %st(0) # sched: [100:0.25]
-; ZNVER1-NEXT: fcmove %st(1), %st(0) # sched: [100:0.25]
-; ZNVER1-NEXT: fcmovnb %st(1), %st(0) # sched: [100:0.25]
-; ZNVER1-NEXT: fcmovnbe %st(1), %st(0) # sched: [100:0.25]
-; ZNVER1-NEXT: fcmovne %st(1), %st(0) # sched: [100:0.25]
-; ZNVER1-NEXT: fcmovnu %st(1), %st(0) # sched: [100:0.25]
-; ZNVER1-NEXT: fcmovu %st(1), %st(0) # sched: [100:0.25]
+; ZNVER1-NEXT: fcmovb %st(1), %st # sched: [100:0.25]
+; ZNVER1-NEXT: fcmovbe %st(1), %st # sched: [100:0.25]
+; ZNVER1-NEXT: fcmove %st(1), %st # sched: [100:0.25]
+; ZNVER1-NEXT: fcmovnb %st(1), %st # sched: [100:0.25]
+; ZNVER1-NEXT: fcmovnbe %st(1), %st # sched: [100:0.25]
+; ZNVER1-NEXT: fcmovne %st(1), %st # sched: [100:0.25]
+; ZNVER1-NEXT: fcmovnu %st(1), %st # sched: [100:0.25]
+; ZNVER1-NEXT: fcmovu %st(1), %st # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fcmovb %st(1), %st(0) \0A\09 fcmovbe %st(1), %st(0) \0A\09 fcmove %st(1), %st(0) \0A\09 fcmovnb %st(1), %st(0) \0A\09 fcmovnbe %st(1), %st(0) \0A\09 fcmovne %st(1), %st(0) \0A\09 fcmovnu %st(1), %st(0) \0A\09 fcmovu %st(1), %st(0)", ""() nounwind
ret void
}
define void @test_fcom(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fcom:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fcom %st(1)
; GENERIC-NEXT: fcom %st(3)
; GENERIC-NEXT: fcoms (%ecx)
; GENERIC-NEXT: fcoml (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcom:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fcom %st(1) # sched: [5:5.00]
; ATOM-NEXT: fcom %st(3) # sched: [5:5.00]
; ATOM-NEXT: fcoms (%ecx) # sched: [5:5.00]
; ATOM-NEXT: fcoml (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcom:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fcom %st(1) # sched: [3:1.00]
; SLM-NEXT: fcom %st(3) # sched: [3:1.00]
; SLM-NEXT: fcoms (%ecx) # sched: [6:1.00]
; SLM-NEXT: fcoml (%eax) # sched: [6:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcom:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fcom %st(1) # sched: [1:1.00]
; SANDY-NEXT: fcom %st(3) # sched: [1:1.00]
; SANDY-NEXT: fcoms (%ecx) # sched: [8:1.00]
; SANDY-NEXT: fcoml (%eax) # sched: [8:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fcom:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fcom %st(1) # sched: [1:1.00]
; HASWELL-NEXT: fcom %st(3) # sched: [1:1.00]
; HASWELL-NEXT: fcoms (%ecx) # sched: [8:1.00]
; HASWELL-NEXT: fcoml (%eax) # sched: [8:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fcom:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fcom %st(1) # sched: [1:1.00]
; BROADWELL-NEXT: fcom %st(3) # sched: [1:1.00]
; BROADWELL-NEXT: fcoms (%ecx) # sched: [7:1.00]
; BROADWELL-NEXT: fcoml (%eax) # sched: [7:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcom:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fcom %st(1) # sched: [1:1.00]
; SKYLAKE-NEXT: fcom %st(3) # sched: [1:1.00]
; SKYLAKE-NEXT: fcoms (%ecx) # sched: [8:1.00]
; SKYLAKE-NEXT: fcoml (%eax) # sched: [8:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcom:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fcom %st(1) # sched: [1:1.00]
; SKX-NEXT: fcom %st(3) # sched: [1:1.00]
; SKX-NEXT: fcoms (%ecx) # sched: [8:1.00]
; SKX-NEXT: fcoml (%eax) # sched: [8:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fcom:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fcom %st(1) # sched: [1:1.00]
; BDVER2-NEXT: fcom %st(3) # sched: [1:1.00]
; BDVER2-NEXT: fcoms (%ecx) # sched: [6:1.00]
; BDVER2-NEXT: fcoml (%eax) # sched: [6:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fcom:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fcom %st(1) # sched: [3:1.00]
; BTVER2-NEXT: fcom %st(3) # sched: [3:1.00]
; BTVER2-NEXT: fcoms (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: fcoml (%eax) # sched: [8:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcom:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fcom %st(1) # sched: [1:1.00]
; ZNVER1-NEXT: fcom %st(3) # sched: [1:1.00]
; ZNVER1-NEXT: fcoms (%ecx) # sched: [8:1.00]
; ZNVER1-NEXT: fcoml (%eax) # sched: [8:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fcom \0A\09 fcom %st(3) \0A\09 fcoms $0 \0A\09 fcoml $1", "*m,*m"(float *%a0, double *%a1) nounwind
ret void
}
define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fcomp_fcompp:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fcomp %st(1)
; GENERIC-NEXT: fcomp %st(3)
; GENERIC-NEXT: fcomps (%ecx)
; GENERIC-NEXT: fcompl (%eax)
; GENERIC-NEXT: fcompp
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcomp_fcompp:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fcomp %st(1) # sched: [5:5.00]
; ATOM-NEXT: fcomp %st(3) # sched: [5:5.00]
; ATOM-NEXT: fcomps (%ecx) # sched: [5:5.00]
; ATOM-NEXT: fcompl (%eax) # sched: [5:5.00]
; ATOM-NEXT: fcompp # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcomp_fcompp:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fcomp %st(1) # sched: [3:1.00]
; SLM-NEXT: fcomp %st(3) # sched: [3:1.00]
; SLM-NEXT: fcomps (%ecx) # sched: [6:1.00]
; SLM-NEXT: fcompl (%eax) # sched: [6:1.00]
; SLM-NEXT: fcompp # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcomp_fcompp:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fcomp %st(1) # sched: [1:1.00]
; SANDY-NEXT: fcomp %st(3) # sched: [1:1.00]
; SANDY-NEXT: fcomps (%ecx) # sched: [8:1.00]
; SANDY-NEXT: fcompl (%eax) # sched: [8:1.00]
; SANDY-NEXT: fcompp # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fcomp_fcompp:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fcomp %st(1) # sched: [1:1.00]
; HASWELL-NEXT: fcomp %st(3) # sched: [1:1.00]
; HASWELL-NEXT: fcomps (%ecx) # sched: [8:1.00]
; HASWELL-NEXT: fcompl (%eax) # sched: [8:1.00]
; HASWELL-NEXT: fcompp # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fcomp_fcompp:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fcomp %st(1) # sched: [1:1.00]
; BROADWELL-NEXT: fcomp %st(3) # sched: [1:1.00]
; BROADWELL-NEXT: fcomps (%ecx) # sched: [7:1.00]
; BROADWELL-NEXT: fcompl (%eax) # sched: [7:1.00]
; BROADWELL-NEXT: fcompp # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcomp_fcompp:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fcomp %st(1) # sched: [1:1.00]
; SKYLAKE-NEXT: fcomp %st(3) # sched: [1:1.00]
; SKYLAKE-NEXT: fcomps (%ecx) # sched: [8:1.00]
; SKYLAKE-NEXT: fcompl (%eax) # sched: [8:1.00]
; SKYLAKE-NEXT: fcompp # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcomp_fcompp:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fcomp %st(1) # sched: [1:1.00]
; SKX-NEXT: fcomp %st(3) # sched: [1:1.00]
; SKX-NEXT: fcomps (%ecx) # sched: [8:1.00]
; SKX-NEXT: fcompl (%eax) # sched: [8:1.00]
; SKX-NEXT: fcompp # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fcomp_fcompp:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fcomp %st(1) # sched: [1:1.00]
; BDVER2-NEXT: fcomp %st(3) # sched: [1:1.00]
; BDVER2-NEXT: fcomps (%ecx) # sched: [6:1.00]
; BDVER2-NEXT: fcompl (%eax) # sched: [6:1.00]
; BDVER2-NEXT: fcompp # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fcomp_fcompp:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fcomp %st(1) # sched: [3:1.00]
; BTVER2-NEXT: fcomp %st(3) # sched: [3:1.00]
; BTVER2-NEXT: fcomps (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: fcompl (%eax) # sched: [8:1.00]
; BTVER2-NEXT: fcompp # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcomp_fcompp:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fcomp %st(1) # sched: [1:1.00]
; ZNVER1-NEXT: fcomp %st(3) # sched: [1:1.00]
; ZNVER1-NEXT: fcomps (%ecx) # sched: [8:1.00]
; ZNVER1-NEXT: fcompl (%eax) # sched: [8:1.00]
; ZNVER1-NEXT: fcompp # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fcomp \0A\09 fcomp %st(3) \0A\09 fcomps $0 \0A\09 fcompl $1 \0A\09 fcompp", "*m,*m"(float *%a0, double *%a1) nounwind
ret void
}
define void @test_fcomi_fcomip() optsize {
; GENERIC-LABEL: test_fcomi_fcomip:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fcomi %st(3)
-; GENERIC-NEXT: fcompi %st(3)
+; GENERIC-NEXT: fcomi %st(3), %st
+; GENERIC-NEXT: fcompi %st(3), %st
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcomi_fcomip:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
-; ATOM-NEXT: fcomi %st(3) # sched: [9:4.50]
-; ATOM-NEXT: fcompi %st(3) # sched: [9:4.50]
+; ATOM-NEXT: fcomi %st(3), %st # sched: [9:4.50]
+; ATOM-NEXT: fcompi %st(3), %st # sched: [9:4.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcomi_fcomip:
; SLM: # %bb.0:
; SLM-NEXT: #APP
-; SLM-NEXT: fcomi %st(3) # sched: [3:1.00]
-; SLM-NEXT: fcompi %st(3) # sched: [3:1.00]
+; SLM-NEXT: fcomi %st(3), %st # sched: [3:1.00]
+; SLM-NEXT: fcompi %st(3), %st # sched: [3:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcomi_fcomip:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
-; SANDY-NEXT: fcomi %st(3) # sched: [3:1.00]
-; SANDY-NEXT: fcompi %st(3) # sched: [3:1.00]
+; SANDY-NEXT: fcomi %st(3), %st # sched: [3:1.00]
+; SANDY-NEXT: fcompi %st(3), %st # sched: [3:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fcomi_fcomip:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fcomi %st(3) # sched: [1:0.50]
-; HASWELL-NEXT: fcompi %st(3) # sched: [1:0.50]
+; HASWELL-NEXT: fcomi %st(3), %st # sched: [1:0.50]
+; HASWELL-NEXT: fcompi %st(3), %st # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fcomi_fcomip:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fcomi %st(3) # sched: [3:1.00]
-; BROADWELL-NEXT: fcompi %st(3) # sched: [3:1.00]
+; BROADWELL-NEXT: fcomi %st(3), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fcompi %st(3), %st # sched: [3:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcomi_fcomip:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fcomi %st(3) # sched: [2:1.00]
-; SKYLAKE-NEXT: fcompi %st(3) # sched: [2:1.00]
+; SKYLAKE-NEXT: fcomi %st(3), %st # sched: [2:1.00]
+; SKYLAKE-NEXT: fcompi %st(3), %st # sched: [2:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcomi_fcomip:
; SKX: # %bb.0:
; SKX-NEXT: #APP
-; SKX-NEXT: fcomi %st(3) # sched: [2:1.00]
-; SKX-NEXT: fcompi %st(3) # sched: [2:1.00]
+; SKX-NEXT: fcomi %st(3), %st # sched: [2:1.00]
+; SKX-NEXT: fcompi %st(3), %st # sched: [2:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fcomi_fcomip:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fcomi %st(3) # sched: [1:1.00]
-; BDVER2-NEXT: fcompi %st(3) # sched: [1:1.00]
+; BDVER2-NEXT: fcomi %st(3), %st # sched: [1:1.00]
+; BDVER2-NEXT: fcompi %st(3), %st # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fcomi_fcomip:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fcomi %st(3) # sched: [3:1.00]
-; BTVER2-NEXT: fcompi %st(3) # sched: [3:1.00]
+; BTVER2-NEXT: fcomi %st(3), %st # sched: [3:1.00]
+; BTVER2-NEXT: fcompi %st(3), %st # sched: [3:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcomi_fcomip:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fcomi %st(3) # sched: [9:0.50]
-; ZNVER1-NEXT: fcompi %st(3) # sched: [9:0.50]
+; ZNVER1-NEXT: fcomi %st(3), %st # sched: [9:0.50]
+; ZNVER1-NEXT: fcompi %st(3), %st # sched: [9:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fcomi %st(3) \0A\09 fcomip %st(3)", ""() nounwind
ret void
}
define void @test_fcos() optsize {
; GENERIC-LABEL: test_fcos:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fcos
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcos:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fcos # sched: [174:87.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcos:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fcos # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcos:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fcos # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fcos:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fcos # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fcos:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fcos # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcos:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fcos # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcos:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fcos # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fcos:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fcos # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fcos:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fcos # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcos:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fcos # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fcos", ""() nounwind
ret void
}
define void @test_fdecstp() optsize {
; GENERIC-LABEL: test_fdecstp:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fdecstp
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdecstp:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fdecstp # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdecstp:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fdecstp # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdecstp:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fdecstp # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fdecstp:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fdecstp # sched: [2:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fdecstp:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fdecstp # sched: [2:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdecstp:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fdecstp # sched: [2:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdecstp:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fdecstp # sched: [2:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fdecstp:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fdecstp # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fdecstp:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fdecstp # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdecstp:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fdecstp # sched: [11:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fdecstp", ""() nounwind
ret void
}
define void @test_fdiv(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fdiv:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fdiv %st(0), %st(1)
-; GENERIC-NEXT: fdiv %st(2)
+; GENERIC-NEXT: fdiv %st, %st(1)
+; GENERIC-NEXT: fdiv %st(2), %st
; GENERIC-NEXT: fdivs (%ecx)
; GENERIC-NEXT: fdivl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdiv:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fdiv %st(0), %st(1) # sched: [34:17.00]
-; ATOM-NEXT: fdiv %st(2) # sched: [34:17.00]
+; ATOM-NEXT: fdiv %st, %st(1) # sched: [34:17.00]
+; ATOM-NEXT: fdiv %st(2), %st # sched: [34:17.00]
; ATOM-NEXT: fdivs (%ecx) # sched: [34:17.00]
; ATOM-NEXT: fdivl (%eax) # sched: [34:17.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdiv:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fdiv %st(0), %st(1) # sched: [19:17.00]
-; SLM-NEXT: fdiv %st(2) # sched: [19:17.00]
+; SLM-NEXT: fdiv %st, %st(1) # sched: [19:17.00]
+; SLM-NEXT: fdiv %st(2), %st # sched: [19:17.00]
; SLM-NEXT: fdivs (%ecx) # sched: [22:17.00]
; SLM-NEXT: fdivl (%eax) # sched: [22:17.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdiv:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fdiv %st(0), %st(1) # sched: [14:14.00]
-; SANDY-NEXT: fdiv %st(2) # sched: [14:14.00]
+; SANDY-NEXT: fdiv %st, %st(1) # sched: [14:14.00]
+; SANDY-NEXT: fdiv %st(2), %st # sched: [14:14.00]
; SANDY-NEXT: fdivs (%ecx) # sched: [31:1.00]
; SANDY-NEXT: fdivl (%eax) # sched: [31:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fdiv:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fdiv %st(0), %st(1) # sched: [24:1.00]
-; HASWELL-NEXT: fdiv %st(2) # sched: [20:1.00]
+; HASWELL-NEXT: fdiv %st, %st(1) # sched: [24:1.00]
+; HASWELL-NEXT: fdiv %st(2), %st # sched: [20:1.00]
; HASWELL-NEXT: fdivs (%ecx) # sched: [31:1.00]
; HASWELL-NEXT: fdivl (%eax) # sched: [31:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fdiv:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fdiv %st(0), %st(1) # sched: [15:1.00]
-; BROADWELL-NEXT: fdiv %st(2) # sched: [20:1.00]
+; BROADWELL-NEXT: fdiv %st, %st(1) # sched: [15:1.00]
+; BROADWELL-NEXT: fdiv %st(2), %st # sched: [20:1.00]
; BROADWELL-NEXT: fdivs (%ecx) # sched: [21:1.00]
; BROADWELL-NEXT: fdivl (%eax) # sched: [21:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdiv:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fdiv %st(0), %st(1) # sched: [15:1.00]
-; SKYLAKE-NEXT: fdiv %st(2) # sched: [20:1.00]
+; SKYLAKE-NEXT: fdiv %st, %st(1) # sched: [15:1.00]
+; SKYLAKE-NEXT: fdiv %st(2), %st # sched: [20:1.00]
; SKYLAKE-NEXT: fdivs (%ecx) # sched: [22:1.00]
; SKYLAKE-NEXT: fdivl (%eax) # sched: [22:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdiv:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fdiv %st(0), %st(1) # sched: [15:1.00]
-; SKX-NEXT: fdiv %st(2) # sched: [20:1.00]
+; SKX-NEXT: fdiv %st, %st(1) # sched: [15:1.00]
+; SKX-NEXT: fdiv %st(2), %st # sched: [20:1.00]
; SKX-NEXT: fdivs (%ecx) # sched: [22:1.00]
; SKX-NEXT: fdivl (%eax) # sched: [22:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fdiv:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fdiv %st(0), %st(1) # sched: [9:9.50]
-; BDVER2-NEXT: fdiv %st(2) # sched: [9:9.50]
+; BDVER2-NEXT: fdiv %st, %st(1) # sched: [9:9.50]
+; BDVER2-NEXT: fdiv %st(2), %st # sched: [9:9.50]
; BDVER2-NEXT: fdivs (%ecx) # sched: [14:9.50]
; BDVER2-NEXT: fdivl (%eax) # sched: [14:9.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fdiv:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fdiv %st(0), %st(1) # sched: [19:19.00]
-; BTVER2-NEXT: fdiv %st(2) # sched: [19:19.00]
+; BTVER2-NEXT: fdiv %st, %st(1) # sched: [19:19.00]
+; BTVER2-NEXT: fdiv %st(2), %st # sched: [19:19.00]
; BTVER2-NEXT: fdivs (%ecx) # sched: [24:19.00]
; BTVER2-NEXT: fdivl (%eax) # sched: [24:19.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdiv:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fdiv %st(0), %st(1) # sched: [15:1.00]
-; ZNVER1-NEXT: fdiv %st(2) # sched: [15:1.00]
+; ZNVER1-NEXT: fdiv %st, %st(1) # sched: [15:1.00]
+; ZNVER1-NEXT: fdiv %st(2), %st # sched: [15:1.00]
; ZNVER1-NEXT: fdivs (%ecx) # sched: [22:1.00]
; ZNVER1-NEXT: fdivl (%eax) # sched: [22:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fdiv %st(0), %st(1) \0A\09 fdiv %st(2), %st(0) \0A\09 fdivs $0 \0A\09 fdivl $1", "*m,*m"(float *%a0, double *%a1) nounwind
ret void
}
define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fdivp_fidiv:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fdivp %st(1)
-; GENERIC-NEXT: fdivp %st(2)
+; GENERIC-NEXT: fdivp %st, %st(1)
+; GENERIC-NEXT: fdivp %st, %st(2)
; GENERIC-NEXT: fidivs (%ecx)
; GENERIC-NEXT: fidivl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdivp_fidiv:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fdivp %st(1) # sched: [34:17.00]
-; ATOM-NEXT: fdivp %st(2) # sched: [34:17.00]
+; ATOM-NEXT: fdivp %st, %st(1) # sched: [34:17.00]
+; ATOM-NEXT: fdivp %st, %st(2) # sched: [34:17.00]
; ATOM-NEXT: fidivs (%ecx) # sched: [34:17.00]
; ATOM-NEXT: fidivl (%eax) # sched: [34:17.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdivp_fidiv:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fdivp %st(1) # sched: [19:17.00]
-; SLM-NEXT: fdivp %st(2) # sched: [19:17.00]
+; SLM-NEXT: fdivp %st, %st(1) # sched: [19:17.00]
+; SLM-NEXT: fdivp %st, %st(2) # sched: [19:17.00]
; SLM-NEXT: fidivs (%ecx) # sched: [22:17.00]
; SLM-NEXT: fidivl (%eax) # sched: [22:17.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdivp_fidiv:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fdivp %st(1) # sched: [14:14.00]
-; SANDY-NEXT: fdivp %st(2) # sched: [14:14.00]
+; SANDY-NEXT: fdivp %st, %st(1) # sched: [14:14.00]
+; SANDY-NEXT: fdivp %st, %st(2) # sched: [14:14.00]
; SANDY-NEXT: fidivs (%ecx) # sched: [34:1.00]
; SANDY-NEXT: fidivl (%eax) # sched: [34:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fdivp_fidiv:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fdivp %st(1) # sched: [24:1.00]
-; HASWELL-NEXT: fdivp %st(2) # sched: [24:1.00]
+; HASWELL-NEXT: fdivp %st, %st(1) # sched: [24:1.00]
+; HASWELL-NEXT: fdivp %st, %st(2) # sched: [24:1.00]
; HASWELL-NEXT: fidivs (%ecx) # sched: [34:1.00]
; HASWELL-NEXT: fidivl (%eax) # sched: [34:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fdivp_fidiv:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fdivp %st(1) # sched: [15:1.00]
-; BROADWELL-NEXT: fdivp %st(2) # sched: [15:1.00]
+; BROADWELL-NEXT: fdivp %st, %st(1) # sched: [15:1.00]
+; BROADWELL-NEXT: fdivp %st, %st(2) # sched: [15:1.00]
; BROADWELL-NEXT: fidivs (%ecx) # sched: [24:1.00]
; BROADWELL-NEXT: fidivl (%eax) # sched: [24:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdivp_fidiv:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fdivp %st(1) # sched: [15:1.00]
-; SKYLAKE-NEXT: fdivp %st(2) # sched: [15:1.00]
+; SKYLAKE-NEXT: fdivp %st, %st(1) # sched: [15:1.00]
+; SKYLAKE-NEXT: fdivp %st, %st(2) # sched: [15:1.00]
; SKYLAKE-NEXT: fidivs (%ecx) # sched: [25:1.00]
; SKYLAKE-NEXT: fidivl (%eax) # sched: [25:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdivp_fidiv:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fdivp %st(1) # sched: [15:1.00]
-; SKX-NEXT: fdivp %st(2) # sched: [15:1.00]
+; SKX-NEXT: fdivp %st, %st(1) # sched: [15:1.00]
+; SKX-NEXT: fdivp %st, %st(2) # sched: [15:1.00]
; SKX-NEXT: fidivs (%ecx) # sched: [25:1.00]
; SKX-NEXT: fidivl (%eax) # sched: [25:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fdivp_fidiv:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fdivp %st(1) # sched: [9:9.50]
-; BDVER2-NEXT: fdivp %st(2) # sched: [9:9.50]
+; BDVER2-NEXT: fdivp %st, %st(1) # sched: [9:9.50]
+; BDVER2-NEXT: fdivp %st, %st(2) # sched: [9:9.50]
; BDVER2-NEXT: fidivs (%ecx) # sched: [14:9.50]
; BDVER2-NEXT: fidivl (%eax) # sched: [14:9.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fdivp_fidiv:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fdivp %st(1) # sched: [19:19.00]
-; BTVER2-NEXT: fdivp %st(2) # sched: [19:19.00]
+; BTVER2-NEXT: fdivp %st, %st(1) # sched: [19:19.00]
+; BTVER2-NEXT: fdivp %st, %st(2) # sched: [19:19.00]
; BTVER2-NEXT: fidivs (%ecx) # sched: [24:19.00]
; BTVER2-NEXT: fidivl (%eax) # sched: [24:19.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdivp_fidiv:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fdivp %st(1) # sched: [15:1.00]
-; ZNVER1-NEXT: fdivp %st(2) # sched: [15:1.00]
+; ZNVER1-NEXT: fdivp %st, %st(1) # sched: [15:1.00]
+; ZNVER1-NEXT: fdivp %st, %st(2) # sched: [15:1.00]
; ZNVER1-NEXT: fidivs (%ecx) # sched: [22:1.00]
; ZNVER1-NEXT: fidivl (%eax) # sched: [22:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fdivp \0A\09 fdivp %st(2), %st(0) \0A\09 fidivs $0 \0A\09 fidivl $1", "*m,*m"(i16 *%a0, i32 *%a1) nounwind
ret void
}
define void @test_fdivr(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fdivr:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fdivr %st(0), %st(1)
-; GENERIC-NEXT: fdivr %st(2)
+; GENERIC-NEXT: fdivr %st, %st(1)
+; GENERIC-NEXT: fdivr %st(2), %st
; GENERIC-NEXT: fdivrs (%ecx)
; GENERIC-NEXT: fdivrl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdivr:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fdivr %st(0), %st(1) # sched: [34:17.00]
-; ATOM-NEXT: fdivr %st(2) # sched: [34:17.00]
+; ATOM-NEXT: fdivr %st, %st(1) # sched: [34:17.00]
+; ATOM-NEXT: fdivr %st(2), %st # sched: [34:17.00]
; ATOM-NEXT: fdivrs (%ecx) # sched: [34:17.00]
; ATOM-NEXT: fdivrl (%eax) # sched: [34:17.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdivr:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fdivr %st(0), %st(1) # sched: [19:17.00]
-; SLM-NEXT: fdivr %st(2) # sched: [19:17.00]
+; SLM-NEXT: fdivr %st, %st(1) # sched: [19:17.00]
+; SLM-NEXT: fdivr %st(2), %st # sched: [19:17.00]
; SLM-NEXT: fdivrs (%ecx) # sched: [22:17.00]
; SLM-NEXT: fdivrl (%eax) # sched: [22:17.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdivr:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fdivr %st(0), %st(1) # sched: [14:14.00]
-; SANDY-NEXT: fdivr %st(2) # sched: [14:14.00]
+; SANDY-NEXT: fdivr %st, %st(1) # sched: [14:14.00]
+; SANDY-NEXT: fdivr %st(2), %st # sched: [14:14.00]
; SANDY-NEXT: fdivrs (%ecx) # sched: [31:1.00]
; SANDY-NEXT: fdivrl (%eax) # sched: [31:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fdivr:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fdivr %st(0), %st(1) # sched: [20:1.00]
-; HASWELL-NEXT: fdivr %st(2) # sched: [24:1.00]
+; HASWELL-NEXT: fdivr %st, %st(1) # sched: [20:1.00]
+; HASWELL-NEXT: fdivr %st(2), %st # sched: [24:1.00]
; HASWELL-NEXT: fdivrs (%ecx) # sched: [27:1.00]
; HASWELL-NEXT: fdivrl (%eax) # sched: [27:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fdivr:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fdivr %st(0), %st(1) # sched: [20:1.00]
-; BROADWELL-NEXT: fdivr %st(2) # sched: [15:1.00]
+; BROADWELL-NEXT: fdivr %st, %st(1) # sched: [20:1.00]
+; BROADWELL-NEXT: fdivr %st(2), %st # sched: [15:1.00]
; BROADWELL-NEXT: fdivrs (%ecx) # sched: [26:1.00]
; BROADWELL-NEXT: fdivrl (%eax) # sched: [26:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdivr:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fdivr %st(0), %st(1) # sched: [20:1.00]
-; SKYLAKE-NEXT: fdivr %st(2) # sched: [15:1.00]
+; SKYLAKE-NEXT: fdivr %st, %st(1) # sched: [20:1.00]
+; SKYLAKE-NEXT: fdivr %st(2), %st # sched: [15:1.00]
; SKYLAKE-NEXT: fdivrs (%ecx) # sched: [27:1.00]
; SKYLAKE-NEXT: fdivrl (%eax) # sched: [27:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdivr:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fdivr %st(0), %st(1) # sched: [20:1.00]
-; SKX-NEXT: fdivr %st(2) # sched: [15:1.00]
+; SKX-NEXT: fdivr %st, %st(1) # sched: [20:1.00]
+; SKX-NEXT: fdivr %st(2), %st # sched: [15:1.00]
; SKX-NEXT: fdivrs (%ecx) # sched: [27:1.00]
; SKX-NEXT: fdivrl (%eax) # sched: [27:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fdivr:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fdivr %st(0), %st(1) # sched: [9:9.50]
-; BDVER2-NEXT: fdivr %st(2) # sched: [9:9.50]
+; BDVER2-NEXT: fdivr %st, %st(1) # sched: [9:9.50]
+; BDVER2-NEXT: fdivr %st(2), %st # sched: [9:9.50]
; BDVER2-NEXT: fdivrs (%ecx) # sched: [14:9.50]
; BDVER2-NEXT: fdivrl (%eax) # sched: [14:9.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fdivr:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fdivr %st(0), %st(1) # sched: [19:19.00]
-; BTVER2-NEXT: fdivr %st(2) # sched: [19:19.00]
+; BTVER2-NEXT: fdivr %st, %st(1) # sched: [19:19.00]
+; BTVER2-NEXT: fdivr %st(2), %st # sched: [19:19.00]
; BTVER2-NEXT: fdivrs (%ecx) # sched: [24:19.00]
; BTVER2-NEXT: fdivrl (%eax) # sched: [24:19.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdivr:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fdivr %st(0), %st(1) # sched: [15:1.00]
-; ZNVER1-NEXT: fdivr %st(2) # sched: [15:1.00]
+; ZNVER1-NEXT: fdivr %st, %st(1) # sched: [15:1.00]
+; ZNVER1-NEXT: fdivr %st(2), %st # sched: [15:1.00]
; ZNVER1-NEXT: fdivrs (%ecx) # sched: [22:1.00]
; ZNVER1-NEXT: fdivrl (%eax) # sched: [22:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fdivr %st(0), %st(1) \0A\09 fdivr %st(2), %st(0) \0A\09 fdivrs $0 \0A\09 fdivrl $1", "*m,*m"(float *%a0, double *%a1) nounwind
ret void
}
define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fdivrp_fidivr:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fdivrp %st(1)
-; GENERIC-NEXT: fdivrp %st(2)
+; GENERIC-NEXT: fdivrp %st, %st(1)
+; GENERIC-NEXT: fdivrp %st, %st(2)
; GENERIC-NEXT: fidivrs (%ecx)
; GENERIC-NEXT: fidivrl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdivrp_fidivr:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fdivrp %st(1) # sched: [34:17.00]
-; ATOM-NEXT: fdivrp %st(2) # sched: [34:17.00]
+; ATOM-NEXT: fdivrp %st, %st(1) # sched: [34:17.00]
+; ATOM-NEXT: fdivrp %st, %st(2) # sched: [34:17.00]
; ATOM-NEXT: fidivrs (%ecx) # sched: [34:17.00]
; ATOM-NEXT: fidivrl (%eax) # sched: [34:17.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdivrp_fidivr:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fdivrp %st(1) # sched: [19:17.00]
-; SLM-NEXT: fdivrp %st(2) # sched: [19:17.00]
+; SLM-NEXT: fdivrp %st, %st(1) # sched: [19:17.00]
+; SLM-NEXT: fdivrp %st, %st(2) # sched: [19:17.00]
; SLM-NEXT: fidivrs (%ecx) # sched: [22:17.00]
; SLM-NEXT: fidivrl (%eax) # sched: [22:17.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdivrp_fidivr:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fdivrp %st(1) # sched: [14:14.00]
-; SANDY-NEXT: fdivrp %st(2) # sched: [14:14.00]
+; SANDY-NEXT: fdivrp %st, %st(1) # sched: [14:14.00]
+; SANDY-NEXT: fdivrp %st, %st(2) # sched: [14:14.00]
; SANDY-NEXT: fidivrs (%ecx) # sched: [34:1.00]
; SANDY-NEXT: fidivrl (%eax) # sched: [34:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fdivrp_fidivr:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fdivrp %st(1) # sched: [20:1.00]
-; HASWELL-NEXT: fdivrp %st(2) # sched: [20:1.00]
+; HASWELL-NEXT: fdivrp %st, %st(1) # sched: [20:1.00]
+; HASWELL-NEXT: fdivrp %st, %st(2) # sched: [20:1.00]
; HASWELL-NEXT: fidivrs (%ecx) # sched: [30:1.00]
; HASWELL-NEXT: fidivrl (%eax) # sched: [30:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fdivrp_fidivr:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fdivrp %st(1) # sched: [20:1.00]
-; BROADWELL-NEXT: fdivrp %st(2) # sched: [20:1.00]
+; BROADWELL-NEXT: fdivrp %st, %st(1) # sched: [20:1.00]
+; BROADWELL-NEXT: fdivrp %st, %st(2) # sched: [20:1.00]
; BROADWELL-NEXT: fidivrs (%ecx) # sched: [29:1.00]
; BROADWELL-NEXT: fidivrl (%eax) # sched: [29:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdivrp_fidivr:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fdivrp %st(1) # sched: [20:1.00]
-; SKYLAKE-NEXT: fdivrp %st(2) # sched: [20:1.00]
+; SKYLAKE-NEXT: fdivrp %st, %st(1) # sched: [20:1.00]
+; SKYLAKE-NEXT: fdivrp %st, %st(2) # sched: [20:1.00]
; SKYLAKE-NEXT: fidivrs (%ecx) # sched: [30:1.00]
; SKYLAKE-NEXT: fidivrl (%eax) # sched: [30:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdivrp_fidivr:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fdivrp %st(1) # sched: [20:1.00]
-; SKX-NEXT: fdivrp %st(2) # sched: [20:1.00]
+; SKX-NEXT: fdivrp %st, %st(1) # sched: [20:1.00]
+; SKX-NEXT: fdivrp %st, %st(2) # sched: [20:1.00]
; SKX-NEXT: fidivrs (%ecx) # sched: [30:1.00]
; SKX-NEXT: fidivrl (%eax) # sched: [30:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fdivrp_fidivr:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fdivrp %st(1) # sched: [9:9.50]
-; BDVER2-NEXT: fdivrp %st(2) # sched: [9:9.50]
+; BDVER2-NEXT: fdivrp %st, %st(1) # sched: [9:9.50]
+; BDVER2-NEXT: fdivrp %st, %st(2) # sched: [9:9.50]
; BDVER2-NEXT: fidivrs (%ecx) # sched: [14:9.50]
; BDVER2-NEXT: fidivrl (%eax) # sched: [14:9.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fdivrp_fidivr:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fdivrp %st(1) # sched: [19:19.00]
-; BTVER2-NEXT: fdivrp %st(2) # sched: [19:19.00]
+; BTVER2-NEXT: fdivrp %st, %st(1) # sched: [19:19.00]
+; BTVER2-NEXT: fdivrp %st, %st(2) # sched: [19:19.00]
; BTVER2-NEXT: fidivrs (%ecx) # sched: [24:19.00]
; BTVER2-NEXT: fidivrl (%eax) # sched: [24:19.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdivrp_fidivr:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fdivrp %st(1) # sched: [15:1.00]
-; ZNVER1-NEXT: fdivrp %st(2) # sched: [15:1.00]
+; ZNVER1-NEXT: fdivrp %st, %st(1) # sched: [15:1.00]
+; ZNVER1-NEXT: fdivrp %st, %st(2) # sched: [15:1.00]
; ZNVER1-NEXT: fidivrs (%ecx) # sched: [22:1.00]
; ZNVER1-NEXT: fidivrl (%eax) # sched: [22:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fdivrp \0A\09 fdivrp %st(2), %st(0) \0A\09 fidivrs $0 \0A\09 fidivrl $1", "*m,*m"(i16 *%a0, i32 *%a1) nounwind
ret void
}
define void @test_ffree() optsize {
; GENERIC-LABEL: test_ffree:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: ffree %st(0)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_ffree:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: ffree %st(0) # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_ffree:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: ffree %st(0) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_ffree:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: ffree %st(0) # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_ffree:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: ffree %st(0) # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_ffree:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: ffree %st(0) # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_ffree:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: ffree %st(0) # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_ffree:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: ffree %st(0) # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_ffree:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: ffree %st(0) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_ffree:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: ffree %st(0) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ffree:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: ffree %st(0) # sched: [11:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "ffree %st(0)", ""() nounwind
ret void
}
define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_ficom:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
; GENERIC-NEXT: ficoms (%ecx)
; GENERIC-NEXT: ficoml (%eax)
; GENERIC-NEXT: ficomps (%ecx)
; GENERIC-NEXT: ficompl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_ficom:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: ficoms (%ecx) # sched: [5:5.00]
; ATOM-NEXT: ficoml (%eax) # sched: [5:5.00]
; ATOM-NEXT: ficomps (%ecx) # sched: [5:5.00]
; ATOM-NEXT: ficompl (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_ficom:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: ficoms (%ecx) # sched: [6:1.00]
; SLM-NEXT: ficoml (%eax) # sched: [6:1.00]
; SLM-NEXT: ficomps (%ecx) # sched: [6:1.00]
; SLM-NEXT: ficompl (%eax) # sched: [6:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_ficom:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: ficoms (%ecx) # sched: [11:2.00]
; SANDY-NEXT: ficoml (%eax) # sched: [11:2.00]
; SANDY-NEXT: ficomps (%ecx) # sched: [11:2.00]
; SANDY-NEXT: ficompl (%eax) # sched: [11:2.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_ficom:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: ficoms (%ecx) # sched: [11:2.00]
; HASWELL-NEXT: ficoml (%eax) # sched: [11:2.00]
; HASWELL-NEXT: ficomps (%ecx) # sched: [11:2.00]
; HASWELL-NEXT: ficompl (%eax) # sched: [11:2.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_ficom:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: ficoms (%ecx) # sched: [10:2.00]
; BROADWELL-NEXT: ficoml (%eax) # sched: [10:2.00]
; BROADWELL-NEXT: ficomps (%ecx) # sched: [10:2.00]
; BROADWELL-NEXT: ficompl (%eax) # sched: [10:2.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_ficom:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: ficoms (%ecx) # sched: [11:2.00]
; SKYLAKE-NEXT: ficoml (%eax) # sched: [11:2.00]
; SKYLAKE-NEXT: ficomps (%ecx) # sched: [11:2.00]
; SKYLAKE-NEXT: ficompl (%eax) # sched: [11:2.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_ficom:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: ficoms (%ecx) # sched: [11:2.00]
; SKX-NEXT: ficoml (%eax) # sched: [11:2.00]
; SKX-NEXT: ficomps (%ecx) # sched: [11:2.00]
; SKX-NEXT: ficompl (%eax) # sched: [11:2.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_ficom:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: ficoms (%ecx) # sched: [6:1.00]
; BDVER2-NEXT: ficoml (%eax) # sched: [6:1.00]
; BDVER2-NEXT: ficomps (%ecx) # sched: [6:1.00]
; BDVER2-NEXT: ficompl (%eax) # sched: [6:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_ficom:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: ficoms (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: ficoml (%eax) # sched: [8:1.00]
; BTVER2-NEXT: ficomps (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: ficompl (%eax) # sched: [8:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ficom:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: ficoms (%ecx) # sched: [12:1.50]
; ZNVER1-NEXT: ficoml (%eax) # sched: [12:1.50]
; ZNVER1-NEXT: ficomps (%ecx) # sched: [12:1.50]
; ZNVER1-NEXT: ficompl (%eax) # sched: [12:1.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "ficoms $0 \0A\09 ficoml $1 \0A\09 ficomps $0 \0A\09 ficompl $1", "*m,*m"(i16 *%a0, i32 *%a1) nounwind
ret void
}
define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; GENERIC-LABEL: test_fild:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %edx
; GENERIC-NEXT: #APP
; GENERIC-NEXT: filds (%edx)
; GENERIC-NEXT: fildl (%ecx)
; GENERIC-NEXT: fildll (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fild:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: filds (%edx) # sched: [5:5.00]
; ATOM-NEXT: fildl (%ecx) # sched: [5:5.00]
; ATOM-NEXT: fildll (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fild:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: filds (%edx) # sched: [3:1.00]
; SLM-NEXT: fildl (%ecx) # sched: [3:1.00]
; SLM-NEXT: fildll (%eax) # sched: [3:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fild:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: filds (%edx) # sched: [10:1.00]
; SANDY-NEXT: fildl (%ecx) # sched: [10:1.00]
; SANDY-NEXT: fildll (%eax) # sched: [10:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fild:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: filds (%edx) # sched: [10:1.00]
; HASWELL-NEXT: fildl (%ecx) # sched: [10:1.00]
; HASWELL-NEXT: fildll (%eax) # sched: [10:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fild:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: filds (%edx) # sched: [9:1.00]
; BROADWELL-NEXT: fildl (%ecx) # sched: [9:1.00]
; BROADWELL-NEXT: fildll (%eax) # sched: [9:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fild:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: filds (%edx) # sched: [10:1.00]
; SKYLAKE-NEXT: fildl (%ecx) # sched: [10:1.00]
; SKYLAKE-NEXT: fildll (%eax) # sched: [10:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fild:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: filds (%edx) # sched: [10:1.00]
; SKX-NEXT: fildl (%ecx) # sched: [10:1.00]
; SKX-NEXT: fildll (%eax) # sched: [10:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fild:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: filds (%edx) # sched: [5:0.50]
; BDVER2-NEXT: fildl (%ecx) # sched: [5:0.50]
; BDVER2-NEXT: fildll (%eax) # sched: [5:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fild:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: filds (%edx) # sched: [5:1.00]
; BTVER2-NEXT: fildl (%ecx) # sched: [5:1.00]
; BTVER2-NEXT: fildll (%eax) # sched: [5:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fild:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: filds (%edx) # sched: [11:1.00]
; ZNVER1-NEXT: fildl (%ecx) # sched: [11:1.00]
; ZNVER1-NEXT: fildll (%eax) # sched: [11:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "filds $0 \0A\09 fildl $1 \0A\09 fildll $2", "*m,*m,*m"(i16 *%a0, i32 *%a1, i64 *%a2) nounwind
ret void
}
define void @test_fincstp() optsize {
; GENERIC-LABEL: test_fincstp:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fincstp
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fincstp:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fincstp # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fincstp:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fincstp # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fincstp:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fincstp # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fincstp:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fincstp # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fincstp:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fincstp # sched: [1:0.50]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fincstp:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fincstp # sched: [1:0.50]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fincstp:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fincstp # sched: [1:0.50]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fincstp:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fincstp # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fincstp:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fincstp # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fincstp:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fincstp # sched: [11:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fincstp", ""() nounwind
ret void
}
define void @test_finit() optsize {
; GENERIC-LABEL: test_finit:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: wait
; GENERIC-NEXT: fninit
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_finit:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: fninit # sched: [63:31.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_finit:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: fninit # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_finit:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: fninit # sched: [5:1.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_finit:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: wait # sched: [2:0.50]
; HASWELL-NEXT: fninit # sched: [75:6.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_finit:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: fninit # sched: [75:6.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_finit:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: fninit # sched: [75:6.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_finit:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: fninit # sched: [75:6.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_finit:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: wait # sched: [100:0.50]
; BDVER2-NEXT: fninit # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_finit:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: wait # sched: [100:0.50]
; BTVER2-NEXT: fninit # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_finit:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: fninit # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "finit", ""() nounwind
ret void
}
define void @test_fninit() optsize {
; GENERIC-LABEL: test_fninit:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fninit
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fninit:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fninit # sched: [63:31.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fninit:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fninit # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fninit:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fninit # sched: [5:1.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fninit:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fninit # sched: [75:6.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fninit:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fninit # sched: [75:6.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fninit:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fninit # sched: [75:6.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fninit:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fninit # sched: [75:6.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fninit:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fninit # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fninit:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fninit # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fninit:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fninit # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fninit", ""() nounwind
ret void
}
define void @test_fist_fistp_fisttp(i16* %a0, i32* %a1, i64 *%a2) optsize {
; GENERIC-LABEL: test_fist_fistp_fisttp:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %edx
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fists (%edx)
; GENERIC-NEXT: fistl (%ecx)
; GENERIC-NEXT: fistps (%edx)
; GENERIC-NEXT: fistpl (%ecx)
; GENERIC-NEXT: fistpll (%eax)
; GENERIC-NEXT: fisttps (%edx)
; GENERIC-NEXT: fisttpl (%ecx)
; GENERIC-NEXT: fisttpll (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fist_fistp_fisttp:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fists (%edx) # sched: [6:3.00]
; ATOM-NEXT: fistl (%ecx) # sched: [6:3.00]
; ATOM-NEXT: fistps (%edx) # sched: [6:3.00]
; ATOM-NEXT: fistpl (%ecx) # sched: [6:3.00]
; ATOM-NEXT: fistpll (%eax) # sched: [6:3.00]
; ATOM-NEXT: fisttps (%edx) # sched: [2:1.00]
; ATOM-NEXT: fisttpl (%ecx) # sched: [2:1.00]
; ATOM-NEXT: fisttpll (%eax) # sched: [2:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fist_fistp_fisttp:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fists (%edx) # sched: [1:1.00]
; SLM-NEXT: fistl (%ecx) # sched: [1:1.00]
; SLM-NEXT: fistps (%edx) # sched: [1:1.00]
; SLM-NEXT: fistpl (%ecx) # sched: [1:1.00]
; SLM-NEXT: fistpll (%eax) # sched: [1:1.00]
; SLM-NEXT: fisttps (%edx) # sched: [1:1.00]
; SLM-NEXT: fisttpl (%ecx) # sched: [1:1.00]
; SLM-NEXT: fisttpll (%eax) # sched: [1:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fist_fistp_fisttp:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fists (%edx) # sched: [9:1.00]
; SANDY-NEXT: fistl (%ecx) # sched: [9:1.00]
; SANDY-NEXT: fistps (%edx) # sched: [9:1.00]
; SANDY-NEXT: fistpl (%ecx) # sched: [9:1.00]
; SANDY-NEXT: fistpll (%eax) # sched: [9:1.00]
; SANDY-NEXT: fisttps (%edx) # sched: [5:1.00]
; SANDY-NEXT: fisttpl (%ecx) # sched: [5:1.00]
; SANDY-NEXT: fisttpll (%eax) # sched: [5:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fist_fistp_fisttp:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fists (%edx) # sched: [4:1.00]
; HASWELL-NEXT: fistl (%ecx) # sched: [4:1.00]
; HASWELL-NEXT: fistps (%edx) # sched: [4:1.00]
; HASWELL-NEXT: fistpl (%ecx) # sched: [4:1.00]
; HASWELL-NEXT: fistpll (%eax) # sched: [4:1.00]
; HASWELL-NEXT: fisttps (%edx) # sched: [4:1.00]
; HASWELL-NEXT: fisttpl (%ecx) # sched: [4:1.00]
; HASWELL-NEXT: fisttpll (%eax) # sched: [4:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fist_fistp_fisttp:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fists (%edx) # sched: [4:1.00]
; BROADWELL-NEXT: fistl (%ecx) # sched: [4:1.00]
; BROADWELL-NEXT: fistps (%edx) # sched: [4:1.00]
; BROADWELL-NEXT: fistpl (%ecx) # sched: [4:1.00]
; BROADWELL-NEXT: fistpll (%eax) # sched: [4:1.00]
; BROADWELL-NEXT: fisttps (%edx) # sched: [4:1.00]
; BROADWELL-NEXT: fisttpl (%ecx) # sched: [4:1.00]
; BROADWELL-NEXT: fisttpll (%eax) # sched: [4:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fist_fistp_fisttp:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fists (%edx) # sched: [4:1.00]
; SKYLAKE-NEXT: fistl (%ecx) # sched: [4:1.00]
; SKYLAKE-NEXT: fistps (%edx) # sched: [4:1.00]
; SKYLAKE-NEXT: fistpl (%ecx) # sched: [4:1.00]
; SKYLAKE-NEXT: fistpll (%eax) # sched: [4:1.00]
; SKYLAKE-NEXT: fisttps (%edx) # sched: [4:1.00]
; SKYLAKE-NEXT: fisttpl (%ecx) # sched: [4:1.00]
; SKYLAKE-NEXT: fisttpll (%eax) # sched: [4:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fist_fistp_fisttp:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fists (%edx) # sched: [4:1.00]
; SKX-NEXT: fistl (%ecx) # sched: [4:1.00]
; SKX-NEXT: fistps (%edx) # sched: [4:1.00]
; SKX-NEXT: fistpl (%ecx) # sched: [4:1.00]
; SKX-NEXT: fistpll (%eax) # sched: [4:1.00]
; SKX-NEXT: fisttps (%edx) # sched: [4:1.00]
; SKX-NEXT: fisttpl (%ecx) # sched: [4:1.00]
; SKX-NEXT: fisttpll (%eax) # sched: [4:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fist_fistp_fisttp:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fists (%edx) # sched: [1:1.00]
; BDVER2-NEXT: fistl (%ecx) # sched: [1:1.00]
; BDVER2-NEXT: fistps (%edx) # sched: [1:1.00]
; BDVER2-NEXT: fistpl (%ecx) # sched: [1:1.00]
; BDVER2-NEXT: fistpll (%eax) # sched: [1:1.00]
; BDVER2-NEXT: fisttps (%edx) # sched: [1:1.00]
; BDVER2-NEXT: fisttpl (%ecx) # sched: [1:1.00]
; BDVER2-NEXT: fisttpll (%eax) # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fist_fistp_fisttp:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fists (%edx) # sched: [1:1.00]
; BTVER2-NEXT: fistl (%ecx) # sched: [1:1.00]
; BTVER2-NEXT: fistps (%edx) # sched: [1:1.00]
; BTVER2-NEXT: fistpl (%ecx) # sched: [1:1.00]
; BTVER2-NEXT: fistpll (%eax) # sched: [1:1.00]
; BTVER2-NEXT: fisttps (%edx) # sched: [1:1.00]
; BTVER2-NEXT: fisttpl (%ecx) # sched: [1:1.00]
; BTVER2-NEXT: fisttpll (%eax) # sched: [1:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fist_fistp_fisttp:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fists (%edx) # sched: [12:0.50]
; ZNVER1-NEXT: fistl (%ecx) # sched: [12:0.50]
; ZNVER1-NEXT: fistps (%edx) # sched: [12:0.50]
; ZNVER1-NEXT: fistpl (%ecx) # sched: [12:0.50]
; ZNVER1-NEXT: fistpll (%eax) # sched: [12:0.50]
; ZNVER1-NEXT: fisttps (%edx) # sched: [12:0.50]
; ZNVER1-NEXT: fisttpl (%ecx) # sched: [12:0.50]
; ZNVER1-NEXT: fisttpll (%eax) # sched: [12:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fists $0 \0A\09 fistl $1 \0A\09 fistps $0 \0A\09 fistpl $1 \0A\09 fistpll $2 \0A\09 fisttps $0 \0A\09 fisttpl $1 \0A\09 fisttpll $2", "*m,*m,*m"(i16* %a0, i32* %a1, i64 *%a2) nounwind
ret void
}
define void @test_fld(i16* %a0, i32* %a1, i64 *%a2) optsize {
; GENERIC-LABEL: test_fld:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %edx
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fld %st(0)
; GENERIC-NEXT: flds (%edx)
; GENERIC-NEXT: fldl (%ecx)
; GENERIC-NEXT: fldt (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fld:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fld %st(0) # sched: [1:1.00]
; ATOM-NEXT: flds (%edx) # sched: [1:1.00]
; ATOM-NEXT: fldl (%ecx) # sched: [1:1.00]
; ATOM-NEXT: fldt (%eax) # sched: [4:2.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fld:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fld %st(0) # sched: [1:0.50]
; SLM-NEXT: flds (%edx) # sched: [3:1.00]
; SLM-NEXT: fldl (%ecx) # sched: [3:1.00]
; SLM-NEXT: fldt (%eax) # sched: [3:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fld:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fld %st(0) # sched: [1:1.00]
; SANDY-NEXT: flds (%edx) # sched: [9:1.00]
; SANDY-NEXT: fldl (%ecx) # sched: [9:1.00]
; SANDY-NEXT: fldt (%eax) # sched: [9:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fld:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fld %st(0) # sched: [1:0.50]
; HASWELL-NEXT: flds (%edx) # sched: [7:0.50]
; HASWELL-NEXT: fldl (%ecx) # sched: [7:0.50]
; HASWELL-NEXT: fldt (%eax) # sched: [7:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fld:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fld %st(0) # sched: [1:0.25]
; BROADWELL-NEXT: flds (%edx) # sched: [6:0.50]
; BROADWELL-NEXT: fldl (%ecx) # sched: [6:0.50]
; BROADWELL-NEXT: fldt (%eax) # sched: [6:0.50]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fld:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fld %st(0) # sched: [1:0.25]
; SKYLAKE-NEXT: flds (%edx) # sched: [7:0.50]
; SKYLAKE-NEXT: fldl (%ecx) # sched: [7:0.50]
; SKYLAKE-NEXT: fldt (%eax) # sched: [7:0.50]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fld:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fld %st(0) # sched: [1:0.25]
; SKX-NEXT: flds (%edx) # sched: [7:0.50]
; SKX-NEXT: fldl (%ecx) # sched: [7:0.50]
; SKX-NEXT: fldt (%eax) # sched: [7:0.50]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fld:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fld %st(0) # sched: [1:0.50]
; BDVER2-NEXT: flds (%edx) # sched: [5:0.50]
; BDVER2-NEXT: fldl (%ecx) # sched: [5:0.50]
; BDVER2-NEXT: fldt (%eax) # sched: [5:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fld:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fld %st(0) # sched: [1:0.50]
; BTVER2-NEXT: flds (%edx) # sched: [5:1.00]
; BTVER2-NEXT: fldl (%ecx) # sched: [5:1.00]
; BTVER2-NEXT: fldt (%eax) # sched: [5:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fld:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fld %st(0) # sched: [1:0.50]
; ZNVER1-NEXT: flds (%edx) # sched: [8:0.50]
; ZNVER1-NEXT: fldl (%ecx) # sched: [8:0.50]
; ZNVER1-NEXT: fldt (%eax) # sched: [1:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fld %st(0) \0A\09 flds $0 \0A\09 fldl $1 \0A\09 fldt $2", "*m,*m,*m"(i16* %a0, i32* %a1, i64 *%a2) nounwind
ret void
}
define void @test_fldcw_fldenv(i8* %a0) optsize {
; GENERIC-LABEL: test_fldcw_fldenv:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fldcw (%eax)
; GENERIC-NEXT: fldenv (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fldcw_fldenv:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fldcw (%eax) # sched: [5:2.50]
; ATOM-NEXT: fldenv (%eax) # sched: [100:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fldcw_fldenv:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fldcw (%eax) # sched: [3:1.00]
; SLM-NEXT: fldenv (%eax) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fldcw_fldenv:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fldcw (%eax) # sched: [8:2.00]
; SANDY-NEXT: fldenv (%eax) # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fldcw_fldenv:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fldcw (%eax) # sched: [7:1.00]
; HASWELL-NEXT: fldenv (%eax) # sched: [61:14.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fldcw_fldenv:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fldcw (%eax) # sched: [7:1.00]
; BROADWELL-NEXT: fldenv (%eax) # sched: [60:14.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fldcw_fldenv:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fldcw (%eax) # sched: [7:1.00]
; SKYLAKE-NEXT: fldenv (%eax) # sched: [62:14.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fldcw_fldenv:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fldcw (%eax) # sched: [7:1.00]
; SKX-NEXT: fldenv (%eax) # sched: [62:14.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fldcw_fldenv:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fldcw (%eax) # sched: [5:0.50]
; BDVER2-NEXT: fldenv (%eax) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fldcw_fldenv:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fldcw (%eax) # sched: [5:1.00]
; BTVER2-NEXT: fldenv (%eax) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fldcw_fldenv:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fldcw (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: fldenv (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fldcw $0 \0A\09 fldenv $0", "*m"(i8* %a0) nounwind
ret void
}
define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; GENERIC-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fld1
; GENERIC-NEXT: fldl2e
; GENERIC-NEXT: fldl2t
; GENERIC-NEXT: fldlg2
; GENERIC-NEXT: fldln2
; GENERIC-NEXT: fldpi
; GENERIC-NEXT: fldz
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fld1 # sched: [6:3.00]
; ATOM-NEXT: fldl2e # sched: [10:5.00]
; ATOM-NEXT: fldl2t # sched: [10:5.00]
; ATOM-NEXT: fldlg2 # sched: [10:5.00]
; ATOM-NEXT: fldln2 # sched: [10:5.00]
; ATOM-NEXT: fldpi # sched: [10:5.00]
; ATOM-NEXT: fldz # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fld1 # sched: [1:0.50]
; SLM-NEXT: fldl2e # sched: [1:1.00]
; SLM-NEXT: fldl2t # sched: [1:1.00]
; SLM-NEXT: fldlg2 # sched: [1:1.00]
; SLM-NEXT: fldln2 # sched: [1:1.00]
; SLM-NEXT: fldpi # sched: [1:1.00]
; SLM-NEXT: fldz # sched: [1:0.50]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fld1 # sched: [1:1.00]
; SANDY-NEXT: fldl2e # sched: [1:1.00]
; SANDY-NEXT: fldl2t # sched: [1:1.00]
; SANDY-NEXT: fldlg2 # sched: [1:1.00]
; SANDY-NEXT: fldln2 # sched: [1:1.00]
; SANDY-NEXT: fldpi # sched: [1:1.00]
; SANDY-NEXT: fldz # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fld1 # sched: [1:1.00]
; HASWELL-NEXT: fldl2e # sched: [1:1.00]
; HASWELL-NEXT: fldl2t # sched: [1:1.00]
; HASWELL-NEXT: fldlg2 # sched: [1:1.00]
; HASWELL-NEXT: fldln2 # sched: [1:1.00]
; HASWELL-NEXT: fldpi # sched: [1:1.00]
; HASWELL-NEXT: fldz # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fld1 # sched: [1:1.00]
; BROADWELL-NEXT: fldl2e # sched: [1:1.00]
; BROADWELL-NEXT: fldl2t # sched: [1:1.00]
; BROADWELL-NEXT: fldlg2 # sched: [1:1.00]
; BROADWELL-NEXT: fldln2 # sched: [1:1.00]
; BROADWELL-NEXT: fldpi # sched: [1:1.00]
; BROADWELL-NEXT: fldz # sched: [1:0.50]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fld1 # sched: [1:1.00]
; SKYLAKE-NEXT: fldl2e # sched: [1:1.00]
; SKYLAKE-NEXT: fldl2t # sched: [1:1.00]
; SKYLAKE-NEXT: fldlg2 # sched: [1:1.00]
; SKYLAKE-NEXT: fldln2 # sched: [1:1.00]
; SKYLAKE-NEXT: fldpi # sched: [1:1.00]
; SKYLAKE-NEXT: fldz # sched: [1:0.50]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fld1 # sched: [1:1.00]
; SKX-NEXT: fldl2e # sched: [1:1.00]
; SKX-NEXT: fldl2t # sched: [1:1.00]
; SKX-NEXT: fldlg2 # sched: [1:1.00]
; SKX-NEXT: fldln2 # sched: [1:1.00]
; SKX-NEXT: fldpi # sched: [1:1.00]
; SKX-NEXT: fldz # sched: [1:0.50]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fld1 # sched: [3:1.00]
; BDVER2-NEXT: fldl2e # sched: [3:1.00]
; BDVER2-NEXT: fldl2t # sched: [3:1.00]
; BDVER2-NEXT: fldlg2 # sched: [3:1.00]
; BDVER2-NEXT: fldln2 # sched: [3:1.00]
; BDVER2-NEXT: fldpi # sched: [3:1.00]
; BDVER2-NEXT: fldz # sched: [3:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fld1 # sched: [3:1.00]
; BTVER2-NEXT: fldl2e # sched: [3:1.00]
; BTVER2-NEXT: fldl2t # sched: [3:1.00]
; BTVER2-NEXT: fldlg2 # sched: [3:1.00]
; BTVER2-NEXT: fldln2 # sched: [3:1.00]
; BTVER2-NEXT: fldpi # sched: [3:1.00]
; BTVER2-NEXT: fldz # sched: [3:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fld1 # sched: [11:1.00]
; ZNVER1-NEXT: fldl2e # sched: [11:1.00]
; ZNVER1-NEXT: fldl2t # sched: [11:1.00]
; ZNVER1-NEXT: fldlg2 # sched: [11:1.00]
; ZNVER1-NEXT: fldln2 # sched: [11:1.00]
; ZNVER1-NEXT: fldpi # sched: [11:1.00]
; ZNVER1-NEXT: fldz # sched: [8:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fld1 \0A\09 fldl2e \0A\09 fldl2t \0A\09 fldlg2 \0A\09 fldln2 \0A\09 fldpi \0A\09 fldz", ""() nounwind
ret void
}
define void @test_fmul(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fmul:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fmul %st(0), %st(1)
-; GENERIC-NEXT: fmul %st(2)
+; GENERIC-NEXT: fmul %st, %st(1)
+; GENERIC-NEXT: fmul %st(2), %st
; GENERIC-NEXT: fmuls (%ecx)
; GENERIC-NEXT: fmull (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fmul:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fmul %st(0), %st(1) # sched: [4:4.00]
-; ATOM-NEXT: fmul %st(2) # sched: [4:4.00]
+; ATOM-NEXT: fmul %st, %st(1) # sched: [4:4.00]
+; ATOM-NEXT: fmul %st(2), %st # sched: [4:4.00]
; ATOM-NEXT: fmuls (%ecx) # sched: [4:4.00]
; ATOM-NEXT: fmull (%eax) # sched: [4:4.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fmul:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fmul %st(0), %st(1) # sched: [5:2.00]
-; SLM-NEXT: fmul %st(2) # sched: [5:2.00]
+; SLM-NEXT: fmul %st, %st(1) # sched: [5:2.00]
+; SLM-NEXT: fmul %st(2), %st # sched: [5:2.00]
; SLM-NEXT: fmuls (%ecx) # sched: [8:2.00]
; SLM-NEXT: fmull (%eax) # sched: [8:2.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fmul:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fmul %st(0), %st(1) # sched: [5:1.00]
-; SANDY-NEXT: fmul %st(2) # sched: [5:1.00]
+; SANDY-NEXT: fmul %st, %st(1) # sched: [5:1.00]
+; SANDY-NEXT: fmul %st(2), %st # sched: [5:1.00]
; SANDY-NEXT: fmuls (%ecx) # sched: [12:1.00]
; SANDY-NEXT: fmull (%eax) # sched: [12:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fmul:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fmul %st(0), %st(1) # sched: [5:1.00]
-; HASWELL-NEXT: fmul %st(2) # sched: [5:1.00]
+; HASWELL-NEXT: fmul %st, %st(1) # sched: [5:1.00]
+; HASWELL-NEXT: fmul %st(2), %st # sched: [5:1.00]
; HASWELL-NEXT: fmuls (%ecx) # sched: [12:1.00]
; HASWELL-NEXT: fmull (%eax) # sched: [12:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fmul:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fmul %st(0), %st(1) # sched: [5:1.00]
-; BROADWELL-NEXT: fmul %st(2) # sched: [5:1.00]
+; BROADWELL-NEXT: fmul %st, %st(1) # sched: [5:1.00]
+; BROADWELL-NEXT: fmul %st(2), %st # sched: [5:1.00]
; BROADWELL-NEXT: fmuls (%ecx) # sched: [11:1.00]
; BROADWELL-NEXT: fmull (%eax) # sched: [11:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fmul:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fmul %st(0), %st(1) # sched: [4:1.00]
-; SKYLAKE-NEXT: fmul %st(2) # sched: [4:1.00]
+; SKYLAKE-NEXT: fmul %st, %st(1) # sched: [4:1.00]
+; SKYLAKE-NEXT: fmul %st(2), %st # sched: [4:1.00]
; SKYLAKE-NEXT: fmuls (%ecx) # sched: [11:1.00]
; SKYLAKE-NEXT: fmull (%eax) # sched: [11:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fmul:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fmul %st(0), %st(1) # sched: [4:1.00]
-; SKX-NEXT: fmul %st(2) # sched: [4:1.00]
+; SKX-NEXT: fmul %st, %st(1) # sched: [4:1.00]
+; SKX-NEXT: fmul %st(2), %st # sched: [4:1.00]
; SKX-NEXT: fmuls (%ecx) # sched: [11:1.00]
; SKX-NEXT: fmull (%eax) # sched: [11:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fmul:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fmul %st(0), %st(1) # sched: [5:1.00]
-; BDVER2-NEXT: fmul %st(2) # sched: [5:1.00]
+; BDVER2-NEXT: fmul %st, %st(1) # sched: [5:1.00]
+; BDVER2-NEXT: fmul %st(2), %st # sched: [5:1.00]
; BDVER2-NEXT: fmuls (%ecx) # sched: [10:1.00]
; BDVER2-NEXT: fmull (%eax) # sched: [10:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fmul:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fmul %st(0), %st(1) # sched: [2:1.00]
-; BTVER2-NEXT: fmul %st(2) # sched: [2:1.00]
+; BTVER2-NEXT: fmul %st, %st(1) # sched: [2:1.00]
+; BTVER2-NEXT: fmul %st(2), %st # sched: [2:1.00]
; BTVER2-NEXT: fmuls (%ecx) # sched: [7:1.00]
; BTVER2-NEXT: fmull (%eax) # sched: [7:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fmul:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fmul %st(0), %st(1) # sched: [3:0.50]
-; ZNVER1-NEXT: fmul %st(2) # sched: [3:0.50]
+; ZNVER1-NEXT: fmul %st, %st(1) # sched: [3:0.50]
+; ZNVER1-NEXT: fmul %st(2), %st # sched: [3:0.50]
; ZNVER1-NEXT: fmuls (%ecx) # sched: [10:0.50]
; ZNVER1-NEXT: fmull (%eax) # sched: [10:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fmul %st(0), %st(1) \0A\09 fmul %st(2), %st(0) \0A\09 fmuls $0 \0A\09 fmull $1", "*m,*m"(float *%a0, double *%a1) nounwind
ret void
}
define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fmulp_fimul:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fmulp %st(1)
-; GENERIC-NEXT: fmulp %st(2)
+; GENERIC-NEXT: fmulp %st, %st(1)
+; GENERIC-NEXT: fmulp %st, %st(2)
; GENERIC-NEXT: fimuls (%ecx)
; GENERIC-NEXT: fimull (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fmulp_fimul:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fmulp %st(1) # sched: [4:4.00]
-; ATOM-NEXT: fmulp %st(2) # sched: [4:4.00]
+; ATOM-NEXT: fmulp %st, %st(1) # sched: [4:4.00]
+; ATOM-NEXT: fmulp %st, %st(2) # sched: [4:4.00]
; ATOM-NEXT: fimuls (%ecx) # sched: [4:4.00]
; ATOM-NEXT: fimull (%eax) # sched: [4:4.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fmulp_fimul:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fmulp %st(1) # sched: [5:2.00]
-; SLM-NEXT: fmulp %st(2) # sched: [5:2.00]
+; SLM-NEXT: fmulp %st, %st(1) # sched: [5:2.00]
+; SLM-NEXT: fmulp %st, %st(2) # sched: [5:2.00]
; SLM-NEXT: fimuls (%ecx) # sched: [8:2.00]
; SLM-NEXT: fimull (%eax) # sched: [8:2.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fmulp_fimul:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fmulp %st(1) # sched: [5:1.00]
-; SANDY-NEXT: fmulp %st(2) # sched: [5:1.00]
+; SANDY-NEXT: fmulp %st, %st(1) # sched: [5:1.00]
+; SANDY-NEXT: fmulp %st, %st(2) # sched: [5:1.00]
; SANDY-NEXT: fimuls (%ecx) # sched: [15:1.00]
; SANDY-NEXT: fimull (%eax) # sched: [15:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fmulp_fimul:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fmulp %st(1) # sched: [5:1.00]
-; HASWELL-NEXT: fmulp %st(2) # sched: [5:1.00]
+; HASWELL-NEXT: fmulp %st, %st(1) # sched: [5:1.00]
+; HASWELL-NEXT: fmulp %st, %st(2) # sched: [5:1.00]
; HASWELL-NEXT: fimuls (%ecx) # sched: [15:1.00]
; HASWELL-NEXT: fimull (%eax) # sched: [15:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fmulp_fimul:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fmulp %st(1) # sched: [5:1.00]
-; BROADWELL-NEXT: fmulp %st(2) # sched: [5:1.00]
+; BROADWELL-NEXT: fmulp %st, %st(1) # sched: [5:1.00]
+; BROADWELL-NEXT: fmulp %st, %st(2) # sched: [5:1.00]
; BROADWELL-NEXT: fimuls (%ecx) # sched: [14:1.00]
; BROADWELL-NEXT: fimull (%eax) # sched: [14:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fmulp_fimul:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fmulp %st(1) # sched: [4:1.00]
-; SKYLAKE-NEXT: fmulp %st(2) # sched: [4:1.00]
+; SKYLAKE-NEXT: fmulp %st, %st(1) # sched: [4:1.00]
+; SKYLAKE-NEXT: fmulp %st, %st(2) # sched: [4:1.00]
; SKYLAKE-NEXT: fimuls (%ecx) # sched: [14:1.00]
; SKYLAKE-NEXT: fimull (%eax) # sched: [14:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fmulp_fimul:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fmulp %st(1) # sched: [4:1.00]
-; SKX-NEXT: fmulp %st(2) # sched: [4:1.00]
+; SKX-NEXT: fmulp %st, %st(1) # sched: [4:1.00]
+; SKX-NEXT: fmulp %st, %st(2) # sched: [4:1.00]
; SKX-NEXT: fimuls (%ecx) # sched: [14:1.00]
; SKX-NEXT: fimull (%eax) # sched: [14:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fmulp_fimul:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fmulp %st(1) # sched: [5:1.00]
-; BDVER2-NEXT: fmulp %st(2) # sched: [5:1.00]
+; BDVER2-NEXT: fmulp %st, %st(1) # sched: [5:1.00]
+; BDVER2-NEXT: fmulp %st, %st(2) # sched: [5:1.00]
; BDVER2-NEXT: fimuls (%ecx) # sched: [10:1.00]
; BDVER2-NEXT: fimull (%eax) # sched: [10:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fmulp_fimul:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fmulp %st(1) # sched: [2:1.00]
-; BTVER2-NEXT: fmulp %st(2) # sched: [2:1.00]
+; BTVER2-NEXT: fmulp %st, %st(1) # sched: [2:1.00]
+; BTVER2-NEXT: fmulp %st, %st(2) # sched: [2:1.00]
; BTVER2-NEXT: fimuls (%ecx) # sched: [7:1.00]
; BTVER2-NEXT: fimull (%eax) # sched: [7:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fmulp_fimul:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fmulp %st(1) # sched: [3:0.50]
-; ZNVER1-NEXT: fmulp %st(2) # sched: [3:0.50]
+; ZNVER1-NEXT: fmulp %st, %st(1) # sched: [3:0.50]
+; ZNVER1-NEXT: fmulp %st, %st(2) # sched: [3:0.50]
; ZNVER1-NEXT: fimuls (%ecx) # sched: [10:0.50]
; ZNVER1-NEXT: fimull (%eax) # sched: [10:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fmulp \0A\09 fmulp %st(2), %st(0) \0A\09 fimuls $0 \0A\09 fimull $1", "*m,*m"(i16 *%a0, i32 *%a1) nounwind
ret void
}
define void @test_fnop() optsize {
; GENERIC-LABEL: test_fnop:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fnop
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fnop:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fnop # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fnop:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fnop # sched: [1:0.50]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fnop:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fnop # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fnop:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fnop # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fnop:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fnop # sched: [1:0.50]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fnop:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fnop # sched: [1:0.50]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fnop:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fnop # sched: [1:0.50]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fnop:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fnop # sched: [1:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fnop:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fnop # sched: [1:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fnop:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fnop # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fnop", ""() nounwind
ret void
}
define void @test_fpatan() optsize {
; GENERIC-LABEL: test_fpatan:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fpatan
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fpatan:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fpatan # sched: [183:91.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fpatan:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fpatan # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fpatan:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fpatan # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fpatan:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fpatan # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fpatan:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fpatan # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fpatan:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fpatan # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fpatan:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fpatan # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fpatan:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fpatan # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fpatan:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fpatan # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fpatan:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fpatan # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fpatan", ""() nounwind
ret void
}
define void @test_fprem_fprem1() optsize {
; GENERIC-LABEL: test_fprem_fprem1:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fprem
; GENERIC-NEXT: fprem1
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fprem_fprem1:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fprem # sched: [55:27.50]
; ATOM-NEXT: fprem1 # sched: [71:35.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fprem_fprem1:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fprem # sched: [100:1.00]
; SLM-NEXT: fprem1 # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fprem_fprem1:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fprem # sched: [100:0.33]
; SANDY-NEXT: fprem1 # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fprem_fprem1:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fprem # sched: [19:7.00]
; HASWELL-NEXT: fprem1 # sched: [27:10.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fprem_fprem1:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fprem # sched: [100:0.25]
; BROADWELL-NEXT: fprem1 # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fprem_fprem1:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fprem # sched: [100:0.25]
; SKYLAKE-NEXT: fprem1 # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fprem_fprem1:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fprem # sched: [100:0.25]
; SKX-NEXT: fprem1 # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fprem_fprem1:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fprem # sched: [100:0.50]
; BDVER2-NEXT: fprem1 # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fprem_fprem1:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fprem # sched: [100:0.50]
; BTVER2-NEXT: fprem1 # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fprem_fprem1:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fprem # sched: [100:0.25]
; ZNVER1-NEXT: fprem1 # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fprem \0A\09 fprem1", ""() nounwind
ret void
}
define void @test_fptan() optsize {
; GENERIC-LABEL: test_fptan:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fptan
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fptan:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fptan # sched: [168:84.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fptan:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fptan # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fptan:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fptan # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fptan:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fptan # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fptan:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fptan # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fptan:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fptan # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fptan:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fptan # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fptan:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fptan # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fptan:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fptan # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fptan:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fptan # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fptan", ""() nounwind
ret void
}
define void @test_frndint() optsize {
; GENERIC-LABEL: test_frndint:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: frndint
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_frndint:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: frndint # sched: [46:23.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_frndint:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: frndint # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_frndint:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: frndint # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_frndint:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: frndint # sched: [11:4.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_frndint:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: frndint # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_frndint:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: frndint # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_frndint:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: frndint # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_frndint:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: frndint # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_frndint:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: frndint # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_frndint:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: frndint # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "frndint", ""() nounwind
ret void
}
define void @test_frstor(i8* %a0) optsize {
; GENERIC-LABEL: test_frstor:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: frstor (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_frstor:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: frstor (%eax) # sched: [100:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_frstor:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: frstor (%eax) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_frstor:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: frstor (%eax) # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_frstor:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: frstor (%eax) # sched: [1:22.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_frstor:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: frstor (%eax) # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_frstor:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: frstor (%eax) # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_frstor:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: frstor (%eax) # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_frstor:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: frstor (%eax) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_frstor:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: frstor (%eax) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_frstor:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: frstor (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "frstor $0", "*m"(i8* %a0) nounwind
ret void
}
define void @test_fsave(i8* %a0) optsize {
; GENERIC-LABEL: test_fsave:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: wait
; GENERIC-NEXT: fnsave (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsave:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: fnsave (%eax) # sched: [100:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsave:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: fnsave (%eax) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsave:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: fnsave (%eax) # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fsave:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: wait # sched: [2:0.50]
; HASWELL-NEXT: fnsave (%eax) # sched: [1:36.75]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fsave:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: fnsave (%eax) # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsave:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: fnsave (%eax) # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsave:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: fnsave (%eax) # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fsave:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: wait # sched: [100:0.50]
; BDVER2-NEXT: fnsave (%eax) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fsave:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: wait # sched: [100:0.50]
; BTVER2-NEXT: fnsave (%eax) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsave:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: fnsave (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fsave $0", "*m"(i8* %a0) nounwind
ret void
}
define void @test_fnsave(i8* %a0) optsize {
; GENERIC-LABEL: test_fnsave:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fnsave (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fnsave:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fnsave (%eax) # sched: [100:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fnsave:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fnsave (%eax) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fnsave:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fnsave (%eax) # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fnsave:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fnsave (%eax) # sched: [1:36.75]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fnsave:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fnsave (%eax) # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fnsave:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fnsave (%eax) # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fnsave:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fnsave (%eax) # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fnsave:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fnsave (%eax) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fnsave:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fnsave (%eax) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fnsave:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fnsave (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fnsave $0", "*m"(i8* %a0) nounwind
ret void
}
define void @test_fscale() optsize {
; GENERIC-LABEL: test_fscale:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fscale
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fscale:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fscale # sched: [77:38.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fscale:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fscale # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fscale:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fscale # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fscale:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fscale # sched: [75:12.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fscale:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fscale # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fscale:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fscale # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fscale:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fscale # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fscale:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fscale # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fscale:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fscale # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fscale:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fscale # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fscale", ""() nounwind
ret void
}
define void @test_fsin() optsize {
; GENERIC-LABEL: test_fsin:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fsin
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsin:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fsin # sched: [174:87.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsin:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fsin # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsin:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fsin # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fsin:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fsin # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fsin:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fsin # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsin:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fsin # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsin:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fsin # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fsin:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fsin # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fsin:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fsin # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsin:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fsin # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fsin", ""() nounwind
ret void
}
define void @test_fsincos() optsize {
; GENERIC-LABEL: test_fsincos:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fsincos
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsincos:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fsincos # sched: [174:87.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsincos:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fsincos # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsincos:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fsincos # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fsincos:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fsincos # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fsincos:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fsincos # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsincos:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fsincos # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsincos:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fsincos # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fsincos:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fsincos # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fsincos:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fsincos # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsincos:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fsincos # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fsincos", ""() nounwind
ret void
}
define void @test_fsqrt() optsize {
; GENERIC-LABEL: test_fsqrt:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fsqrt
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsqrt:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fsqrt # sched: [71:35.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsqrt:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fsqrt # sched: [40:40.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsqrt:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fsqrt # sched: [24:24.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fsqrt:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fsqrt # sched: [23:17.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fsqrt:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fsqrt # sched: [23:9.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsqrt:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fsqrt # sched: [21:7.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsqrt:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fsqrt # sched: [21:7.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fsqrt:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fsqrt # sched: [1:17.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fsqrt:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fsqrt # sched: [35:35.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsqrt:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fsqrt # sched: [20:20.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fsqrt", ""() nounwind
ret void
}
define void @test_fst_fstp(i16* %a0, i32* %a1, i64 *%a2) optsize {
; GENERIC-LABEL: test_fst_fstp:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %edx
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fst %st(0)
; GENERIC-NEXT: fsts (%edx)
; GENERIC-NEXT: fstl (%ecx)
; GENERIC-NEXT: fstp %st(0)
; GENERIC-NEXT: fstpl (%edx)
; GENERIC-NEXT: fstpl (%ecx)
; GENERIC-NEXT: fstpt (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fst_fstp:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fst %st(0) # sched: [2:1.00]
; ATOM-NEXT: fsts (%edx) # sched: [2:1.00]
; ATOM-NEXT: fstl (%ecx) # sched: [2:1.00]
; ATOM-NEXT: fstp %st(0) # sched: [2:1.00]
; ATOM-NEXT: fstpl (%edx) # sched: [2:1.00]
; ATOM-NEXT: fstpl (%ecx) # sched: [2:1.00]
; ATOM-NEXT: fstpt (%eax) # sched: [5:2.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fst_fstp:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fst %st(0) # sched: [1:0.50]
; SLM-NEXT: fsts (%edx) # sched: [1:1.00]
; SLM-NEXT: fstl (%ecx) # sched: [1:1.00]
; SLM-NEXT: fstp %st(0) # sched: [1:0.50]
; SLM-NEXT: fstpl (%edx) # sched: [1:1.00]
; SLM-NEXT: fstpl (%ecx) # sched: [1:1.00]
; SLM-NEXT: fstpt (%eax) # sched: [1:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fst_fstp:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fst %st(0) # sched: [1:1.00]
; SANDY-NEXT: fsts (%edx) # sched: [6:1.00]
; SANDY-NEXT: fstl (%ecx) # sched: [6:1.00]
; SANDY-NEXT: fstp %st(0) # sched: [1:1.00]
; SANDY-NEXT: fstpl (%edx) # sched: [6:1.00]
; SANDY-NEXT: fstpl (%ecx) # sched: [6:1.00]
; SANDY-NEXT: fstpt (%eax) # sched: [6:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fst_fstp:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fst %st(0) # sched: [1:0.50]
; HASWELL-NEXT: fsts (%edx) # sched: [1:1.00]
; HASWELL-NEXT: fstl (%ecx) # sched: [1:1.00]
; HASWELL-NEXT: fstp %st(0) # sched: [1:0.50]
; HASWELL-NEXT: fstpl (%edx) # sched: [1:1.00]
; HASWELL-NEXT: fstpl (%ecx) # sched: [1:1.00]
; HASWELL-NEXT: fstpt (%eax) # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fst_fstp:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fst %st(0) # sched: [1:0.25]
; BROADWELL-NEXT: fsts (%edx) # sched: [1:1.00]
; BROADWELL-NEXT: fstl (%ecx) # sched: [1:1.00]
; BROADWELL-NEXT: fstp %st(0) # sched: [1:0.25]
; BROADWELL-NEXT: fstpl (%edx) # sched: [1:1.00]
; BROADWELL-NEXT: fstpl (%ecx) # sched: [1:1.00]
; BROADWELL-NEXT: fstpt (%eax) # sched: [1:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fst_fstp:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fst %st(0) # sched: [1:0.25]
; SKYLAKE-NEXT: fsts (%edx) # sched: [1:1.00]
; SKYLAKE-NEXT: fstl (%ecx) # sched: [1:1.00]
; SKYLAKE-NEXT: fstp %st(0) # sched: [1:0.25]
; SKYLAKE-NEXT: fstpl (%edx) # sched: [1:1.00]
; SKYLAKE-NEXT: fstpl (%ecx) # sched: [1:1.00]
; SKYLAKE-NEXT: fstpt (%eax) # sched: [1:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fst_fstp:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fst %st(0) # sched: [1:0.25]
; SKX-NEXT: fsts (%edx) # sched: [1:1.00]
; SKX-NEXT: fstl (%ecx) # sched: [1:1.00]
; SKX-NEXT: fstp %st(0) # sched: [1:0.25]
; SKX-NEXT: fstpl (%edx) # sched: [1:1.00]
; SKX-NEXT: fstpl (%ecx) # sched: [1:1.00]
; SKX-NEXT: fstpt (%eax) # sched: [1:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fst_fstp:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fst %st(0) # sched: [1:0.50]
; BDVER2-NEXT: fsts (%edx) # sched: [1:1.00]
; BDVER2-NEXT: fstl (%ecx) # sched: [1:1.00]
; BDVER2-NEXT: fstp %st(0) # sched: [1:0.50]
; BDVER2-NEXT: fstpl (%edx) # sched: [1:1.00]
; BDVER2-NEXT: fstpl (%ecx) # sched: [1:1.00]
; BDVER2-NEXT: fstpt (%eax) # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fst_fstp:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fst %st(0) # sched: [1:0.50]
; BTVER2-NEXT: fsts (%edx) # sched: [1:1.00]
; BTVER2-NEXT: fstl (%ecx) # sched: [1:1.00]
; BTVER2-NEXT: fstp %st(0) # sched: [1:0.50]
; BTVER2-NEXT: fstpl (%edx) # sched: [1:1.00]
; BTVER2-NEXT: fstpl (%ecx) # sched: [1:1.00]
; BTVER2-NEXT: fstpt (%eax) # sched: [1:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fst_fstp:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fst %st(0) # sched: [5:0.50]
; ZNVER1-NEXT: fsts (%edx) # sched: [1:0.50]
; ZNVER1-NEXT: fstl (%ecx) # sched: [1:0.50]
; ZNVER1-NEXT: fstp %st(0) # sched: [5:0.50]
; ZNVER1-NEXT: fstpl (%edx) # sched: [1:0.50]
; ZNVER1-NEXT: fstpl (%ecx) # sched: [1:0.50]
; ZNVER1-NEXT: fstpt (%eax) # sched: [5:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fst %st(0) \0A\09 fsts $0 \0A\09 fstl $1 \0A\09 fstp %st(0) \0A\09 fstpl $0 \0A\09 fstpl $1 \0A\09 fstpt $2", "*m,*m,*m"(i16* %a0, i32* %a1, i64 *%a2) nounwind
ret void
}
define void @test_fstcw_fstenv_fstsw(i8* %a0) optsize {
; GENERIC-LABEL: test_fstcw_fstenv_fstsw:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: wait
; GENERIC-NEXT: fnstcw (%eax)
; GENERIC-NEXT: wait
; GENERIC-NEXT: fnstenv (%eax)
; GENERIC-NEXT: wait
; GENERIC-NEXT: fnstsw (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fstcw_fstenv_fstsw:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: fnstcw (%eax) # sched: [8:4.00]
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: fnstenv (%eax) # sched: [100:0.50]
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: fnstsw (%eax) # sched: [100:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fstcw_fstenv_fstsw:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: fnstcw (%eax) # sched: [1:0.50]
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: fnstenv (%eax) # sched: [100:1.00]
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: fnstsw (%eax) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fstcw_fstenv_fstsw:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: fnstcw (%eax) # sched: [7:1.00]
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: fnstenv (%eax) # sched: [100:0.33]
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: fnstsw (%eax) # sched: [7:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fstcw_fstenv_fstsw:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: wait # sched: [2:0.50]
; HASWELL-NEXT: fnstcw (%eax) # sched: [2:1.00]
; HASWELL-NEXT: wait # sched: [2:0.50]
; HASWELL-NEXT: fnstenv (%eax) # sched: [115:19.50]
; HASWELL-NEXT: wait # sched: [2:0.50]
; HASWELL-NEXT: fnstsw (%eax) # sched: [4:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fstcw_fstenv_fstsw:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: fnstcw (%eax) # sched: [2:1.00]
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: fnstenv (%eax) # sched: [115:19.50]
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: fnstsw (%eax) # sched: [4:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fstcw_fstenv_fstsw:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: fnstcw (%eax) # sched: [2:1.00]
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: fnstenv (%eax) # sched: [106:19.50]
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: fnstsw (%eax) # sched: [3:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fstcw_fstenv_fstsw:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: fnstcw (%eax) # sched: [2:1.00]
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: fnstenv (%eax) # sched: [106:19.50]
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: fnstsw (%eax) # sched: [3:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fstcw_fstenv_fstsw:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: wait # sched: [100:0.50]
; BDVER2-NEXT: fnstcw (%eax) # sched: [1:0.50]
; BDVER2-NEXT: wait # sched: [100:0.50]
; BDVER2-NEXT: fnstenv (%eax) # sched: [100:0.50]
; BDVER2-NEXT: wait # sched: [100:0.50]
; BDVER2-NEXT: fnstsw (%eax) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fstcw_fstenv_fstsw:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: wait # sched: [100:0.50]
; BTVER2-NEXT: fnstcw (%eax) # sched: [1:0.50]
; BTVER2-NEXT: wait # sched: [100:0.50]
; BTVER2-NEXT: fnstenv (%eax) # sched: [100:0.50]
; BTVER2-NEXT: wait # sched: [100:0.50]
; BTVER2-NEXT: fnstsw (%eax) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fstcw_fstenv_fstsw:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: fnstcw (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: fnstenv (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: fnstsw (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fstcw $0 \0A\09 fstenv $0 \0A\09 fstsw $0", "*m"(i8* %a0) nounwind
ret void
}
define void @test_fnstcw_fnstenv_fnstsw(i8* %a0) optsize {
; GENERIC-LABEL: test_fnstcw_fnstenv_fnstsw:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fnstcw (%eax)
; GENERIC-NEXT: fnstenv (%eax)
; GENERIC-NEXT: fnstsw (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fnstcw_fnstenv_fnstsw:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fnstcw (%eax) # sched: [8:4.00]
; ATOM-NEXT: fnstenv (%eax) # sched: [100:0.50]
; ATOM-NEXT: fnstsw (%eax) # sched: [100:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fnstcw_fnstenv_fnstsw:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fnstcw (%eax) # sched: [1:0.50]
; SLM-NEXT: fnstenv (%eax) # sched: [100:1.00]
; SLM-NEXT: fnstsw (%eax) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fnstcw_fnstenv_fnstsw:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fnstcw (%eax) # sched: [7:1.00]
; SANDY-NEXT: fnstenv (%eax) # sched: [100:0.33]
; SANDY-NEXT: fnstsw (%eax) # sched: [7:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fnstcw_fnstenv_fnstsw:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fnstcw (%eax) # sched: [2:1.00]
; HASWELL-NEXT: fnstenv (%eax) # sched: [115:19.50]
; HASWELL-NEXT: fnstsw (%eax) # sched: [4:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fnstcw_fnstenv_fnstsw:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fnstcw (%eax) # sched: [2:1.00]
; BROADWELL-NEXT: fnstenv (%eax) # sched: [115:19.50]
; BROADWELL-NEXT: fnstsw (%eax) # sched: [4:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fnstcw_fnstenv_fnstsw:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fnstcw (%eax) # sched: [2:1.00]
; SKYLAKE-NEXT: fnstenv (%eax) # sched: [106:19.50]
; SKYLAKE-NEXT: fnstsw (%eax) # sched: [3:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fnstcw_fnstenv_fnstsw:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fnstcw (%eax) # sched: [2:1.00]
; SKX-NEXT: fnstenv (%eax) # sched: [106:19.50]
; SKX-NEXT: fnstsw (%eax) # sched: [3:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fnstcw_fnstenv_fnstsw:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fnstcw (%eax) # sched: [1:0.50]
; BDVER2-NEXT: fnstenv (%eax) # sched: [100:0.50]
; BDVER2-NEXT: fnstsw (%eax) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fnstcw_fnstenv_fnstsw:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fnstcw (%eax) # sched: [1:0.50]
; BTVER2-NEXT: fnstenv (%eax) # sched: [100:0.50]
; BTVER2-NEXT: fnstsw (%eax) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fnstcw_fnstenv_fnstsw:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fnstcw (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: fnstenv (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: fnstsw (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fnstcw $0 \0A\09 fnstenv $0 \0A\09 fnstsw $0", "*m"(i8* %a0) nounwind
ret void
}
define void @test_fsub(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fsub:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fsub %st(0), %st(1)
-; GENERIC-NEXT: fsub %st(2)
+; GENERIC-NEXT: fsub %st, %st(1)
+; GENERIC-NEXT: fsub %st(2), %st
; GENERIC-NEXT: fsubs (%ecx)
; GENERIC-NEXT: fsubl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsub:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fsub %st(0), %st(1) # sched: [5:5.00]
-; ATOM-NEXT: fsub %st(2) # sched: [5:5.00]
+; ATOM-NEXT: fsub %st, %st(1) # sched: [5:5.00]
+; ATOM-NEXT: fsub %st(2), %st # sched: [5:5.00]
; ATOM-NEXT: fsubs (%ecx) # sched: [5:5.00]
; ATOM-NEXT: fsubl (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsub:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fsub %st(0), %st(1) # sched: [3:1.00]
-; SLM-NEXT: fsub %st(2) # sched: [3:1.00]
+; SLM-NEXT: fsub %st, %st(1) # sched: [3:1.00]
+; SLM-NEXT: fsub %st(2), %st # sched: [3:1.00]
; SLM-NEXT: fsubs (%ecx) # sched: [6:1.00]
; SLM-NEXT: fsubl (%eax) # sched: [6:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsub:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fsub %st(0), %st(1) # sched: [3:1.00]
-; SANDY-NEXT: fsub %st(2) # sched: [3:1.00]
+; SANDY-NEXT: fsub %st, %st(1) # sched: [3:1.00]
+; SANDY-NEXT: fsub %st(2), %st # sched: [3:1.00]
; SANDY-NEXT: fsubs (%ecx) # sched: [10:1.00]
; SANDY-NEXT: fsubl (%eax) # sched: [10:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fsub:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fsub %st(0), %st(1) # sched: [3:1.00]
-; HASWELL-NEXT: fsub %st(2) # sched: [3:1.00]
+; HASWELL-NEXT: fsub %st, %st(1) # sched: [3:1.00]
+; HASWELL-NEXT: fsub %st(2), %st # sched: [3:1.00]
; HASWELL-NEXT: fsubs (%ecx) # sched: [10:1.00]
; HASWELL-NEXT: fsubl (%eax) # sched: [10:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fsub:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fsub %st(0), %st(1) # sched: [3:1.00]
-; BROADWELL-NEXT: fsub %st(2) # sched: [3:1.00]
+; BROADWELL-NEXT: fsub %st, %st(1) # sched: [3:1.00]
+; BROADWELL-NEXT: fsub %st(2), %st # sched: [3:1.00]
; BROADWELL-NEXT: fsubs (%ecx) # sched: [9:1.00]
; BROADWELL-NEXT: fsubl (%eax) # sched: [9:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsub:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fsub %st(0), %st(1) # sched: [3:1.00]
-; SKYLAKE-NEXT: fsub %st(2) # sched: [3:1.00]
+; SKYLAKE-NEXT: fsub %st, %st(1) # sched: [3:1.00]
+; SKYLAKE-NEXT: fsub %st(2), %st # sched: [3:1.00]
; SKYLAKE-NEXT: fsubs (%ecx) # sched: [10:1.00]
; SKYLAKE-NEXT: fsubl (%eax) # sched: [10:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsub:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fsub %st(0), %st(1) # sched: [3:1.00]
-; SKX-NEXT: fsub %st(2) # sched: [3:1.00]
+; SKX-NEXT: fsub %st, %st(1) # sched: [3:1.00]
+; SKX-NEXT: fsub %st(2), %st # sched: [3:1.00]
; SKX-NEXT: fsubs (%ecx) # sched: [10:1.00]
; SKX-NEXT: fsubl (%eax) # sched: [10:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fsub:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fsub %st(0), %st(1) # sched: [5:1.00]
-; BDVER2-NEXT: fsub %st(2) # sched: [5:1.00]
+; BDVER2-NEXT: fsub %st, %st(1) # sched: [5:1.00]
+; BDVER2-NEXT: fsub %st(2), %st # sched: [5:1.00]
; BDVER2-NEXT: fsubs (%ecx) # sched: [10:1.00]
; BDVER2-NEXT: fsubl (%eax) # sched: [10:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fsub:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fsub %st(0), %st(1) # sched: [3:1.00]
-; BTVER2-NEXT: fsub %st(2) # sched: [3:1.00]
+; BTVER2-NEXT: fsub %st, %st(1) # sched: [3:1.00]
+; BTVER2-NEXT: fsub %st(2), %st # sched: [3:1.00]
; BTVER2-NEXT: fsubs (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: fsubl (%eax) # sched: [8:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsub:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fsub %st(0), %st(1) # sched: [3:1.00]
-; ZNVER1-NEXT: fsub %st(2) # sched: [3:1.00]
+; ZNVER1-NEXT: fsub %st, %st(1) # sched: [3:1.00]
+; ZNVER1-NEXT: fsub %st(2), %st # sched: [3:1.00]
; ZNVER1-NEXT: fsubs (%ecx) # sched: [10:1.00]
; ZNVER1-NEXT: fsubl (%eax) # sched: [10:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fsub %st(0), %st(1) \0A\09 fsub %st(2), %st(0) \0A\09 fsubs $0 \0A\09 fsubl $1", "*m,*m"(float *%a0, double *%a1) nounwind
ret void
}
define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fsubp_fisub:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fsubp %st(1)
-; GENERIC-NEXT: fsubp %st(2)
+; GENERIC-NEXT: fsubp %st, %st(1)
+; GENERIC-NEXT: fsubp %st, %st(2)
; GENERIC-NEXT: fisubs (%ecx)
; GENERIC-NEXT: fisubl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsubp_fisub:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fsubp %st(1) # sched: [5:5.00]
-; ATOM-NEXT: fsubp %st(2) # sched: [5:5.00]
+; ATOM-NEXT: fsubp %st, %st(1) # sched: [5:5.00]
+; ATOM-NEXT: fsubp %st, %st(2) # sched: [5:5.00]
; ATOM-NEXT: fisubs (%ecx) # sched: [5:5.00]
; ATOM-NEXT: fisubl (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsubp_fisub:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fsubp %st(1) # sched: [3:1.00]
-; SLM-NEXT: fsubp %st(2) # sched: [3:1.00]
+; SLM-NEXT: fsubp %st, %st(1) # sched: [3:1.00]
+; SLM-NEXT: fsubp %st, %st(2) # sched: [3:1.00]
; SLM-NEXT: fisubs (%ecx) # sched: [6:1.00]
; SLM-NEXT: fisubl (%eax) # sched: [6:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsubp_fisub:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fsubp %st(1) # sched: [3:1.00]
-; SANDY-NEXT: fsubp %st(2) # sched: [3:1.00]
+; SANDY-NEXT: fsubp %st, %st(1) # sched: [3:1.00]
+; SANDY-NEXT: fsubp %st, %st(2) # sched: [3:1.00]
; SANDY-NEXT: fisubs (%ecx) # sched: [13:2.00]
; SANDY-NEXT: fisubl (%eax) # sched: [13:2.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fsubp_fisub:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fsubp %st(1) # sched: [3:1.00]
-; HASWELL-NEXT: fsubp %st(2) # sched: [3:1.00]
+; HASWELL-NEXT: fsubp %st, %st(1) # sched: [3:1.00]
+; HASWELL-NEXT: fsubp %st, %st(2) # sched: [3:1.00]
; HASWELL-NEXT: fisubs (%ecx) # sched: [13:2.00]
; HASWELL-NEXT: fisubl (%eax) # sched: [13:2.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fsubp_fisub:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fsubp %st(1) # sched: [3:1.00]
-; BROADWELL-NEXT: fsubp %st(2) # sched: [3:1.00]
+; BROADWELL-NEXT: fsubp %st, %st(1) # sched: [3:1.00]
+; BROADWELL-NEXT: fsubp %st, %st(2) # sched: [3:1.00]
; BROADWELL-NEXT: fisubs (%ecx) # sched: [12:2.00]
; BROADWELL-NEXT: fisubl (%eax) # sched: [12:2.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsubp_fisub:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fsubp %st(1) # sched: [3:1.00]
-; SKYLAKE-NEXT: fsubp %st(2) # sched: [3:1.00]
+; SKYLAKE-NEXT: fsubp %st, %st(1) # sched: [3:1.00]
+; SKYLAKE-NEXT: fsubp %st, %st(2) # sched: [3:1.00]
; SKYLAKE-NEXT: fisubs (%ecx) # sched: [13:2.00]
; SKYLAKE-NEXT: fisubl (%eax) # sched: [13:2.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsubp_fisub:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fsubp %st(1) # sched: [3:1.00]
-; SKX-NEXT: fsubp %st(2) # sched: [3:1.00]
+; SKX-NEXT: fsubp %st, %st(1) # sched: [3:1.00]
+; SKX-NEXT: fsubp %st, %st(2) # sched: [3:1.00]
; SKX-NEXT: fisubs (%ecx) # sched: [13:2.00]
; SKX-NEXT: fisubl (%eax) # sched: [13:2.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fsubp_fisub:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fsubp %st(1) # sched: [5:1.00]
-; BDVER2-NEXT: fsubp %st(2) # sched: [5:1.00]
+; BDVER2-NEXT: fsubp %st, %st(1) # sched: [5:1.00]
+; BDVER2-NEXT: fsubp %st, %st(2) # sched: [5:1.00]
; BDVER2-NEXT: fisubs (%ecx) # sched: [10:1.00]
; BDVER2-NEXT: fisubl (%eax) # sched: [10:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fsubp_fisub:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fsubp %st(1) # sched: [3:1.00]
-; BTVER2-NEXT: fsubp %st(2) # sched: [3:1.00]
+; BTVER2-NEXT: fsubp %st, %st(1) # sched: [3:1.00]
+; BTVER2-NEXT: fsubp %st, %st(2) # sched: [3:1.00]
; BTVER2-NEXT: fisubs (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: fisubl (%eax) # sched: [8:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsubp_fisub:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fsubp %st(1) # sched: [3:1.00]
-; ZNVER1-NEXT: fsubp %st(2) # sched: [3:1.00]
+; ZNVER1-NEXT: fsubp %st, %st(1) # sched: [3:1.00]
+; ZNVER1-NEXT: fsubp %st, %st(2) # sched: [3:1.00]
; ZNVER1-NEXT: fisubs (%ecx) # sched: [10:1.00]
; ZNVER1-NEXT: fisubl (%eax) # sched: [10:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fsubp \0A\09 fsubp %st(2), %st(0) \0A\09 fisubs $0 \0A\09 fisubl $1", "*m,*m"(i16 *%a0, i32 *%a1) nounwind
ret void
}
define void @test_fsubr(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fsubr:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fsubr %st(0), %st(1)
-; GENERIC-NEXT: fsubr %st(2)
+; GENERIC-NEXT: fsubr %st, %st(1)
+; GENERIC-NEXT: fsubr %st(2), %st
; GENERIC-NEXT: fsubrs (%ecx)
; GENERIC-NEXT: fsubrl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsubr:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fsubr %st(0), %st(1) # sched: [5:5.00]
-; ATOM-NEXT: fsubr %st(2) # sched: [5:5.00]
+; ATOM-NEXT: fsubr %st, %st(1) # sched: [5:5.00]
+; ATOM-NEXT: fsubr %st(2), %st # sched: [5:5.00]
; ATOM-NEXT: fsubrs (%ecx) # sched: [5:5.00]
; ATOM-NEXT: fsubrl (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsubr:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fsubr %st(0), %st(1) # sched: [3:1.00]
-; SLM-NEXT: fsubr %st(2) # sched: [3:1.00]
+; SLM-NEXT: fsubr %st, %st(1) # sched: [3:1.00]
+; SLM-NEXT: fsubr %st(2), %st # sched: [3:1.00]
; SLM-NEXT: fsubrs (%ecx) # sched: [6:1.00]
; SLM-NEXT: fsubrl (%eax) # sched: [6:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsubr:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fsubr %st(0), %st(1) # sched: [3:1.00]
-; SANDY-NEXT: fsubr %st(2) # sched: [3:1.00]
+; SANDY-NEXT: fsubr %st, %st(1) # sched: [3:1.00]
+; SANDY-NEXT: fsubr %st(2), %st # sched: [3:1.00]
; SANDY-NEXT: fsubrs (%ecx) # sched: [10:1.00]
; SANDY-NEXT: fsubrl (%eax) # sched: [10:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fsubr:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fsubr %st(0), %st(1) # sched: [3:1.00]
-; HASWELL-NEXT: fsubr %st(2) # sched: [3:1.00]
+; HASWELL-NEXT: fsubr %st, %st(1) # sched: [3:1.00]
+; HASWELL-NEXT: fsubr %st(2), %st # sched: [3:1.00]
; HASWELL-NEXT: fsubrs (%ecx) # sched: [10:1.00]
; HASWELL-NEXT: fsubrl (%eax) # sched: [10:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fsubr:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fsubr %st(0), %st(1) # sched: [3:1.00]
-; BROADWELL-NEXT: fsubr %st(2) # sched: [3:1.00]
+; BROADWELL-NEXT: fsubr %st, %st(1) # sched: [3:1.00]
+; BROADWELL-NEXT: fsubr %st(2), %st # sched: [3:1.00]
; BROADWELL-NEXT: fsubrs (%ecx) # sched: [9:1.00]
; BROADWELL-NEXT: fsubrl (%eax) # sched: [9:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsubr:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fsubr %st(0), %st(1) # sched: [3:1.00]
-; SKYLAKE-NEXT: fsubr %st(2) # sched: [3:1.00]
+; SKYLAKE-NEXT: fsubr %st, %st(1) # sched: [3:1.00]
+; SKYLAKE-NEXT: fsubr %st(2), %st # sched: [3:1.00]
; SKYLAKE-NEXT: fsubrs (%ecx) # sched: [10:1.00]
; SKYLAKE-NEXT: fsubrl (%eax) # sched: [10:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsubr:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fsubr %st(0), %st(1) # sched: [3:1.00]
-; SKX-NEXT: fsubr %st(2) # sched: [3:1.00]
+; SKX-NEXT: fsubr %st, %st(1) # sched: [3:1.00]
+; SKX-NEXT: fsubr %st(2), %st # sched: [3:1.00]
; SKX-NEXT: fsubrs (%ecx) # sched: [10:1.00]
; SKX-NEXT: fsubrl (%eax) # sched: [10:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fsubr:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fsubr %st(0), %st(1) # sched: [5:1.00]
-; BDVER2-NEXT: fsubr %st(2) # sched: [5:1.00]
+; BDVER2-NEXT: fsubr %st, %st(1) # sched: [5:1.00]
+; BDVER2-NEXT: fsubr %st(2), %st # sched: [5:1.00]
; BDVER2-NEXT: fsubrs (%ecx) # sched: [10:1.00]
; BDVER2-NEXT: fsubrl (%eax) # sched: [10:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fsubr:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fsubr %st(0), %st(1) # sched: [3:1.00]
-; BTVER2-NEXT: fsubr %st(2) # sched: [3:1.00]
+; BTVER2-NEXT: fsubr %st, %st(1) # sched: [3:1.00]
+; BTVER2-NEXT: fsubr %st(2), %st # sched: [3:1.00]
; BTVER2-NEXT: fsubrs (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: fsubrl (%eax) # sched: [8:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsubr:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fsubr %st(0), %st(1) # sched: [3:1.00]
-; ZNVER1-NEXT: fsubr %st(2) # sched: [3:1.00]
+; ZNVER1-NEXT: fsubr %st, %st(1) # sched: [3:1.00]
+; ZNVER1-NEXT: fsubr %st(2), %st # sched: [3:1.00]
; ZNVER1-NEXT: fsubrs (%ecx) # sched: [10:1.00]
; ZNVER1-NEXT: fsubrl (%eax) # sched: [10:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fsubr %st(0), %st(1) \0A\09 fsubr %st(2), %st(0) \0A\09 fsubrs $0 \0A\09 fsubrl $1", "*m,*m"(float *%a0, double *%a1) nounwind
ret void
}
define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fsubrp_fisubr:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fsubrp %st(1)
-; GENERIC-NEXT: fsubrp %st(2)
+; GENERIC-NEXT: fsubrp %st, %st(1)
+; GENERIC-NEXT: fsubrp %st, %st(2)
; GENERIC-NEXT: fisubrs (%ecx)
; GENERIC-NEXT: fisubrl (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsubrp_fisubr:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
-; ATOM-NEXT: fsubrp %st(1) # sched: [5:5.00]
-; ATOM-NEXT: fsubrp %st(2) # sched: [5:5.00]
+; ATOM-NEXT: fsubrp %st, %st(1) # sched: [5:5.00]
+; ATOM-NEXT: fsubrp %st, %st(2) # sched: [5:5.00]
; ATOM-NEXT: fisubrs (%ecx) # sched: [5:5.00]
; ATOM-NEXT: fisubrl (%eax) # sched: [5:5.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsubrp_fisubr:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
-; SLM-NEXT: fsubrp %st(1) # sched: [3:1.00]
-; SLM-NEXT: fsubrp %st(2) # sched: [3:1.00]
+; SLM-NEXT: fsubrp %st, %st(1) # sched: [3:1.00]
+; SLM-NEXT: fsubrp %st, %st(2) # sched: [3:1.00]
; SLM-NEXT: fisubrs (%ecx) # sched: [6:1.00]
; SLM-NEXT: fisubrl (%eax) # sched: [6:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsubrp_fisubr:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
-; SANDY-NEXT: fsubrp %st(1) # sched: [3:1.00]
-; SANDY-NEXT: fsubrp %st(2) # sched: [3:1.00]
+; SANDY-NEXT: fsubrp %st, %st(1) # sched: [3:1.00]
+; SANDY-NEXT: fsubrp %st, %st(2) # sched: [3:1.00]
; SANDY-NEXT: fisubrs (%ecx) # sched: [13:2.00]
; SANDY-NEXT: fisubrl (%eax) # sched: [13:2.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fsubrp_fisubr:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fsubrp %st(1) # sched: [3:1.00]
-; HASWELL-NEXT: fsubrp %st(2) # sched: [3:1.00]
+; HASWELL-NEXT: fsubrp %st, %st(1) # sched: [3:1.00]
+; HASWELL-NEXT: fsubrp %st, %st(2) # sched: [3:1.00]
; HASWELL-NEXT: fisubrs (%ecx) # sched: [13:2.00]
; HASWELL-NEXT: fisubrl (%eax) # sched: [13:2.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fsubrp_fisubr:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fsubrp %st(1) # sched: [3:1.00]
-; BROADWELL-NEXT: fsubrp %st(2) # sched: [3:1.00]
+; BROADWELL-NEXT: fsubrp %st, %st(1) # sched: [3:1.00]
+; BROADWELL-NEXT: fsubrp %st, %st(2) # sched: [3:1.00]
; BROADWELL-NEXT: fisubrs (%ecx) # sched: [12:2.00]
; BROADWELL-NEXT: fisubrl (%eax) # sched: [12:2.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsubrp_fisubr:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fsubrp %st(1) # sched: [3:1.00]
-; SKYLAKE-NEXT: fsubrp %st(2) # sched: [3:1.00]
+; SKYLAKE-NEXT: fsubrp %st, %st(1) # sched: [3:1.00]
+; SKYLAKE-NEXT: fsubrp %st, %st(2) # sched: [3:1.00]
; SKYLAKE-NEXT: fisubrs (%ecx) # sched: [13:2.00]
; SKYLAKE-NEXT: fisubrl (%eax) # sched: [13:2.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsubrp_fisubr:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
-; SKX-NEXT: fsubrp %st(1) # sched: [3:1.00]
-; SKX-NEXT: fsubrp %st(2) # sched: [3:1.00]
+; SKX-NEXT: fsubrp %st, %st(1) # sched: [3:1.00]
+; SKX-NEXT: fsubrp %st, %st(2) # sched: [3:1.00]
; SKX-NEXT: fisubrs (%ecx) # sched: [13:2.00]
; SKX-NEXT: fisubrl (%eax) # sched: [13:2.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fsubrp_fisubr:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fsubrp %st(1) # sched: [5:1.00]
-; BDVER2-NEXT: fsubrp %st(2) # sched: [5:1.00]
+; BDVER2-NEXT: fsubrp %st, %st(1) # sched: [5:1.00]
+; BDVER2-NEXT: fsubrp %st, %st(2) # sched: [5:1.00]
; BDVER2-NEXT: fisubrs (%ecx) # sched: [10:1.00]
; BDVER2-NEXT: fisubrl (%eax) # sched: [10:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fsubrp_fisubr:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fsubrp %st(1) # sched: [3:1.00]
-; BTVER2-NEXT: fsubrp %st(2) # sched: [3:1.00]
+; BTVER2-NEXT: fsubrp %st, %st(1) # sched: [3:1.00]
+; BTVER2-NEXT: fsubrp %st, %st(2) # sched: [3:1.00]
; BTVER2-NEXT: fisubrs (%ecx) # sched: [8:1.00]
; BTVER2-NEXT: fisubrl (%eax) # sched: [8:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsubrp_fisubr:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fsubrp %st(1) # sched: [3:1.00]
-; ZNVER1-NEXT: fsubrp %st(2) # sched: [3:1.00]
+; ZNVER1-NEXT: fsubrp %st, %st(1) # sched: [3:1.00]
+; ZNVER1-NEXT: fsubrp %st, %st(2) # sched: [3:1.00]
; ZNVER1-NEXT: fisubrs (%ecx) # sched: [10:1.00]
; ZNVER1-NEXT: fisubrl (%eax) # sched: [10:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fsubrp \0A\09 fsubrp %st(2), %st(0) \0A\09 fisubrs $0 \0A\09 fisubrl $1", "*m,*m"(i16 *%a0, i32 *%a1) nounwind
ret void
}
define void @test_ftst() optsize {
; GENERIC-LABEL: test_ftst:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: ftst
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_ftst:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: ftst # sched: [9:4.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_ftst:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: ftst # sched: [3:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_ftst:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: ftst # sched: [3:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_ftst:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: ftst # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_ftst:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: ftst # sched: [3:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_ftst:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: ftst # sched: [2:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_ftst:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: ftst # sched: [2:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_ftst:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: ftst # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_ftst:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: ftst # sched: [3:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ftst:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: ftst # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "ftst", ""() nounwind
ret void
}
define void @test_fucom_fucomp_fucompp() optsize {
; GENERIC-LABEL: test_fucom_fucomp_fucompp:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fucom %st(1)
; GENERIC-NEXT: fucom %st(3)
; GENERIC-NEXT: fucomp %st(1)
; GENERIC-NEXT: fucomp %st(3)
; GENERIC-NEXT: fucompp
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fucom_fucomp_fucompp:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fucom %st(1) # sched: [1:1.00]
; ATOM-NEXT: fucom %st(3) # sched: [1:1.00]
; ATOM-NEXT: fucomp %st(1) # sched: [1:1.00]
; ATOM-NEXT: fucomp %st(3) # sched: [1:1.00]
; ATOM-NEXT: fucompp # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fucom_fucomp_fucompp:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fucom %st(1) # sched: [3:1.00]
; SLM-NEXT: fucom %st(3) # sched: [3:1.00]
; SLM-NEXT: fucomp %st(1) # sched: [3:1.00]
; SLM-NEXT: fucomp %st(3) # sched: [3:1.00]
; SLM-NEXT: fucompp # sched: [3:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fucom_fucomp_fucompp:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fucom %st(1) # sched: [1:1.00]
; SANDY-NEXT: fucom %st(3) # sched: [1:1.00]
; SANDY-NEXT: fucomp %st(1) # sched: [1:1.00]
; SANDY-NEXT: fucomp %st(3) # sched: [1:1.00]
; SANDY-NEXT: fucompp # sched: [3:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fucom_fucomp_fucompp:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fucom %st(1) # sched: [1:1.00]
; HASWELL-NEXT: fucom %st(3) # sched: [1:1.00]
; HASWELL-NEXT: fucomp %st(1) # sched: [1:1.00]
; HASWELL-NEXT: fucomp %st(3) # sched: [1:1.00]
; HASWELL-NEXT: fucompp # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fucom_fucomp_fucompp:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fucom %st(1) # sched: [1:1.00]
; BROADWELL-NEXT: fucom %st(3) # sched: [1:1.00]
; BROADWELL-NEXT: fucomp %st(1) # sched: [1:1.00]
; BROADWELL-NEXT: fucomp %st(3) # sched: [1:1.00]
; BROADWELL-NEXT: fucompp # sched: [3:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fucom_fucomp_fucompp:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fucom %st(1) # sched: [1:1.00]
; SKYLAKE-NEXT: fucom %st(3) # sched: [1:1.00]
; SKYLAKE-NEXT: fucomp %st(1) # sched: [1:1.00]
; SKYLAKE-NEXT: fucomp %st(3) # sched: [1:1.00]
; SKYLAKE-NEXT: fucompp # sched: [2:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fucom_fucomp_fucompp:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fucom %st(1) # sched: [1:1.00]
; SKX-NEXT: fucom %st(3) # sched: [1:1.00]
; SKX-NEXT: fucomp %st(1) # sched: [1:1.00]
; SKX-NEXT: fucomp %st(3) # sched: [1:1.00]
; SKX-NEXT: fucompp # sched: [2:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fucom_fucomp_fucompp:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fucom %st(1) # sched: [1:1.00]
; BDVER2-NEXT: fucom %st(3) # sched: [1:1.00]
; BDVER2-NEXT: fucomp %st(1) # sched: [1:1.00]
; BDVER2-NEXT: fucomp %st(3) # sched: [1:1.00]
; BDVER2-NEXT: fucompp # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fucom_fucomp_fucompp:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fucom %st(1) # sched: [3:1.00]
; BTVER2-NEXT: fucom %st(3) # sched: [3:1.00]
; BTVER2-NEXT: fucomp %st(1) # sched: [3:1.00]
; BTVER2-NEXT: fucomp %st(3) # sched: [3:1.00]
; BTVER2-NEXT: fucompp # sched: [3:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fucom_fucomp_fucompp:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fucom %st(1) # sched: [1:1.00]
; ZNVER1-NEXT: fucom %st(3) # sched: [1:1.00]
; ZNVER1-NEXT: fucomp %st(1) # sched: [1:1.00]
; ZNVER1-NEXT: fucomp %st(3) # sched: [1:1.00]
; ZNVER1-NEXT: fucompp # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fucom \0A\09 fucom %st(3) \0A\09 fucomp \0A\09 fucomp %st(3) \0A\09 fucompp", ""() nounwind
ret void
}
define void @test_fucomi_fucomip() optsize {
; GENERIC-LABEL: test_fucomi_fucomip:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
-; GENERIC-NEXT: fucomi %st(3)
-; GENERIC-NEXT: fucompi %st(3)
+; GENERIC-NEXT: fucomi %st(3), %st
+; GENERIC-NEXT: fucompi %st(3), %st
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fucomi_fucomip:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
-; ATOM-NEXT: fucomi %st(3) # sched: [9:4.50]
-; ATOM-NEXT: fucompi %st(3) # sched: [9:4.50]
+; ATOM-NEXT: fucomi %st(3), %st # sched: [9:4.50]
+; ATOM-NEXT: fucompi %st(3), %st # sched: [9:4.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fucomi_fucomip:
; SLM: # %bb.0:
; SLM-NEXT: #APP
-; SLM-NEXT: fucomi %st(3) # sched: [3:1.00]
-; SLM-NEXT: fucompi %st(3) # sched: [3:1.00]
+; SLM-NEXT: fucomi %st(3), %st # sched: [3:1.00]
+; SLM-NEXT: fucompi %st(3), %st # sched: [3:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fucomi_fucomip:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
-; SANDY-NEXT: fucomi %st(3) # sched: [3:1.00]
-; SANDY-NEXT: fucompi %st(3) # sched: [3:1.00]
+; SANDY-NEXT: fucomi %st(3), %st # sched: [3:1.00]
+; SANDY-NEXT: fucompi %st(3), %st # sched: [3:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fucomi_fucomip:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
-; HASWELL-NEXT: fucomi %st(3) # sched: [1:0.50]
-; HASWELL-NEXT: fucompi %st(3) # sched: [1:0.50]
+; HASWELL-NEXT: fucomi %st(3), %st # sched: [1:0.50]
+; HASWELL-NEXT: fucompi %st(3), %st # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fucomi_fucomip:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
-; BROADWELL-NEXT: fucomi %st(3) # sched: [3:1.00]
-; BROADWELL-NEXT: fucompi %st(3) # sched: [3:1.00]
+; BROADWELL-NEXT: fucomi %st(3), %st # sched: [3:1.00]
+; BROADWELL-NEXT: fucompi %st(3), %st # sched: [3:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fucomi_fucomip:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
-; SKYLAKE-NEXT: fucomi %st(3) # sched: [2:1.00]
-; SKYLAKE-NEXT: fucompi %st(3) # sched: [2:1.00]
+; SKYLAKE-NEXT: fucomi %st(3), %st # sched: [2:1.00]
+; SKYLAKE-NEXT: fucompi %st(3), %st # sched: [2:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fucomi_fucomip:
; SKX: # %bb.0:
; SKX-NEXT: #APP
-; SKX-NEXT: fucomi %st(3) # sched: [2:1.00]
-; SKX-NEXT: fucompi %st(3) # sched: [2:1.00]
+; SKX-NEXT: fucomi %st(3), %st # sched: [2:1.00]
+; SKX-NEXT: fucompi %st(3), %st # sched: [2:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fucomi_fucomip:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
-; BDVER2-NEXT: fucomi %st(3) # sched: [1:1.00]
-; BDVER2-NEXT: fucompi %st(3) # sched: [1:1.00]
+; BDVER2-NEXT: fucomi %st(3), %st # sched: [1:1.00]
+; BDVER2-NEXT: fucompi %st(3), %st # sched: [1:1.00]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fucomi_fucomip:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
-; BTVER2-NEXT: fucomi %st(3) # sched: [3:1.00]
-; BTVER2-NEXT: fucompi %st(3) # sched: [3:1.00]
+; BTVER2-NEXT: fucomi %st(3), %st # sched: [3:1.00]
+; BTVER2-NEXT: fucompi %st(3), %st # sched: [3:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fucomi_fucomip:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
-; ZNVER1-NEXT: fucomi %st(3) # sched: [9:0.50]
-; ZNVER1-NEXT: fucompi %st(3) # sched: [9:0.50]
+; ZNVER1-NEXT: fucomi %st(3), %st # sched: [9:0.50]
+; ZNVER1-NEXT: fucompi %st(3), %st # sched: [9:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fucomi %st(3) \0A\09 fucomip %st(3)", ""() nounwind
ret void
}
define void @test_fwait() optsize {
; GENERIC-LABEL: test_fwait:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: wait
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fwait:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fwait:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fwait:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fwait:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: wait # sched: [2:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fwait:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fwait:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fwait:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fwait:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: wait # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fwait:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: wait # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fwait:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fwait", ""() nounwind
ret void
}
define void @test_fxam() optsize {
; GENERIC-LABEL: test_fxam:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fxam
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fxam:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fxam # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fxam:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fxam # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fxam:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fxam # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fxam:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fxam # sched: [1:2.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fxam:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fxam # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fxam:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fxam # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fxam:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fxam # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fxam:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fxam # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fxam:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fxam # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fxam:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fxam # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fxam", ""() nounwind
ret void
}
define void @test_fxch() optsize {
; GENERIC-LABEL: test_fxch:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fxch %st(1)
; GENERIC-NEXT: fxch %st(3)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fxch:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fxch %st(1) # sched: [1:1.00]
; ATOM-NEXT: fxch %st(3) # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fxch:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fxch %st(1) # sched: [1:0.50]
; SLM-NEXT: fxch %st(3) # sched: [1:0.50]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fxch:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fxch %st(1) # sched: [1:0.33]
; SANDY-NEXT: fxch %st(3) # sched: [1:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fxch:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fxch %st(1) # sched: [17:4.00]
; HASWELL-NEXT: fxch %st(3) # sched: [17:4.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fxch:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fxch %st(1) # sched: [14:4.00]
; BROADWELL-NEXT: fxch %st(3) # sched: [14:4.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fxch:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fxch %st(1) # sched: [17:4.00]
; SKYLAKE-NEXT: fxch %st(3) # sched: [17:4.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fxch:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fxch %st(1) # sched: [17:4.00]
; SKX-NEXT: fxch %st(3) # sched: [17:4.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fxch:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fxch %st(1) # sched: [1:0.50]
; BDVER2-NEXT: fxch %st(3) # sched: [1:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fxch:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fxch %st(1) # sched: [1:0.50]
; BTVER2-NEXT: fxch %st(3) # sched: [1:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fxch:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fxch %st(1) # sched: [1:0.25]
; ZNVER1-NEXT: fxch %st(3) # sched: [1:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fxch \0A\09 fxch %st(3)", ""() nounwind
ret void
}
define void @test_fxrstor_fxsave(i8* %a0) optsize {
; GENERIC-LABEL: test_fxrstor_fxsave:
; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fxrstor (%eax)
; GENERIC-NEXT: fxsave (%eax)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fxrstor_fxsave:
; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fxrstor (%eax) # sched: [141:70.50]
; ATOM-NEXT: fxsave (%eax) # sched: [140:70.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fxrstor_fxsave:
; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fxrstor (%eax) # sched: [100:1.00]
; SLM-NEXT: fxsave (%eax) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fxrstor_fxsave:
; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fxrstor (%eax) # sched: [5:2.00]
; SANDY-NEXT: fxsave (%eax) # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fxrstor_fxsave:
; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fxrstor (%eax) # sched: [64:16.50]
; HASWELL-NEXT: fxsave (%eax) # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fxrstor_fxsave:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fxrstor (%eax) # sched: [63:16.50]
; BROADWELL-NEXT: fxsave (%eax) # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fxrstor_fxsave:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fxrstor (%eax) # sched: [63:16.50]
; SKYLAKE-NEXT: fxsave (%eax) # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fxrstor_fxsave:
; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fxrstor (%eax) # sched: [63:16.50]
; SKX-NEXT: fxsave (%eax) # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fxrstor_fxsave:
; BDVER2: # %bb.0:
; BDVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fxrstor (%eax) # sched: [100:0.50]
; BDVER2-NEXT: fxsave (%eax) # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fxrstor_fxsave:
; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fxrstor (%eax) # sched: [100:0.50]
; BTVER2-NEXT: fxsave (%eax) # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fxrstor_fxsave:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fxrstor (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: fxsave (%eax) # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fxrstor $0 \0A\09 fxsave $0", "*m"(i8 *%a0) nounwind
ret void
}
define void @test_fxtract() optsize {
; GENERIC-LABEL: test_fxtract:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fxtract
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fxtract:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fxtract # sched: [25:12.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fxtract:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fxtract # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fxtract:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fxtract # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fxtract:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fxtract # sched: [15:4.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fxtract:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fxtract # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fxtract:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fxtract # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fxtract:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fxtract # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fxtract:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fxtract # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fxtract:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fxtract # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fxtract:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fxtract # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fxtract", ""() nounwind
ret void
}
define void @test_fyl2x() optsize {
; GENERIC-LABEL: test_fyl2x:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fyl2x
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fyl2x:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fyl2x # sched: [146:73.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fyl2x:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fyl2x # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fyl2x:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fyl2x # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fyl2x:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fyl2x # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fyl2x:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fyl2x # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fyl2x:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fyl2x # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fyl2x:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fyl2x # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fyl2x:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fyl2x # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fyl2x:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fyl2x # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fyl2x:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fyl2x # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fyl2x", ""() nounwind
ret void
}
define void @test_fyl2xp1() optsize {
; GENERIC-LABEL: test_fyl2xp1:
; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fyl2xp1
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fyl2xp1:
; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fyl2xp1 # sched: [147:73.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fyl2xp1:
; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fyl2xp1 # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fyl2xp1:
; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fyl2xp1 # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [6:1.00]
;
; HASWELL-LABEL: test_fyl2xp1:
; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fyl2xp1 # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [7:1.00]
;
; BROADWELL-LABEL: test_fyl2xp1:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fyl2xp1 # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fyl2xp1:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fyl2xp1 # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fyl2xp1:
; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fyl2xp1 # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BDVER2-LABEL: test_fyl2xp1:
; BDVER2: # %bb.0:
; BDVER2-NEXT: #APP
; BDVER2-NEXT: fyl2xp1 # sched: [100:0.50]
; BDVER2-NEXT: #NO_APP
; BDVER2-NEXT: retl # sched: [5:1.00]
;
; BTVER2-LABEL: test_fyl2xp1:
; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fyl2xp1 # sched: [100:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fyl2xp1:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fyl2xp1 # sched: [100:0.25]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: retl # sched: [1:0.50]
tail call void asm sideeffect "fyl2xp1", ""() nounwind
ret void
}
Index: vendor/llvm/dist-release_80/test/DebugInfo/Mips/eh_frame.ll
===================================================================
--- vendor/llvm/dist-release_80/test/DebugInfo/Mips/eh_frame.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/DebugInfo/Mips/eh_frame.ll (revision 344166)
@@ -1,38 +1,50 @@
-; RUN: llc -mtriple mips-unknown-linux-gnu -mattr=+micromips -O3 -filetype=obj -o - %s | llvm-readelf -r | FileCheck %s
+; RUN: llc -mtriple mips-unknown-linux-gnu -mattr=+micromips -relocation-model=static -O3 -filetype=obj -o - %s | \
+; RUN: llvm-readelf -r | FileCheck %s --check-prefix=CHECK-READELF
+; RUN: llc -mtriple mips-unknown-linux-gnu -mattr=+micromips -relocation-model=pic -O3 -filetype=obj -o - %s | \
+; RUN: llvm-readelf -r | FileCheck %s --check-prefix=CHECK-READELF
+; RUN: llc -mtriple mips-unknown-linux-gnu -mattr=+micromips -relocation-model=static -O3 -filetype=obj -o - %s | \
+; RUN: llvm-objdump -s -j .gcc_except_table - | FileCheck %s --check-prefix=CHECK-EXCEPT-TABLE-STATIC
+; RUN: llc -mtriple mips-unknown-linux-gnu -mattr=+micromips -relocation-model=pic -O3 -filetype=obj -o - %s | \
+; RUN: llvm-objdump -s -j .gcc_except_table - | FileCheck %s --check-prefix=CHECK-EXCEPT-TABLE-PIC
-; CHECK: .rel.eh_frame
-; CHECK: DW.ref.__gxx_personality_v0
-; CHECK-NEXT: .text
-; CHECK-NEXT: .gcc_except_table
+; CHECK-READELF: .rel.eh_frame
+; CHECK-READELF: DW.ref.__gxx_personality_v0
+; CHECK-READELF-NEXT: .text
+; CHECK-READELF-NEXT: .gcc_except_table
+
+; CHECK-EXCEPT-TABLE-STATIC: 0000 ff9b1501 0c011500 00150e23 01231e00 ...........#.#..
+; CHECK-EXCEPT-TABLE-STATIC: 0010 00010000 00000000
+; CHECK-EXCEPT-TABLE-PIC: 0000 ff9b1501 0c012d00 002d133f 013f2a00 ......-..-.?.?*.
+; CHECK-EXCEPT-TABLE-PIC: 0010 00010000 00000000 ........
@_ZTIi = external constant i8*
define dso_local i32 @main() local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
entry:
%exception.i = tail call i8* @__cxa_allocate_exception(i32 4) nounwind
%0 = bitcast i8* %exception.i to i32*
store i32 5, i32* %0, align 16
invoke void @__cxa_throw(i8* %exception.i, i8* bitcast (i8** @_ZTIi to i8*), i8* null) noreturn
to label %.noexc unwind label %return
.noexc:
unreachable
return:
%1 = landingpad { i8*, i32 }
catch i8* null
%2 = extractvalue { i8*, i32 } %1, 0
%3 = tail call i8* @__cxa_begin_catch(i8* %2) nounwind
tail call void @__cxa_end_catch()
ret i32 0
}
declare i32 @__gxx_personality_v0(...)
declare i8* @__cxa_begin_catch(i8*) local_unnamed_addr
declare void @__cxa_end_catch() local_unnamed_addr
declare i8* @__cxa_allocate_exception(i32) local_unnamed_addr
declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
Index: vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/global_ctors_2to3.ll
===================================================================
--- vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/global_ctors_2to3.ll (nonexistent)
+++ vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/global_ctors_2to3.ll (revision 344166)
@@ -0,0 +1,18 @@
+; MSan converts 2-element global_ctors to 3-element when adding the new entry.
+; RUN: opt < %s -msan-with-comdat -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -msan -msan-with-comdat -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK: $msan.module_ctor = comdat any
+; CHECK: @llvm.global_ctors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @f, i8* null }, { i32, void ()*, i8* } { i32 0, void ()* @msan.module_ctor, i8* bitcast (void ()* @msan.module_ctor to i8*) }]
+
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @f }]
+
+define internal void @f() {
+entry:
+ ret void
+}
+
+; CHECK: define internal void @msan.module_ctor() comdat {
Index: vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/msan_basic.ll
===================================================================
--- vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/msan_basic.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/msan_basic.ll (revision 344166)
@@ -1,994 +1,995 @@
; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck \
; RUN: -allow-deprecated-dag-overlap %s
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck -allow-deprecated-dag-overlap %s
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S \
; RUN: -passes=msan 2>&1 | FileCheck -allow-deprecated-dag-overlap \
; RUN: -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -allow-deprecated-dag-overlap -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @__msan_init, i8* null }
+; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null }
; Check the presence and the linkage type of __msan_track_origins and
; other interface symbols.
; CHECK-NOT: @__msan_track_origins
; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32
; Check instrumentation of stores
define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
store i32 %x, i32* %p, align 4
ret void
}
; CHECK-LABEL: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK: store
; CHECK: ret void
; Check instrumentation of aligned stores
; Shadow store has the same alignment as the original store; origin store
; does not specify explicit alignment.
define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
store i32 %x, i32* %p, align 32
ret void
}
; CHECK-LABEL: @AlignedStore
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store {{.*}} align 32
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK: store {{.*}} align 32
; CHECK: ret void
; load followed by cmp: check that we load the shadow and call __msan_warning.
define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
%0 = load i32, i32* %a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
tail call void (...) @foo() nounwind
br label %if.end
if.end: ; preds = %entry, %if.then
ret void
}
declare void @foo(...)
; CHECK-LABEL: @LoadAndCmp
; CHECK: = load
; CHECK: = load
; CHECK: call void @__msan_warning_noreturn()
; CHECK-NEXT: call void asm sideeffect
; CHECK-NEXT: unreachable
; CHECK: ret void
; Check that we store the shadow for the retval.
define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
entry:
ret i32 123
}
; CHECK-LABEL: @ReturnInt
; CHECK: store i32 0,{{.*}}__msan_retval_tls
; CHECK: ret i32
; Check that we get the shadow for the retval.
define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
%call = tail call i32 @ReturnInt() nounwind
store i32 %call, i32* %a, align 4
ret void
}
; CHECK-LABEL: @CopyRetVal
; CHECK: load{{.*}}__msan_retval_tls
; CHECK: store
; CHECK: store
; CHECK: ret void
; Check that we generate PHIs for shadow.
define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
entry:
%tobool = icmp eq i32* %b, null
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%0 = load i32, i32* %b, align 4
br label %if.end
if.else: ; preds = %entry
%1 = load i32, i32* %c, align 4
br label %if.end
if.end: ; preds = %if.else, %if.then
%t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
store i32 %t.0, i32* %a, align 4
ret void
}
; CHECK-LABEL: @FuncWithPhi
; CHECK: = phi
; CHECK-NEXT: = phi
; CHECK: store
; CHECK: store
; CHECK: ret void
; Compute shadow for "x << 10"
define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
%0 = load i32, i32* %x, align 4
%1 = shl i32 %0, 10
store i32 %1, i32* %x, align 4
ret void
}
; CHECK-LABEL: @ShlConst
; CHECK: = load
; CHECK: = load
; CHECK: shl
; CHECK: shl
; CHECK: store
; CHECK: store
; CHECK: ret void
; Compute shadow for "10 << x": it should have 'sext i1'.
define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
%0 = load i32, i32* %x, align 4
%1 = shl i32 10, %0
store i32 %1, i32* %x, align 4
ret void
}
; CHECK-LABEL: @ShlNonConst
; CHECK: = load
; CHECK: = load
; CHECK: = sext i1
; CHECK: store
; CHECK: store
; CHECK: ret void
; SExt
define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
entry:
%0 = load i16, i16* %b, align 2
%1 = sext i16 %0 to i32
store i32 %1, i32* %a, align 4
ret void
}
; CHECK-LABEL: @SExt
; CHECK: = load
; CHECK: = load
; CHECK: = sext
; CHECK: = sext
; CHECK: store
; CHECK: store
; CHECK: ret void
; memset
define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
entry:
call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i1 false)
ret void
}
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
; CHECK-LABEL: @MemSet
; CHECK: call i8* @__msan_memset
; CHECK: ret void
; memcpy
define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
ret void
}
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
; CHECK-LABEL: @MemCpy
; CHECK: call i8* @__msan_memcpy
; CHECK: ret void
; memmove is lowered to a call
define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
ret void
}
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
; CHECK-LABEL: @MemMove
; CHECK: call i8* @__msan_memmove
; CHECK: ret void
;; ------------
;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have
;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
;; verify that MSAN handles these intrinsics properly once they have been
;; added to that class hierarchy.
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
; CHECK-LABEL: atomic_memcpy
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
; CHECK-NEXT: ret void
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
ret void
}
define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
; CHECK-LABEL: atomic_memmove
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
; CHECK-NEXT: ret void
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
ret void
}
define void @atomic_memset(i8* nocapture %x) nounwind {
; CHECK-LABEL: atomic_memset
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
; CHECK-NEXT: ret void
call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
ret void
}
;; ------------
; Check that we propagate shadow for "select"
define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
%cond = select i1 %c, i32 %a, i32 %b
ret i32 %cond
}
; CHECK-LABEL: @Select
; CHECK: select i1
; CHECK-DAG: or i32
; CHECK-DAG: xor i32
; CHECK: or i32
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select i1
; CHECK: store i32{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret i32
; Check that we propagate origin for "select" with vector condition.
; Select condition is flattened to i1, which is then used to select one of the
; argument origins.
define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
entry:
%cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %cond
}
; CHECK-LABEL: @SelectVector
; CHECK: select <8 x i1>
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select <8 x i1>
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select <8 x i1>
; CHECK: store <8 x i16>{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret <8 x i16>
; Check that we propagate origin for "select" with scalar condition and vector
; arguments. Select condition shadow is sign-extended to the vector type and
; mixed into the result shadow.
define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
%cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %cond
}
; CHECK-LABEL: @SelectVector2
; CHECK: select i1
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-DAG: select i1
; CHECK: ret <8 x i16>
define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
entry:
%c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
ret { i64, i64 } %c
}
; CHECK-LABEL: @SelectStruct
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
; CHECK: ret { i64, i64 }
define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
entry:
%c = select i1 %x, { i64*, double } %a, { i64*, double } %b
ret { i64*, double } %c
}
; CHECK-LABEL: @SelectStruct2
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
; CHECK: ret { i64*, double }
define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
%0 = inttoptr i64 %x to i8*
ret i8* %0
}
; CHECK-LABEL: @IntToPtr
; CHECK: load i64, i64*{{.*}}__msan_param_tls
; CHECK-ORIGINS-NEXT: load i32, i32*{{.*}}__msan_param_origin_tls
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*
define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
entry:
%0 = inttoptr i16 %x to i8*
ret i8* %0
}
; CHECK-LABEL: @IntToPtr_ZExt
; CHECK: load i16, i16*{{.*}}__msan_param_tls
; CHECK: zext
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*
; Check that we insert exactly one check on udiv
; (2nd arg shadow is checked, 1st arg shadow is propagated)
define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
entry:
%div = udiv i32 %a, %b
ret i32 %div
}
; CHECK-LABEL: @Div
; CHECK: icmp
; CHECK: call void @__msan_warning
; CHECK-NOT: icmp
; CHECK: udiv
; CHECK-NOT: icmp
; CHECK: ret i32
; Check that fdiv, unlike udiv, simply propagates shadow.
define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory {
entry:
%c = fdiv float %a, %b
ret float %c
}
; CHECK-LABEL: @FDiv
; CHECK: %[[SA:.*]] = load i32,{{.*}}@__msan_param_tls
; CHECK: %[[SB:.*]] = load i32,{{.*}}@__msan_param_tls
; CHECK: %[[SC:.*]] = or i32 %[[SB]], %[[SA]]
; CHECK: = fdiv float
; CHECK: store i32 %[[SC]], i32* {{.*}}@__msan_retval_tls
; CHECK: ret float
; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
define zeroext i1 @ICmpSLTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp slt i32 %x, 0
ret i1 %1
}
; CHECK-LABEL: @ICmpSLTZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
define zeroext i1 @ICmpSGEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sge i32 %x, 0
ret i1 %1
}
; CHECK-LABEL: @ICmpSGEZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
define zeroext i1 @ICmpSGTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sgt i32 0, %x
ret i1 %1
}
; CHECK-LABEL: @ICmpSGTZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
define zeroext i1 @ICmpSLEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sle i32 0, %x
ret i1 %1
}
; CHECK-LABEL: @ICmpSLEZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
; Check that we propagate shadow for x<=-1, x>-1, etc (i.e. sign bit tests)
define zeroext i1 @ICmpSLTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp slt i32 -1, %x
ret i1 %1
}
; CHECK-LABEL: @ICmpSLTAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
define zeroext i1 @ICmpSGEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sge i32 -1, %x
ret i1 %1
}
; CHECK-LABEL: @ICmpSGEAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
define zeroext i1 @ICmpSGTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sgt i32 %x, -1
ret i1 %1
}
; CHECK-LABEL: @ICmpSGTAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
define zeroext i1 @ICmpSLEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sle i32 %x, -1
ret i1 %1
}
; CHECK-LABEL: @ICmpSLEAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
; of the vector arguments.
define <2 x i1> @ICmpSLT_vector_Zero(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp slt <2 x i32*> %x, zeroinitializer
ret <2 x i1> %1
}
; CHECK-LABEL: @ICmpSLT_vector_Zero
; CHECK: icmp slt <2 x i64>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32*>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>
; Check that we propagate shadow for x<=-1, x>0, etc (i.e. sign bit tests)
; of the vector arguments.
define <2 x i1> @ICmpSLT_vector_AllOnes(<2 x i32> %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp slt <2 x i32> <i32 -1, i32 -1>, %x
ret <2 x i1> %1
}
; CHECK-LABEL: @ICmpSLT_vector_AllOnes
; CHECK: icmp slt <2 x i32>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>
; Check that we propagate shadow for unsigned relational comparisons with
; constants
define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
entry:
%cmp = icmp ugt i32 %x, 7
ret i1 %cmp
}
; CHECK-LABEL: @ICmpUGTConst
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
; Check that loads of shadow have the same alignment as the original loads.
; Check that loads of origin have the alignment of max(4, original alignment).
define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
%y = alloca i32, align 64
%1 = load volatile i32, i32* %y, align 64
ret i32 %1
}
; CHECK-LABEL: @ShadowLoadAlignmentLarge
; CHECK: load volatile i32, i32* {{.*}} align 64
; CHECK: load i32, i32* {{.*}} align 64
; CHECK: ret i32
define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
%y = alloca i32, align 2
%1 = load volatile i32, i32* %y, align 2
ret i32 %1
}
; CHECK-LABEL: @ShadowLoadAlignmentSmall
; CHECK: load volatile i32, i32* {{.*}} align 2
; CHECK: load i32, i32* {{.*}} align 2
; CHECK-ORIGINS: load i32, i32* {{.*}} align 4
; CHECK: ret i32
; Test vector manipulation instructions.
; Check that the same bit manipulation is applied to the shadow values.
; Check that there is a zero test of the shadow of %idx argument, where present.
define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
%x = extractelement <4 x i32> %vec, i32 %idx
ret i32 %x
}
; CHECK-LABEL: @ExtractElement
; CHECK: extractelement
; CHECK: call void @__msan_warning
; CHECK: extractelement
; CHECK: ret i32
define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
%vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
ret <4 x i32> %vec1
}
; CHECK-LABEL: @InsertElement
; CHECK: insertelement
; CHECK: call void @__msan_warning
; CHECK: insertelement
; CHECK: ret <4 x i32>
define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
%vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
<4 x i32> <i32 0, i32 4, i32 1, i32 5>
ret <4 x i32> %vec2
}
; CHECK-LABEL: @ShuffleVector
; CHECK: shufflevector
; CHECK-NOT: call void @__msan_warning
; CHECK: shufflevector
; CHECK: ret <4 x i32>
; Test bswap intrinsic instrumentation
define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
%y = tail call i32 @llvm.bswap.i32(i32 %x)
ret i32 %y
}
declare i32 @llvm.bswap.i32(i32) nounwind readnone
; CHECK-LABEL: @BSwap
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i32
; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.
define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
%x = load <8 x i8*>, <8 x i8*>* %p
ret <8 x i8*> %x
}
; CHECK-LABEL: @VectorOfPointers
; CHECK: load <8 x i8*>, <8 x i8*>*
; CHECK: load <8 x i64>, <8 x i64>*
; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
; CHECK: ret <8 x i8*>
; Test handling of va_copy.
declare void @llvm.va_copy(i8*, i8*) nounwind
define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
ret void
}
; CHECK-LABEL: @VACopy
; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i1 false)
; CHECK: ret void
; Test that va_start instrumentation does not use va_arg_tls*.
; It should work with a local stack copy instead.
%struct.__va_list_tag = type { i32, i32, i8*, i8* }
declare void @llvm.va_start(i8*) nounwind
; Function Attrs: nounwind uwtable
define void @VAStart(i32 %x, ...) sanitize_memory {
entry:
%x.addr = alloca i32, align 4
%va = alloca [1 x %struct.__va_list_tag], align 16
store i32 %x, i32* %x.addr, align 4
%arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
%arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
call void @llvm.va_start(i8* %arraydecay1)
ret void
}
; CHECK-LABEL: @VAStart
; CHECK: call void @llvm.va_start
; CHECK-NOT: @__msan_va_arg_tls
; CHECK-NOT: @__msan_va_arg_overflow_size_tls
; CHECK: ret void
; Test handling of volatile stores.
; Check that MemorySanitizer does not add a check of the value being stored.
define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
store volatile i32 %x, i32* %p, align 4
ret void
}
; CHECK-LABEL: @VolatileStore
; CHECK-NOT: @__msan_warning
; CHECK: ret void
; Test that checks are omitted and returned value is always initialized if
; sanitize_memory attribute is missing.
define i32 @NoSanitizeMemory(i32 %x) uwtable {
entry:
%tobool = icmp eq i32 %x, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
tail call void @bar()
br label %if.end
if.end: ; preds = %entry, %if.then
ret i32 %x
}
declare void @bar()
; CHECK-LABEL: @NoSanitizeMemory
; CHECK-NOT: @__msan_warning
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK-NOT: @__msan_warning
; CHECK: ret i32
; Test that stack allocations are unpoisoned in functions missing
; sanitize_memory attribute
define i32 @NoSanitizeMemoryAlloca() {
entry:
%p = alloca i32, align 4
%x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
ret i32 %x
}
declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
; CHECK-LABEL: @NoSanitizeMemoryAlloca
; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 0, i64 4, i1 false)
; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
; CHECK: ret i32
; Test that undef is unpoisoned in functions missing
; sanitize_memory attribute
define i32 @NoSanitizeMemoryUndef() {
entry:
%x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
ret i32 %x
}
declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
; CHECK-LABEL: @NoSanitizeMemoryUndef
; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
; CHECK: ret i32
; Test PHINode instrumentation in blacklisted functions
define i32 @NoSanitizeMemoryPHI(i32 %x) {
entry:
%tobool = icmp ne i32 %x, 0
br i1 %tobool, label %cond.true, label %cond.false
cond.true: ; preds = %entry
br label %cond.end
cond.false: ; preds = %entry
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
ret i32 %cond
}
; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
; CHECK: ret i32 [[A]]
; Test that there are no __msan_param_origin_tls stores when
; argument shadow is a compile-time zero constant (which is always the case
; in functions missing sanitize_memory attribute).
define i32 @NoSanitizeMemoryParamTLS(i32* nocapture readonly %x) {
entry:
%0 = load i32, i32* %x, align 4
%call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
ret i32 %call
}
declare i32 @NoSanitizeMemoryParamTLSHelper(i32 %x)
; CHECK-LABEL: define i32 @NoSanitizeMemoryParamTLS(
; CHECK-NOT: __msan_param_origin_tls
; CHECK: ret i32
; Test argument shadow alignment
define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
entry:
ret <2 x i64> %b
}
; CHECK-LABEL: @ArgumentShadowAlignment
; CHECK: load <2 x i64>, <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
; CHECK: ret <2 x i64>
; Test origin propagation for insertvalue
define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
entry:
%a = insertvalue { i64, i32 } undef, i64 %x, 0
%b = insertvalue { i64, i32 } %a, i32 %y, 1
ret { i64, i32 } %b
}
; CHECK-ORIGINS: @make_pair_64_32
; First element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0
; First element origin
; CHECK-ORIGINS: icmp ne i64
; CHECK-ORIGINS: select i1
; First element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0
; Second element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; Second element origin
; CHECK-ORIGINS: icmp ne i32
; CHECK-ORIGINS: select i1
; Second element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; CHECK-ORIGINS: ret { i64, i32 }
; Test shadow propagation for aggregates passed through ellipsis.
%struct.StructByVal = type { i32, i32, i32, i32 }
declare void @VAArgStructFn(i32 %guard, ...)
define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
entry:
%agg.tmp2 = alloca %struct.StructByVal, align 8
%0 = bitcast %struct.StructByVal* %s to i8*
%agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
%agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
%agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
%agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
%agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
%1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
ret void
}
; "undef" and the first 2 structs go to general purpose registers;
; the third struct goes to the overflow area byval
; CHECK-LABEL: @VAArgStruct
; undef not stored to __msan_va_arg_tls - it's a fixed argument
; first struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
; second struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
; third struct through the overflow area byval
; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 176
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
; CHECK: call void (i32, ...) @VAArgStructFn
; CHECK: ret void
; Same code compiled without SSE (see attributes below).
; The register save area is only 48 bytes instead of 176.
define void @VAArgStructNoSSE(%struct.StructByVal* nocapture %s) sanitize_memory #0 {
entry:
%agg.tmp2 = alloca %struct.StructByVal, align 8
%0 = bitcast %struct.StructByVal* %s to i8*
%agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
%agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
%agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
%agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
%agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
%1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
ret void
}
attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 48
declare i32 @InnerTailCall(i32 %a)
define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
%b = tail call i32 @InnerTailCall(i32 %a)
ret void
}
; We used to strip off the 'tail' modifier, but now that we unpoison return slot
; shadow before the call, we don't need to anymore.
; CHECK-LABEL: define void @MismatchedReturnTypeTailCall
; CHECK: tail call i32 @InnerTailCall
; CHECK: ret void
declare i32 @MustTailCall(i32 %a)
define i32 @CallMustTailCall(i32 %a) sanitize_memory {
%b = musttail call i32 @MustTailCall(i32 %a)
ret i32 %b
}
; For "musttail" calls we can not insert any shadow manipulating code between
; call and the return instruction. And we don't need to, because everything is
; taken care of in the callee.
; CHECK-LABEL: define i32 @CallMustTailCall
; CHECK: musttail call i32 @MustTailCall
; No instrumentation between call and ret.
; CHECK-NEXT: ret i32
declare i32* @MismatchingMustTailCall(i32 %a)
define i8* @MismatchingCallMustTailCall(i32 %a) sanitize_memory {
%b = musttail call i32* @MismatchingMustTailCall(i32 %a)
%c = bitcast i32* %b to i8*
ret i8* %c
}
; For "musttail" calls we can not insert any shadow manipulating code between
; call and the return instruction. And we don't need to, because everything is
; taken care of in the callee.
; CHECK-LABEL: define i8* @MismatchingCallMustTailCall
; CHECK: musttail call i32* @MismatchingMustTailCall
; No instrumentation between call and ret.
; CHECK-NEXT: bitcast i32* {{.*}} to i8*
; CHECK-NEXT: ret i8*
-; CHECK: declare void @__msan_init()
+; CHECK-LABEL: define internal void @msan.module_ctor() {
+; CHECK: call void @__msan_init()
Index: vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/msan_llvm_is_constant.ll
===================================================================
--- vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/msan_llvm_is_constant.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/Instrumentation/MemorySanitizer/msan_llvm_is_constant.ll (revision 344166)
@@ -1,21 +1,24 @@
; Make sure MSan doesn't insert shadow checks for @llvm.is.constant.* arguments.
+; RUN: opt < %s -msan-kernel=1 -S -passes=msan 2>&1 | FileCheck \
+; RUN: -check-prefixes=CHECK %s
; RUN: opt < %s -msan -msan-kernel=1 -S | FileCheck -check-prefixes=CHECK %s
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck -check-prefixes=CHECK %s
; RUN: opt < %s -msan -S | FileCheck -check-prefixes=CHECK %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind readnone uwtable
define dso_local i32 @bar(i32 %v) local_unnamed_addr sanitize_memory {
entry:
%0 = tail call i1 @llvm.is.constant.i32(i32 %v)
%1 = zext i1 %0 to i32
ret i32 %1
}
; CHECK-LABEL: bar
; CHECK-NOT: call void @__msan_warning
; Function Attrs: nounwind readnone
declare i1 @llvm.is.constant.i32(i32)
Index: vendor/llvm/dist-release_80/test/MC/Disassembler/X86/fp-stack.txt
===================================================================
--- vendor/llvm/dist-release_80/test/MC/Disassembler/X86/fp-stack.txt (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/Disassembler/X86/fp-stack.txt (revision 344166)
@@ -1,1061 +1,1061 @@
# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s
# RUN: llvm-mc --disassemble %s -triple=i686-apple-darwin9 | FileCheck %s
-# CHECK: fadd %st(0)
+# CHECK: fadd %st(0), %st
0xd8,0xc0
-# CHECK: fadd %st(1)
+# CHECK: fadd %st(1), %st
0xd8,0xc1
-# CHECK: fadd %st(2)
+# CHECK: fadd %st(2), %st
0xd8,0xc2
-# CHECK: fadd %st(3)
+# CHECK: fadd %st(3), %st
0xd8,0xc3
-# CHECK: fadd %st(4)
+# CHECK: fadd %st(4), %st
0xd8,0xc4
-# CHECK: fadd %st(5)
+# CHECK: fadd %st(5), %st
0xd8,0xc5
-# CHECK: fadd %st(6)
+# CHECK: fadd %st(6), %st
0xd8,0xc6
-# CHECK: fadd %st(7)
+# CHECK: fadd %st(7), %st
0xd8,0xc7
-# CHECK: fmul %st(0)
+# CHECK: fmul %st(0), %st
0xd8,0xc8
-# CHECK: fmul %st(1)
+# CHECK: fmul %st(1), %st
0xd8,0xc9
-# CHECK: fmul %st(2)
+# CHECK: fmul %st(2), %st
0xd8,0xca
-# CHECK: fmul %st(3)
+# CHECK: fmul %st(3), %st
0xd8,0xcb
-# CHECK: fmul %st(4)
+# CHECK: fmul %st(4), %st
0xd8,0xcc
-# CHECK: fmul %st(5)
+# CHECK: fmul %st(5), %st
0xd8,0xcd
-# CHECK: fmul %st(6)
+# CHECK: fmul %st(6), %st
0xd8,0xce
-# CHECK: fmul %st(7)
+# CHECK: fmul %st(7), %st
0xd8,0xcf
# CHECK: fcom %st(0)
0xd8,0xd0
# CHECK: fcom %st(1)
0xd8,0xd1
# CHECK: fcom %st(2)
0xd8,0xd2
# CHECK: fcom %st(3)
0xd8,0xd3
# CHECK: fcom %st(4)
0xd8,0xd4
# CHECK: fcom %st(5)
0xd8,0xd5
# CHECK: fcom %st(6)
0xd8,0xd6
# CHECK: fcom %st(7)
0xd8,0xd7
# CHECK: fcomp %st(0)
0xd8,0xd8
# CHECK: fcomp %st(1)
0xd8,0xd9
# CHECK: fcomp %st(2)
0xd8,0xda
# CHECK: fcomp %st(3)
0xd8,0xdb
# CHECK: fcomp %st(4)
0xd8,0xdc
# CHECK: fcomp %st(5)
0xd8,0xdd
# CHECK: fcomp %st(6)
0xd8,0xde
# CHECK: fcomp %st(7)
0xd8,0xdf
-# CHECK: fsub %st(0)
+# CHECK: fsub %st(0), %st
0xd8,0xe0
-# CHECK: fsub %st(1)
+# CHECK: fsub %st(1), %st
0xd8,0xe1
-# CHECK: fsub %st(2)
+# CHECK: fsub %st(2), %st
0xd8,0xe2
-# CHECK: fsub %st(3)
+# CHECK: fsub %st(3), %st
0xd8,0xe3
-# CHECK: fsub %st(4)
+# CHECK: fsub %st(4), %st
0xd8,0xe4
-# CHECK: fsub %st(5)
+# CHECK: fsub %st(5), %st
0xd8,0xe5
-# CHECK: fsub %st(6)
+# CHECK: fsub %st(6), %st
0xd8,0xe6
-# CHECK: fsub %st(7)
+# CHECK: fsub %st(7), %st
0xd8,0xe7
-# CHECK: fsubr %st(0)
+# CHECK: fsubr %st(0), %st
0xd8,0xe8
-# CHECK: fsubr %st(1)
+# CHECK: fsubr %st(1), %st
0xd8,0xe9
-# CHECK: fsubr %st(2)
+# CHECK: fsubr %st(2), %st
0xd8,0xea
-# CHECK: fsubr %st(3)
+# CHECK: fsubr %st(3), %st
0xd8,0xeb
-# CHECK: fsubr %st(4)
+# CHECK: fsubr %st(4), %st
0xd8,0xec
-# CHECK: fsubr %st(5)
+# CHECK: fsubr %st(5), %st
0xd8,0xed
-# CHECK: fsubr %st(6)
+# CHECK: fsubr %st(6), %st
0xd8,0xee
-# CHECK: fsubr %st(7)
+# CHECK: fsubr %st(7), %st
0xd8,0xef
-# CHECK: fdiv %st(0)
+# CHECK: fdiv %st(0), %st
0xd8,0xf0
-# CHECK: fdiv %st(1)
+# CHECK: fdiv %st(1), %st
0xd8,0xf1
-# CHECK: fdiv %st(2)
+# CHECK: fdiv %st(2), %st
0xd8,0xf2
-# CHECK: fdiv %st(3)
+# CHECK: fdiv %st(3), %st
0xd8,0xf3
-# CHECK: fdiv %st(4)
+# CHECK: fdiv %st(4), %st
0xd8,0xf4
-# CHECK: fdiv %st(5)
+# CHECK: fdiv %st(5), %st
0xd8,0xf5
-# CHECK: fdiv %st(6)
+# CHECK: fdiv %st(6), %st
0xd8,0xf6
-# CHECK: fdiv %st(7)
+# CHECK: fdiv %st(7), %st
0xd8,0xf7
-# CHECK: fdivr %st(0)
+# CHECK: fdivr %st(0), %st
0xd8,0xf8
-# CHECK: fdivr %st(1)
+# CHECK: fdivr %st(1), %st
0xd8,0xf9
-# CHECK: fdivr %st(2)
+# CHECK: fdivr %st(2), %st
0xd8,0xfa
-# CHECK: fdivr %st(3)
+# CHECK: fdivr %st(3), %st
0xd8,0xfb
-# CHECK: fdivr %st(4)
+# CHECK: fdivr %st(4), %st
0xd8,0xfc
-# CHECK: fdivr %st(5)
+# CHECK: fdivr %st(5), %st
0xd8,0xfd
-# CHECK: fdivr %st(6)
+# CHECK: fdivr %st(6), %st
0xd8,0xfe
-# CHECK: fdivr %st(7)
+# CHECK: fdivr %st(7), %st
0xd8,0xff
# CHECK: fld %st(0)
0xd9,0xc0
# CHECK: fld %st(1)
0xd9,0xc1
# CHECK: fld %st(2)
0xd9,0xc2
# CHECK: fld %st(3)
0xd9,0xc3
# CHECK: fld %st(4)
0xd9,0xc4
# CHECK: fld %st(5)
0xd9,0xc5
# CHECK: fld %st(6)
0xd9,0xc6
# CHECK: fld %st(7)
0xd9,0xc7
# CHECK: fxch %st(0)
0xd9,0xc8
# CHECK: fxch %st(1)
0xd9,0xc9
# CHECK: fxch %st(2)
0xd9,0xca
# CHECK: fxch %st(3)
0xd9,0xcb
# CHECK: fxch %st(4)
0xd9,0xcc
# CHECK: fxch %st(5)
0xd9,0xcd
# CHECK: fxch %st(6)
0xd9,0xce
# CHECK: fxch %st(7)
0xd9,0xcf
# CHECK: fnop
0xd9,0xd0
# CHECK: fchs
0xd9,0xe0
# CHECK: fabs
0xd9,0xe1
# CHECK: ftst
0xd9,0xe4
# CHECK: fxam
0xd9,0xe5
# CHECK: fld1
0xd9,0xe8
# CHECK: fldl2t
0xd9,0xe9
# CHECK: fldl2e
0xd9,0xea
# CHECK: fldpi
0xd9,0xeb
# CHECK: fldlg2
0xd9,0xec
# CHECK: fldln2
0xd9,0xed
# CHECK: fldz
0xd9,0xee
# CHECK: f2xm1
0xd9,0xf0
# CHECK: fyl2x
0xd9,0xf1
# CHECK: fptan
0xd9,0xf2
# CHECK: fpatan
0xd9,0xf3
# CHECK: fxtract
0xd9,0xf4
# CHECK: fprem1
0xd9,0xf5
# CHECK: fdecstp
0xd9,0xf6
# CHECK: fincstp
0xd9,0xf7
# CHECK: fprem
0xd9,0xf8
# CHECK: fyl2xp1
0xd9,0xf9
# CHECK: fsqrt
0xd9,0xfa
# CHECK: fsincos
0xd9,0xfb
# CHECK: frndint
0xd9,0xfc
# CHECK: fscale
0xd9,0xfd
# CHECK: fsin
0xd9,0xfe
# CHECK: fcos
0xd9,0xff
-# CHECK: fcmovb %st(0), %st(0)
+# CHECK: fcmovb %st(0), %st
0xda,0xc0
-# CHECK: fcmovb %st(1), %st(0)
+# CHECK: fcmovb %st(1), %st
0xda,0xc1
-# CHECK: fcmovb %st(2), %st(0)
+# CHECK: fcmovb %st(2), %st
0xda,0xc2
-# CHECK: fcmovb %st(3), %st(0)
+# CHECK: fcmovb %st(3), %st
0xda,0xc3
-# CHECK: fcmovb %st(4), %st(0)
+# CHECK: fcmovb %st(4), %st
0xda,0xc4
-# CHECK: fcmovb %st(5), %st(0)
+# CHECK: fcmovb %st(5), %st
0xda,0xc5
-# CHECK: fcmovb %st(6), %st(0)
+# CHECK: fcmovb %st(6), %st
0xda,0xc6
-# CHECK: fcmovb %st(7), %st(0)
+# CHECK: fcmovb %st(7), %st
0xda,0xc7
-# CHECK: fcmove %st(0), %st(0)
+# CHECK: fcmove %st(0), %st
0xda,0xc8
-# CHECK: fcmove %st(1), %st(0)
+# CHECK: fcmove %st(1), %st
0xda,0xc9
-# CHECK: fcmove %st(2), %st(0)
+# CHECK: fcmove %st(2), %st
0xda,0xca
-# CHECK: fcmove %st(3), %st(0)
+# CHECK: fcmove %st(3), %st
0xda,0xcb
-# CHECK: fcmove %st(4), %st(0)
+# CHECK: fcmove %st(4), %st
0xda,0xcc
-# CHECK: fcmove %st(5), %st(0)
+# CHECK: fcmove %st(5), %st
0xda,0xcd
-# CHECK: fcmove %st(6), %st(0)
+# CHECK: fcmove %st(6), %st
0xda,0xce
-# CHECK: fcmove %st(7), %st(0)
+# CHECK: fcmove %st(7), %st
0xda,0xcf
-# CHECK: fcmovbe %st(0), %st(0)
+# CHECK: fcmovbe %st(0), %st
0xda,0xd0
-# CHECK: fcmovbe %st(1), %st(0)
+# CHECK: fcmovbe %st(1), %st
0xda,0xd1
-# CHECK: fcmovbe %st(2), %st(0)
+# CHECK: fcmovbe %st(2), %st
0xda,0xd2
-# CHECK: fcmovbe %st(3), %st(0)
+# CHECK: fcmovbe %st(3), %st
0xda,0xd3
-# CHECK: fcmovbe %st(4), %st(0)
+# CHECK: fcmovbe %st(4), %st
0xda,0xd4
-# CHECK: fcmovbe %st(5), %st(0)
+# CHECK: fcmovbe %st(5), %st
0xda,0xd5
-# CHECK: fcmovbe %st(6), %st(0)
+# CHECK: fcmovbe %st(6), %st
0xda,0xd6
-# CHECK: fcmovbe %st(7), %st(0)
+# CHECK: fcmovbe %st(7), %st
0xda,0xd7
-# CHECK: fcmovu %st(0), %st(0)
+# CHECK: fcmovu %st(0), %st
0xda,0xd8
-# CHECK: fcmovu %st(1), %st(0)
+# CHECK: fcmovu %st(1), %st
0xda,0xd9
-# CHECK: fcmovu %st(2), %st(0)
+# CHECK: fcmovu %st(2), %st
0xda,0xda
-# CHECK: fcmovu %st(3), %st(0)
+# CHECK: fcmovu %st(3), %st
0xda,0xdb
-# CHECK: fcmovu %st(4), %st(0)
+# CHECK: fcmovu %st(4), %st
0xda,0xdc
-# CHECK: fcmovu %st(5), %st(0)
+# CHECK: fcmovu %st(5), %st
0xda,0xdd
-# CHECK: fcmovu %st(6), %st(0)
+# CHECK: fcmovu %st(6), %st
0xda,0xde
-# CHECK: fcmovu %st(7), %st(0)
+# CHECK: fcmovu %st(7), %st
0xda,0xdf
# CHECK: fucompp
0xda,0xe9
-# CHECK: fcmovnb %st(0), %st(0)
+# CHECK: fcmovnb %st(0), %st
0xdb,0xc0
-# CHECK: fcmovnb %st(1), %st(0)
+# CHECK: fcmovnb %st(1), %st
0xdb,0xc1
-# CHECK: fcmovnb %st(2), %st(0)
+# CHECK: fcmovnb %st(2), %st
0xdb,0xc2
-# CHECK: fcmovnb %st(3), %st(0)
+# CHECK: fcmovnb %st(3), %st
0xdb,0xc3
-# CHECK: fcmovnb %st(4), %st(0)
+# CHECK: fcmovnb %st(4), %st
0xdb,0xc4
-# CHECK: fcmovnb %st(5), %st(0)
+# CHECK: fcmovnb %st(5), %st
0xdb,0xc5
-# CHECK: fcmovnb %st(6), %st(0)
+# CHECK: fcmovnb %st(6), %st
0xdb,0xc6
-# CHECK: fcmovnb %st(7), %st(0)
+# CHECK: fcmovnb %st(7), %st
0xdb,0xc7
-# CHECK: fcmovne %st(0), %st(0)
+# CHECK: fcmovne %st(0), %st
0xdb,0xc8
-# CHECK: fcmovne %st(1), %st(0)
+# CHECK: fcmovne %st(1), %st
0xdb,0xc9
-# CHECK: fcmovne %st(2), %st(0)
+# CHECK: fcmovne %st(2), %st
0xdb,0xca
-# CHECK: fcmovne %st(3), %st(0)
+# CHECK: fcmovne %st(3), %st
0xdb,0xcb
-# CHECK: fcmovne %st(4), %st(0)
+# CHECK: fcmovne %st(4), %st
0xdb,0xcc
-# CHECK: fcmovne %st(5), %st(0)
+# CHECK: fcmovne %st(5), %st
0xdb,0xcd
-# CHECK: fcmovne %st(6), %st(0)
+# CHECK: fcmovne %st(6), %st
0xdb,0xce
-# CHECK: fcmovne %st(7), %st(0)
+# CHECK: fcmovne %st(7), %st
0xdb,0xcf
-# CHECK: fcmovnbe %st(0), %st(0)
+# CHECK: fcmovnbe %st(0), %st
0xdb,0xd0
-# CHECK: fcmovnbe %st(1), %st(0)
+# CHECK: fcmovnbe %st(1), %st
0xdb,0xd1
-# CHECK: fcmovnbe %st(2), %st(0)
+# CHECK: fcmovnbe %st(2), %st
0xdb,0xd2
-# CHECK: fcmovnbe %st(3), %st(0)
+# CHECK: fcmovnbe %st(3), %st
0xdb,0xd3
-# CHECK: fcmovnbe %st(4), %st(0)
+# CHECK: fcmovnbe %st(4), %st
0xdb,0xd4
-# CHECK: fcmovnbe %st(5), %st(0)
+# CHECK: fcmovnbe %st(5), %st
0xdb,0xd5
-# CHECK: fcmovnbe %st(6), %st(0)
+# CHECK: fcmovnbe %st(6), %st
0xdb,0xd6
-# CHECK: fcmovnbe %st(7), %st(0)
+# CHECK: fcmovnbe %st(7), %st
0xdb,0xd7
-# CHECK: fcmovnu %st(0), %st(0)
+# CHECK: fcmovnu %st(0), %st
0xdb,0xd8
-# CHECK: fcmovnu %st(1), %st(0)
+# CHECK: fcmovnu %st(1), %st
0xdb,0xd9
-# CHECK: fcmovnu %st(2), %st(0)
+# CHECK: fcmovnu %st(2), %st
0xdb,0xda
-# CHECK: fcmovnu %st(3), %st(0)
+# CHECK: fcmovnu %st(3), %st
0xdb,0xdb
-# CHECK: fcmovnu %st(4), %st(0)
+# CHECK: fcmovnu %st(4), %st
0xdb,0xdc
-# CHECK: fcmovnu %st(5), %st(0)
+# CHECK: fcmovnu %st(5), %st
0xdb,0xdd
-# CHECK: fcmovnu %st(6), %st(0)
+# CHECK: fcmovnu %st(6), %st
0xdb,0xde
-# CHECK: fcmovnu %st(7), %st(0)
+# CHECK: fcmovnu %st(7), %st
0xdb,0xdf
# CHECK: fnclex
0xdb,0xe2
# CHECK: fninit
0xdb,0xe3
# CHECK: fucomi %st(0)
0xdb,0xe8
# CHECK: fucomi %st(1)
0xdb,0xe9
# CHECK: fucomi %st(2)
0xdb,0xea
# CHECK: fucomi %st(3)
0xdb,0xeb
# CHECK: fucomi %st(4)
0xdb,0xec
# CHECK: fucomi %st(5)
0xdb,0xed
# CHECK: fucomi %st(6)
0xdb,0xee
# CHECK: fucomi %st(7)
0xdb,0xef
# CHECK: fcomi %st(0)
0xdb,0xf0
# CHECK: fcomi %st(1)
0xdb,0xf1
# CHECK: fcomi %st(2)
0xdb,0xf2
# CHECK: fcomi %st(3)
0xdb,0xf3
# CHECK: fcomi %st(4)
0xdb,0xf4
# CHECK: fcomi %st(5)
0xdb,0xf5
# CHECK: fcomi %st(6)
0xdb,0xf6
# CHECK: fcomi %st(7)
0xdb,0xf7
-# CHECK: fadd %st(0), %st(0)
+# CHECK: fadd %st, %st(0)
0xdc,0xc0
-# CHECK: fadd %st(0), %st(1)
+# CHECK: fadd %st, %st(1)
0xdc,0xc1
-# CHECK: fadd %st(0), %st(2)
+# CHECK: fadd %st, %st(2)
0xdc,0xc2
-# CHECK: fadd %st(0), %st(3)
+# CHECK: fadd %st, %st(3)
0xdc,0xc3
-# CHECK: fadd %st(0), %st(4)
+# CHECK: fadd %st, %st(4)
0xdc,0xc4
-# CHECK: fadd %st(0), %st(5)
+# CHECK: fadd %st, %st(5)
0xdc,0xc5
-# CHECK: fadd %st(0), %st(6)
+# CHECK: fadd %st, %st(6)
0xdc,0xc6
-# CHECK: fadd %st(0), %st(7)
+# CHECK: fadd %st, %st(7)
0xdc,0xc7
-# CHECK: fmul %st(0), %st(0)
+# CHECK: fmul %st, %st(0)
0xdc,0xc8
-# CHECK: fmul %st(0), %st(1)
+# CHECK: fmul %st, %st(1)
0xdc,0xc9
-# CHECK: fmul %st(0), %st(2)
+# CHECK: fmul %st, %st(2)
0xdc,0xca
-# CHECK: fmul %st(0), %st(3)
+# CHECK: fmul %st, %st(3)
0xdc,0xcb
-# CHECK: fmul %st(0), %st(4)
+# CHECK: fmul %st, %st(4)
0xdc,0xcc
-# CHECK: fmul %st(0), %st(5)
+# CHECK: fmul %st, %st(5)
0xdc,0xcd
-# CHECK: fmul %st(0), %st(6)
+# CHECK: fmul %st, %st(6)
0xdc,0xce
-# CHECK: fmul %st(0), %st(7)
+# CHECK: fmul %st, %st(7)
0xdc,0xcf
-# CHECK: fsub %st(0), %st(0)
+# CHECK: fsub %st, %st(0)
0xdc,0xe0
-# CHECK: fsub %st(0), %st(1)
+# CHECK: fsub %st, %st(1)
0xdc,0xe1
-# CHECK: fsub %st(0), %st(2)
+# CHECK: fsub %st, %st(2)
0xdc,0xe2
-# CHECK: fsub %st(0), %st(3)
+# CHECK: fsub %st, %st(3)
0xdc,0xe3
-# CHECK: fsub %st(0), %st(4)
+# CHECK: fsub %st, %st(4)
0xdc,0xe4
-# CHECK: fsub %st(0), %st(5)
+# CHECK: fsub %st, %st(5)
0xdc,0xe5
-# CHECK: fsub %st(0), %st(6)
+# CHECK: fsub %st, %st(6)
0xdc,0xe6
-# CHECK: fsub %st(0), %st(7)
+# CHECK: fsub %st, %st(7)
0xdc,0xe7
-# CHECK: fsubr %st(0), %st(0)
+# CHECK: fsubr %st, %st(0)
0xdc,0xe8
-# CHECK: fsubr %st(0), %st(1)
+# CHECK: fsubr %st, %st(1)
0xdc,0xe9
-# CHECK: fsubr %st(0), %st(2)
+# CHECK: fsubr %st, %st(2)
0xdc,0xea
-# CHECK: fsubr %st(0), %st(3)
+# CHECK: fsubr %st, %st(3)
0xdc,0xeb
-# CHECK: fsubr %st(0), %st(4)
+# CHECK: fsubr %st, %st(4)
0xdc,0xec
-# CHECK: fsubr %st(0), %st(5)
+# CHECK: fsubr %st, %st(5)
0xdc,0xed
-# CHECK: fsubr %st(0), %st(6)
+# CHECK: fsubr %st, %st(6)
0xdc,0xee
-# CHECK: fsubr %st(0), %st(7)
+# CHECK: fsubr %st, %st(7)
0xdc,0xef
-# CHECK: fdiv %st(0), %st(0)
+# CHECK: fdiv %st, %st(0)
0xdc,0xf0
-# CHECK: fdiv %st(0), %st(1)
+# CHECK: fdiv %st, %st(1)
0xdc,0xf1
-# CHECK: fdiv %st(0), %st(2)
+# CHECK: fdiv %st, %st(2)
0xdc,0xf2
-# CHECK: fdiv %st(0), %st(3)
+# CHECK: fdiv %st, %st(3)
0xdc,0xf3
-# CHECK: fdiv %st(0), %st(4)
+# CHECK: fdiv %st, %st(4)
0xdc,0xf4
-# CHECK: fdiv %st(0), %st(5)
+# CHECK: fdiv %st, %st(5)
0xdc,0xf5
-# CHECK: fdiv %st(0), %st(6)
+# CHECK: fdiv %st, %st(6)
0xdc,0xf6
-# CHECK: fdiv %st(0), %st(7)
+# CHECK: fdiv %st, %st(7)
0xdc,0xf7
-# CHECK: fdivr %st(0), %st(0)
+# CHECK: fdivr %st, %st(0)
0xdc,0xf8
-# CHECK: fdivr %st(0), %st(1)
+# CHECK: fdivr %st, %st(1)
0xdc,0xf9
-# CHECK: fdivr %st(0), %st(2)
+# CHECK: fdivr %st, %st(2)
0xdc,0xfa
-# CHECK: fdivr %st(0), %st(3)
+# CHECK: fdivr %st, %st(3)
0xdc,0xfb
-# CHECK: fdivr %st(0), %st(4)
+# CHECK: fdivr %st, %st(4)
0xdc,0xfc
-# CHECK: fdivr %st(0), %st(5)
+# CHECK: fdivr %st, %st(5)
0xdc,0xfd
-# CHECK: fdivr %st(0), %st(6)
+# CHECK: fdivr %st, %st(6)
0xdc,0xfe
-# CHECK: fdivr %st(0), %st(7)
+# CHECK: fdivr %st, %st(7)
0xdc,0xff
# CHECK: ffree %st(0)
0xdd,0xc0
# CHECK: ffree %st(1)
0xdd,0xc1
# CHECK: ffree %st(2)
0xdd,0xc2
# CHECK: ffree %st(3)
0xdd,0xc3
# CHECK: ffree %st(4)
0xdd,0xc4
# CHECK: ffree %st(5)
0xdd,0xc5
# CHECK: ffree %st(6)
0xdd,0xc6
# CHECK: ffree %st(7)
0xdd,0xc7
# CHECK: fst %st(0)
0xdd,0xd0
# CHECK: fst %st(1)
0xdd,0xd1
# CHECK: fst %st(2)
0xdd,0xd2
# CHECK: fst %st(3)
0xdd,0xd3
# CHECK: fst %st(4)
0xdd,0xd4
# CHECK: fst %st(5)
0xdd,0xd5
# CHECK: fst %st(6)
0xdd,0xd6
# CHECK: fst %st(7)
0xdd,0xd7
# CHECK: fstp %st(0)
0xdd,0xd8
# CHECK: fstp %st(1)
0xdd,0xd9
# CHECK: fstp %st(2)
0xdd,0xda
# CHECK: fstp %st(3)
0xdd,0xdb
# CHECK: fstp %st(4)
0xdd,0xdc
# CHECK: fstp %st(5)
0xdd,0xdd
# CHECK: fstp %st(6)
0xdd,0xde
# CHECK: fstp %st(7)
0xdd,0xdf
# CHECK: fucom %st(0)
0xdd,0xe0
# CHECK: fucom %st(1)
0xdd,0xe1
# CHECK: fucom %st(2)
0xdd,0xe2
# CHECK: fucom %st(3)
0xdd,0xe3
# CHECK: fucom %st(4)
0xdd,0xe4
# CHECK: fucom %st(5)
0xdd,0xe5
# CHECK: fucom %st(6)
0xdd,0xe6
# CHECK: fucom %st(7)
0xdd,0xe7
# CHECK: fucomp %st(0)
0xdd,0xe8
# CHECK: fucomp %st(1)
0xdd,0xe9
# CHECK: fucomp %st(2)
0xdd,0xea
# CHECK: fucomp %st(3)
0xdd,0xeb
# CHECK: fucomp %st(4)
0xdd,0xec
# CHECK: fucomp %st(5)
0xdd,0xed
# CHECK: fucomp %st(6)
0xdd,0xee
# CHECK: fucomp %st(7)
0xdd,0xef
-# CHECK: faddp %st(0)
+# CHECK: faddp %st, %st(0)
0xde,0xc0
-# CHECK: faddp %st(1)
+# CHECK: faddp %st, %st(1)
0xde,0xc1
-# CHECK: faddp %st(2)
+# CHECK: faddp %st, %st(2)
0xde,0xc2
-# CHECK: faddp %st(3)
+# CHECK: faddp %st, %st(3)
0xde,0xc3
-# CHECK: faddp %st(4)
+# CHECK: faddp %st, %st(4)
0xde,0xc4
-# CHECK: faddp %st(5)
+# CHECK: faddp %st, %st(5)
0xde,0xc5
-# CHECK: faddp %st(6)
+# CHECK: faddp %st, %st(6)
0xde,0xc6
-# CHECK: faddp %st(7)
+# CHECK: faddp %st, %st(7)
0xde,0xc7
-# CHECK: fmulp %st(0)
+# CHECK: fmulp %st, %st(0)
0xde,0xc8
-# CHECK: fmulp %st(1)
+# CHECK: fmulp %st, %st(1)
0xde,0xc9
-# CHECK: fmulp %st(2)
+# CHECK: fmulp %st, %st(2)
0xde,0xca
-# CHECK: fmulp %st(3)
+# CHECK: fmulp %st, %st(3)
0xde,0xcb
-# CHECK: fmulp %st(4)
+# CHECK: fmulp %st, %st(4)
0xde,0xcc
-# CHECK: fmulp %st(5)
+# CHECK: fmulp %st, %st(5)
0xde,0xcd
-# CHECK: fmulp %st(6)
+# CHECK: fmulp %st, %st(6)
0xde,0xce
-# CHECK: fmulp %st(7)
+# CHECK: fmulp %st, %st(7)
0xde,0xcf
# CHECK: fcompp
0xde,0xd9
-# CHECK: fsubp %st(0)
+# CHECK: fsubp %st, %st(0)
0xde,0xe0
-# CHECK: fsubp %st(1)
+# CHECK: fsubp %st, %st(1)
0xde,0xe1
-# CHECK: fsubp %st(2)
+# CHECK: fsubp %st, %st(2)
0xde,0xe2
-# CHECK: fsubp %st(3)
+# CHECK: fsubp %st, %st(3)
0xde,0xe3
-# CHECK: fsubp %st(4)
+# CHECK: fsubp %st, %st(4)
0xde,0xe4
-# CHECK: fsubp %st(5)
+# CHECK: fsubp %st, %st(5)
0xde,0xe5
-# CHECK: fsubp %st(6)
+# CHECK: fsubp %st, %st(6)
0xde,0xe6
-# CHECK: fsubp %st(7)
+# CHECK: fsubp %st, %st(7)
0xde,0xe7
-# CHECK: fsubrp %st(0)
+# CHECK: fsubrp %st, %st(0)
0xde,0xe8
-# CHECK: fsubrp %st(1)
+# CHECK: fsubrp %st, %st(1)
0xde,0xe9
-# CHECK: fsubrp %st(2)
+# CHECK: fsubrp %st, %st(2)
0xde,0xea
-# CHECK: fsubrp %st(3)
+# CHECK: fsubrp %st, %st(3)
0xde,0xeb
-# CHECK: fsubrp %st(4)
+# CHECK: fsubrp %st, %st(4)
0xde,0xec
-# CHECK: fsubrp %st(5)
+# CHECK: fsubrp %st, %st(5)
0xde,0xed
-# CHECK: fsubrp %st(6)
+# CHECK: fsubrp %st, %st(6)
0xde,0xee
-# CHECK: fsubrp %st(7)
+# CHECK: fsubrp %st, %st(7)
0xde,0xef
-# CHECK: fdivp %st(0)
+# CHECK: fdivp %st, %st(0)
0xde,0xf0
-# CHECK: fdivp %st(1)
+# CHECK: fdivp %st, %st(1)
0xde,0xf1
-# CHECK: fdivp %st(2)
+# CHECK: fdivp %st, %st(2)
0xde,0xf2
-# CHECK: fdivp %st(3)
+# CHECK: fdivp %st, %st(3)
0xde,0xf3
-# CHECK: fdivp %st(4)
+# CHECK: fdivp %st, %st(4)
0xde,0xf4
-# CHECK: fdivp %st(5)
+# CHECK: fdivp %st, %st(5)
0xde,0xf5
-# CHECK: fdivp %st(6)
+# CHECK: fdivp %st, %st(6)
0xde,0xf6
-# CHECK: fdivp %st(7)
+# CHECK: fdivp %st, %st(7)
0xde,0xf7
-# CHECK: fdivrp %st(0)
+# CHECK: fdivrp %st, %st(0)
0xde,0xf8
-# CHECK: fdivrp %st(1)
+# CHECK: fdivrp %st, %st(1)
0xde,0xf9
-# CHECK: fdivrp %st(2)
+# CHECK: fdivrp %st, %st(2)
0xde,0xfa
-# CHECK: fdivrp %st(3)
+# CHECK: fdivrp %st, %st(3)
0xde,0xfb
-# CHECK: fdivrp %st(4)
+# CHECK: fdivrp %st, %st(4)
0xde,0xfc
-# CHECK: fdivrp %st(5)
+# CHECK: fdivrp %st, %st(5)
0xde,0xfd
-# CHECK: fdivrp %st(6)
+# CHECK: fdivrp %st, %st(6)
0xde,0xfe
-# CHECK: fdivrp %st(7)
+# CHECK: fdivrp %st, %st(7)
0xde,0xff
# CHECK: ffreep %st(0)
0xdf,0xc0
# CHECK: ffreep %st(1)
0xdf,0xc1
# CHECK: ffreep %st(2)
0xdf,0xc2
# CHECK: ffreep %st(3)
0xdf,0xc3
# CHECK: ffreep %st(4)
0xdf,0xc4
# CHECK: ffreep %st(5)
0xdf,0xc5
# CHECK: ffreep %st(6)
0xdf,0xc6
# CHECK: ffreep %st(7)
0xdf,0xc7
# CHECK: fnstsw %ax
0xdf,0xe0
# CHECK: fucompi %st(0)
0xdf,0xe8
# CHECK: fucompi %st(1)
0xdf,0xe9
# CHECK: fucompi %st(2)
0xdf,0xea
# CHECK: fucompi %st(3)
0xdf,0xeb
# CHECK: fucompi %st(4)
0xdf,0xec
# CHECK: fucompi %st(5)
0xdf,0xed
# CHECK: fucompi %st(6)
0xdf,0xee
# CHECK: fucompi %st(7)
0xdf,0xef
# CHECK: fcompi %st(0)
0xdf,0xf0
# CHECK: fcompi %st(1)
0xdf,0xf1
# CHECK: fcompi %st(2)
0xdf,0xf2
# CHECK: fcompi %st(3)
0xdf,0xf3
# CHECK: fcompi %st(4)
0xdf,0xf4
# CHECK: fcompi %st(5)
0xdf,0xf5
# CHECK: fcompi %st(6)
0xdf,0xf6
# CHECK: fcompi %st(7)
0xdf,0xf7
Index: vendor/llvm/dist-release_80/test/MC/Disassembler/X86/x86-16.txt
===================================================================
--- vendor/llvm/dist-release_80/test/MC/Disassembler/X86/x86-16.txt (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/Disassembler/X86/x86-16.txt (revision 344166)
@@ -1,808 +1,808 @@
# RUN: llvm-mc --disassemble %s -triple=i686-linux-gnu-code16 | FileCheck %s
# CHECK: movl $305419896, %ebx
0x66 0xbb 0x78 0x56 0x34 0x12
# CHECK: pause
0xf3 0x90
# CHECK: sfence
0x0f 0xae 0xf8
# CHECK: lfence
0x0f 0xae 0xe8
# CHECK: mfence
0x0f 0xae 0xf0
# CHECK: stgi
0x0f 0x01 0xdc
# CHECK: clgi
0x0f 0x01 0xdd
# CHECK: rdtscp
0x0f 0x01 0xf9
# CHECK: movl %eax, 16(%ebp)
0x67 0x66 0x89 0x45 0x10
# CHECK: movl %eax, -16(%ebp)
0x67 0x66 0x89 0x45 0xf0
# CHECK: testb %cl, %bl
0x84 0xcb
# CHECK: cmpl %eax, %ebx
0x66 0x39 0xc3
# CHECK: addw %ax, %ax
0x01 0xc0
# CHECK: shrl %eax
0x66 0xd1 0xe8
# CHECK: shll %eax
0x66 0xd1 0xe0
# CHECK: shll %eax
0x66 0xd1 0xe0
# CHECK: movb 0, %al
0xa0 0x00 0x00
# CHECK: movw 0, %ax
0xa1 0x00 0x00
# CHECK: movl 0, %eax
0x66 0xa1 0x00 0x00
# CHECK: into
0xce
# CHECK: int3
0xcc
# CHECK: int $4
0xcd 0x04
# CHECK: int $127
0xcd 0x7f
# CHECK: pushfw
0x9c
# CHECK: pushfl
0x66 0x9c
# CHECK: popfw
0x9d
# CHECK: popfl
0x66 0x9d
# CHECK: retl
0x66 0xc3
# CHECK: cmoval %eax, %edx
0x66 0x0f 0x47 0xd0
# CHECK: cmovael %eax, %edx
0x66 0x0f 0x43 0xd0
# CHECK: cmovbel %eax, %edx
0x66 0x0f 0x46 0xd0
# CHECK: cmovbl %eax, %edx
0x66 0x0f 0x42 0xd0
# CHECK: cmovbw %bx, %bx
0x0f 0x42 0xdb
# CHECK: cmovbel %eax, %edx
0x66 0x0f 0x46 0xd0
# CHECK: cmovbl %eax, %edx
0x66 0x0f 0x42 0xd0
# CHECK: cmovel %eax, %edx
0x66 0x0f 0x44 0xd0
# CHECK: cmovgl %eax, %edx
0x66 0x0f 0x4f 0xd0
# CHECK: cmovgel %eax, %edx
0x66 0x0f 0x4d 0xd0
# CHECK: cmovll %eax, %edx
0x66 0x0f 0x4c 0xd0
# CHECK: cmovlel %eax, %edx
0x66 0x0f 0x4e 0xd0
# CHECK: cmovbel %eax, %edx
0x66 0x0f 0x46 0xd0
# CHECK: cmovnel %eax, %edx
0x66 0x0f 0x45 0xd0
# CHECK: cmovael %eax, %edx
0x66 0x0f 0x43 0xd0
# CHECK: cmoval %eax, %edx
0x66 0x0f 0x47 0xd0
# CHECK: cmovael %eax, %edx
0x66 0x0f 0x43 0xd0
# CHECK: cmovnel %eax, %edx
0x66 0x0f 0x45 0xd0
# CHECK: cmovlel %eax, %edx
0x66 0x0f 0x4e 0xd0
# CHECK: cmovgel %eax, %edx
0x66 0x0f 0x4d 0xd0
# CHECK: cmovnel %eax, %edx
0x66 0x0f 0x45 0xd0
# CHECK: cmovlel %eax, %edx
0x66 0x0f 0x4e 0xd0
# CHECK: cmovll %eax, %edx
0x66 0x0f 0x4c 0xd0
# CHECK: cmovgel %eax, %edx
0x66 0x0f 0x4d 0xd0
# CHECK: cmovgl %eax, %edx
0x66 0x0f 0x4f 0xd0
# CHECK: cmovnol %eax, %edx
0x66 0x0f 0x41 0xd0
# CHECK: cmovnpl %eax, %edx
0x66 0x0f 0x4b 0xd0
# CHECK: cmovnsl %eax, %edx
0x66 0x0f 0x49 0xd0
# CHECK: cmovnel %eax, %edx
0x66 0x0f 0x45 0xd0
# CHECK: cmovol %eax, %edx
0x66 0x0f 0x40 0xd0
# CHECK: cmovpl %eax, %edx
0x66 0x0f 0x4a 0xd0
# CHECK: cmovsl %eax, %edx
0x66 0x0f 0x48 0xd0
# CHECK: cmovel %eax, %edx
0x66 0x0f 0x44 0xd0
# CHECK: fmul %st(0)
0xd8 0xc8
# CHECK: fadd %st(0)
0xd8 0xc0
# CHECK: fsub %st(0)
0xd8 0xe0
# CHECK: fsubr %st(0)
0xd8 0xe8
# CHECK: fdivr %st(0)
0xd8 0xf8
# CHECK: fdiv %st(0)
0xd8 0xf0
# CHECK: movl %cs, %eax
0x66 0x8c 0xc8
# CHECK: movw %cs, %ax
0x8c 0xc8
# CHECK: movw %cs, (%eax)
0x67 0x66 0x8c 0x08
# CHECK: movw %cs, (%eax)
0x67 0x8c 0x08
# CHECK: movl %eax, %cs
0x66 0x8e 0xc8
# CHECK: movw (%eax), %cs
0x67 0x66 0x8e 0x08
# CHECK: movw (%eax), %cs
0x67 0x8e 0x08
# CHECKX: movl %cr0, %eax
0x0f 0x20 0xc0
# CHECKX: movl %cr1, %eax
0x0f 0x20 0xc8
# CHECKX: movl %cr2, %eax
0x0f 0x20 0xd0
# CHECKX: movl %cr3, %eax
0x0f 0x20 0xd8
# CHECKX: movl %cr4, %eax
0x0f 0x20 0xe0
# CHECKX: movl %dr0, %eax
0x0f 0x21 0xc0
# CHECKX: movl %dr1, %eax
0x0f 0x21 0xc8
# CHECKX: movl %dr1, %eax
0x0f 0x21 0xc8
# CHECKX: movl %dr2, %eax
0x0f 0x21 0xd0
# CHECKX: movl %dr3, %eax
0x0f 0x21 0xd8
# CHECKX: movl %dr4, %eax
0x0f 0x21 0xe0
# CHECKX: movl %dr5, %eax
0x0f 0x21 0xe8
# CHECKX: movl %dr6, %eax
0x0f 0x21 0xf0
# CHECKX: movl %dr7, %eax
0x0f 0x21 0xf8
# CHECK: wait
0x9b
# CHECK: movl %gs:124, %eax
0x65 0x66 0x8b 0x06 0x7c 0x00
# CHECK: pushaw
0x60
# CHECK: popaw
0x61
# CHECK: pushaw
0x60
# CHECK: popaw
0x61
# CHECK: pushal
0x66 0x60
# CHECK: popal
0x66 0x61
# CHECK: jmpw *8(%eax)
0x67 0xff 0x60 0x08
# CHECK: jmpl *8(%eax)
0x67 0x66 0xff 0x60 0x08
# CHECK: lcalll $2, $4660
0x66 0x9a 0x34 0x12 0x00 0x00 0x02 0x00
# CHECK: jcxz
0xe3 0x00
# CHECK: jecxz
0x67 0xe3 0x00
# CHECK: iretw
0xcf
# CHECK: iretw
0xcf
# CHECK: iretl
0x66 0xcf
# CHECK: sysretl
0x0f 0x07
# CHECK: sysretl
0x0f 0x07
# CHECK: testl %ecx, -24(%ebp)
0x67 0x66 0x85 0x4d 0xe8
# CHECK: testl %ecx, -24(%ebp)
0x67 0x66 0x85 0x4d 0xe8
# CHECK: pushw %cs
0x0e
# CHECK: pushw %ds
0x1e
# CHECK: pushw %ss
0x16
# CHECK: pushw %es
0x06
# CHECK: pushw %fs
0x0f 0xa0
# CHECK: pushw %gs
0x0f 0xa8
# CHECK: pushw %cs
0x0e
# CHECK: pushw %ds
0x1e
# CHECK: pushw %ss
0x16
# CHECK: pushw %es
0x06
# CHECK: pushw %fs
0x0f 0xa0
# CHECK: pushw %gs
0x0f 0xa8
# CHECK: pushl %cs
0x66 0x0e
# CHECK: pushl %ds
0x66 0x1e
# CHECK: pushl %ss
0x66 0x16
# CHECK: pushl %es
0x66 0x06
# CHECK: pushl %fs
0x66 0x0f 0xa0
# CHECK: pushl %gs
0x66 0x0f 0xa8
# CHECK: popw %ss
0x17
# CHECK: popw %ds
0x1f
# CHECK: popw %es
0x07
# CHECK: popl %ss
0x66 0x17
# CHECK: popl %ds
0x66 0x1f
# CHECK: popl %es
0x66 0x07
# CHECK: pushfl
0x66 0x9c
# CHECK: popfl
0x66 0x9d
# CHECK: pushfl
0x66 0x9c
# CHECK: popfl
0x66 0x9d
# CHECK: salc
0xd6
# CHECK: setb %bl
0x0f 0x92 0xc3
# CHECK: setb %bl
0x0f 0x92 0xc3
# CHECK: setae %bl
0x0f 0x93 0xc3
# CHECK: setae %bl
0x0f 0x93 0xc3
# CHECK: setbe %bl
0x0f 0x96 0xc3
# CHECK: seta %bl
0x0f 0x97 0xc3
# CHECK: setp %bl
0x0f 0x9a 0xc3
# CHECK: setnp %bl
0x0f 0x9b 0xc3
# CHECK: setl %bl
0x0f 0x9c 0xc3
# CHECK: setge %bl
0x0f 0x9d 0xc3
# CHECK: setle %bl
0x0f 0x9e 0xc3
# CHECK: setg %bl
0x0f 0x9f 0xc3
# CHECK: setne %cl
0x0f 0x95 0xc1
# CHECK: setb %bl
0x0f 0x92 0xc3
# CHECK: setb %bl
0x0f 0x92 0xc3
# CHECK: lcalll $31438, $31438
0x66 0x9a 0xce 0x7a 0x00 0x00 0xce 0x7a
# CHECK: lcalll $31438, $31438
0x66 0x9a 0xce 0x7a 0x00 0x00 0xce 0x7a
# CHECK: ljmpl $31438, $31438
0x66 0xea 0xce 0x7a 0x00 0x00 0xce 0x7a
# CHECK: ljmpl $31438, $31438
0x66 0xea 0xce 0x7a 0x00 0x00 0xce 0x7a
# CHECK: lcallw $31438, $31438
0x9a 0xce 0x7a 0xce 0x7a
# CHECK: lcallw $31438, $31438
0x9a 0xce 0x7a 0xce 0x7a
# CHECK: ljmpw $31438, $31438
0xea 0xce 0x7a 0xce 0x7a
# CHECK: ljmpw $31438, $31438
0xea 0xce 0x7a 0xce 0x7a
# CHECK: lcallw $31438, $31438
0x9a 0xce 0x7a 0xce 0x7a
# CHECK: lcallw $31438, $31438
0x9a 0xce 0x7a 0xce 0x7a
# CHECK: ljmpw $31438, $31438
0xea 0xce 0x7a 0xce 0x7a
# CHECK: ljmpw $31438, $31438
0xea 0xce 0x7a 0xce 0x7a
# CHECK: calll
0x66 0xe8 0x00 0x00 0x00 0x00
# CHECK: callw
0xe8 0x00 0x00
# CHECK: incb %al
0xfe 0xc0
# CHECK: incw %ax
0x40
# CHECK: incl %eax
0x66 0x40
# CHECK: decb %al
0xfe 0xc8
# CHECK: decw %ax
0x48
# CHECK: decl %eax
0x66 0x48
# CHECK: pshufw $14, %mm4, %mm0
0x0f 0x70 0xc4 0x0e
# CHECK: pshufw $90, %mm4, %mm0
0x0f 0x70 0xc4 0x5a
# CHECK: aaa
0x37
# CHECK: aad $1
0xd5 0x01
# CHECK: aad
0xd5 0x0a
# CHECK: aad
0xd5 0x0a
# CHECK: aam $2
0xd4 0x02
# CHECK: aam
0xd4 0x0a
# CHECK: aam
0xd4 0x0a
# CHECK: aas
0x3f
# CHECK: daa
0x27
# CHECK: das
0x2f
# CHECK: retw $31438
0xc2 0xce 0x7a
# CHECK: lretw $31438
0xca 0xce 0x7a
# CHECK: retw $31438
0xc2 0xce 0x7a
# CHECK: lretw $31438
0xca 0xce 0x7a
# CHECK: retl $31438
0x66 0xc2 0xce 0x7a
# CHECK: lretl $31438
0x66 0xca 0xce 0x7a
# CHECK: bound %bx, 2(%eax)
0x67 0x62 0x58 0x02
# CHECK: bound %ecx, 4(%ebx)
0x67 0x66 0x62 0x4b 0x04
# CHECK: arpl %bx, %bx
0x63 0xdb
# CHECK: arpl %bx, 6(%ecx)
0x67 0x63 0x59 0x06
# CHECK: lgdtw 4(%eax)
0x67 0x0f 0x01 0x50 0x04
# CHECK: lgdtw 4(%eax)
0x67 0x0f 0x01 0x50 0x04
# CHECK: lgdtl 4(%eax)
0x67 0x66 0x0f 0x01 0x50 0x04
# CHECK: lidtw 4(%eax)
0x67 0x0f 0x01 0x58 0x04
# CHECK: lidtw 4(%eax)
0x67 0x0f 0x01 0x58 0x04
# CHECK: lidtl 4(%eax)
0x67 0x66 0x0f 0x01 0x58 0x04
# CHECK: sgdtw 4(%eax)
0x67 0x0f 0x01 0x40 0x04
# CHECK: sgdtw 4(%eax)
0x67 0x0f 0x01 0x40 0x04
# CHECK: sgdtl 4(%eax)
0x67 0x66 0x0f 0x01 0x40 0x04
# CHECK: sidtw 4(%eax)
0x67 0x0f 0x01 0x48 0x04
# CHECK: sidtw 4(%eax)
0x67 0x0f 0x01 0x48 0x04
# CHECK: sidtl 4(%eax)
0x67 0x66 0x0f 0x01 0x48 0x04
# CHECK: fcompi %st(2)
0xdf 0xf2
# CHECK: fcompi %st(2)
0xdf 0xf2
# CHECK: fcompi %st(1)
0xdf 0xf1
# CHECK: fucompi %st(2)
0xdf 0xea
# CHECK: fucompi %st(2)
0xdf 0xea
# CHECK: fucompi %st(1)
0xdf 0xe9
# CHECK: fldcw 32493
0xd9 0x2e 0xed 0x7e
# CHECK: fldcw 32493
0xd9 0x2e 0xed 0x7e
# CHECK: fnstcw 32493
0xd9 0x3e 0xed 0x7e
# CHECK: fnstcw 32493
0xd9 0x3e 0xed 0x7e
# CHECK: wait
0x9b
# CHECK: fnstcw 32493
0xd9 0x3e 0xed 0x7e
# CHECK: wait
0x9b
# CHECK: fnstcw 32493
0xd9 0x3e 0xed 0x7e
# CHECK: fnstsw 32493
0xdd 0x3e 0xed 0x7e
# CHECK: fnstsw 32493
0xdd 0x3e 0xed 0x7e
# CHECK: wait
0x9b
# CHECK: fnstsw 32493
0xdd 0x3e 0xed 0x7e
# CHECK: wait
0x9b
# CHECK: fnstsw 32493
0xdd 0x3e 0xed 0x7e
# CHECK: verr 32493
0x0f 0x00 0x26 0xed 0x7e
# CHECK: verr 32493
0x0f 0x00 0x26 0xed 0x7e
# CHECK: wait
0x9b
# CHECK: fnclex
0xdb 0xe2
# CHECK: fnclex
0xdb 0xe2
# CHECK: ud2
0x0f 0x0b
# CHECK: ud2
0x0f 0x0b
# CHECK: ud2b
0x0f 0xb9
# CHECK: loope
0xe1 0x00
# CHECK: loopne
0xe0 0x00
# CHECK: outsb
0x6e
# CHECK: outsw
0x6f
# CHECK: outsl
0x66 0x6f
# CHECK: insb
0x6c
# CHECK: insw
0x6d
# CHECK: insl
0x66 0x6d
# CHECK: movsb
0xa4
# CHECK: movsw
0xa5
# CHECK: movsl
0x66 0xa5
# CHECK: lodsb
0xac
# CHECK: lodsw
0xad
# CHECK: lodsl
0x66 0xad
# CHECK: stosb
0xaa
# CHECK: stosw
0xab
# CHECK: stosl
0x66 0xab
# CHECK: strw %ax
0x0f 0x00 0xc8
# CHECK: strl %eax
0x66 0x0f 0x00 0xc8
-# CHECK: fsubp %st(1)
+# CHECK: fsubp %st, %st(1)
0xde 0xe1
-# CHECK: fsubp %st(2)
+# CHECK: fsubp %st, %st(2)
0xde 0xe2
# CHECKX: nop
0x66 0x90
# CHECKX: nop
0x90
# CHECK: xchgl %ecx, %eax
0x66 0x91
# CHECK: xchgl %ecx, %eax
0x66 0x91
# CHECK: retw
0xc3
# CHECK: retl
0x66 0xc3
# CHECK: lretw
0xcb
# CHECK: lretl
0x66 0xcb
# CHECK: callw -1
0xe8 0xff 0xff
# CHECK: wbnoinvd
0xf3 0x0f 0x09
# CHECK: umonitor %ax
0xf3 0x0f 0xae 0xf0
# CHECK: umonitor %eax
0x67 0xf3 0x0f 0xae 0xf0
#CHECK: movdir64b (%esi), %eax
0x67 0x66 0x0f 0x38 0xf8 0x06
#CHECK: movdir64b (%si), %ax
0x66 0x0f 0x38 0xf8 0x04
Index: vendor/llvm/dist-release_80/test/MC/MachO/file-single.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/MachO/file-single.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/MachO/file-single.s (nonexistent)
@@ -1,8 +0,0 @@
-// RUN: not llvm-mc -triple i386-apple-darwin9 %s -o /dev/null 2>&1 | FileCheck %s
-
-// Previously this crashed MC.
-
-// CHECK: error: target does not support '.file' without a number
-
- .file "dir/foo"
- nop
Index: vendor/llvm/dist-release_80/test/MC/MachO/file.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/MachO/file.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/MachO/file.s (revision 344166)
@@ -1,27 +1,30 @@
// RUN: llvm-mc -triple i386-apple-darwin9 %s -filetype=obj -o - | llvm-readobj -s -section-data | FileCheck %s
+// This number-less file directive is ignored on MachO.
+ .file "bar/baz.s"
+
.file 1 "dir/foo"
nop
// CHECK: Section {
// CHECK: Index: 1
// CHECK-NEXT: Name: __debug_line
// CHECK-NEXT: Segment: __DWARF
// CHECK-NEXT: Address: 0x1
// CHECK-NEXT: Size: 0x29
// CHECK-NEXT: Offset: 237
// CHECK-NEXT: Alignment: 0
// CHECK-NEXT: RelocationOffset: 0x0
// CHECK-NEXT: RelocationCount: 0
// CHECK-NEXT: Type: 0x0
// CHECK-NEXT: Attributes [ (0x20000)
// CHECK-NEXT: Debug (0x20000)
// CHECK-NEXT: ]
// CHECK-NEXT: Reserved1: 0x0
// CHECK-NEXT: Reserved2: 0x0
// CHECK-NEXT: SectionData (
// CHECK-NEXT: 000: 25000000 04001F00 00000101 01FB0E0D |%...............|
// CHECK-NEXT: 010: 00010101 01000000 01000001 64697200 |............dir.|
// CHECK-NEXT: 020: 00666F6F 00010000 00 |.foo.....|
// CHECK-NEXT: )
// CHECK-NEXT: }
Index: vendor/llvm/dist-release_80/test/MC/WebAssembly/external-func-address.ll
===================================================================
--- vendor/llvm/dist-release_80/test/MC/WebAssembly/external-func-address.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/WebAssembly/external-func-address.ll (revision 344166)
@@ -1,65 +1,65 @@
; RUN: llc -filetype=obj %s -o - | obj2yaml | FileCheck %s
target triple = "wasm32-unknown-unknown"
; Verify that addresses of external functions generate correctly typed
; imports and relocations of type R_TABLE_INDEX_I32.
declare void @f0(i32) #0
@ptr_to_f0 = hidden global void (i32)* @f0, align 4
-attributes #0 = { "wasm-import-module"="somewhere" }
+attributes #0 = { "wasm-import-module"="somewhere" "wasm-import-name"="something" }
declare void @f1(i32) #1
@ptr_to_f1 = hidden global void (i32)* @f1, align 4
; Check that varargs functions have correctly typed imports
declare i32 @varargs(i32, i32, ...)
define void @call(i32) {
%a = call i32 (i32, i32, ...) @varargs(i32 0, i32 0)
ret void
}
; CHECK: --- !WASM
; CHECK-NEXT: FileHeader:
; CHECK-NEXT: Version: 0x00000001
; CHECK-NEXT: Sections:
; CHECK-NEXT: - Type: TYPE
; CHECK-NEXT: Signatures:
; CHECK-NEXT: - Index: 0
; CHECK-NEXT: ReturnType: NORESULT
; CHECK-NEXT: ParamTypes:
; CHECK-NEXT: - I32
; CHECK-NEXT: - Index: 1
; CHECK-NEXT: ReturnType: I32
; CHECK-NEXT: ParamTypes:
; CHECK-NEXT: - I32
; CHECK-NEXT: - I32
; CHECK-NEXT: - I32
; CHECK: - Type: IMPORT
; CHECK-NEXT: Imports:
; CHECK: - Module: env
; CHECK-NEXT: Field: __linear_memory
; CHECK: - Module: env
; CHECK-NEXT: Field: __indirect_function_table
; CHECK: - Module: env
; CHECK-NEXT: Field: varargs
; CHECK-NEXT: Kind: FUNCTION
; CHECK-NEXT: SigIndex: 1
; CHECK: - Module: somewhere
-; CHECK-NEXT: Field: f0
+; CHECK-NEXT: Field: something
; CHECK: - Module: env
; CHECK-NEXT: Field: f1
; CHECK-NEXT: Kind: FUNCTION
; CHECK-NEXT: SigIndex: 0
; CHECK: - Type: ELEM
; CHECK-NEXT: Segments:
; CHECK-NEXT: - Offset:
; CHECK-NEXT: Opcode: I32_CONST
; CHECK-NEXT: Value: 1
; CHECK-NEXT: Functions: [ 1, 2 ]
; CHECK: - Type: DATA
; CHECK-NEXT: Relocations:
; CHECK-NEXT: - Type: R_WEBASSEMBLY_TABLE_INDEX_I32
; CHECK-NEXT: Index: 3
; CHECK-NEXT: Offset: 0x00000006
Index: vendor/llvm/dist-release_80/test/MC/WebAssembly/import-module.ll
===================================================================
--- vendor/llvm/dist-release_80/test/MC/WebAssembly/import-module.ll (nonexistent)
+++ vendor/llvm/dist-release_80/test/MC/WebAssembly/import-module.ll (revision 344166)
@@ -0,0 +1,31 @@
+; RUN: llc -filetype=obj %s -o - | obj2yaml | FileCheck %s
+
+target triple = "wasm32-unknown-unknown"
+
+define void @test() {
+ call void @foo()
+ call void @plain()
+ ret void
+}
+
+declare void @foo() #0
+declare void @plain()
+
+attributes #0 = { "wasm-import-module"="bar" "wasm-import-name"="qux" }
+
+; CHECK: - Type: IMPORT
+; CHECK-NEXT: Imports:
+; CHECK: - Module: bar
+; CHECK-NEXT: Field: qux
+; CHECK-NEXT: Kind: FUNCTION
+
+; CHECK: - Module: env
+; CHECK-NEXT: Field: plain
+; CHECK-NEXT: Kind: FUNCTION
+
+; CHECK: - Type: CUSTOM
+; CHECK: Name: foo
+; CHECK-NEXT: Flags: [ UNDEFINED ]
+
+; CHECK: Name: plain
+; CHECK-NEXT: Flags: [ UNDEFINED ]
Index: vendor/llvm/dist-release_80/test/MC/X86/PPRO-32.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/PPRO-32.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/PPRO-32.s (revision 344166)
@@ -1,126 +1,126 @@
// RUN: llvm-mc -triple i386-unknown-unknown --show-encoding %s | FileCheck %s
// CHECK: cmovael %eax, %eax
// CHECK: encoding: [0x0f,0x43,0xc0]
cmovael %eax, %eax
// CHECK: cmoval %eax, %eax
// CHECK: encoding: [0x0f,0x47,0xc0]
cmoval %eax, %eax
// CHECK: cmovbel %eax, %eax
// CHECK: encoding: [0x0f,0x46,0xc0]
cmovbel %eax, %eax
// CHECK: cmovbl %eax, %eax
// CHECK: encoding: [0x0f,0x42,0xc0]
cmovbl %eax, %eax
// CHECK: cmovel %eax, %eax
// CHECK: encoding: [0x0f,0x44,0xc0]
cmovel %eax, %eax
// CHECK: cmovgel %eax, %eax
// CHECK: encoding: [0x0f,0x4d,0xc0]
cmovgel %eax, %eax
// CHECK: cmovgl %eax, %eax
// CHECK: encoding: [0x0f,0x4f,0xc0]
cmovgl %eax, %eax
// CHECK: cmovlel %eax, %eax
// CHECK: encoding: [0x0f,0x4e,0xc0]
cmovlel %eax, %eax
// CHECK: cmovll %eax, %eax
// CHECK: encoding: [0x0f,0x4c,0xc0]
cmovll %eax, %eax
// CHECK: cmovnel %eax, %eax
// CHECK: encoding: [0x0f,0x45,0xc0]
cmovnel %eax, %eax
// CHECK: cmovnol %eax, %eax
// CHECK: encoding: [0x0f,0x41,0xc0]
cmovnol %eax, %eax
// CHECK: cmovnpl %eax, %eax
// CHECK: encoding: [0x0f,0x4b,0xc0]
cmovnpl %eax, %eax
// CHECK: cmovnsl %eax, %eax
// CHECK: encoding: [0x0f,0x49,0xc0]
cmovnsl %eax, %eax
// CHECK: cmovol %eax, %eax
// CHECK: encoding: [0x0f,0x40,0xc0]
cmovol %eax, %eax
// CHECK: cmovpl %eax, %eax
// CHECK: encoding: [0x0f,0x4a,0xc0]
cmovpl %eax, %eax
// CHECK: cmovsl %eax, %eax
// CHECK: encoding: [0x0f,0x48,0xc0]
cmovsl %eax, %eax
-// CHECK: fcmovbe %st(4), %st(0)
+// CHECK: fcmovbe %st(4), %st
// CHECK: encoding: [0xda,0xd4]
-fcmovbe %st(4), %st(0)
+fcmovbe %st(4), %st
-// CHECK: fcmovb %st(4), %st(0)
+// CHECK: fcmovb %st(4), %st
// CHECK: encoding: [0xda,0xc4]
-fcmovb %st(4), %st(0)
+fcmovb %st(4), %st
-// CHECK: fcmove %st(4), %st(0)
+// CHECK: fcmove %st(4), %st
// CHECK: encoding: [0xda,0xcc]
-fcmove %st(4), %st(0)
+fcmove %st(4), %st
-// CHECK: fcmovnbe %st(4), %st(0)
+// CHECK: fcmovnbe %st(4), %st
// CHECK: encoding: [0xdb,0xd4]
-fcmovnbe %st(4), %st(0)
+fcmovnbe %st(4), %st
-// CHECK: fcmovnb %st(4), %st(0)
+// CHECK: fcmovnb %st(4), %st
// CHECK: encoding: [0xdb,0xc4]
-fcmovnb %st(4), %st(0)
+fcmovnb %st(4), %st
-// CHECK: fcmovne %st(4), %st(0)
+// CHECK: fcmovne %st(4), %st
// CHECK: encoding: [0xdb,0xcc]
-fcmovne %st(4), %st(0)
+fcmovne %st(4), %st
-// CHECK: fcmovnu %st(4), %st(0)
+// CHECK: fcmovnu %st(4), %st
// CHECK: encoding: [0xdb,0xdc]
-fcmovnu %st(4), %st(0)
+fcmovnu %st(4), %st
-// CHECK: fcmovu %st(4), %st(0)
+// CHECK: fcmovu %st(4), %st
// CHECK: encoding: [0xda,0xdc]
-fcmovu %st(4), %st(0)
+fcmovu %st(4), %st
// CHECK: fcomi %st(4)
// CHECK: encoding: [0xdb,0xf4]
fcomi %st(4)
// CHECK: fcompi %st(4)
// CHECK: encoding: [0xdf,0xf4]
fcompi %st(4)
// CHECK: fucomi %st(4)
// CHECK: encoding: [0xdb,0xec]
fucomi %st(4)
// CHECK: fucompi %st(4)
// CHECK: encoding: [0xdf,0xec]
fucompi %st(4)
// CHECK: sysenter
// CHECK: encoding: [0x0f,0x34]
sysenter
// CHECK: sysexitl
// CHECK: encoding: [0x0f,0x35]
sysexitl
// CHECK: ud2
// CHECK: encoding: [0x0f,0x0b]
ud2
Index: vendor/llvm/dist-release_80/test/MC/X86/PPRO-64.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/PPRO-64.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/PPRO-64.s (revision 344166)
@@ -1,130 +1,130 @@
// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s
// CHECK: cmovael %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x43,0xed]
cmovael %r13d, %r13d
// CHECK: cmoval %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x47,0xed]
cmoval %r13d, %r13d
// CHECK: cmovbel %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x46,0xed]
cmovbel %r13d, %r13d
// CHECK: cmovbl %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x42,0xed]
cmovbl %r13d, %r13d
// CHECK: cmovel %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x44,0xed]
cmovel %r13d, %r13d
// CHECK: cmovgel %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x4d,0xed]
cmovgel %r13d, %r13d
// CHECK: cmovgl %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x4f,0xed]
cmovgl %r13d, %r13d
// CHECK: cmovlel %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x4e,0xed]
cmovlel %r13d, %r13d
// CHECK: cmovll %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x4c,0xed]
cmovll %r13d, %r13d
// CHECK: cmovnel %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x45,0xed]
cmovnel %r13d, %r13d
// CHECK: cmovnol %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x41,0xed]
cmovnol %r13d, %r13d
// CHECK: cmovnpl %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x4b,0xed]
cmovnpl %r13d, %r13d
// CHECK: cmovnsl %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x49,0xed]
cmovnsl %r13d, %r13d
// CHECK: cmovol %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x40,0xed]
cmovol %r13d, %r13d
// CHECK: cmovpl %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x4a,0xed]
cmovpl %r13d, %r13d
// CHECK: cmovsl %r13d, %r13d
// CHECK: encoding: [0x45,0x0f,0x48,0xed]
cmovsl %r13d, %r13d
-// CHECK: fcmovbe %st(4), %st(0)
+// CHECK: fcmovbe %st(4), %st
// CHECK: encoding: [0xda,0xd4]
-fcmovbe %st(4), %st(0)
+fcmovbe %st(4), %st
-// CHECK: fcmovb %st(4), %st(0)
+// CHECK: fcmovb %st(4), %st
// CHECK: encoding: [0xda,0xc4]
-fcmovb %st(4), %st(0)
+fcmovb %st(4), %st
-// CHECK: fcmove %st(4), %st(0)
+// CHECK: fcmove %st(4), %st
// CHECK: encoding: [0xda,0xcc]
-fcmove %st(4), %st(0)
+fcmove %st(4), %st
-// CHECK: fcmovnbe %st(4), %st(0)
+// CHECK: fcmovnbe %st(4), %st
// CHECK: encoding: [0xdb,0xd4]
-fcmovnbe %st(4), %st(0)
+fcmovnbe %st(4), %st
-// CHECK: fcmovnb %st(4), %st(0)
+// CHECK: fcmovnb %st(4), %st
// CHECK: encoding: [0xdb,0xc4]
-fcmovnb %st(4), %st(0)
+fcmovnb %st(4), %st
-// CHECK: fcmovne %st(4), %st(0)
+// CHECK: fcmovne %st(4), %st
// CHECK: encoding: [0xdb,0xcc]
-fcmovne %st(4), %st(0)
+fcmovne %st(4), %st
-// CHECK: fcmovnu %st(4), %st(0)
+// CHECK: fcmovnu %st(4), %st
// CHECK: encoding: [0xdb,0xdc]
-fcmovnu %st(4), %st(0)
+fcmovnu %st(4), %st
-// CHECK: fcmovu %st(4), %st(0)
+// CHECK: fcmovu %st(4), %st
// CHECK: encoding: [0xda,0xdc]
-fcmovu %st(4), %st(0)
+fcmovu %st(4), %st
// CHECK: fcomi %st(4)
// CHECK: encoding: [0xdb,0xf4]
fcomi %st(4)
// CHECK: fcompi %st(4)
// CHECK: encoding: [0xdf,0xf4]
fcompi %st(4)
// CHECK: fucomi %st(4)
// CHECK: encoding: [0xdb,0xec]
fucomi %st(4)
// CHECK: fucompi %st(4)
// CHECK: encoding: [0xdf,0xec]
fucompi %st(4)
// CHECK: sysenter
// CHECK: encoding: [0x0f,0x34]
sysenter
// CHECK: sysexitl
// CHECK: encoding: [0x0f,0x35]
sysexitl
// CHECK: sysexitq
// CHECK: encoding: [0x48,0x0f,0x35]
sysexitq
// CHECK: ud2
// CHECK: encoding: [0x0f,0x0b]
ud2
Index: vendor/llvm/dist-release_80/test/MC/X86/X87-32.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/X87-32.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/X87-32.s (revision 344166)
@@ -1,1618 +1,1618 @@
// RUN: llvm-mc -triple i386-unknown-unknown --show-encoding %s | FileCheck %s
// CHECK: f2xm1
// CHECK: encoding: [0xd9,0xf0]
f2xm1
// CHECK: fabs
// CHECK: encoding: [0xd9,0xe1]
fabs
// CHECK: faddl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0x84,0x82,0x10,0xe3,0x0f,0xe3]
faddl -485498096(%edx,%eax,4)
// CHECK: faddl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0x84,0x82,0xf0,0x1c,0xf0,0x1c]
faddl 485498096(%edx,%eax,4)
// CHECK: faddl 485498096(%edx)
// CHECK: encoding: [0xdc,0x82,0xf0,0x1c,0xf0,0x1c]
faddl 485498096(%edx)
// CHECK: faddl 485498096
// CHECK: encoding: [0xdc,0x05,0xf0,0x1c,0xf0,0x1c]
faddl 485498096
// CHECK: faddl 64(%edx,%eax)
// CHECK: encoding: [0xdc,0x44,0x02,0x40]
faddl 64(%edx,%eax)
// CHECK: faddl (%edx)
// CHECK: encoding: [0xdc,0x02]
faddl (%edx)
-// CHECK: faddp %st(4)
+// CHECK: faddp %st, %st(4)
// CHECK: encoding: [0xde,0xc4]
faddp %st(4)
// CHECK: fadds -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0x84,0x82,0x10,0xe3,0x0f,0xe3]
fadds -485498096(%edx,%eax,4)
// CHECK: fadds 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0x84,0x82,0xf0,0x1c,0xf0,0x1c]
fadds 485498096(%edx,%eax,4)
// CHECK: fadds 485498096(%edx)
// CHECK: encoding: [0xd8,0x82,0xf0,0x1c,0xf0,0x1c]
fadds 485498096(%edx)
// CHECK: fadds 485498096
// CHECK: encoding: [0xd8,0x05,0xf0,0x1c,0xf0,0x1c]
fadds 485498096
// CHECK: fadds 64(%edx,%eax)
// CHECK: encoding: [0xd8,0x44,0x02,0x40]
fadds 64(%edx,%eax)
// CHECK: fadds (%edx)
// CHECK: encoding: [0xd8,0x02]
fadds (%edx)
-// CHECK: fadd %st(0), %st(4)
+// CHECK: fadd %st, %st(4)
// CHECK: encoding: [0xdc,0xc4]
-fadd %st(0), %st(4)
+fadd %st, %st(4)
-// CHECK: fadd %st(4)
+// CHECK: fadd %st(4), %st
// CHECK: encoding: [0xd8,0xc4]
fadd %st(4)
// CHECK: fbld -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
fbld -485498096(%edx,%eax,4)
// CHECK: fbld 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
fbld 485498096(%edx,%eax,4)
// CHECK: fbld 485498096(%edx)
// CHECK: encoding: [0xdf,0xa2,0xf0,0x1c,0xf0,0x1c]
fbld 485498096(%edx)
// CHECK: fbld 485498096
// CHECK: encoding: [0xdf,0x25,0xf0,0x1c,0xf0,0x1c]
fbld 485498096
// CHECK: fbld 64(%edx,%eax)
// CHECK: encoding: [0xdf,0x64,0x02,0x40]
fbld 64(%edx,%eax)
// CHECK: fbld (%edx)
// CHECK: encoding: [0xdf,0x22]
fbld (%edx)
// CHECK: fbstp -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0xb4,0x82,0x10,0xe3,0x0f,0xe3]
fbstp -485498096(%edx,%eax,4)
// CHECK: fbstp 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0xb4,0x82,0xf0,0x1c,0xf0,0x1c]
fbstp 485498096(%edx,%eax,4)
// CHECK: fbstp 485498096(%edx)
// CHECK: encoding: [0xdf,0xb2,0xf0,0x1c,0xf0,0x1c]
fbstp 485498096(%edx)
// CHECK: fbstp 485498096
// CHECK: encoding: [0xdf,0x35,0xf0,0x1c,0xf0,0x1c]
fbstp 485498096
// CHECK: fbstp 64(%edx,%eax)
// CHECK: encoding: [0xdf,0x74,0x02,0x40]
fbstp 64(%edx,%eax)
// CHECK: fbstp (%edx)
// CHECK: encoding: [0xdf,0x32]
fbstp (%edx)
// CHECK: fchs
// CHECK: encoding: [0xd9,0xe0]
fchs
// CHECK: fcoml -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0x94,0x82,0x10,0xe3,0x0f,0xe3]
fcoml -485498096(%edx,%eax,4)
// CHECK: fcoml 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0x94,0x82,0xf0,0x1c,0xf0,0x1c]
fcoml 485498096(%edx,%eax,4)
// CHECK: fcoml 485498096(%edx)
// CHECK: encoding: [0xdc,0x92,0xf0,0x1c,0xf0,0x1c]
fcoml 485498096(%edx)
// CHECK: fcoml 485498096
// CHECK: encoding: [0xdc,0x15,0xf0,0x1c,0xf0,0x1c]
fcoml 485498096
// CHECK: fcoml 64(%edx,%eax)
// CHECK: encoding: [0xdc,0x54,0x02,0x40]
fcoml 64(%edx,%eax)
// CHECK: fcoml (%edx)
// CHECK: encoding: [0xdc,0x12]
fcoml (%edx)
// CHECK: fcompl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0x9c,0x82,0x10,0xe3,0x0f,0xe3]
fcompl -485498096(%edx,%eax,4)
// CHECK: fcompl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0x9c,0x82,0xf0,0x1c,0xf0,0x1c]
fcompl 485498096(%edx,%eax,4)
// CHECK: fcompl 485498096(%edx)
// CHECK: encoding: [0xdc,0x9a,0xf0,0x1c,0xf0,0x1c]
fcompl 485498096(%edx)
// CHECK: fcompl 485498096
// CHECK: encoding: [0xdc,0x1d,0xf0,0x1c,0xf0,0x1c]
fcompl 485498096
// CHECK: fcompl 64(%edx,%eax)
// CHECK: encoding: [0xdc,0x5c,0x02,0x40]
fcompl 64(%edx,%eax)
// CHECK: fcompl (%edx)
// CHECK: encoding: [0xdc,0x1a]
fcompl (%edx)
// CHECK: fcompp
// CHECK: encoding: [0xde,0xd9]
fcompp
// CHECK: fcomps -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0x9c,0x82,0x10,0xe3,0x0f,0xe3]
fcomps -485498096(%edx,%eax,4)
// CHECK: fcomps 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0x9c,0x82,0xf0,0x1c,0xf0,0x1c]
fcomps 485498096(%edx,%eax,4)
// CHECK: fcomps 485498096(%edx)
// CHECK: encoding: [0xd8,0x9a,0xf0,0x1c,0xf0,0x1c]
fcomps 485498096(%edx)
// CHECK: fcomps 485498096
// CHECK: encoding: [0xd8,0x1d,0xf0,0x1c,0xf0,0x1c]
fcomps 485498096
// CHECK: fcomps 64(%edx,%eax)
// CHECK: encoding: [0xd8,0x5c,0x02,0x40]
fcomps 64(%edx,%eax)
// CHECK: fcomps (%edx)
// CHECK: encoding: [0xd8,0x1a]
fcomps (%edx)
// CHECK: fcomp %st(4)
// CHECK: encoding: [0xd8,0xdc]
fcomp %st(4)
// CHECK: fcoms -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0x94,0x82,0x10,0xe3,0x0f,0xe3]
fcoms -485498096(%edx,%eax,4)
// CHECK: fcoms 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0x94,0x82,0xf0,0x1c,0xf0,0x1c]
fcoms 485498096(%edx,%eax,4)
// CHECK: fcoms 485498096(%edx)
// CHECK: encoding: [0xd8,0x92,0xf0,0x1c,0xf0,0x1c]
fcoms 485498096(%edx)
// CHECK: fcoms 485498096
// CHECK: encoding: [0xd8,0x15,0xf0,0x1c,0xf0,0x1c]
fcoms 485498096
// CHECK: fcoms 64(%edx,%eax)
// CHECK: encoding: [0xd8,0x54,0x02,0x40]
fcoms 64(%edx,%eax)
// CHECK: fcoms (%edx)
// CHECK: encoding: [0xd8,0x12]
fcoms (%edx)
// CHECK: fcom %st(4)
// CHECK: encoding: [0xd8,0xd4]
fcom %st(4)
// CHECK: fcos
// CHECK: encoding: [0xd9,0xff]
fcos
// CHECK: fdecstp
// CHECK: encoding: [0xd9,0xf6]
fdecstp
// CHECK: fdivl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0xb4,0x82,0x10,0xe3,0x0f,0xe3]
fdivl -485498096(%edx,%eax,4)
// CHECK: fdivl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0xb4,0x82,0xf0,0x1c,0xf0,0x1c]
fdivl 485498096(%edx,%eax,4)
// CHECK: fdivl 485498096(%edx)
// CHECK: encoding: [0xdc,0xb2,0xf0,0x1c,0xf0,0x1c]
fdivl 485498096(%edx)
// CHECK: fdivl 485498096
// CHECK: encoding: [0xdc,0x35,0xf0,0x1c,0xf0,0x1c]
fdivl 485498096
// CHECK: fdivl 64(%edx,%eax)
// CHECK: encoding: [0xdc,0x74,0x02,0x40]
fdivl 64(%edx,%eax)
// CHECK: fdivl (%edx)
// CHECK: encoding: [0xdc,0x32]
fdivl (%edx)
-// CHECK: fdivp %st(4)
+// CHECK: fdivp %st, %st(4)
// CHECK: encoding: [0xde,0xf4]
fdivp %st(4)
// CHECK: fdivrl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0xbc,0x82,0x10,0xe3,0x0f,0xe3]
fdivrl -485498096(%edx,%eax,4)
// CHECK: fdivrl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0xbc,0x82,0xf0,0x1c,0xf0,0x1c]
fdivrl 485498096(%edx,%eax,4)
// CHECK: fdivrl 485498096(%edx)
// CHECK: encoding: [0xdc,0xba,0xf0,0x1c,0xf0,0x1c]
fdivrl 485498096(%edx)
// CHECK: fdivrl 485498096
// CHECK: encoding: [0xdc,0x3d,0xf0,0x1c,0xf0,0x1c]
fdivrl 485498096
// CHECK: fdivrl 64(%edx,%eax)
// CHECK: encoding: [0xdc,0x7c,0x02,0x40]
fdivrl 64(%edx,%eax)
// CHECK: fdivrl (%edx)
// CHECK: encoding: [0xdc,0x3a]
fdivrl (%edx)
-// CHECK: fdivrp %st(4)
+// CHECK: fdivrp %st, %st(4)
// CHECK: encoding: [0xde,0xfc]
fdivrp %st(4)
// CHECK: fdivrs -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0xbc,0x82,0x10,0xe3,0x0f,0xe3]
fdivrs -485498096(%edx,%eax,4)
// CHECK: fdivrs 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0xbc,0x82,0xf0,0x1c,0xf0,0x1c]
fdivrs 485498096(%edx,%eax,4)
// CHECK: fdivrs 485498096(%edx)
// CHECK: encoding: [0xd8,0xba,0xf0,0x1c,0xf0,0x1c]
fdivrs 485498096(%edx)
// CHECK: fdivrs 485498096
// CHECK: encoding: [0xd8,0x3d,0xf0,0x1c,0xf0,0x1c]
fdivrs 485498096
// CHECK: fdivrs 64(%edx,%eax)
// CHECK: encoding: [0xd8,0x7c,0x02,0x40]
fdivrs 64(%edx,%eax)
// CHECK: fdivrs (%edx)
// CHECK: encoding: [0xd8,0x3a]
fdivrs (%edx)
-// CHECK: fdivr %st(0), %st(4)
+// CHECK: fdivr %st, %st(4)
// CHECK: encoding: [0xdc,0xfc]
-fdivr %st(0), %st(4)
+fdivr %st, %st(4)
-// CHECK: fdivr %st(4)
+// CHECK: fdivr %st(4), %st
// CHECK: encoding: [0xd8,0xfc]
fdivr %st(4)
// CHECK: fdivs -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0xb4,0x82,0x10,0xe3,0x0f,0xe3]
fdivs -485498096(%edx,%eax,4)
// CHECK: fdivs 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0xb4,0x82,0xf0,0x1c,0xf0,0x1c]
fdivs 485498096(%edx,%eax,4)
// CHECK: fdivs 485498096(%edx)
// CHECK: encoding: [0xd8,0xb2,0xf0,0x1c,0xf0,0x1c]
fdivs 485498096(%edx)
// CHECK: fdivs 485498096
// CHECK: encoding: [0xd8,0x35,0xf0,0x1c,0xf0,0x1c]
fdivs 485498096
// CHECK: fdivs 64(%edx,%eax)
// CHECK: encoding: [0xd8,0x74,0x02,0x40]
fdivs 64(%edx,%eax)
// CHECK: fdivs (%edx)
// CHECK: encoding: [0xd8,0x32]
fdivs (%edx)
-// CHECK: fdiv %st(0), %st(4)
+// CHECK: fdiv %st, %st(4)
// CHECK: encoding: [0xdc,0xf4]
-fdiv %st(0), %st(4)
+fdiv %st, %st(4)
-// CHECK: fdiv %st(4)
+// CHECK: fdiv %st(4), %st
// CHECK: encoding: [0xd8,0xf4]
fdiv %st(4)
// CHECK: ffreep %st(4)
// CHECK: encoding: [0xdf,0xc4]
ffreep %st(4)
// CHECK: ffree %st(4)
// CHECK: encoding: [0xdd,0xc4]
ffree %st(4)
// CHECK: fiaddl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0x84,0x82,0x10,0xe3,0x0f,0xe3]
fiaddl -485498096(%edx,%eax,4)
// CHECK: fiaddl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0x84,0x82,0xf0,0x1c,0xf0,0x1c]
fiaddl 485498096(%edx,%eax,4)
// CHECK: fiaddl 485498096(%edx)
// CHECK: encoding: [0xda,0x82,0xf0,0x1c,0xf0,0x1c]
fiaddl 485498096(%edx)
// CHECK: fiaddl 485498096
// CHECK: encoding: [0xda,0x05,0xf0,0x1c,0xf0,0x1c]
fiaddl 485498096
// CHECK: fiaddl 64(%edx,%eax)
// CHECK: encoding: [0xda,0x44,0x02,0x40]
fiaddl 64(%edx,%eax)
// CHECK: fiaddl (%edx)
// CHECK: encoding: [0xda,0x02]
fiaddl (%edx)
// CHECK: fiadds -485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0x84,0x82,0x10,0xe3,0x0f,0xe3]
fiadds -485498096(%edx,%eax,4)
// CHECK: fiadds 485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0x84,0x82,0xf0,0x1c,0xf0,0x1c]
fiadds 485498096(%edx,%eax,4)
// CHECK: fiadds 485498096(%edx)
// CHECK: encoding: [0xde,0x82,0xf0,0x1c,0xf0,0x1c]
fiadds 485498096(%edx)
// CHECK: fiadds 485498096
// CHECK: encoding: [0xde,0x05,0xf0,0x1c,0xf0,0x1c]
fiadds 485498096
// CHECK: fiadds 64(%edx,%eax)
// CHECK: encoding: [0xde,0x44,0x02,0x40]
fiadds 64(%edx,%eax)
// CHECK: fiadds (%edx)
// CHECK: encoding: [0xde,0x02]
fiadds (%edx)
// CHECK: ficoml -485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0x94,0x82,0x10,0xe3,0x0f,0xe3]
ficoml -485498096(%edx,%eax,4)
// CHECK: ficoml 485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0x94,0x82,0xf0,0x1c,0xf0,0x1c]
ficoml 485498096(%edx,%eax,4)
// CHECK: ficoml 485498096(%edx)
// CHECK: encoding: [0xda,0x92,0xf0,0x1c,0xf0,0x1c]
ficoml 485498096(%edx)
// CHECK: ficoml 485498096
// CHECK: encoding: [0xda,0x15,0xf0,0x1c,0xf0,0x1c]
ficoml 485498096
// CHECK: ficoml 64(%edx,%eax)
// CHECK: encoding: [0xda,0x54,0x02,0x40]
ficoml 64(%edx,%eax)
// CHECK: ficoml (%edx)
// CHECK: encoding: [0xda,0x12]
ficoml (%edx)
// CHECK: ficompl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0x9c,0x82,0x10,0xe3,0x0f,0xe3]
ficompl -485498096(%edx,%eax,4)
// CHECK: ficompl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0x9c,0x82,0xf0,0x1c,0xf0,0x1c]
ficompl 485498096(%edx,%eax,4)
// CHECK: ficompl 485498096(%edx)
// CHECK: encoding: [0xda,0x9a,0xf0,0x1c,0xf0,0x1c]
ficompl 485498096(%edx)
// CHECK: ficompl 485498096
// CHECK: encoding: [0xda,0x1d,0xf0,0x1c,0xf0,0x1c]
ficompl 485498096
// CHECK: ficompl 64(%edx,%eax)
// CHECK: encoding: [0xda,0x5c,0x02,0x40]
ficompl 64(%edx,%eax)
// CHECK: ficompl (%edx)
// CHECK: encoding: [0xda,0x1a]
ficompl (%edx)
// CHECK: ficomps -485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0x9c,0x82,0x10,0xe3,0x0f,0xe3]
ficomps -485498096(%edx,%eax,4)
// CHECK: ficomps 485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0x9c,0x82,0xf0,0x1c,0xf0,0x1c]
ficomps 485498096(%edx,%eax,4)
// CHECK: ficomps 485498096(%edx)
// CHECK: encoding: [0xde,0x9a,0xf0,0x1c,0xf0,0x1c]
ficomps 485498096(%edx)
// CHECK: ficomps 485498096
// CHECK: encoding: [0xde,0x1d,0xf0,0x1c,0xf0,0x1c]
ficomps 485498096
// CHECK: ficomps 64(%edx,%eax)
// CHECK: encoding: [0xde,0x5c,0x02,0x40]
ficomps 64(%edx,%eax)
// CHECK: ficomps (%edx)
// CHECK: encoding: [0xde,0x1a]
ficomps (%edx)
// CHECK: ficoms -485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0x94,0x82,0x10,0xe3,0x0f,0xe3]
ficoms -485498096(%edx,%eax,4)
// CHECK: ficoms 485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0x94,0x82,0xf0,0x1c,0xf0,0x1c]
ficoms 485498096(%edx,%eax,4)
// CHECK: ficoms 485498096(%edx)
// CHECK: encoding: [0xde,0x92,0xf0,0x1c,0xf0,0x1c]
ficoms 485498096(%edx)
// CHECK: ficoms 485498096
// CHECK: encoding: [0xde,0x15,0xf0,0x1c,0xf0,0x1c]
ficoms 485498096
// CHECK: ficoms 64(%edx,%eax)
// CHECK: encoding: [0xde,0x54,0x02,0x40]
ficoms 64(%edx,%eax)
// CHECK: ficoms (%edx)
// CHECK: encoding: [0xde,0x12]
ficoms (%edx)
// CHECK: fidivl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0xb4,0x82,0x10,0xe3,0x0f,0xe3]
fidivl -485498096(%edx,%eax,4)
// CHECK: fidivl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0xb4,0x82,0xf0,0x1c,0xf0,0x1c]
fidivl 485498096(%edx,%eax,4)
// CHECK: fidivl 485498096(%edx)
// CHECK: encoding: [0xda,0xb2,0xf0,0x1c,0xf0,0x1c]
fidivl 485498096(%edx)
// CHECK: fidivl 485498096
// CHECK: encoding: [0xda,0x35,0xf0,0x1c,0xf0,0x1c]
fidivl 485498096
// CHECK: fidivl 64(%edx,%eax)
// CHECK: encoding: [0xda,0x74,0x02,0x40]
fidivl 64(%edx,%eax)
// CHECK: fidivl (%edx)
// CHECK: encoding: [0xda,0x32]
fidivl (%edx)
// CHECK: fidivrl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0xbc,0x82,0x10,0xe3,0x0f,0xe3]
fidivrl -485498096(%edx,%eax,4)
// CHECK: fidivrl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0xbc,0x82,0xf0,0x1c,0xf0,0x1c]
fidivrl 485498096(%edx,%eax,4)
// CHECK: fidivrl 485498096(%edx)
// CHECK: encoding: [0xda,0xba,0xf0,0x1c,0xf0,0x1c]
fidivrl 485498096(%edx)
// CHECK: fidivrl 485498096
// CHECK: encoding: [0xda,0x3d,0xf0,0x1c,0xf0,0x1c]
fidivrl 485498096
// CHECK: fidivrl 64(%edx,%eax)
// CHECK: encoding: [0xda,0x7c,0x02,0x40]
fidivrl 64(%edx,%eax)
// CHECK: fidivrl (%edx)
// CHECK: encoding: [0xda,0x3a]
fidivrl (%edx)
// CHECK: fidivrs -485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0xbc,0x82,0x10,0xe3,0x0f,0xe3]
fidivrs -485498096(%edx,%eax,4)
// CHECK: fidivrs 485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0xbc,0x82,0xf0,0x1c,0xf0,0x1c]
fidivrs 485498096(%edx,%eax,4)
// CHECK: fidivrs 485498096(%edx)
// CHECK: encoding: [0xde,0xba,0xf0,0x1c,0xf0,0x1c]
fidivrs 485498096(%edx)
// CHECK: fidivrs 485498096
// CHECK: encoding: [0xde,0x3d,0xf0,0x1c,0xf0,0x1c]
fidivrs 485498096
// CHECK: fidivrs 64(%edx,%eax)
// CHECK: encoding: [0xde,0x7c,0x02,0x40]
fidivrs 64(%edx,%eax)
// CHECK: fidivrs (%edx)
// CHECK: encoding: [0xde,0x3a]
fidivrs (%edx)
// CHECK: fidivs -485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0xb4,0x82,0x10,0xe3,0x0f,0xe3]
fidivs -485498096(%edx,%eax,4)
// CHECK: fidivs 485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0xb4,0x82,0xf0,0x1c,0xf0,0x1c]
fidivs 485498096(%edx,%eax,4)
// CHECK: fidivs 485498096(%edx)
// CHECK: encoding: [0xde,0xb2,0xf0,0x1c,0xf0,0x1c]
fidivs 485498096(%edx)
// CHECK: fidivs 485498096
// CHECK: encoding: [0xde,0x35,0xf0,0x1c,0xf0,0x1c]
fidivs 485498096
// CHECK: fidivs 64(%edx,%eax)
// CHECK: encoding: [0xde,0x74,0x02,0x40]
fidivs 64(%edx,%eax)
// CHECK: fidivs (%edx)
// CHECK: encoding: [0xde,0x32]
fidivs (%edx)
// CHECK: fildl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0x84,0x82,0x10,0xe3,0x0f,0xe3]
fildl -485498096(%edx,%eax,4)
// CHECK: fildl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0x84,0x82,0xf0,0x1c,0xf0,0x1c]
fildl 485498096(%edx,%eax,4)
// CHECK: fildl 485498096(%edx)
// CHECK: encoding: [0xdb,0x82,0xf0,0x1c,0xf0,0x1c]
fildl 485498096(%edx)
// CHECK: fildl 485498096
// CHECK: encoding: [0xdb,0x05,0xf0,0x1c,0xf0,0x1c]
fildl 485498096
// CHECK: fildl 64(%edx,%eax)
// CHECK: encoding: [0xdb,0x44,0x02,0x40]
fildl 64(%edx,%eax)
// CHECK: fildl (%edx)
// CHECK: encoding: [0xdb,0x02]
fildl (%edx)
// CHECK: fildll -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0xac,0x82,0x10,0xe3,0x0f,0xe3]
fildll -485498096(%edx,%eax,4)
// CHECK: fildll 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0xac,0x82,0xf0,0x1c,0xf0,0x1c]
fildll 485498096(%edx,%eax,4)
// CHECK: fildll 485498096(%edx)
// CHECK: encoding: [0xdf,0xaa,0xf0,0x1c,0xf0,0x1c]
fildll 485498096(%edx)
// CHECK: fildll 485498096
// CHECK: encoding: [0xdf,0x2d,0xf0,0x1c,0xf0,0x1c]
fildll 485498096
// CHECK: fildll 64(%edx,%eax)
// CHECK: encoding: [0xdf,0x6c,0x02,0x40]
fildll 64(%edx,%eax)
// CHECK: fildll (%edx)
// CHECK: encoding: [0xdf,0x2a]
fildll (%edx)
// CHECK: filds -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0x84,0x82,0x10,0xe3,0x0f,0xe3]
filds -485498096(%edx,%eax,4)
// CHECK: filds 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0x84,0x82,0xf0,0x1c,0xf0,0x1c]
filds 485498096(%edx,%eax,4)
// CHECK: filds 485498096(%edx)
// CHECK: encoding: [0xdf,0x82,0xf0,0x1c,0xf0,0x1c]
filds 485498096(%edx)
// CHECK: filds 485498096
// CHECK: encoding: [0xdf,0x05,0xf0,0x1c,0xf0,0x1c]
filds 485498096
// CHECK: filds 64(%edx,%eax)
// CHECK: encoding: [0xdf,0x44,0x02,0x40]
filds 64(%edx,%eax)
// CHECK: filds (%edx)
// CHECK: encoding: [0xdf,0x02]
filds (%edx)
// CHECK: fimull -485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
fimull -485498096(%edx,%eax,4)
// CHECK: fimull 485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
fimull 485498096(%edx,%eax,4)
// CHECK: fimull 485498096(%edx)
// CHECK: encoding: [0xda,0x8a,0xf0,0x1c,0xf0,0x1c]
fimull 485498096(%edx)
// CHECK: fimull 485498096
// CHECK: encoding: [0xda,0x0d,0xf0,0x1c,0xf0,0x1c]
fimull 485498096
// CHECK: fimull 64(%edx,%eax)
// CHECK: encoding: [0xda,0x4c,0x02,0x40]
fimull 64(%edx,%eax)
// CHECK: fimull (%edx)
// CHECK: encoding: [0xda,0x0a]
fimull (%edx)
// CHECK: fimuls -485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
fimuls -485498096(%edx,%eax,4)
// CHECK: fimuls 485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
fimuls 485498096(%edx,%eax,4)
// CHECK: fimuls 485498096(%edx)
// CHECK: encoding: [0xde,0x8a,0xf0,0x1c,0xf0,0x1c]
fimuls 485498096(%edx)
// CHECK: fimuls 485498096
// CHECK: encoding: [0xde,0x0d,0xf0,0x1c,0xf0,0x1c]
fimuls 485498096
// CHECK: fimuls 64(%edx,%eax)
// CHECK: encoding: [0xde,0x4c,0x02,0x40]
fimuls 64(%edx,%eax)
// CHECK: fimuls (%edx)
// CHECK: encoding: [0xde,0x0a]
fimuls (%edx)
// CHECK: fincstp
// CHECK: encoding: [0xd9,0xf7]
fincstp
// CHECK: fistl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0x94,0x82,0x10,0xe3,0x0f,0xe3]
fistl -485498096(%edx,%eax,4)
// CHECK: fistl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0x94,0x82,0xf0,0x1c,0xf0,0x1c]
fistl 485498096(%edx,%eax,4)
// CHECK: fistl 485498096(%edx)
// CHECK: encoding: [0xdb,0x92,0xf0,0x1c,0xf0,0x1c]
fistl 485498096(%edx)
// CHECK: fistl 485498096
// CHECK: encoding: [0xdb,0x15,0xf0,0x1c,0xf0,0x1c]
fistl 485498096
// CHECK: fistl 64(%edx,%eax)
// CHECK: encoding: [0xdb,0x54,0x02,0x40]
fistl 64(%edx,%eax)
// CHECK: fistl (%edx)
// CHECK: encoding: [0xdb,0x12]
fistl (%edx)
// CHECK: fistpl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0x9c,0x82,0x10,0xe3,0x0f,0xe3]
fistpl -485498096(%edx,%eax,4)
// CHECK: fistpl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0x9c,0x82,0xf0,0x1c,0xf0,0x1c]
fistpl 485498096(%edx,%eax,4)
// CHECK: fistpl 485498096(%edx)
// CHECK: encoding: [0xdb,0x9a,0xf0,0x1c,0xf0,0x1c]
fistpl 485498096(%edx)
// CHECK: fistpl 485498096
// CHECK: encoding: [0xdb,0x1d,0xf0,0x1c,0xf0,0x1c]
fistpl 485498096
// CHECK: fistpl 64(%edx,%eax)
// CHECK: encoding: [0xdb,0x5c,0x02,0x40]
fistpl 64(%edx,%eax)
// CHECK: fistpl (%edx)
// CHECK: encoding: [0xdb,0x1a]
fistpl (%edx)
// CHECK: fistpll -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0xbc,0x82,0x10,0xe3,0x0f,0xe3]
fistpll -485498096(%edx,%eax,4)
// CHECK: fistpll 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0xbc,0x82,0xf0,0x1c,0xf0,0x1c]
fistpll 485498096(%edx,%eax,4)
// CHECK: fistpll 485498096(%edx)
// CHECK: encoding: [0xdf,0xba,0xf0,0x1c,0xf0,0x1c]
fistpll 485498096(%edx)
// CHECK: fistpll 485498096
// CHECK: encoding: [0xdf,0x3d,0xf0,0x1c,0xf0,0x1c]
fistpll 485498096
// CHECK: fistpll 64(%edx,%eax)
// CHECK: encoding: [0xdf,0x7c,0x02,0x40]
fistpll 64(%edx,%eax)
// CHECK: fistpll (%edx)
// CHECK: encoding: [0xdf,0x3a]
fistpll (%edx)
// CHECK: fistps -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0x9c,0x82,0x10,0xe3,0x0f,0xe3]
fistps -485498096(%edx,%eax,4)
// CHECK: fistps 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0x9c,0x82,0xf0,0x1c,0xf0,0x1c]
fistps 485498096(%edx,%eax,4)
// CHECK: fistps 485498096(%edx)
// CHECK: encoding: [0xdf,0x9a,0xf0,0x1c,0xf0,0x1c]
fistps 485498096(%edx)
// CHECK: fistps 485498096
// CHECK: encoding: [0xdf,0x1d,0xf0,0x1c,0xf0,0x1c]
fistps 485498096
// CHECK: fistps 64(%edx,%eax)
// CHECK: encoding: [0xdf,0x5c,0x02,0x40]
fistps 64(%edx,%eax)
// CHECK: fistps (%edx)
// CHECK: encoding: [0xdf,0x1a]
fistps (%edx)
// CHECK: fists -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0x94,0x82,0x10,0xe3,0x0f,0xe3]
fists -485498096(%edx,%eax,4)
// CHECK: fists 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdf,0x94,0x82,0xf0,0x1c,0xf0,0x1c]
fists 485498096(%edx,%eax,4)
// CHECK: fists 485498096(%edx)
// CHECK: encoding: [0xdf,0x92,0xf0,0x1c,0xf0,0x1c]
fists 485498096(%edx)
// CHECK: fists 485498096
// CHECK: encoding: [0xdf,0x15,0xf0,0x1c,0xf0,0x1c]
fists 485498096
// CHECK: fists 64(%edx,%eax)
// CHECK: encoding: [0xdf,0x54,0x02,0x40]
fists 64(%edx,%eax)
// CHECK: fists (%edx)
// CHECK: encoding: [0xdf,0x12]
fists (%edx)
// CHECK: fisubl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
fisubl -485498096(%edx,%eax,4)
// CHECK: fisubl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
fisubl 485498096(%edx,%eax,4)
// CHECK: fisubl 485498096(%edx)
// CHECK: encoding: [0xda,0xa2,0xf0,0x1c,0xf0,0x1c]
fisubl 485498096(%edx)
// CHECK: fisubl 485498096
// CHECK: encoding: [0xda,0x25,0xf0,0x1c,0xf0,0x1c]
fisubl 485498096
// CHECK: fisubl 64(%edx,%eax)
// CHECK: encoding: [0xda,0x64,0x02,0x40]
fisubl 64(%edx,%eax)
// CHECK: fisubl (%edx)
// CHECK: encoding: [0xda,0x22]
fisubl (%edx)
// CHECK: fisubrl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0xac,0x82,0x10,0xe3,0x0f,0xe3]
fisubrl -485498096(%edx,%eax,4)
// CHECK: fisubrl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xda,0xac,0x82,0xf0,0x1c,0xf0,0x1c]
fisubrl 485498096(%edx,%eax,4)
// CHECK: fisubrl 485498096(%edx)
// CHECK: encoding: [0xda,0xaa,0xf0,0x1c,0xf0,0x1c]
fisubrl 485498096(%edx)
// CHECK: fisubrl 485498096
// CHECK: encoding: [0xda,0x2d,0xf0,0x1c,0xf0,0x1c]
fisubrl 485498096
// CHECK: fisubrl 64(%edx,%eax)
// CHECK: encoding: [0xda,0x6c,0x02,0x40]
fisubrl 64(%edx,%eax)
// CHECK: fisubrl (%edx)
// CHECK: encoding: [0xda,0x2a]
fisubrl (%edx)
// CHECK: fisubrs -485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0xac,0x82,0x10,0xe3,0x0f,0xe3]
fisubrs -485498096(%edx,%eax,4)
// CHECK: fisubrs 485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0xac,0x82,0xf0,0x1c,0xf0,0x1c]
fisubrs 485498096(%edx,%eax,4)
// CHECK: fisubrs 485498096(%edx)
// CHECK: encoding: [0xde,0xaa,0xf0,0x1c,0xf0,0x1c]
fisubrs 485498096(%edx)
// CHECK: fisubrs 485498096
// CHECK: encoding: [0xde,0x2d,0xf0,0x1c,0xf0,0x1c]
fisubrs 485498096
// CHECK: fisubrs 64(%edx,%eax)
// CHECK: encoding: [0xde,0x6c,0x02,0x40]
fisubrs 64(%edx,%eax)
// CHECK: fisubrs (%edx)
// CHECK: encoding: [0xde,0x2a]
fisubrs (%edx)
// CHECK: fisubs -485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
fisubs -485498096(%edx,%eax,4)
// CHECK: fisubs 485498096(%edx,%eax,4)
// CHECK: encoding: [0xde,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
fisubs 485498096(%edx,%eax,4)
// CHECK: fisubs 485498096(%edx)
// CHECK: encoding: [0xde,0xa2,0xf0,0x1c,0xf0,0x1c]
fisubs 485498096(%edx)
// CHECK: fisubs 485498096
// CHECK: encoding: [0xde,0x25,0xf0,0x1c,0xf0,0x1c]
fisubs 485498096
// CHECK: fisubs 64(%edx,%eax)
// CHECK: encoding: [0xde,0x64,0x02,0x40]
fisubs 64(%edx,%eax)
// CHECK: fisubs (%edx)
// CHECK: encoding: [0xde,0x22]
fisubs (%edx)
// CHECK: fld1
// CHECK: encoding: [0xd9,0xe8]
fld1
// CHECK: fldcw -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0xac,0x82,0x10,0xe3,0x0f,0xe3]
fldcw -485498096(%edx,%eax,4)
// CHECK: fldcw 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0xac,0x82,0xf0,0x1c,0xf0,0x1c]
fldcw 485498096(%edx,%eax,4)
// CHECK: fldcw 485498096(%edx)
// CHECK: encoding: [0xd9,0xaa,0xf0,0x1c,0xf0,0x1c]
fldcw 485498096(%edx)
// CHECK: fldcw 485498096
// CHECK: encoding: [0xd9,0x2d,0xf0,0x1c,0xf0,0x1c]
fldcw 485498096
// CHECK: fldcw 64(%edx,%eax)
// CHECK: encoding: [0xd9,0x6c,0x02,0x40]
fldcw 64(%edx,%eax)
// CHECK: fldcw (%edx)
// CHECK: encoding: [0xd9,0x2a]
fldcw (%edx)
// CHECK: fldenv -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
fldenv -485498096(%edx,%eax,4)
// CHECK: fldenv 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
fldenv 485498096(%edx,%eax,4)
// CHECK: fldenv 485498096(%edx)
// CHECK: encoding: [0xd9,0xa2,0xf0,0x1c,0xf0,0x1c]
fldenv 485498096(%edx)
// CHECK: fldenv 485498096
// CHECK: encoding: [0xd9,0x25,0xf0,0x1c,0xf0,0x1c]
fldenv 485498096
// CHECK: fldenv 64(%edx,%eax)
// CHECK: encoding: [0xd9,0x64,0x02,0x40]
fldenv 64(%edx,%eax)
// CHECK: fldenv (%edx)
// CHECK: encoding: [0xd9,0x22]
fldenv (%edx)
// CHECK: fldl2e
// CHECK: encoding: [0xd9,0xea]
fldl2e
// CHECK: fldl2t
// CHECK: encoding: [0xd9,0xe9]
fldl2t
// CHECK: fldl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0x84,0x82,0x10,0xe3,0x0f,0xe3]
fldl -485498096(%edx,%eax,4)
// CHECK: fldl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0x84,0x82,0xf0,0x1c,0xf0,0x1c]
fldl 485498096(%edx,%eax,4)
// CHECK: fldl 485498096(%edx)
// CHECK: encoding: [0xdd,0x82,0xf0,0x1c,0xf0,0x1c]
fldl 485498096(%edx)
// CHECK: fldl 485498096
// CHECK: encoding: [0xdd,0x05,0xf0,0x1c,0xf0,0x1c]
fldl 485498096
// CHECK: fldl 64(%edx,%eax)
// CHECK: encoding: [0xdd,0x44,0x02,0x40]
fldl 64(%edx,%eax)
// CHECK: fldl (%edx)
// CHECK: encoding: [0xdd,0x02]
fldl (%edx)
// CHECK: fldlg2
// CHECK: encoding: [0xd9,0xec]
fldlg2
// CHECK: fldln2
// CHECK: encoding: [0xd9,0xed]
fldln2
// CHECK: fldpi
// CHECK: encoding: [0xd9,0xeb]
fldpi
// CHECK: flds -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0x84,0x82,0x10,0xe3,0x0f,0xe3]
flds -485498096(%edx,%eax,4)
// CHECK: flds 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0x84,0x82,0xf0,0x1c,0xf0,0x1c]
flds 485498096(%edx,%eax,4)
// CHECK: flds 485498096(%edx)
// CHECK: encoding: [0xd9,0x82,0xf0,0x1c,0xf0,0x1c]
flds 485498096(%edx)
// CHECK: flds 485498096
// CHECK: encoding: [0xd9,0x05,0xf0,0x1c,0xf0,0x1c]
flds 485498096
// CHECK: flds 64(%edx,%eax)
// CHECK: encoding: [0xd9,0x44,0x02,0x40]
flds 64(%edx,%eax)
// CHECK: flds (%edx)
// CHECK: encoding: [0xd9,0x02]
flds (%edx)
// CHECK: fld %st(4)
// CHECK: encoding: [0xd9,0xc4]
fld %st(4)
// CHECK: fldt -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0xac,0x82,0x10,0xe3,0x0f,0xe3]
fldt -485498096(%edx,%eax,4)
// CHECK: fldt 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0xac,0x82,0xf0,0x1c,0xf0,0x1c]
fldt 485498096(%edx,%eax,4)
// CHECK: fldt 485498096(%edx)
// CHECK: encoding: [0xdb,0xaa,0xf0,0x1c,0xf0,0x1c]
fldt 485498096(%edx)
// CHECK: fldt 485498096
// CHECK: encoding: [0xdb,0x2d,0xf0,0x1c,0xf0,0x1c]
fldt 485498096
// CHECK: fldt 64(%edx,%eax)
// CHECK: encoding: [0xdb,0x6c,0x02,0x40]
fldt 64(%edx,%eax)
// CHECK: fldt (%edx)
// CHECK: encoding: [0xdb,0x2a]
fldt (%edx)
// CHECK: fldz
// CHECK: encoding: [0xd9,0xee]
fldz
// CHECK: fmull -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
fmull -485498096(%edx,%eax,4)
// CHECK: fmull 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
fmull 485498096(%edx,%eax,4)
// CHECK: fmull 485498096(%edx)
// CHECK: encoding: [0xdc,0x8a,0xf0,0x1c,0xf0,0x1c]
fmull 485498096(%edx)
// CHECK: fmull 485498096
// CHECK: encoding: [0xdc,0x0d,0xf0,0x1c,0xf0,0x1c]
fmull 485498096
// CHECK: fmull 64(%edx,%eax)
// CHECK: encoding: [0xdc,0x4c,0x02,0x40]
fmull 64(%edx,%eax)
// CHECK: fmull (%edx)
// CHECK: encoding: [0xdc,0x0a]
fmull (%edx)
-// CHECK: fmulp %st(4)
+// CHECK: fmulp %st, %st(4)
// CHECK: encoding: [0xde,0xcc]
fmulp %st(4)
// CHECK: fmuls -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
fmuls -485498096(%edx,%eax,4)
// CHECK: fmuls 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
fmuls 485498096(%edx,%eax,4)
// CHECK: fmuls 485498096(%edx)
// CHECK: encoding: [0xd8,0x8a,0xf0,0x1c,0xf0,0x1c]
fmuls 485498096(%edx)
// CHECK: fmuls 485498096
// CHECK: encoding: [0xd8,0x0d,0xf0,0x1c,0xf0,0x1c]
fmuls 485498096
// CHECK: fmuls 64(%edx,%eax)
// CHECK: encoding: [0xd8,0x4c,0x02,0x40]
fmuls 64(%edx,%eax)
// CHECK: fmuls (%edx)
// CHECK: encoding: [0xd8,0x0a]
fmuls (%edx)
-// CHECK: fmul %st(0), %st(4)
+// CHECK: fmul %st, %st(4)
// CHECK: encoding: [0xdc,0xcc]
-fmul %st(0), %st(4)
+fmul %st, %st(4)
-// CHECK: fmul %st(4)
+// CHECK: fmul %st(4), %st
// CHECK: encoding: [0xd8,0xcc]
fmul %st(4)
// CHECK: fnclex
// CHECK: encoding: [0xdb,0xe2]
fnclex
// CHECK: fninit
// CHECK: encoding: [0xdb,0xe3]
fninit
// CHECK: fnop
// CHECK: encoding: [0xd9,0xd0]
fnop
// CHECK: fnsave -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0xb4,0x82,0x10,0xe3,0x0f,0xe3]
fnsave -485498096(%edx,%eax,4)
// CHECK: fnsave 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0xb4,0x82,0xf0,0x1c,0xf0,0x1c]
fnsave 485498096(%edx,%eax,4)
// CHECK: fnsave 485498096(%edx)
// CHECK: encoding: [0xdd,0xb2,0xf0,0x1c,0xf0,0x1c]
fnsave 485498096(%edx)
// CHECK: fnsave 485498096
// CHECK: encoding: [0xdd,0x35,0xf0,0x1c,0xf0,0x1c]
fnsave 485498096
// CHECK: fnsave 64(%edx,%eax)
// CHECK: encoding: [0xdd,0x74,0x02,0x40]
fnsave 64(%edx,%eax)
// CHECK: fnsave (%edx)
// CHECK: encoding: [0xdd,0x32]
fnsave (%edx)
// CHECK: fnstcw -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0xbc,0x82,0x10,0xe3,0x0f,0xe3]
fnstcw -485498096(%edx,%eax,4)
// CHECK: fnstcw 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0xbc,0x82,0xf0,0x1c,0xf0,0x1c]
fnstcw 485498096(%edx,%eax,4)
// CHECK: fnstcw 485498096(%edx)
// CHECK: encoding: [0xd9,0xba,0xf0,0x1c,0xf0,0x1c]
fnstcw 485498096(%edx)
// CHECK: fnstcw 485498096
// CHECK: encoding: [0xd9,0x3d,0xf0,0x1c,0xf0,0x1c]
fnstcw 485498096
// CHECK: fnstcw 64(%edx,%eax)
// CHECK: encoding: [0xd9,0x7c,0x02,0x40]
fnstcw 64(%edx,%eax)
// CHECK: fnstcw (%edx)
// CHECK: encoding: [0xd9,0x3a]
fnstcw (%edx)
// CHECK: fnstenv -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0xb4,0x82,0x10,0xe3,0x0f,0xe3]
fnstenv -485498096(%edx,%eax,4)
// CHECK: fnstenv 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0xb4,0x82,0xf0,0x1c,0xf0,0x1c]
fnstenv 485498096(%edx,%eax,4)
// CHECK: fnstenv 485498096(%edx)
// CHECK: encoding: [0xd9,0xb2,0xf0,0x1c,0xf0,0x1c]
fnstenv 485498096(%edx)
// CHECK: fnstenv 485498096
// CHECK: encoding: [0xd9,0x35,0xf0,0x1c,0xf0,0x1c]
fnstenv 485498096
// CHECK: fnstenv 64(%edx,%eax)
// CHECK: encoding: [0xd9,0x74,0x02,0x40]
fnstenv 64(%edx,%eax)
// CHECK: fnstenv (%edx)
// CHECK: encoding: [0xd9,0x32]
fnstenv (%edx)
// CHECK: fnstsw -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0xbc,0x82,0x10,0xe3,0x0f,0xe3]
fnstsw -485498096(%edx,%eax,4)
// CHECK: fnstsw 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0xbc,0x82,0xf0,0x1c,0xf0,0x1c]
fnstsw 485498096(%edx,%eax,4)
// CHECK: fnstsw 485498096(%edx)
// CHECK: encoding: [0xdd,0xba,0xf0,0x1c,0xf0,0x1c]
fnstsw 485498096(%edx)
// CHECK: fnstsw 485498096
// CHECK: encoding: [0xdd,0x3d,0xf0,0x1c,0xf0,0x1c]
fnstsw 485498096
// CHECK: fnstsw 64(%edx,%eax)
// CHECK: encoding: [0xdd,0x7c,0x02,0x40]
fnstsw 64(%edx,%eax)
// CHECK: fnstsw %ax
// CHECK: encoding: [0xdf,0xe0]
fnstsw %ax
// CHECK: fnstsw (%edx)
// CHECK: encoding: [0xdd,0x3a]
fnstsw (%edx)
// CHECK: fpatan
// CHECK: encoding: [0xd9,0xf3]
fpatan
// CHECK: fprem1
// CHECK: encoding: [0xd9,0xf5]
fprem1
// CHECK: fprem
// CHECK: encoding: [0xd9,0xf8]
fprem
// CHECK: fptan
// CHECK: encoding: [0xd9,0xf2]
fptan
// CHECK: frndint
// CHECK: encoding: [0xd9,0xfc]
frndint
// CHECK: frstor -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
frstor -485498096(%edx,%eax,4)
// CHECK: frstor 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
frstor 485498096(%edx,%eax,4)
// CHECK: frstor 485498096(%edx)
// CHECK: encoding: [0xdd,0xa2,0xf0,0x1c,0xf0,0x1c]
frstor 485498096(%edx)
// CHECK: frstor 485498096
// CHECK: encoding: [0xdd,0x25,0xf0,0x1c,0xf0,0x1c]
frstor 485498096
// CHECK: frstor 64(%edx,%eax)
// CHECK: encoding: [0xdd,0x64,0x02,0x40]
frstor 64(%edx,%eax)
// CHECK: frstor (%edx)
// CHECK: encoding: [0xdd,0x22]
frstor (%edx)
// CHECK: fscale
// CHECK: encoding: [0xd9,0xfd]
fscale
// CHECK: fsincos
// CHECK: encoding: [0xd9,0xfb]
fsincos
// CHECK: fsin
// CHECK: encoding: [0xd9,0xfe]
fsin
// CHECK: fsqrt
// CHECK: encoding: [0xd9,0xfa]
fsqrt
// CHECK: fstl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0x94,0x82,0x10,0xe3,0x0f,0xe3]
fstl -485498096(%edx,%eax,4)
// CHECK: fstl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0x94,0x82,0xf0,0x1c,0xf0,0x1c]
fstl 485498096(%edx,%eax,4)
// CHECK: fstl 485498096(%edx)
// CHECK: encoding: [0xdd,0x92,0xf0,0x1c,0xf0,0x1c]
fstl 485498096(%edx)
// CHECK: fstl 485498096
// CHECK: encoding: [0xdd,0x15,0xf0,0x1c,0xf0,0x1c]
fstl 485498096
// CHECK: fstl 64(%edx,%eax)
// CHECK: encoding: [0xdd,0x54,0x02,0x40]
fstl 64(%edx,%eax)
// CHECK: fstl (%edx)
// CHECK: encoding: [0xdd,0x12]
fstl (%edx)
// CHECK: fstpl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0x9c,0x82,0x10,0xe3,0x0f,0xe3]
fstpl -485498096(%edx,%eax,4)
// CHECK: fstpl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdd,0x9c,0x82,0xf0,0x1c,0xf0,0x1c]
fstpl 485498096(%edx,%eax,4)
// CHECK: fstpl 485498096(%edx)
// CHECK: encoding: [0xdd,0x9a,0xf0,0x1c,0xf0,0x1c]
fstpl 485498096(%edx)
// CHECK: fstpl 485498096
// CHECK: encoding: [0xdd,0x1d,0xf0,0x1c,0xf0,0x1c]
fstpl 485498096
// CHECK: fstpl 64(%edx,%eax)
// CHECK: encoding: [0xdd,0x5c,0x02,0x40]
fstpl 64(%edx,%eax)
// CHECK: fstpl (%edx)
// CHECK: encoding: [0xdd,0x1a]
fstpl (%edx)
// CHECK: fstps -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0x9c,0x82,0x10,0xe3,0x0f,0xe3]
fstps -485498096(%edx,%eax,4)
// CHECK: fstps 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0x9c,0x82,0xf0,0x1c,0xf0,0x1c]
fstps 485498096(%edx,%eax,4)
// CHECK: fstps 485498096(%edx)
// CHECK: encoding: [0xd9,0x9a,0xf0,0x1c,0xf0,0x1c]
fstps 485498096(%edx)
// CHECK: fstps 485498096
// CHECK: encoding: [0xd9,0x1d,0xf0,0x1c,0xf0,0x1c]
fstps 485498096
// CHECK: fstps 64(%edx,%eax)
// CHECK: encoding: [0xd9,0x5c,0x02,0x40]
fstps 64(%edx,%eax)
// CHECK: fstps (%edx)
// CHECK: encoding: [0xd9,0x1a]
fstps (%edx)
// CHECK: fstp %st(4)
// CHECK: encoding: [0xdd,0xdc]
fstp %st(4)
// CHECK: fstpt -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0xbc,0x82,0x10,0xe3,0x0f,0xe3]
fstpt -485498096(%edx,%eax,4)
// CHECK: fstpt 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdb,0xbc,0x82,0xf0,0x1c,0xf0,0x1c]
fstpt 485498096(%edx,%eax,4)
// CHECK: fstpt 485498096(%edx)
// CHECK: encoding: [0xdb,0xba,0xf0,0x1c,0xf0,0x1c]
fstpt 485498096(%edx)
// CHECK: fstpt 485498096
// CHECK: encoding: [0xdb,0x3d,0xf0,0x1c,0xf0,0x1c]
fstpt 485498096
// CHECK: fstpt 64(%edx,%eax)
// CHECK: encoding: [0xdb,0x7c,0x02,0x40]
fstpt 64(%edx,%eax)
// CHECK: fstpt (%edx)
// CHECK: encoding: [0xdb,0x3a]
fstpt (%edx)
// CHECK: fsts -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0x94,0x82,0x10,0xe3,0x0f,0xe3]
fsts -485498096(%edx,%eax,4)
// CHECK: fsts 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd9,0x94,0x82,0xf0,0x1c,0xf0,0x1c]
fsts 485498096(%edx,%eax,4)
// CHECK: fsts 485498096(%edx)
// CHECK: encoding: [0xd9,0x92,0xf0,0x1c,0xf0,0x1c]
fsts 485498096(%edx)
// CHECK: fsts 485498096
// CHECK: encoding: [0xd9,0x15,0xf0,0x1c,0xf0,0x1c]
fsts 485498096
// CHECK: fsts 64(%edx,%eax)
// CHECK: encoding: [0xd9,0x54,0x02,0x40]
fsts 64(%edx,%eax)
// CHECK: fsts (%edx)
// CHECK: encoding: [0xd9,0x12]
fsts (%edx)
// CHECK: fst %st(4)
// CHECK: encoding: [0xdd,0xd4]
fst %st(4)
// CHECK: fsubl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
fsubl -485498096(%edx,%eax,4)
// CHECK: fsubl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
fsubl 485498096(%edx,%eax,4)
// CHECK: fsubl 485498096(%edx)
// CHECK: encoding: [0xdc,0xa2,0xf0,0x1c,0xf0,0x1c]
fsubl 485498096(%edx)
// CHECK: fsubl 485498096
// CHECK: encoding: [0xdc,0x25,0xf0,0x1c,0xf0,0x1c]
fsubl 485498096
// CHECK: fsubl 64(%edx,%eax)
// CHECK: encoding: [0xdc,0x64,0x02,0x40]
fsubl 64(%edx,%eax)
// CHECK: fsubl (%edx)
// CHECK: encoding: [0xdc,0x22]
fsubl (%edx)
-// CHECK: fsubp %st(4)
+// CHECK: fsubp %st, %st(4)
// CHECK: encoding: [0xde,0xe4]
fsubp %st(4)
// CHECK: fsubrl -485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0xac,0x82,0x10,0xe3,0x0f,0xe3]
fsubrl -485498096(%edx,%eax,4)
// CHECK: fsubrl 485498096(%edx,%eax,4)
// CHECK: encoding: [0xdc,0xac,0x82,0xf0,0x1c,0xf0,0x1c]
fsubrl 485498096(%edx,%eax,4)
// CHECK: fsubrl 485498096(%edx)
// CHECK: encoding: [0xdc,0xaa,0xf0,0x1c,0xf0,0x1c]
fsubrl 485498096(%edx)
// CHECK: fsubrl 485498096
// CHECK: encoding: [0xdc,0x2d,0xf0,0x1c,0xf0,0x1c]
fsubrl 485498096
// CHECK: fsubrl 64(%edx,%eax)
// CHECK: encoding: [0xdc,0x6c,0x02,0x40]
fsubrl 64(%edx,%eax)
// CHECK: fsubrl (%edx)
// CHECK: encoding: [0xdc,0x2a]
fsubrl (%edx)
-// CHECK: fsubrp %st(4)
+// CHECK: fsubrp %st, %st(4)
// CHECK: encoding: [0xde,0xec]
fsubrp %st(4)
// CHECK: fsubrs -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0xac,0x82,0x10,0xe3,0x0f,0xe3]
fsubrs -485498096(%edx,%eax,4)
// CHECK: fsubrs 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0xac,0x82,0xf0,0x1c,0xf0,0x1c]
fsubrs 485498096(%edx,%eax,4)
// CHECK: fsubrs 485498096(%edx)
// CHECK: encoding: [0xd8,0xaa,0xf0,0x1c,0xf0,0x1c]
fsubrs 485498096(%edx)
// CHECK: fsubrs 485498096
// CHECK: encoding: [0xd8,0x2d,0xf0,0x1c,0xf0,0x1c]
fsubrs 485498096
// CHECK: fsubrs 64(%edx,%eax)
// CHECK: encoding: [0xd8,0x6c,0x02,0x40]
fsubrs 64(%edx,%eax)
// CHECK: fsubrs (%edx)
// CHECK: encoding: [0xd8,0x2a]
fsubrs (%edx)
-// CHECK: fsubr %st(0), %st(4)
+// CHECK: fsubr %st, %st(4)
// CHECK: encoding: [0xdc,0xec]
-fsubr %st(0), %st(4)
+fsubr %st, %st(4)
-// CHECK: fsubr %st(4)
+// CHECK: fsubr %st(4), %st
// CHECK: encoding: [0xd8,0xec]
fsubr %st(4)
// CHECK: fsubs -485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
fsubs -485498096(%edx,%eax,4)
// CHECK: fsubs 485498096(%edx,%eax,4)
// CHECK: encoding: [0xd8,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
fsubs 485498096(%edx,%eax,4)
// CHECK: fsubs 485498096(%edx)
// CHECK: encoding: [0xd8,0xa2,0xf0,0x1c,0xf0,0x1c]
fsubs 485498096(%edx)
// CHECK: fsubs 485498096
// CHECK: encoding: [0xd8,0x25,0xf0,0x1c,0xf0,0x1c]
fsubs 485498096
// CHECK: fsubs 64(%edx,%eax)
// CHECK: encoding: [0xd8,0x64,0x02,0x40]
fsubs 64(%edx,%eax)
// CHECK: fsubs (%edx)
// CHECK: encoding: [0xd8,0x22]
fsubs (%edx)
-// CHECK: fsub %st(0), %st(4)
+// CHECK: fsub %st, %st(4)
// CHECK: encoding: [0xdc,0xe4]
-fsub %st(0), %st(4)
+fsub %st, %st(4)
-// CHECK: fsub %st(4)
+// CHECK: fsub %st(4), %st
// CHECK: encoding: [0xd8,0xe4]
fsub %st(4)
// CHECK: ftst
// CHECK: encoding: [0xd9,0xe4]
ftst
// CHECK: fucompp
// CHECK: encoding: [0xda,0xe9]
fucompp
// CHECK: fucomp %st(4)
// CHECK: encoding: [0xdd,0xec]
fucomp %st(4)
// CHECK: fucom %st(4)
// CHECK: encoding: [0xdd,0xe4]
fucom %st(4)
// CHECK: fxam
// CHECK: encoding: [0xd9,0xe5]
fxam
// CHECK: fxch %st(4)
// CHECK: encoding: [0xd9,0xcc]
fxch %st(4)
// CHECK: fxtract
// CHECK: encoding: [0xd9,0xf4]
fxtract
// CHECK: fyl2x
// CHECK: encoding: [0xd9,0xf1]
fyl2x
// CHECK: fyl2xp1
// CHECK: encoding: [0xd9,0xf9]
fyl2xp1
// CHECK: wait
// CHECK: encoding: [0x9b]
wait
Index: vendor/llvm/dist-release_80/test/MC/X86/X87-64.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/X87-64.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/X87-64.s (revision 344166)
@@ -1,1618 +1,1618 @@
// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s
// CHECK: f2xm1
// CHECK: encoding: [0xd9,0xf0]
f2xm1
// CHECK: fabs
// CHECK: encoding: [0xd9,0xe1]
fabs
// CHECK: faddl 485498096
// CHECK: encoding: [0xdc,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
faddl 485498096
// CHECK: faddl 64(%rdx)
// CHECK: encoding: [0xdc,0x42,0x40]
faddl 64(%rdx)
// CHECK: faddl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x44,0x82,0xc0]
faddl -64(%rdx,%rax,4)
// CHECK: faddl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x44,0x82,0x40]
faddl 64(%rdx,%rax,4)
// CHECK: faddl 64(%rdx,%rax)
// CHECK: encoding: [0xdc,0x44,0x02,0x40]
faddl 64(%rdx,%rax)
// CHECK: faddl (%rdx)
// CHECK: encoding: [0xdc,0x02]
faddl (%rdx)
-// CHECK: faddp %st(4)
+// CHECK: faddp %st, %st(4)
// CHECK: encoding: [0xde,0xc4]
faddp %st(4)
// CHECK: fadds 485498096
// CHECK: encoding: [0xd8,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
fadds 485498096
// CHECK: fadds 64(%rdx)
// CHECK: encoding: [0xd8,0x42,0x40]
fadds 64(%rdx)
// CHECK: fadds -64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x44,0x82,0xc0]
fadds -64(%rdx,%rax,4)
// CHECK: fadds 64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x44,0x82,0x40]
fadds 64(%rdx,%rax,4)
// CHECK: fadds 64(%rdx,%rax)
// CHECK: encoding: [0xd8,0x44,0x02,0x40]
fadds 64(%rdx,%rax)
// CHECK: fadds (%rdx)
// CHECK: encoding: [0xd8,0x02]
fadds (%rdx)
-// CHECK: fadd %st(0), %st(4)
+// CHECK: fadd %st, %st(4)
// CHECK: encoding: [0xdc,0xc4]
-fadd %st(0), %st(4)
+fadd %st, %st(4)
-// CHECK: fadd %st(4)
+// CHECK: fadd %st(4), %st
// CHECK: encoding: [0xd8,0xc4]
fadd %st(4)
// CHECK: fbld 485498096
// CHECK: encoding: [0xdf,0x24,0x25,0xf0,0x1c,0xf0,0x1c]
fbld 485498096
// CHECK: fbld 64(%rdx)
// CHECK: encoding: [0xdf,0x62,0x40]
fbld 64(%rdx)
// CHECK: fbld -64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x64,0x82,0xc0]
fbld -64(%rdx,%rax,4)
// CHECK: fbld 64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x64,0x82,0x40]
fbld 64(%rdx,%rax,4)
// CHECK: fbld 64(%rdx,%rax)
// CHECK: encoding: [0xdf,0x64,0x02,0x40]
fbld 64(%rdx,%rax)
// CHECK: fbld (%rdx)
// CHECK: encoding: [0xdf,0x22]
fbld (%rdx)
// CHECK: fbstp 485498096
// CHECK: encoding: [0xdf,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
fbstp 485498096
// CHECK: fbstp 64(%rdx)
// CHECK: encoding: [0xdf,0x72,0x40]
fbstp 64(%rdx)
// CHECK: fbstp -64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x74,0x82,0xc0]
fbstp -64(%rdx,%rax,4)
// CHECK: fbstp 64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x74,0x82,0x40]
fbstp 64(%rdx,%rax,4)
// CHECK: fbstp 64(%rdx,%rax)
// CHECK: encoding: [0xdf,0x74,0x02,0x40]
fbstp 64(%rdx,%rax)
// CHECK: fbstp (%rdx)
// CHECK: encoding: [0xdf,0x32]
fbstp (%rdx)
// CHECK: fchs
// CHECK: encoding: [0xd9,0xe0]
fchs
// CHECK: fcoml 485498096
// CHECK: encoding: [0xdc,0x14,0x25,0xf0,0x1c,0xf0,0x1c]
fcoml 485498096
// CHECK: fcoml 64(%rdx)
// CHECK: encoding: [0xdc,0x52,0x40]
fcoml 64(%rdx)
// CHECK: fcoml -64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x54,0x82,0xc0]
fcoml -64(%rdx,%rax,4)
// CHECK: fcoml 64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x54,0x82,0x40]
fcoml 64(%rdx,%rax,4)
// CHECK: fcoml 64(%rdx,%rax)
// CHECK: encoding: [0xdc,0x54,0x02,0x40]
fcoml 64(%rdx,%rax)
// CHECK: fcoml (%rdx)
// CHECK: encoding: [0xdc,0x12]
fcoml (%rdx)
// CHECK: fcompl 485498096
// CHECK: encoding: [0xdc,0x1c,0x25,0xf0,0x1c,0xf0,0x1c]
fcompl 485498096
// CHECK: fcompl 64(%rdx)
// CHECK: encoding: [0xdc,0x5a,0x40]
fcompl 64(%rdx)
// CHECK: fcompl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x5c,0x82,0xc0]
fcompl -64(%rdx,%rax,4)
// CHECK: fcompl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x5c,0x82,0x40]
fcompl 64(%rdx,%rax,4)
// CHECK: fcompl 64(%rdx,%rax)
// CHECK: encoding: [0xdc,0x5c,0x02,0x40]
fcompl 64(%rdx,%rax)
// CHECK: fcompl (%rdx)
// CHECK: encoding: [0xdc,0x1a]
fcompl (%rdx)
// CHECK: fcompp
// CHECK: encoding: [0xde,0xd9]
fcompp
// CHECK: fcomps 485498096
// CHECK: encoding: [0xd8,0x1c,0x25,0xf0,0x1c,0xf0,0x1c]
fcomps 485498096
// CHECK: fcomps 64(%rdx)
// CHECK: encoding: [0xd8,0x5a,0x40]
fcomps 64(%rdx)
// CHECK: fcomps -64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x5c,0x82,0xc0]
fcomps -64(%rdx,%rax,4)
// CHECK: fcomps 64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x5c,0x82,0x40]
fcomps 64(%rdx,%rax,4)
// CHECK: fcomps 64(%rdx,%rax)
// CHECK: encoding: [0xd8,0x5c,0x02,0x40]
fcomps 64(%rdx,%rax)
// CHECK: fcomps (%rdx)
// CHECK: encoding: [0xd8,0x1a]
fcomps (%rdx)
// CHECK: fcomp %st(4)
// CHECK: encoding: [0xd8,0xdc]
fcomp %st(4)
// CHECK: fcoms 485498096
// CHECK: encoding: [0xd8,0x14,0x25,0xf0,0x1c,0xf0,0x1c]
fcoms 485498096
// CHECK: fcoms 64(%rdx)
// CHECK: encoding: [0xd8,0x52,0x40]
fcoms 64(%rdx)
// CHECK: fcoms -64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x54,0x82,0xc0]
fcoms -64(%rdx,%rax,4)
// CHECK: fcoms 64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x54,0x82,0x40]
fcoms 64(%rdx,%rax,4)
// CHECK: fcoms 64(%rdx,%rax)
// CHECK: encoding: [0xd8,0x54,0x02,0x40]
fcoms 64(%rdx,%rax)
// CHECK: fcoms (%rdx)
// CHECK: encoding: [0xd8,0x12]
fcoms (%rdx)
// CHECK: fcom %st(4)
// CHECK: encoding: [0xd8,0xd4]
fcom %st(4)
// CHECK: fcos
// CHECK: encoding: [0xd9,0xff]
fcos
// CHECK: fdecstp
// CHECK: encoding: [0xd9,0xf6]
fdecstp
// CHECK: fdivl 485498096
// CHECK: encoding: [0xdc,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
fdivl 485498096
// CHECK: fdivl 64(%rdx)
// CHECK: encoding: [0xdc,0x72,0x40]
fdivl 64(%rdx)
// CHECK: fdivl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x74,0x82,0xc0]
fdivl -64(%rdx,%rax,4)
// CHECK: fdivl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x74,0x82,0x40]
fdivl 64(%rdx,%rax,4)
// CHECK: fdivl 64(%rdx,%rax)
// CHECK: encoding: [0xdc,0x74,0x02,0x40]
fdivl 64(%rdx,%rax)
// CHECK: fdivl (%rdx)
// CHECK: encoding: [0xdc,0x32]
fdivl (%rdx)
-// CHECK: fdivp %st(4)
+// CHECK: fdivp %st, %st(4)
// CHECK: encoding: [0xde,0xf4]
fdivp %st(4)
// CHECK: fdivrl 485498096
// CHECK: encoding: [0xdc,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
fdivrl 485498096
// CHECK: fdivrl 64(%rdx)
// CHECK: encoding: [0xdc,0x7a,0x40]
fdivrl 64(%rdx)
// CHECK: fdivrl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x7c,0x82,0xc0]
fdivrl -64(%rdx,%rax,4)
// CHECK: fdivrl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x7c,0x82,0x40]
fdivrl 64(%rdx,%rax,4)
// CHECK: fdivrl 64(%rdx,%rax)
// CHECK: encoding: [0xdc,0x7c,0x02,0x40]
fdivrl 64(%rdx,%rax)
// CHECK: fdivrl (%rdx)
// CHECK: encoding: [0xdc,0x3a]
fdivrl (%rdx)
-// CHECK: fdivrp %st(4)
+// CHECK: fdivrp %st, %st(4)
// CHECK: encoding: [0xde,0xfc]
fdivrp %st(4)
// CHECK: fdivrs 485498096
// CHECK: encoding: [0xd8,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
fdivrs 485498096
// CHECK: fdivrs 64(%rdx)
// CHECK: encoding: [0xd8,0x7a,0x40]
fdivrs 64(%rdx)
// CHECK: fdivrs -64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x7c,0x82,0xc0]
fdivrs -64(%rdx,%rax,4)
// CHECK: fdivrs 64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x7c,0x82,0x40]
fdivrs 64(%rdx,%rax,4)
// CHECK: fdivrs 64(%rdx,%rax)
// CHECK: encoding: [0xd8,0x7c,0x02,0x40]
fdivrs 64(%rdx,%rax)
// CHECK: fdivrs (%rdx)
// CHECK: encoding: [0xd8,0x3a]
fdivrs (%rdx)
-// CHECK: fdivr %st(0), %st(4)
+// CHECK: fdivr %st, %st(4)
// CHECK: encoding: [0xdc,0xfc]
-fdivr %st(0), %st(4)
+fdivr %st, %st(4)
-// CHECK: fdivr %st(4)
+// CHECK: fdivr %st(4), %st
// CHECK: encoding: [0xd8,0xfc]
fdivr %st(4)
// CHECK: fdivs 485498096
// CHECK: encoding: [0xd8,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
fdivs 485498096
// CHECK: fdivs 64(%rdx)
// CHECK: encoding: [0xd8,0x72,0x40]
fdivs 64(%rdx)
// CHECK: fdivs -64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x74,0x82,0xc0]
fdivs -64(%rdx,%rax,4)
// CHECK: fdivs 64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x74,0x82,0x40]
fdivs 64(%rdx,%rax,4)
// CHECK: fdivs 64(%rdx,%rax)
// CHECK: encoding: [0xd8,0x74,0x02,0x40]
fdivs 64(%rdx,%rax)
// CHECK: fdivs (%rdx)
// CHECK: encoding: [0xd8,0x32]
fdivs (%rdx)
-// CHECK: fdiv %st(0), %st(4)
+// CHECK: fdiv %st, %st(4)
// CHECK: encoding: [0xdc,0xf4]
-fdiv %st(0), %st(4)
+fdiv %st, %st(4)
-// CHECK: fdiv %st(4)
+// CHECK: fdiv %st(4), %st
// CHECK: encoding: [0xd8,0xf4]
fdiv %st(4)
// CHECK: ffreep %st(4)
// CHECK: encoding: [0xdf,0xc4]
ffreep %st(4)
// CHECK: ffree %st(4)
// CHECK: encoding: [0xdd,0xc4]
ffree %st(4)
// CHECK: fiaddl 485498096
// CHECK: encoding: [0xda,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
fiaddl 485498096
// CHECK: fiaddl 64(%rdx)
// CHECK: encoding: [0xda,0x42,0x40]
fiaddl 64(%rdx)
// CHECK: fiaddl -64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x44,0x82,0xc0]
fiaddl -64(%rdx,%rax,4)
// CHECK: fiaddl 64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x44,0x82,0x40]
fiaddl 64(%rdx,%rax,4)
// CHECK: fiaddl 64(%rdx,%rax)
// CHECK: encoding: [0xda,0x44,0x02,0x40]
fiaddl 64(%rdx,%rax)
// CHECK: fiaddl (%rdx)
// CHECK: encoding: [0xda,0x02]
fiaddl (%rdx)
// CHECK: fiadds 485498096
// CHECK: encoding: [0xde,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
fiadds 485498096
// CHECK: fiadds 64(%rdx)
// CHECK: encoding: [0xde,0x42,0x40]
fiadds 64(%rdx)
// CHECK: fiadds -64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x44,0x82,0xc0]
fiadds -64(%rdx,%rax,4)
// CHECK: fiadds 64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x44,0x82,0x40]
fiadds 64(%rdx,%rax,4)
// CHECK: fiadds 64(%rdx,%rax)
// CHECK: encoding: [0xde,0x44,0x02,0x40]
fiadds 64(%rdx,%rax)
// CHECK: fiadds (%rdx)
// CHECK: encoding: [0xde,0x02]
fiadds (%rdx)
// CHECK: ficoml 485498096
// CHECK: encoding: [0xda,0x14,0x25,0xf0,0x1c,0xf0,0x1c]
ficoml 485498096
// CHECK: ficoml 64(%rdx)
// CHECK: encoding: [0xda,0x52,0x40]
ficoml 64(%rdx)
// CHECK: ficoml -64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x54,0x82,0xc0]
ficoml -64(%rdx,%rax,4)
// CHECK: ficoml 64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x54,0x82,0x40]
ficoml 64(%rdx,%rax,4)
// CHECK: ficoml 64(%rdx,%rax)
// CHECK: encoding: [0xda,0x54,0x02,0x40]
ficoml 64(%rdx,%rax)
// CHECK: ficoml (%rdx)
// CHECK: encoding: [0xda,0x12]
ficoml (%rdx)
// CHECK: ficompl 485498096
// CHECK: encoding: [0xda,0x1c,0x25,0xf0,0x1c,0xf0,0x1c]
ficompl 485498096
// CHECK: ficompl 64(%rdx)
// CHECK: encoding: [0xda,0x5a,0x40]
ficompl 64(%rdx)
// CHECK: ficompl -64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x5c,0x82,0xc0]
ficompl -64(%rdx,%rax,4)
// CHECK: ficompl 64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x5c,0x82,0x40]
ficompl 64(%rdx,%rax,4)
// CHECK: ficompl 64(%rdx,%rax)
// CHECK: encoding: [0xda,0x5c,0x02,0x40]
ficompl 64(%rdx,%rax)
// CHECK: ficompl (%rdx)
// CHECK: encoding: [0xda,0x1a]
ficompl (%rdx)
// CHECK: ficomps 485498096
// CHECK: encoding: [0xde,0x1c,0x25,0xf0,0x1c,0xf0,0x1c]
ficomps 485498096
// CHECK: ficomps 64(%rdx)
// CHECK: encoding: [0xde,0x5a,0x40]
ficomps 64(%rdx)
// CHECK: ficomps -64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x5c,0x82,0xc0]
ficomps -64(%rdx,%rax,4)
// CHECK: ficomps 64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x5c,0x82,0x40]
ficomps 64(%rdx,%rax,4)
// CHECK: ficomps 64(%rdx,%rax)
// CHECK: encoding: [0xde,0x5c,0x02,0x40]
ficomps 64(%rdx,%rax)
// CHECK: ficomps (%rdx)
// CHECK: encoding: [0xde,0x1a]
ficomps (%rdx)
// CHECK: ficoms 485498096
// CHECK: encoding: [0xde,0x14,0x25,0xf0,0x1c,0xf0,0x1c]
ficoms 485498096
// CHECK: ficoms 64(%rdx)
// CHECK: encoding: [0xde,0x52,0x40]
ficoms 64(%rdx)
// CHECK: ficoms -64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x54,0x82,0xc0]
ficoms -64(%rdx,%rax,4)
// CHECK: ficoms 64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x54,0x82,0x40]
ficoms 64(%rdx,%rax,4)
// CHECK: ficoms 64(%rdx,%rax)
// CHECK: encoding: [0xde,0x54,0x02,0x40]
ficoms 64(%rdx,%rax)
// CHECK: ficoms (%rdx)
// CHECK: encoding: [0xde,0x12]
ficoms (%rdx)
// CHECK: fidivl 485498096
// CHECK: encoding: [0xda,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
fidivl 485498096
// CHECK: fidivl 64(%rdx)
// CHECK: encoding: [0xda,0x72,0x40]
fidivl 64(%rdx)
// CHECK: fidivl -64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x74,0x82,0xc0]
fidivl -64(%rdx,%rax,4)
// CHECK: fidivl 64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x74,0x82,0x40]
fidivl 64(%rdx,%rax,4)
// CHECK: fidivl 64(%rdx,%rax)
// CHECK: encoding: [0xda,0x74,0x02,0x40]
fidivl 64(%rdx,%rax)
// CHECK: fidivl (%rdx)
// CHECK: encoding: [0xda,0x32]
fidivl (%rdx)
// CHECK: fidivrl 485498096
// CHECK: encoding: [0xda,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
fidivrl 485498096
// CHECK: fidivrl 64(%rdx)
// CHECK: encoding: [0xda,0x7a,0x40]
fidivrl 64(%rdx)
// CHECK: fidivrl -64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x7c,0x82,0xc0]
fidivrl -64(%rdx,%rax,4)
// CHECK: fidivrl 64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x7c,0x82,0x40]
fidivrl 64(%rdx,%rax,4)
// CHECK: fidivrl 64(%rdx,%rax)
// CHECK: encoding: [0xda,0x7c,0x02,0x40]
fidivrl 64(%rdx,%rax)
// CHECK: fidivrl (%rdx)
// CHECK: encoding: [0xda,0x3a]
fidivrl (%rdx)
// CHECK: fidivrs 485498096
// CHECK: encoding: [0xde,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
fidivrs 485498096
// CHECK: fidivrs 64(%rdx)
// CHECK: encoding: [0xde,0x7a,0x40]
fidivrs 64(%rdx)
// CHECK: fidivrs -64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x7c,0x82,0xc0]
fidivrs -64(%rdx,%rax,4)
// CHECK: fidivrs 64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x7c,0x82,0x40]
fidivrs 64(%rdx,%rax,4)
// CHECK: fidivrs 64(%rdx,%rax)
// CHECK: encoding: [0xde,0x7c,0x02,0x40]
fidivrs 64(%rdx,%rax)
// CHECK: fidivrs (%rdx)
// CHECK: encoding: [0xde,0x3a]
fidivrs (%rdx)
// CHECK: fidivs 485498096
// CHECK: encoding: [0xde,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
fidivs 485498096
// CHECK: fidivs 64(%rdx)
// CHECK: encoding: [0xde,0x72,0x40]
fidivs 64(%rdx)
// CHECK: fidivs -64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x74,0x82,0xc0]
fidivs -64(%rdx,%rax,4)
// CHECK: fidivs 64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x74,0x82,0x40]
fidivs 64(%rdx,%rax,4)
// CHECK: fidivs 64(%rdx,%rax)
// CHECK: encoding: [0xde,0x74,0x02,0x40]
fidivs 64(%rdx,%rax)
// CHECK: fidivs (%rdx)
// CHECK: encoding: [0xde,0x32]
fidivs (%rdx)
// CHECK: fildl 485498096
// CHECK: encoding: [0xdb,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
fildl 485498096
// CHECK: fildl 64(%rdx)
// CHECK: encoding: [0xdb,0x42,0x40]
fildl 64(%rdx)
// CHECK: fildl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x44,0x82,0xc0]
fildl -64(%rdx,%rax,4)
// CHECK: fildl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x44,0x82,0x40]
fildl 64(%rdx,%rax,4)
// CHECK: fildl 64(%rdx,%rax)
// CHECK: encoding: [0xdb,0x44,0x02,0x40]
fildl 64(%rdx,%rax)
// CHECK: fildll 485498096
// CHECK: encoding: [0xdf,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
fildll 485498096
// CHECK: fildll 64(%rdx)
// CHECK: encoding: [0xdf,0x6a,0x40]
fildll 64(%rdx)
// CHECK: fildll -64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x6c,0x82,0xc0]
fildll -64(%rdx,%rax,4)
// CHECK: fildll 64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x6c,0x82,0x40]
fildll 64(%rdx,%rax,4)
// CHECK: fildll 64(%rdx,%rax)
// CHECK: encoding: [0xdf,0x6c,0x02,0x40]
fildll 64(%rdx,%rax)
// CHECK: fildll (%rdx)
// CHECK: encoding: [0xdf,0x2a]
fildll (%rdx)
// CHECK: fildl (%rdx)
// CHECK: encoding: [0xdb,0x02]
fildl (%rdx)
// CHECK: filds 485498096
// CHECK: encoding: [0xdf,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
filds 485498096
// CHECK: filds 64(%rdx)
// CHECK: encoding: [0xdf,0x42,0x40]
filds 64(%rdx)
// CHECK: filds -64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x44,0x82,0xc0]
filds -64(%rdx,%rax,4)
// CHECK: filds 64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x44,0x82,0x40]
filds 64(%rdx,%rax,4)
// CHECK: filds 64(%rdx,%rax)
// CHECK: encoding: [0xdf,0x44,0x02,0x40]
filds 64(%rdx,%rax)
// CHECK: filds (%rdx)
// CHECK: encoding: [0xdf,0x02]
filds (%rdx)
// CHECK: fimull 485498096
// CHECK: encoding: [0xda,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
fimull 485498096
// CHECK: fimull 64(%rdx)
// CHECK: encoding: [0xda,0x4a,0x40]
fimull 64(%rdx)
// CHECK: fimull -64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x4c,0x82,0xc0]
fimull -64(%rdx,%rax,4)
// CHECK: fimull 64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x4c,0x82,0x40]
fimull 64(%rdx,%rax,4)
// CHECK: fimull 64(%rdx,%rax)
// CHECK: encoding: [0xda,0x4c,0x02,0x40]
fimull 64(%rdx,%rax)
// CHECK: fimull (%rdx)
// CHECK: encoding: [0xda,0x0a]
fimull (%rdx)
// CHECK: fimuls 485498096
// CHECK: encoding: [0xde,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
fimuls 485498096
// CHECK: fimuls 64(%rdx)
// CHECK: encoding: [0xde,0x4a,0x40]
fimuls 64(%rdx)
// CHECK: fimuls -64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x4c,0x82,0xc0]
fimuls -64(%rdx,%rax,4)
// CHECK: fimuls 64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x4c,0x82,0x40]
fimuls 64(%rdx,%rax,4)
// CHECK: fimuls 64(%rdx,%rax)
// CHECK: encoding: [0xde,0x4c,0x02,0x40]
fimuls 64(%rdx,%rax)
// CHECK: fimuls (%rdx)
// CHECK: encoding: [0xde,0x0a]
fimuls (%rdx)
// CHECK: fincstp
// CHECK: encoding: [0xd9,0xf7]
fincstp
// CHECK: fistl 485498096
// CHECK: encoding: [0xdb,0x14,0x25,0xf0,0x1c,0xf0,0x1c]
fistl 485498096
// CHECK: fistl 64(%rdx)
// CHECK: encoding: [0xdb,0x52,0x40]
fistl 64(%rdx)
// CHECK: fistl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x54,0x82,0xc0]
fistl -64(%rdx,%rax,4)
// CHECK: fistl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x54,0x82,0x40]
fistl 64(%rdx,%rax,4)
// CHECK: fistl 64(%rdx,%rax)
// CHECK: encoding: [0xdb,0x54,0x02,0x40]
fistl 64(%rdx,%rax)
// CHECK: fistl (%rdx)
// CHECK: encoding: [0xdb,0x12]
fistl (%rdx)
// CHECK: fistpl 485498096
// CHECK: encoding: [0xdb,0x1c,0x25,0xf0,0x1c,0xf0,0x1c]
fistpl 485498096
// CHECK: fistpl 64(%rdx)
// CHECK: encoding: [0xdb,0x5a,0x40]
fistpl 64(%rdx)
// CHECK: fistpl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x5c,0x82,0xc0]
fistpl -64(%rdx,%rax,4)
// CHECK: fistpl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x5c,0x82,0x40]
fistpl 64(%rdx,%rax,4)
// CHECK: fistpl 64(%rdx,%rax)
// CHECK: encoding: [0xdb,0x5c,0x02,0x40]
fistpl 64(%rdx,%rax)
// CHECK: fistpll 485498096
// CHECK: encoding: [0xdf,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
fistpll 485498096
// CHECK: fistpll 64(%rdx)
// CHECK: encoding: [0xdf,0x7a,0x40]
fistpll 64(%rdx)
// CHECK: fistpll -64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x7c,0x82,0xc0]
fistpll -64(%rdx,%rax,4)
// CHECK: fistpll 64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x7c,0x82,0x40]
fistpll 64(%rdx,%rax,4)
// CHECK: fistpll 64(%rdx,%rax)
// CHECK: encoding: [0xdf,0x7c,0x02,0x40]
fistpll 64(%rdx,%rax)
// CHECK: fistpll (%rdx)
// CHECK: encoding: [0xdf,0x3a]
fistpll (%rdx)
// CHECK: fistpl (%rdx)
// CHECK: encoding: [0xdb,0x1a]
fistpl (%rdx)
// CHECK: fistps 485498096
// CHECK: encoding: [0xdf,0x1c,0x25,0xf0,0x1c,0xf0,0x1c]
fistps 485498096
// CHECK: fistps 64(%rdx)
// CHECK: encoding: [0xdf,0x5a,0x40]
fistps 64(%rdx)
// CHECK: fistps -64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x5c,0x82,0xc0]
fistps -64(%rdx,%rax,4)
// CHECK: fistps 64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x5c,0x82,0x40]
fistps 64(%rdx,%rax,4)
// CHECK: fistps 64(%rdx,%rax)
// CHECK: encoding: [0xdf,0x5c,0x02,0x40]
fistps 64(%rdx,%rax)
// CHECK: fistps (%rdx)
// CHECK: encoding: [0xdf,0x1a]
fistps (%rdx)
// CHECK: fists 485498096
// CHECK: encoding: [0xdf,0x14,0x25,0xf0,0x1c,0xf0,0x1c]
fists 485498096
// CHECK: fists 64(%rdx)
// CHECK: encoding: [0xdf,0x52,0x40]
fists 64(%rdx)
// CHECK: fists -64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x54,0x82,0xc0]
fists -64(%rdx,%rax,4)
// CHECK: fists 64(%rdx,%rax,4)
// CHECK: encoding: [0xdf,0x54,0x82,0x40]
fists 64(%rdx,%rax,4)
// CHECK: fists 64(%rdx,%rax)
// CHECK: encoding: [0xdf,0x54,0x02,0x40]
fists 64(%rdx,%rax)
// CHECK: fists (%rdx)
// CHECK: encoding: [0xdf,0x12]
fists (%rdx)
// CHECK: fisubl 485498096
// CHECK: encoding: [0xda,0x24,0x25,0xf0,0x1c,0xf0,0x1c]
fisubl 485498096
// CHECK: fisubl 64(%rdx)
// CHECK: encoding: [0xda,0x62,0x40]
fisubl 64(%rdx)
// CHECK: fisubl -64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x64,0x82,0xc0]
fisubl -64(%rdx,%rax,4)
// CHECK: fisubl 64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x64,0x82,0x40]
fisubl 64(%rdx,%rax,4)
// CHECK: fisubl 64(%rdx,%rax)
// CHECK: encoding: [0xda,0x64,0x02,0x40]
fisubl 64(%rdx,%rax)
// CHECK: fisubl (%rdx)
// CHECK: encoding: [0xda,0x22]
fisubl (%rdx)
// CHECK: fisubrl 485498096
// CHECK: encoding: [0xda,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
fisubrl 485498096
// CHECK: fisubrl 64(%rdx)
// CHECK: encoding: [0xda,0x6a,0x40]
fisubrl 64(%rdx)
// CHECK: fisubrl -64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x6c,0x82,0xc0]
fisubrl -64(%rdx,%rax,4)
// CHECK: fisubrl 64(%rdx,%rax,4)
// CHECK: encoding: [0xda,0x6c,0x82,0x40]
fisubrl 64(%rdx,%rax,4)
// CHECK: fisubrl 64(%rdx,%rax)
// CHECK: encoding: [0xda,0x6c,0x02,0x40]
fisubrl 64(%rdx,%rax)
// CHECK: fisubrl (%rdx)
// CHECK: encoding: [0xda,0x2a]
fisubrl (%rdx)
// CHECK: fisubrs 485498096
// CHECK: encoding: [0xde,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
fisubrs 485498096
// CHECK: fisubrs 64(%rdx)
// CHECK: encoding: [0xde,0x6a,0x40]
fisubrs 64(%rdx)
// CHECK: fisubrs -64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x6c,0x82,0xc0]
fisubrs -64(%rdx,%rax,4)
// CHECK: fisubrs 64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x6c,0x82,0x40]
fisubrs 64(%rdx,%rax,4)
// CHECK: fisubrs 64(%rdx,%rax)
// CHECK: encoding: [0xde,0x6c,0x02,0x40]
fisubrs 64(%rdx,%rax)
// CHECK: fisubrs (%rdx)
// CHECK: encoding: [0xde,0x2a]
fisubrs (%rdx)
// CHECK: fisubs 485498096
// CHECK: encoding: [0xde,0x24,0x25,0xf0,0x1c,0xf0,0x1c]
fisubs 485498096
// CHECK: fisubs 64(%rdx)
// CHECK: encoding: [0xde,0x62,0x40]
fisubs 64(%rdx)
// CHECK: fisubs -64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x64,0x82,0xc0]
fisubs -64(%rdx,%rax,4)
// CHECK: fisubs 64(%rdx,%rax,4)
// CHECK: encoding: [0xde,0x64,0x82,0x40]
fisubs 64(%rdx,%rax,4)
// CHECK: fisubs 64(%rdx,%rax)
// CHECK: encoding: [0xde,0x64,0x02,0x40]
fisubs 64(%rdx,%rax)
// CHECK: fisubs (%rdx)
// CHECK: encoding: [0xde,0x22]
fisubs (%rdx)
// CHECK: fld1
// CHECK: encoding: [0xd9,0xe8]
fld1
// CHECK: fldcw 485498096
// CHECK: encoding: [0xd9,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
fldcw 485498096
// CHECK: fldcw 64(%rdx)
// CHECK: encoding: [0xd9,0x6a,0x40]
fldcw 64(%rdx)
// CHECK: fldcw -64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x6c,0x82,0xc0]
fldcw -64(%rdx,%rax,4)
// CHECK: fldcw 64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x6c,0x82,0x40]
fldcw 64(%rdx,%rax,4)
// CHECK: fldcw 64(%rdx,%rax)
// CHECK: encoding: [0xd9,0x6c,0x02,0x40]
fldcw 64(%rdx,%rax)
// CHECK: fldcw (%rdx)
// CHECK: encoding: [0xd9,0x2a]
fldcw (%rdx)
// CHECK: fldenv 485498096
// CHECK: encoding: [0xd9,0x24,0x25,0xf0,0x1c,0xf0,0x1c]
fldenv 485498096
// CHECK: fldenv 64(%rdx)
// CHECK: encoding: [0xd9,0x62,0x40]
fldenv 64(%rdx)
// CHECK: fldenv -64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x64,0x82,0xc0]
fldenv -64(%rdx,%rax,4)
// CHECK: fldenv 64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x64,0x82,0x40]
fldenv 64(%rdx,%rax,4)
// CHECK: fldenv 64(%rdx,%rax)
// CHECK: encoding: [0xd9,0x64,0x02,0x40]
fldenv 64(%rdx,%rax)
// CHECK: fldenv (%rdx)
// CHECK: encoding: [0xd9,0x22]
fldenv (%rdx)
// CHECK: fldl2e
// CHECK: encoding: [0xd9,0xea]
fldl2e
// CHECK: fldl2t
// CHECK: encoding: [0xd9,0xe9]
fldl2t
// CHECK: fldl 485498096
// CHECK: encoding: [0xdd,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
fldl 485498096
// CHECK: fldl 64(%rdx)
// CHECK: encoding: [0xdd,0x42,0x40]
fldl 64(%rdx)
// CHECK: fldl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x44,0x82,0xc0]
fldl -64(%rdx,%rax,4)
// CHECK: fldl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x44,0x82,0x40]
fldl 64(%rdx,%rax,4)
// CHECK: fldl 64(%rdx,%rax)
// CHECK: encoding: [0xdd,0x44,0x02,0x40]
fldl 64(%rdx,%rax)
// CHECK: fldlg2
// CHECK: encoding: [0xd9,0xec]
fldlg2
// CHECK: fldln2
// CHECK: encoding: [0xd9,0xed]
fldln2
// CHECK: fldl (%rdx)
// CHECK: encoding: [0xdd,0x02]
fldl (%rdx)
// CHECK: fldpi
// CHECK: encoding: [0xd9,0xeb]
fldpi
// CHECK: flds 485498096
// CHECK: encoding: [0xd9,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
flds 485498096
// CHECK: flds 64(%rdx)
// CHECK: encoding: [0xd9,0x42,0x40]
flds 64(%rdx)
// CHECK: flds -64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x44,0x82,0xc0]
flds -64(%rdx,%rax,4)
// CHECK: flds 64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x44,0x82,0x40]
flds 64(%rdx,%rax,4)
// CHECK: flds 64(%rdx,%rax)
// CHECK: encoding: [0xd9,0x44,0x02,0x40]
flds 64(%rdx,%rax)
// CHECK: flds (%rdx)
// CHECK: encoding: [0xd9,0x02]
flds (%rdx)
// CHECK: fld %st(4)
// CHECK: encoding: [0xd9,0xc4]
fld %st(4)
// CHECK: fldt 485498096
// CHECK: encoding: [0xdb,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
fldt 485498096
// CHECK: fldt 64(%rdx)
// CHECK: encoding: [0xdb,0x6a,0x40]
fldt 64(%rdx)
// CHECK: fldt -64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x6c,0x82,0xc0]
fldt -64(%rdx,%rax,4)
// CHECK: fldt 64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x6c,0x82,0x40]
fldt 64(%rdx,%rax,4)
// CHECK: fldt 64(%rdx,%rax)
// CHECK: encoding: [0xdb,0x6c,0x02,0x40]
fldt 64(%rdx,%rax)
// CHECK: fldt (%rdx)
// CHECK: encoding: [0xdb,0x2a]
fldt (%rdx)
// CHECK: fldz
// CHECK: encoding: [0xd9,0xee]
fldz
// CHECK: fmull 485498096
// CHECK: encoding: [0xdc,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
fmull 485498096
// CHECK: fmull 64(%rdx)
// CHECK: encoding: [0xdc,0x4a,0x40]
fmull 64(%rdx)
// CHECK: fmull -64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x4c,0x82,0xc0]
fmull -64(%rdx,%rax,4)
// CHECK: fmull 64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x4c,0x82,0x40]
fmull 64(%rdx,%rax,4)
// CHECK: fmull 64(%rdx,%rax)
// CHECK: encoding: [0xdc,0x4c,0x02,0x40]
fmull 64(%rdx,%rax)
// CHECK: fmull (%rdx)
// CHECK: encoding: [0xdc,0x0a]
fmull (%rdx)
-// CHECK: fmulp %st(4)
+// CHECK: fmulp %st, %st(4)
// CHECK: encoding: [0xde,0xcc]
fmulp %st(4)
// CHECK: fmuls 485498096
// CHECK: encoding: [0xd8,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
fmuls 485498096
// CHECK: fmuls 64(%rdx)
// CHECK: encoding: [0xd8,0x4a,0x40]
fmuls 64(%rdx)
// CHECK: fmuls -64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x4c,0x82,0xc0]
fmuls -64(%rdx,%rax,4)
// CHECK: fmuls 64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x4c,0x82,0x40]
fmuls 64(%rdx,%rax,4)
// CHECK: fmuls 64(%rdx,%rax)
// CHECK: encoding: [0xd8,0x4c,0x02,0x40]
fmuls 64(%rdx,%rax)
// CHECK: fmuls (%rdx)
// CHECK: encoding: [0xd8,0x0a]
fmuls (%rdx)
-// CHECK: fmul %st(0), %st(4)
+// CHECK: fmul %st, %st(4)
// CHECK: encoding: [0xdc,0xcc]
-fmul %st(0), %st(4)
+fmul %st, %st(4)
// CHECK: fmul %st(4)
// CHECK: encoding: [0xd8,0xcc]
fmul %st(4)
// CHECK: fnclex
// CHECK: encoding: [0xdb,0xe2]
fnclex
// CHECK: fninit
// CHECK: encoding: [0xdb,0xe3]
fninit
// CHECK: fnop
// CHECK: encoding: [0xd9,0xd0]
fnop
// CHECK: fnsave 485498096
// CHECK: encoding: [0xdd,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
fnsave 485498096
// CHECK: fnsave 64(%rdx)
// CHECK: encoding: [0xdd,0x72,0x40]
fnsave 64(%rdx)
// CHECK: fnsave -64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x74,0x82,0xc0]
fnsave -64(%rdx,%rax,4)
// CHECK: fnsave 64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x74,0x82,0x40]
fnsave 64(%rdx,%rax,4)
// CHECK: fnsave 64(%rdx,%rax)
// CHECK: encoding: [0xdd,0x74,0x02,0x40]
fnsave 64(%rdx,%rax)
// CHECK: fnsave (%rdx)
// CHECK: encoding: [0xdd,0x32]
fnsave (%rdx)
// CHECK: fnstcw 485498096
// CHECK: encoding: [0xd9,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
fnstcw 485498096
// CHECK: fnstcw 64(%rdx)
// CHECK: encoding: [0xd9,0x7a,0x40]
fnstcw 64(%rdx)
// CHECK: fnstcw -64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x7c,0x82,0xc0]
fnstcw -64(%rdx,%rax,4)
// CHECK: fnstcw 64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x7c,0x82,0x40]
fnstcw 64(%rdx,%rax,4)
// CHECK: fnstcw 64(%rdx,%rax)
// CHECK: encoding: [0xd9,0x7c,0x02,0x40]
fnstcw 64(%rdx,%rax)
// CHECK: fnstcw (%rdx)
// CHECK: encoding: [0xd9,0x3a]
fnstcw (%rdx)
// CHECK: fnstenv 485498096
// CHECK: encoding: [0xd9,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
fnstenv 485498096
// CHECK: fnstenv 64(%rdx)
// CHECK: encoding: [0xd9,0x72,0x40]
fnstenv 64(%rdx)
// CHECK: fnstenv -64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x74,0x82,0xc0]
fnstenv -64(%rdx,%rax,4)
// CHECK: fnstenv 64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x74,0x82,0x40]
fnstenv 64(%rdx,%rax,4)
// CHECK: fnstenv 64(%rdx,%rax)
// CHECK: encoding: [0xd9,0x74,0x02,0x40]
fnstenv 64(%rdx,%rax)
// CHECK: fnstenv (%rdx)
// CHECK: encoding: [0xd9,0x32]
fnstenv (%rdx)
// CHECK: fnstsw 485498096
// CHECK: encoding: [0xdd,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
fnstsw 485498096
// CHECK: fnstsw 64(%rdx)
// CHECK: encoding: [0xdd,0x7a,0x40]
fnstsw 64(%rdx)
// CHECK: fnstsw -64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x7c,0x82,0xc0]
fnstsw -64(%rdx,%rax,4)
// CHECK: fnstsw 64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x7c,0x82,0x40]
fnstsw 64(%rdx,%rax,4)
// CHECK: fnstsw 64(%rdx,%rax)
// CHECK: encoding: [0xdd,0x7c,0x02,0x40]
fnstsw 64(%rdx,%rax)
// CHECK: fnstsw %ax
// CHECK: encoding: [0xdf,0xe0]
fnstsw %ax
// CHECK: fnstsw (%rdx)
// CHECK: encoding: [0xdd,0x3a]
fnstsw (%rdx)
// CHECK: fpatan
// CHECK: encoding: [0xd9,0xf3]
fpatan
// CHECK: fprem1
// CHECK: encoding: [0xd9,0xf5]
fprem1
// CHECK: fprem
// CHECK: encoding: [0xd9,0xf8]
fprem
// CHECK: fptan
// CHECK: encoding: [0xd9,0xf2]
fptan
// CHECK: frndint
// CHECK: encoding: [0xd9,0xfc]
frndint
// CHECK: frstor 485498096
// CHECK: encoding: [0xdd,0x24,0x25,0xf0,0x1c,0xf0,0x1c]
frstor 485498096
// CHECK: frstor 64(%rdx)
// CHECK: encoding: [0xdd,0x62,0x40]
frstor 64(%rdx)
// CHECK: frstor -64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x64,0x82,0xc0]
frstor -64(%rdx,%rax,4)
// CHECK: frstor 64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x64,0x82,0x40]
frstor 64(%rdx,%rax,4)
// CHECK: frstor 64(%rdx,%rax)
// CHECK: encoding: [0xdd,0x64,0x02,0x40]
frstor 64(%rdx,%rax)
// CHECK: frstor (%rdx)
// CHECK: encoding: [0xdd,0x22]
frstor (%rdx)
// CHECK: fscale
// CHECK: encoding: [0xd9,0xfd]
fscale
// CHECK: fsincos
// CHECK: encoding: [0xd9,0xfb]
fsincos
// CHECK: fsin
// CHECK: encoding: [0xd9,0xfe]
fsin
// CHECK: fsqrt
// CHECK: encoding: [0xd9,0xfa]
fsqrt
// CHECK: fstl 485498096
// CHECK: encoding: [0xdd,0x14,0x25,0xf0,0x1c,0xf0,0x1c]
fstl 485498096
// CHECK: fstl 64(%rdx)
// CHECK: encoding: [0xdd,0x52,0x40]
fstl 64(%rdx)
// CHECK: fstl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x54,0x82,0xc0]
fstl -64(%rdx,%rax,4)
// CHECK: fstl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x54,0x82,0x40]
fstl 64(%rdx,%rax,4)
// CHECK: fstl 64(%rdx,%rax)
// CHECK: encoding: [0xdd,0x54,0x02,0x40]
fstl 64(%rdx,%rax)
// CHECK: fstl (%rdx)
// CHECK: encoding: [0xdd,0x12]
fstl (%rdx)
// CHECK: fstpl 485498096
// CHECK: encoding: [0xdd,0x1c,0x25,0xf0,0x1c,0xf0,0x1c]
fstpl 485498096
// CHECK: fstpl 64(%rdx)
// CHECK: encoding: [0xdd,0x5a,0x40]
fstpl 64(%rdx)
// CHECK: fstpl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x5c,0x82,0xc0]
fstpl -64(%rdx,%rax,4)
// CHECK: fstpl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdd,0x5c,0x82,0x40]
fstpl 64(%rdx,%rax,4)
// CHECK: fstpl 64(%rdx,%rax)
// CHECK: encoding: [0xdd,0x5c,0x02,0x40]
fstpl 64(%rdx,%rax)
// CHECK: fstpl (%rdx)
// CHECK: encoding: [0xdd,0x1a]
fstpl (%rdx)
// CHECK: fstps 485498096
// CHECK: encoding: [0xd9,0x1c,0x25,0xf0,0x1c,0xf0,0x1c]
fstps 485498096
// CHECK: fstps 64(%rdx)
// CHECK: encoding: [0xd9,0x5a,0x40]
fstps 64(%rdx)
// CHECK: fstps -64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x5c,0x82,0xc0]
fstps -64(%rdx,%rax,4)
// CHECK: fstps 64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x5c,0x82,0x40]
fstps 64(%rdx,%rax,4)
// CHECK: fstps 64(%rdx,%rax)
// CHECK: encoding: [0xd9,0x5c,0x02,0x40]
fstps 64(%rdx,%rax)
// CHECK: fstps (%rdx)
// CHECK: encoding: [0xd9,0x1a]
fstps (%rdx)
// CHECK: fstp %st(4)
// CHECK: encoding: [0xdd,0xdc]
fstp %st(4)
// CHECK: fstpt 485498096
// CHECK: encoding: [0xdb,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
fstpt 485498096
// CHECK: fstpt 64(%rdx)
// CHECK: encoding: [0xdb,0x7a,0x40]
fstpt 64(%rdx)
// CHECK: fstpt -64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x7c,0x82,0xc0]
fstpt -64(%rdx,%rax,4)
// CHECK: fstpt 64(%rdx,%rax,4)
// CHECK: encoding: [0xdb,0x7c,0x82,0x40]
fstpt 64(%rdx,%rax,4)
// CHECK: fstpt 64(%rdx,%rax)
// CHECK: encoding: [0xdb,0x7c,0x02,0x40]
fstpt 64(%rdx,%rax)
// CHECK: fstpt (%rdx)
// CHECK: encoding: [0xdb,0x3a]
fstpt (%rdx)
// CHECK: fsts 485498096
// CHECK: encoding: [0xd9,0x14,0x25,0xf0,0x1c,0xf0,0x1c]
fsts 485498096
// CHECK: fsts 64(%rdx)
// CHECK: encoding: [0xd9,0x52,0x40]
fsts 64(%rdx)
// CHECK: fsts -64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x54,0x82,0xc0]
fsts -64(%rdx,%rax,4)
// CHECK: fsts 64(%rdx,%rax,4)
// CHECK: encoding: [0xd9,0x54,0x82,0x40]
fsts 64(%rdx,%rax,4)
// CHECK: fsts 64(%rdx,%rax)
// CHECK: encoding: [0xd9,0x54,0x02,0x40]
fsts 64(%rdx,%rax)
// CHECK: fsts (%rdx)
// CHECK: encoding: [0xd9,0x12]
fsts (%rdx)
// CHECK: fst %st(4)
// CHECK: encoding: [0xdd,0xd4]
fst %st(4)
// CHECK: fsubl 485498096
// CHECK: encoding: [0xdc,0x24,0x25,0xf0,0x1c,0xf0,0x1c]
fsubl 485498096
// CHECK: fsubl 64(%rdx)
// CHECK: encoding: [0xdc,0x62,0x40]
fsubl 64(%rdx)
// CHECK: fsubl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x64,0x82,0xc0]
fsubl -64(%rdx,%rax,4)
// CHECK: fsubl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x64,0x82,0x40]
fsubl 64(%rdx,%rax,4)
// CHECK: fsubl 64(%rdx,%rax)
// CHECK: encoding: [0xdc,0x64,0x02,0x40]
fsubl 64(%rdx,%rax)
// CHECK: fsubl (%rdx)
// CHECK: encoding: [0xdc,0x22]
fsubl (%rdx)
-// CHECK: fsubp %st(4)
+// CHECK: fsubp %st, %st(4)
// CHECK: encoding: [0xde,0xe4]
fsubp %st(4)
// CHECK: fsubrl 485498096
// CHECK: encoding: [0xdc,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
fsubrl 485498096
// CHECK: fsubrl 64(%rdx)
// CHECK: encoding: [0xdc,0x6a,0x40]
fsubrl 64(%rdx)
// CHECK: fsubrl -64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x6c,0x82,0xc0]
fsubrl -64(%rdx,%rax,4)
// CHECK: fsubrl 64(%rdx,%rax,4)
// CHECK: encoding: [0xdc,0x6c,0x82,0x40]
fsubrl 64(%rdx,%rax,4)
// CHECK: fsubrl 64(%rdx,%rax)
// CHECK: encoding: [0xdc,0x6c,0x02,0x40]
fsubrl 64(%rdx,%rax)
// CHECK: fsubrl (%rdx)
// CHECK: encoding: [0xdc,0x2a]
fsubrl (%rdx)
-// CHECK: fsubrp %st(4)
+// CHECK: fsubrp %st, %st(4)
// CHECK: encoding: [0xde,0xec]
fsubrp %st(4)
// CHECK: fsubrs 485498096
// CHECK: encoding: [0xd8,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
fsubrs 485498096
// CHECK: fsubrs 64(%rdx)
// CHECK: encoding: [0xd8,0x6a,0x40]
fsubrs 64(%rdx)
// CHECK: fsubrs -64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x6c,0x82,0xc0]
fsubrs -64(%rdx,%rax,4)
// CHECK: fsubrs 64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x6c,0x82,0x40]
fsubrs 64(%rdx,%rax,4)
// CHECK: fsubrs 64(%rdx,%rax)
// CHECK: encoding: [0xd8,0x6c,0x02,0x40]
fsubrs 64(%rdx,%rax)
// CHECK: fsubrs (%rdx)
// CHECK: encoding: [0xd8,0x2a]
fsubrs (%rdx)
-// CHECK: fsubr %st(0), %st(4)
+// CHECK: fsubr %st, %st(4)
// CHECK: encoding: [0xdc,0xec]
-fsubr %st(0), %st(4)
+fsubr %st, %st(4)
-// CHECK: fsubr %st(4)
+// CHECK: fsubr %st(4), %st
// CHECK: encoding: [0xd8,0xec]
fsubr %st(4)
// CHECK: fsubs 485498096
// CHECK: encoding: [0xd8,0x24,0x25,0xf0,0x1c,0xf0,0x1c]
fsubs 485498096
// CHECK: fsubs 64(%rdx)
// CHECK: encoding: [0xd8,0x62,0x40]
fsubs 64(%rdx)
// CHECK: fsubs -64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x64,0x82,0xc0]
fsubs -64(%rdx,%rax,4)
// CHECK: fsubs 64(%rdx,%rax,4)
// CHECK: encoding: [0xd8,0x64,0x82,0x40]
fsubs 64(%rdx,%rax,4)
// CHECK: fsubs 64(%rdx,%rax)
// CHECK: encoding: [0xd8,0x64,0x02,0x40]
fsubs 64(%rdx,%rax)
// CHECK: fsubs (%rdx)
// CHECK: encoding: [0xd8,0x22]
fsubs (%rdx)
-// CHECK: fsub %st(0), %st(4)
+// CHECK: fsub %st, %st(4)
// CHECK: encoding: [0xdc,0xe4]
-fsub %st(0), %st(4)
+fsub %st, %st(4)
-// CHECK: fsub %st(4)
+// CHECK: fsub %st(4), %st
// CHECK: encoding: [0xd8,0xe4]
fsub %st(4)
// CHECK: ftst
// CHECK: encoding: [0xd9,0xe4]
ftst
// CHECK: fucompp
// CHECK: encoding: [0xda,0xe9]
fucompp
// CHECK: fucomp %st(4)
// CHECK: encoding: [0xdd,0xec]
fucomp %st(4)
// CHECK: fucom %st(4)
// CHECK: encoding: [0xdd,0xe4]
fucom %st(4)
// CHECK: fxam
// CHECK: encoding: [0xd9,0xe5]
fxam
// CHECK: fxch %st(4)
// CHECK: encoding: [0xd9,0xcc]
fxch %st(4)
// CHECK: fxtract
// CHECK: encoding: [0xd9,0xf4]
fxtract
// CHECK: fyl2x
// CHECK: encoding: [0xd9,0xf1]
fyl2x
// CHECK: fyl2xp1
// CHECK: encoding: [0xd9,0xf9]
fyl2xp1
// CHECK: wait
// CHECK: encoding: [0x9b]
wait
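The CHECK updates in the two X87 test files above track the printer's new explicit two-operand spelling for x87 stack arithmetic. A reduced, standalone sketch in the same style — a hypothetical file that is not part of the vendor tree, reusing only the x86_64 RUN invocation, instructions, and encodings already shown above:
// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s
// Popping form written with a single operand: expected to print with the
// implicit %st spelled out as the first (source) operand.
// CHECK: faddp %st, %st(4)
// CHECK: encoding: [0xde,0xc4]
faddp %st(4)
// Non-popping single-operand form: expected to print %st as the explicit
// destination operand.
// CHECK: fsub %st(4), %st
// CHECK: encoding: [0xd8,0xe4]
fsub %st(4)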
Index: vendor/llvm/dist-release_80/test/MC/X86/intel-syntax-2.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/intel-syntax-2.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/intel-syntax-2.s (revision 344166)
@@ -1,31 +1,31 @@
// RUN: llvm-mc -triple x86_64-unknown-unknown -x86-asm-syntax=att %s | FileCheck %s
.intel_syntax
_test:
// CHECK: movl $257, -4(%rsp)
mov DWORD PTR [RSP - 4], 257
.att_syntax
// CHECK: movl $257, -4(%rsp)
movl $257, -4(%rsp)
_test2:
.intel_syntax noprefix
mov DWORD PTR [RSP - 4], 255
// CHECK: movl $255, -4(%rsp)
.att_syntax prefix
movl $255, -4(%rsp)
// CHECK: movl $255, -4(%rsp)
_test3:
fadd
-// CHECK: faddp %st(1)
+// CHECK: faddp %st, %st(1)
fmul
-// CHECK: fmulp %st(1)
+// CHECK: fmulp %st, %st(1)
fsub
-// CHECK: fsubp %st(1)
+// CHECK: fsubp %st, %st(1)
fsubr
-// CHECK: fsubrp %st(1)
+// CHECK: fsubrp %st, %st(1)
fdiv
-// CHECK: fdivp %st(1)
+// CHECK: fdivp %st, %st(1)
fdivr
-// CHECK: fdivrp %st(1)
+// CHECK: fdivrp %st, %st(1)
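The bare x87 mnemonics exercised above (fadd, fmul, fsub, fsubr, fdiv, fdivr) assemble as the popping forms acting on %st(1), and the updated checks show them printed with both operands explicit. A minimal standalone sketch of the same behavior — a hypothetical file, reusing the RUN invocation from the test above:
// RUN: llvm-mc -triple x86_64-unknown-unknown -x86-asm-syntax=att %s | FileCheck %s
// Bare mnemonic: assembles as the popping form on %st(1) and prints with the
// implicit operands made explicit.
fadd
// CHECK: faddp %st, %st(1)
fdivr
// CHECK: fdivrp %st, %st(1)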
Index: vendor/llvm/dist-release_80/test/MC/X86/intel-syntax.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/intel-syntax.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/intel-syntax.s (revision 344166)
@@ -1,904 +1,904 @@
// RUN: llvm-mc -triple x86_64-unknown-unknown -x86-asm-syntax=intel %s > %t 2> %t.err
// RUN: FileCheck < %t %s
// RUN: FileCheck --check-prefix=CHECK-STDERR < %t.err %s
_test:
xor EAX, EAX
ret
.set number, 8
.global _foo
.text
.global main
main:
// CHECK: leaq _foo(%rbx,%rax,8), %rdx
lea RDX, [8 * RAX + RBX + _foo]
// CHECK: leaq _foo(%rbx,%rax,8), %rdx
lea RDX, [_foo + 8 * RAX + RBX]
// CHECK: leaq 8(%rcx,%rax,8), %rdx
lea RDX, [8 + RAX * 8 + RCX]
// CHECK: leaq 8(%rcx,%rax,8), %rdx
lea RDX, [number + 8 * RAX + RCX]
// CHECK: leaq _foo(,%rax,8), %rdx
lea RDX, [_foo + RAX * 8]
// CHECK: leaq _foo(%rbx,%rax,8), %rdx
lea RDX, [_foo + RAX * 8 + RBX]
// CHECK: leaq -8(%rax), %rdx
lea RDX, [RAX - number]
// CHECK: leaq -8(%rax), %rdx
lea RDX, [RAX - 8]
// CHECK: leaq _foo(%rax), %rdx
lea RDX, [RAX + _foo]
// CHECK: leaq 8(%rax), %rdx
lea RDX, [RAX + number]
// CHECK: leaq 8(%rax), %rdx
lea RDX, [RAX + 8]
// CHECK: leaq _foo(%rbx,%rax,8), %rdx
lea RDX, [RAX * number + RBX + _foo]
// CHECK: leaq _foo(%rbx,%rax,8), %rdx
lea RDX, [_foo + RAX * number + RBX]
// CHECK: leaq 8(%rcx,%rax,8), %rdx
lea RDX, [number + RAX * number + RCX]
// CHECK: leaq _foo(,%rax,8), %rdx
lea RDX, [_foo + RAX * number]
// CHECK: leaq _foo(%rbx,%rax,8), %rdx
lea RDX, [number * RAX + RBX + _foo]
// CHECK: leaq _foo(%rbx,%rax,8), %rdx
lea RDX, [_foo + number * RAX + RBX]
// CHECK: leaq 8(%rcx,%rax,8), %rdx
lea RDX, [8 + number * RAX + RCX]
// CHECK: leaq _foo(%rax), %rdx
lea RDX, [_foo + RAX]
// CHECK: leaq 8(%rax), %rdx
lea RDX, [number + RAX]
// CHECK: leaq 8(%rax), %rdx
lea RDX, [8 + RAX]
// CHECK: lcalll *(%rax)
call FWORD ptr [rax]
// CHECK: lcalll *(%rax)
lcall [rax]
// CHECK: ljmpl *(%rax)
jmp FWORD ptr [rax]
// CHECK: ljmpq *(%rax)
ljmp [rax]
// CHECK: movl $257, -4(%rsp)
mov DWORD PTR [RSP - 4], 257
// CHECK: movl $258, 4(%rsp)
mov DWORD PTR [RSP + 4], 258
// CHECK: movq $123, -16(%rsp)
mov QWORD PTR [RSP - 16], 123
// CHECK: movb $97, -17(%rsp)
mov BYTE PTR [RSP - 17], 97
// CHECK: movl -4(%rsp), %eax
mov EAX, DWORD PTR [RSP - 4]
// CHECK: movq (%rsp), %rax
mov RAX, QWORD PTR [RSP]
// CHECK: movabsq $4294967289, %rax
mov RAX, 4294967289
// CHECK: movl $-4, -4(%rsp)
mov DWORD PTR [RSP - 4], -4
// CHECK: movq 0, %rcx
mov RCX, QWORD PTR [0]
// CHECK: movl -24(%rsp,%rax,4), %eax
mov EAX, DWORD PTR [RSP + 4*RAX - 24]
// CHECK: movb %dil, (%rdx,%rcx)
mov BYTE PTR [RDX + RCX], DIL
// CHECK: movzwl 2(%rcx), %edi
movzx EDI, WORD PTR [RCX + 2]
// CHECK: callq _test
call _test
// CHECK: andw $12, %ax
and ax, 12
// CHECK: andw $-12, %ax
and ax, -12
// CHECK: andw $257, %ax
and ax, 257
// CHECK: andw $-257, %ax
and ax, -257
// CHECK: andl $12, %eax
and eax, 12
// CHECK: andl $-12, %eax
and eax, -12
// CHECK: andl $257, %eax
and eax, 257
// CHECK: andl $-257, %eax
and eax, -257
// CHECK: andq $12, %rax
and rax, 12
// CHECK: andq $-12, %rax
and rax, -12
// CHECK: andq $257, %rax
and rax, 257
// CHECK: andq $-257, %rax
and rax, -257
// CHECK: fld %st(0)
fld ST(0)
// CHECK: movl %fs:(%rdi), %eax
mov EAX, DWORD PTR FS:[RDI]
// CHECK: leal (,%rdi,4), %r8d
lea R8D, DWORD PTR [4*RDI]
// CHECK: movl _fnan(,%ecx,4), %ecx
mov ECX, DWORD PTR [4*ECX + _fnan]
// CHECK: movq %fs:320, %rax
mov RAX, QWORD PTR FS:[320]
// CHECK: movq %fs:320, %rax
mov RAX, QWORD PTR FS:320
// CHECK: movq %rax, %fs:320
mov QWORD PTR FS:320, RAX
// CHECK: movq %rax, %fs:20(%rbx)
mov QWORD PTR FS:20[rbx], RAX
// CHECK: vshufpd $1, %xmm2, %xmm1, %xmm0
vshufpd XMM0, XMM1, XMM2, 1
// CHECK: vpgatherdd %xmm8, (%r15,%xmm9,2), %xmm1
vpgatherdd XMM10, XMMWORD PTR [R15 + 2*XMM9], XMM8
// CHECK: movsd -8, %xmm5
movsd XMM5, QWORD PTR [-8]
// CHECK: movsl (%rsi), %es:(%rdi)
movsd
// CHECK: movl %ecx, (%eax)
mov [eax], ecx
// CHECK: movl %ecx, (,%ebx,4)
mov [4*ebx], ecx
// CHECK: movl %ecx, (,%ebx,4)
mov [ebx*4], ecx
// CHECK: movl %ecx, 1024
mov [1024], ecx
// CHECK: movl %ecx, 4132
mov [0x1024], ecx
// CHECK: movl %ecx, 32
mov [16 + 16], ecx
// CHECK: movl %ecx, 0
mov [16 - 16], ecx
// CHECK: movl %ecx, 32
mov [16][16], ecx
// CHECK: movl %ecx, (%eax,%ebx,4)
mov [eax + 4*ebx], ecx
// CHECK: movl %ecx, (%eax,%ebx,4)
mov [eax + ebx*4], ecx
// CHECK: movl %ecx, (%eax,%ebx,4)
mov [4*ebx + eax], ecx
// CHECK: movl %ecx, (%eax,%ebx,4)
mov [ebx*4 + eax], ecx
// CHECK: movl %ecx, (%eax,%ebx,4)
mov [eax][4*ebx], ecx
// CHECK: movl %ecx, (%eax,%ebx,4)
mov [eax][ebx*4], ecx
// CHECK: movl %ecx, (%eax,%ebx,4)
mov [4*ebx][eax], ecx
// CHECK: movl %ecx, (%eax,%ebx,4)
mov [ebx*4][eax], ecx
// CHECK: movl %ecx, 12(%eax)
mov [eax + 12], ecx
// CHECK: movl %ecx, 12(%eax)
mov [12 + eax], ecx
// CHECK: movl %ecx, 32(%eax)
mov [eax + 16 + 16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [16 + eax + 16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [16 + 16 + eax], ecx
// CHECK: movl %ecx, 12(%eax)
mov [eax][12], ecx
// CHECK: movl %ecx, 12(%eax)
mov [12][eax], ecx
// CHECK: movl %ecx, 32(%eax)
mov [eax][16 + 16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [eax + 16][16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [eax][16][16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [16][eax + 16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [16 + eax][16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [16][16 + eax], ecx
// CHECK: movl %ecx, 32(%eax)
mov [16 + 16][eax], ecx
// CHECK: movl %ecx, 32(%eax)
mov [eax][16][16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [16][eax][16], ecx
// CHECK: movl %ecx, 32(%eax)
mov [16][16][eax], ecx
// CHECK: movl %ecx, 16(,%ebx,4)
mov [4*ebx + 16], ecx
// CHECK: movl %ecx, 16(,%ebx,4)
mov [ebx*4 + 16], ecx
// CHECK: movl %ecx, 16(,%ebx,4)
mov [4*ebx][16], ecx
// CHECK: movl %ecx, 16(,%ebx,4)
mov [ebx*4][16], ecx
// CHECK: movl %ecx, 16(,%ebx,4)
mov [16 + 4*ebx], ecx
// CHECK: movl %ecx, 16(,%ebx,4)
mov [16 + ebx*4], ecx
// CHECK: movl %ecx, 16(,%ebx,4)
mov [16][4*ebx], ecx
// CHECK: movl %ecx, 16(,%ebx,4)
mov [16][ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax + 4*ebx + 16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax + 16 + 4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [4*ebx + eax + 16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [4*ebx + 16 + eax], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16 + eax + 4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16 + eax + 4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax][4*ebx + 16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax][16 + 4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [4*ebx][eax + 16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [4*ebx][16 + eax], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16][eax + 4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16][eax + 4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax + 4*ebx][16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax + 16][4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [4*ebx + eax][16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [4*ebx + 16][eax], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16 + eax][4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16 + eax][4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax][4*ebx][16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax][16][4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [4*ebx][eax][16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [4*ebx][16][eax], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16][eax][4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16][eax][4*ebx], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax + ebx*4 + 16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax + 16 + ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [ebx*4 + eax + 16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [ebx*4 + 16 + eax], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16 + eax + ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16 + eax + ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax][ebx*4 + 16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax][16 + ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [ebx*4][eax + 16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [ebx*4][16 + eax], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16][eax + ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16][eax + ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax + ebx*4][16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax + 16][ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [ebx*4 + eax][16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [ebx*4 + 16][eax], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16 + eax][ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16 + eax][ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax][ebx*4][16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [eax][16][ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [ebx*4][eax][16], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [ebx*4][16][eax], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16][eax][ebx*4], ecx
// CHECK: movl %ecx, 16(%eax,%ebx,4)
mov [16][eax][ebx*4], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax][ebx*4 - 16], ecx
// CHECK: prefetchnta 12800(%esi)
prefetchnta [esi + (200*64)]
// CHECK: prefetchnta 32(%esi)
prefetchnta [esi + (64/2)]
// CHECK: prefetchnta 128(%esi)
prefetchnta [esi + (64/2*4)]
// CHECK: prefetchnta 8(%esi)
prefetchnta [esi + (64/(2*4))]
// CHECK: prefetchnta 48(%esi)
prefetchnta [esi + (64/(2*4)+40)]
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax][ebx*4 - 2*8], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax][4*ebx - 2*8], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax + 4*ebx - 2*8], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [12 + eax + (4*ebx) - 2*14], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax][ebx*4 - 2*2*2*2], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax][ebx*4 - (2*8)], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax][ebx*4 - 2 * 8 + 4 - 4], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax + ebx*4 - 2 * 8 + 4 - 4], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax + ebx*4 - 2 * ((8 + 4) - 4)], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [-2 * ((8 + 4) - 4) + eax + ebx*4], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [((-2) * ((8 + 4) - 4)) + eax + ebx*4], ecx
// CHECK: movl %ecx, -16(%eax,%ebx,4)
mov [eax + ((-2) * ((8 + 4) - 4)) + ebx*4], ecx
// CHECK: movl %ecx, 96(%eax,%ebx,4)
mov [eax + ((-2) * ((8 + 4) * -4)) + ebx*4], ecx
// CHECK: movl %ecx, -8(%eax,%ebx,4)
mov [eax][-8][ebx*4], ecx
// CHECK: movl %ecx, -2(%eax,%ebx,4)
mov [eax][16/-8][ebx*4], ecx
// CHECK: movl %ecx, -2(%eax,%ebx,4)
mov [eax][(16)/-8][ebx*4], ecx
// CHECK: setb %al
setc al
// CHECK: sete %al
setz al
// CHECK: setbe %al
setna al
// CHECK: setae %al
setnb al
// CHECK: setae %al
setnc al
// CHECK: setle %al
setng al
// CHECK: setge %al
setnl al
// CHECK: setne %al
setnz al
// CHECK: setp %al
setpe al
// CHECK: setnp %al
setpo al
// CHECK: setb %al
setnae al
// CHECK: seta %al
setnbe al
// CHECK: setl %al
setnge al
// CHECK: setg %al
setnle al
// CHECK: jne _foo
jnz _foo
// CHECK: outb %al, $4
out 4, al
ret
// CHECK: cmovbl %ebx, %eax
cmovc eax, ebx
// CHECK: cmovel %ebx, %eax
cmovz eax, ebx
// CHECK: cmovbel %ebx, %eax
cmovna eax, ebx
// CHECK: cmovael %ebx, %eax
cmovnb eax, ebx
// CHECK: cmovael %ebx, %eax
cmovnc eax, ebx
// CHECK: cmovlel %ebx, %eax
cmovng eax, ebx
// CHECK: cmovgel %ebx, %eax
cmovnl eax, ebx
// CHECK: cmovnel %ebx, %eax
cmovnz eax, ebx
// CHECK: cmovpl %ebx, %eax
cmovpe eax, ebx
// CHECK: cmovnpl %ebx, %eax
cmovpo eax, ebx
// CHECK: cmovbl %ebx, %eax
cmovnae eax, ebx
// CHECK: cmoval %ebx, %eax
cmovnbe eax, ebx
// CHECK: cmovll %ebx, %eax
cmovnge eax, ebx
// CHECK: cmovgl %ebx, %eax
cmovnle eax, ebx
// CHECK: shldw %cl, %bx, %dx
// CHECK: shldw %cl, %bx, %dx
// CHECK: shldw $1, %bx, %dx
// CHECK: shldw %cl, %bx, (%rax)
// CHECK: shldw %cl, %bx, (%rax)
// CHECK: shrdw %cl, %bx, %dx
// CHECK: shrdw %cl, %bx, %dx
// CHECK: shrdw $1, %bx, %dx
// CHECK: shrdw %cl, %bx, (%rax)
// CHECK: shrdw %cl, %bx, (%rax)
shld DX, BX
shld DX, BX, CL
shld DX, BX, 1
shld [RAX], BX
shld [RAX], BX, CL
shrd DX, BX
shrd DX, BX, CL
shrd DX, BX, 1
shrd [RAX], BX
shrd [RAX], BX, CL
// CHECK: btl $1, (%eax)
// CHECK: btsl $1, (%eax)
// CHECK: btrl $1, (%eax)
// CHECK: btcl $1, (%eax)
bt DWORD PTR [EAX], 1
bt DWORD PTR [EAX], 1
bts DWORD PTR [EAX], 1
btr DWORD PTR [EAX], 1
btc DWORD PTR [EAX], 1
//CHECK: divb %bl
//CHECK: divw %bx
//CHECK: divl %ecx
//CHECK: divl 3735928559(%ebx,%ecx,8)
//CHECK: divl 69
//CHECK: divl 32493
//CHECK: divl 3133065982
//CHECK: divl 305419896
//CHECK: idivb %bl
//CHECK: idivw %bx
//CHECK: idivl %ecx
//CHECK: idivl 3735928559(%ebx,%ecx,8)
//CHECK: idivl 69
//CHECK: idivl 32493
//CHECK: idivl 3133065982
//CHECK: idivl 305419896
div AL, BL
div AX, BX
div EAX, ECX
div EAX, [ECX*8+EBX+0xdeadbeef]
div EAX, [0x45]
div EAX, [0x7eed]
div EAX, [0xbabecafe]
div EAX, [0x12345678]
idiv AL, BL
idiv AX, BX
idiv EAX, ECX
idiv EAX, [ECX*8+EBX+0xdeadbeef]
idiv EAX, [0x45]
idiv EAX, [0x7eed]
idiv EAX, [0xbabecafe]
idiv EAX, [0x12345678]
// CHECK: inb %dx, %al
// CHECK: inw %dx, %ax
// CHECK: inl %dx, %eax
// CHECK: outb %al, %dx
// CHECK: outw %ax, %dx
// CHECK: outl %eax, %dx
inb DX
inw DX
inl DX
outb DX
outw DX
outl DX
// CHECK: xchgq %rcx, %rax
// CHECK: xchgq %rcx, %rax
// CHECK: xchgl %ecx, %eax
// CHECK: xchgl %ecx, %eax
// CHECK: xchgw %cx, %ax
// CHECK: xchgw %cx, %ax
xchg RAX, RCX
xchg RCX, RAX
xchg EAX, ECX
xchg ECX, EAX
xchg AX, CX
xchg CX, AX
// CHECK: xchgq %rax, (%ecx)
// CHECK: xchgq %rax, (%ecx)
// CHECK: xchgl %eax, (%ecx)
// CHECK: xchgl %eax, (%ecx)
// CHECK: xchgw %ax, (%ecx)
// CHECK: xchgw %ax, (%ecx)
xchg RAX, [ECX]
xchg [ECX], RAX
xchg EAX, [ECX]
xchg [ECX], EAX
xchg AX, [ECX]
xchg [ECX], AX
// CHECK: testq %rax, (%ecx)
// CHECK: testq %rax, (%ecx)
// CHECK: testl %eax, (%ecx)
// CHECK: testl %eax, (%ecx)
// CHECK: testw %ax, (%ecx)
// CHECK: testw %ax, (%ecx)
// CHECK: testb %al, (%ecx)
// CHECK: testb %al, (%ecx)
test RAX, [ECX]
test [ECX], RAX
test EAX, [ECX]
test [ECX], EAX
test AX, [ECX]
test [ECX], AX
test AL, [ECX]
test [ECX], AL
// CHECK: fnstsw %ax
// CHECK: fnstsw %ax
// CHECK: fnstsw (%eax)
fnstsw
fnstsw AX
fnstsw WORD PTR [EAX]
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fdivrp %st(1)
-// CHECK: fdivp %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
faddp ST(1), ST(0)
fmulp ST(1), ST(0)
fsubp ST(1), ST(0)
fsubrp ST(1), ST(0)
fdivp ST(1), ST(0)
fdivrp ST(1), ST(0)
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fdivrp %st(1)
-// CHECK: fdivp %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
faddp ST(0), ST(1)
fmulp ST(0), ST(1)
fsubp ST(0), ST(1)
fsubrp ST(0), ST(1)
fdivp ST(0), ST(1)
fdivrp ST(0), ST(1)
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fdivrp %st(1)
-// CHECK: fdivp %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
faddp ST(1)
fmulp ST(1)
fsubp ST(1)
fsubrp ST(1)
fdivp ST(1)
fdivrp ST(1)
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fdivrp %st(1)
-// CHECK: fdivp %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
fadd
fmul
fsub
fsubr
fdiv
fdivr
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fdivrp %st(1)
-// CHECK: fdivp %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
faddp
fmulp
fsubp
fsubrp
fdivp
fdivrp
-// CHECK: fadd %st(1)
-// CHECK: fmul %st(1)
-// CHECK: fsub %st(1)
-// CHECK: fsubr %st(1)
-// CHECK: fdiv %st(1)
-// CHECK: fdivr %st(1)
+// CHECK: fadd %st(1), %st
+// CHECK: fmul %st(1), %st
+// CHECK: fsub %st(1), %st
+// CHECK: fsubr %st(1), %st
+// CHECK: fdiv %st(1), %st
+// CHECK: fdivr %st(1), %st
fadd ST(0), ST(1)
fmul ST(0), ST(1)
fsub ST(0), ST(1)
fsubr ST(0), ST(1)
fdiv ST(0), ST(1)
fdivr ST(0), ST(1)
-// CHECK: fadd %st(0), %st(1)
-// CHECK: fmul %st(0), %st(1)
-// CHECK: fsubr %st(0), %st(1)
-// CHECK: fsub %st(0), %st(1)
-// CHECK: fdivr %st(0), %st(1)
-// CHECK: fdiv %st(0), %st(1)
+// CHECK: fadd %st, %st(1)
+// CHECK: fmul %st, %st(1)
+// CHECK: fsubr %st, %st(1)
+// CHECK: fsub %st, %st(1)
+// CHECK: fdivr %st, %st(1)
+// CHECK: fdiv %st, %st(1)
fadd ST(1), ST(0)
fmul ST(1), ST(0)
fsub ST(1), ST(0)
fsubr ST(1), ST(0)
fdiv ST(1), ST(0)
fdivr ST(1), ST(0)
-// CHECK: fadd %st(1)
-// CHECK: fmul %st(1)
-// CHECK: fsub %st(1)
-// CHECK: fsubr %st(1)
-// CHECK: fdiv %st(1)
-// CHECK: fdivr %st(1)
+// CHECK: fadd %st(1), %st
+// CHECK: fmul %st(1), %st
+// CHECK: fsub %st(1), %st
+// CHECK: fsubr %st(1), %st
+// CHECK: fdiv %st(1), %st
+// CHECK: fdivr %st(1), %st
fadd ST(1)
fmul ST(1)
fsub ST(1)
fsubr ST(1)
fdiv ST(1)
fdivr ST(1)
// CHECK: fxsave64 (%rax)
// CHECK: fxrstor64 (%rax)
fxsave64 [rax]
fxrstor64 [rax]
.bss
.globl _g0
.text
// CHECK: movq _g0, %rbx
// CHECK: movq _g0+8, %rcx
// CHECK: movq _g0+18(%rbp), %rax
// CHECK: movq _g0(,%rsi,4), %rax
mov rbx, qword ptr [_g0]
mov rcx, qword ptr [_g0 + 8]
mov rax, QWORD PTR _g0[rbp + 1 + (2 * 5) - 3 + 1<<1]
mov rax, QWORD PTR _g0[rsi*4]
"?half@?0??bar@@YAXXZ@4NA":
.quad 4602678819172646912
fadd dword ptr "?half@?0??bar@@YAXXZ@4NA"
fadd dword ptr "?half@?0??bar@@YAXXZ@4NA"@IMGREL
// CHECK: fadds "?half@?0??bar@@YAXXZ@4NA"
// CHECK: fadds "?half@?0??bar@@YAXXZ@4NA"@IMGREL
inc qword ptr [rax]
inc long ptr [rax]
inc dword ptr [rax]
inc word ptr [rax]
inc byte ptr [rax]
// CHECK: incq (%rax)
// CHECK: incl (%rax)
// CHECK: incl (%rax)
// CHECK: incw (%rax)
// CHECK: incb (%rax)
dec qword ptr [rax]
dec dword ptr [rax]
dec word ptr [rax]
dec byte ptr [rax]
// CHECK: decq (%rax)
// CHECK: decl (%rax)
// CHECK: decw (%rax)
// CHECK: decb (%rax)
add qword ptr [rax], 1
add dword ptr [rax], 1
add word ptr [rax], 1
add byte ptr [rax], 1
// CHECK: addq $1, (%rax)
// CHECK: addl $1, (%rax)
// CHECK: addw $1, (%rax)
// CHECK: addb $1, (%rax)
fstp tbyte ptr [rax]
fstp xword ptr [rax]
fstp qword ptr [rax]
fstp dword ptr [rax]
// CHECK: fstpt (%rax)
// CHECK: fstpt (%rax)
// CHECK: fstpl (%rax)
// CHECK: fstps (%rax)
fxsave [eax]
fsave [eax]
fxrstor [eax]
frstor [eax]
// CHECK: fxsave (%eax)
// CHECK: wait
// CHECK: fnsave (%eax)
// CHECK: fxrstor (%eax)
// CHECK: frstor (%eax)
// FIXME: Should we accept this? Masm accepts it, but gas does not.
fxsave dword ptr [eax]
fsave dword ptr [eax]
fxrstor dword ptr [eax]
frstor dword ptr [eax]
// CHECK: fxsave (%eax)
// CHECK: wait
// CHECK: fnsave (%eax)
// CHECK: fxrstor (%eax)
// CHECK: frstor (%eax)
// CHECK: cmpnless %xmm1, %xmm0
cmpnless xmm0, xmm1
insb
insw
insd
// CHECK: insb %dx, %es:(%rdi)
// CHECK: insw %dx, %es:(%rdi)
// CHECK: insl %dx, %es:(%rdi)
outsb
outsw
outsd
// CHECK: outsb (%rsi), %dx
// CHECK: outsw (%rsi), %dx
// CHECK: outsl (%rsi), %dx
imul bx, 123
imul ebx, 123
imul rbx, 123
// CHECK: imulw $123, %bx
// CHECK: imull $123, %ebx
// CHECK: imulq $123, %rbx
repe cmpsb
repz cmpsb
repne cmpsb
repnz cmpsb
// CHECK: rep
// CHECK: cmpsb %es:(%rdi), (%rsi)
// CHECK: rep
// CHECK: cmpsb %es:(%rdi), (%rsi)
// CHECK: repne
// CHECK: cmpsb %es:(%rdi), (%rsi)
// CHECK: repne
// CHECK: cmpsb %es:(%rdi), (%rsi)
sal eax, 123
// CHECK: shll $123, %eax
psignw mm0, MMWORD PTR t2
// CHECK: psignw t2, %mm0
comisd xmm0, QWORD PTR [eax]
comiss xmm0, DWORD PTR [eax]
vcomisd xmm0, QWORD PTR [eax]
vcomiss xmm0, DWORD PTR [eax]
// CHECK: comisd (%eax), %xmm0
// CHECK: comiss (%eax), %xmm0
// CHECK: vcomisd (%eax), %xmm0
// CHECK: vcomiss (%eax), %xmm0
fbld tbyte ptr [eax]
fbstp tbyte ptr [eax]
// CHECK: fbld (%eax)
// CHECK: fbstp (%eax)
fld float ptr [rax]
fld double ptr [rax]
// CHECK: flds (%rax)
// CHECK: fldl (%rax)
fcomip st, st(2)
fucomip st, st(2)
// CHECK: fcompi %st(2)
// CHECK: fucompi %st(2)
loopz _foo
loopnz _foo
// CHECK: loope _foo
// CHECK: loopne _foo
sidt fword ptr [eax]
// CHECK: sidtq (%eax)
ins byte ptr [eax], dx
// CHECK: insb %dx, %es:(%edi)
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)DI will be used for the location
// CHECK-STDERR-NEXT: ins byte ptr [eax], dx
outs dx, word ptr [eax]
// CHECK: outsw (%esi), %dx
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)SI will be used for the location
// CHECK-STDERR-NEXT: outs dx, word ptr [eax]
lods dword ptr [eax]
// CHECK: lodsl (%esi), %eax
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)SI will be used for the location
// CHECK-STDERR-NEXT: lods dword ptr [eax]
stos qword ptr [eax]
// CHECK: stosq %rax, %es:(%edi)
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)DI will be used for the location
// CHECK-STDERR-NEXT: stos qword ptr [eax]
scas byte ptr [eax]
// CHECK: scasb %es:(%edi), %al
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)DI will be used for the location
// CHECK-STDERR-NEXT: scas byte ptr [eax]
cmps word ptr [eax], word ptr [ebx]
// CHECK: cmpsw %es:(%edi), (%esi)
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)SI will be used for the location
// CHECK-STDERR-NEXT: cmps word ptr [eax], word ptr [ebx]
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)DI will be used for the location
// CHECK-STDERR-NEXT: cmps word ptr [eax], word ptr [ebx]
movs dword ptr [eax], dword ptr [ebx]
// CHECK: movsl (%esi), %es:(%edi)
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)DI will be used for the location
// CHECK-STDERR-NEXT: movs dword ptr [eax], dword ptr [ebx]
// CHECK-STDERR: memory operand is only for determining the size, ES:(R|E)SI will be used for the location
// CHECK-STDERR-NEXT: movs dword ptr [eax], dword ptr [ebx]
movsd qword ptr [rax], xmm0
// CHECK: movsd %xmm0, (%rax)
// CHECK-STDERR-NOT: movsd qword ptr [rax], xmm0
xlat byte ptr [eax]
// CHECK: xlatb
// CHECK-STDERR: memory operand is only for determining the size, (R|E)BX will be used for the location
// CHECK: punpcklbw
punpcklbw mm0, dword ptr [rsp]
// CHECK: punpcklwd
punpcklwd mm0, dword ptr [rsp]
// CHECK: punpckldq
punpckldq mm0, dword ptr [rsp]
// CHECK: lslq (%eax), %rbx
lsl rbx, word ptr [eax]
// CHECK: lsll (%eax), %ebx
lsl ebx, word ptr [eax]
// CHECK: lslw (%eax), %bx
lsl bx, word ptr [eax]
// CHECK: sysexitl
sysexit
// CHECK: sysexitq
sysexitq
// CHECK: sysretl
sysret
// CHECK: sysretq
sysretq
// CHECK: leaq (%rsp,%rax), %rax
lea rax, [rax+rsp]
// CHECK: leaq (%rsp,%rax), %rax
lea rax, [rsp+rax]
// CHECK: leal (%esp,%eax), %eax
lea eax, [eax+esp]
// CHECK: leal (%esp,%eax), %eax
lea eax, [esp+eax]
// CHECK: vpgatherdq %ymm2, (%rdi,%xmm1), %ymm0
vpgatherdq ymm0, [rdi+xmm1], ymm2
// CHECK: vpgatherdq %ymm2, (%rdi,%xmm1), %ymm0
vpgatherdq ymm0, [xmm1+rdi], ymm2
Index: vendor/llvm/dist-release_80/test/MC/X86/x86-16.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/x86-16.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/x86-16.s (revision 344166)
@@ -1,991 +1,991 @@
// RUN: llvm-mc -triple i386-unknown-unknown-code16 --show-encoding %s | FileCheck %s
movl $0x12345678, %ebx
// CHECK: movl
// CHECK: encoding: [0x66,0xbb,0x78,0x56,0x34,0x12]
pause
// CHECK: pause
// CHECK: encoding: [0xf3,0x90]
sfence
// CHECK: sfence
// CHECK: encoding: [0x0f,0xae,0xf8]
lfence
// CHECK: lfence
// CHECK: encoding: [0x0f,0xae,0xe8]
mfence
stgi
// CHECK: stgi
// CHECK: encoding: [0x0f,0x01,0xdc]
clgi
// CHECK: clgi
// CHECK: encoding: [0x0f,0x01,0xdd]
rdtscp
// CHECK: rdtscp
// CHECK: encoding: [0x0f,0x01,0xf9]
// CHECK: movl %eax, 16(%ebp) # encoding: [0x67,0x66,0x89,0x45,0x10]
movl %eax, 16(%ebp)
// CHECK: movl %eax, -16(%ebp) # encoding: [0x67,0x66,0x89,0x45,0xf0]
movl %eax, -16(%ebp)
// CHECK: testb %bl, %cl # encoding: [0x84,0xd9]
testb %bl, %cl
// CHECK: cmpl %eax, %ebx # encoding: [0x66,0x39,0xc3]
cmpl %eax, %ebx
// CHECK: addw %ax, %ax # encoding: [0x01,0xc0]
addw %ax, %ax
// CHECK: shrl %eax # encoding: [0x66,0xd1,0xe8]
shrl $1, %eax
// CHECK: shll %eax # encoding: [0x66,0xd1,0xe0]
sall $1, %eax
// CHECK: shll %eax # encoding: [0x66,0xd1,0xe0]
sal $1, %eax
// moffset forms of moves
// CHECK: movb 0, %al # encoding: [0xa0,0x00,0x00]
movb 0, %al
// CHECK: movw 0, %ax # encoding: [0xa1,0x00,0x00]
movw 0, %ax
// CHECK: movl 0, %eax # encoding: [0x66,0xa1,0x00,0x00]
movl 0, %eax
into
// CHECK: into
// CHECK: encoding: [0xce]
int3
// CHECK: int3
// CHECK: encoding: [0xcc]
int $4
// CHECK: int $4
// CHECK: encoding: [0xcd,0x04]
int $255
// CHECK: int $255
// CHECK: encoding: [0xcd,0xff]
// CHECK: pushfw # encoding: [0x9c]
pushf
// CHECK: pushfl # encoding: [0x66,0x9c]
pushfl
// CHECK: popfw # encoding: [0x9d]
popf
// CHECK: popfl # encoding: [0x66,0x9d]
popfl
retl
// CHECK: ret
// CHECK: encoding: [0x66,0xc3]
// CHECK: cmoval %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x47,0xd0]
cmoval %eax,%edx
// CHECK: cmovael %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x43,0xd0]
cmovael %eax,%edx
// CHECK: cmovbel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x46,0xd0]
cmovbel %eax,%edx
// CHECK: cmovbl %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x42,0xd0]
cmovbl %eax,%edx
// CHECK: cmovbw %bx, %bx
cmovnae %bx,%bx
// CHECK: cmovbel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x46,0xd0]
cmovbel %eax,%edx
// CHECK: cmovbl %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x42,0xd0]
cmovcl %eax,%edx
// CHECK: cmovel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x44,0xd0]
cmovel %eax,%edx
// CHECK: cmovgl %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4f,0xd0]
cmovgl %eax,%edx
// CHECK: cmovgel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4d,0xd0]
cmovgel %eax,%edx
// CHECK: cmovll %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4c,0xd0]
cmovll %eax,%edx
// CHECK: cmovlel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4e,0xd0]
cmovlel %eax,%edx
// CHECK: cmovbel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x46,0xd0]
cmovnal %eax,%edx
// CHECK: cmovnel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x45,0xd0]
cmovnel %eax,%edx
// CHECK: cmovael %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x43,0xd0]
cmovnbl %eax,%edx
// CHECK: cmoval %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x47,0xd0]
cmovnbel %eax,%edx
// CHECK: cmovael %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x43,0xd0]
cmovncl %eax,%edx
// CHECK: cmovnel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x45,0xd0]
cmovnel %eax,%edx
// CHECK: cmovlel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4e,0xd0]
cmovngl %eax,%edx
// CHECK: cmovgel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4d,0xd0]
cmovnl %eax,%edx
// CHECK: cmovnel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x45,0xd0]
cmovnel %eax,%edx
// CHECK: cmovlel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4e,0xd0]
cmovngl %eax,%edx
// CHECK: cmovll %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4c,0xd0]
cmovngel %eax,%edx
// CHECK: cmovgel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4d,0xd0]
cmovnll %eax,%edx
// CHECK: cmovgl %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4f,0xd0]
cmovnlel %eax,%edx
// CHECK: cmovnol %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x41,0xd0]
cmovnol %eax,%edx
// CHECK: cmovnpl %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4b,0xd0]
cmovnpl %eax,%edx
// CHECK: cmovnsl %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x49,0xd0]
cmovnsl %eax,%edx
// CHECK: cmovnel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x45,0xd0]
cmovnzl %eax,%edx
// CHECK: cmovol %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x40,0xd0]
cmovol %eax,%edx
// CHECK: cmovpl %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x4a,0xd0]
cmovpl %eax,%edx
// CHECK: cmovsl %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x48,0xd0]
cmovsl %eax,%edx
// CHECK: cmovel %eax, %edx
// CHECK: encoding: [0x66,0x0f,0x44,0xd0]
cmovzl %eax,%edx
// CHECK: fmul %st(0)
// CHECK: encoding: [0xd8,0xc8]
fmul %st(0), %st
// CHECK: fadd %st(0)
// CHECK: encoding: [0xd8,0xc0]
fadd %st(0), %st
// CHECK: fsub %st(0)
// CHECK: encoding: [0xd8,0xe0]
fsub %st(0), %st
// CHECK: fsubr %st(0)
// CHECK: encoding: [0xd8,0xe8]
fsubr %st(0), %st
// CHECK: fdivr %st(0)
// CHECK: encoding: [0xd8,0xf8]
fdivr %st(0), %st
// CHECK: fdiv %st(0)
// CHECK: encoding: [0xd8,0xf0]
fdiv %st(0), %st
// CHECK: movl %cs, %eax
// CHECK: encoding: [0x66,0x8c,0xc8]
movl %cs, %eax
// CHECK: movw %cs, %ax
// CHECK: encoding: [0x8c,0xc8]
movw %cs, %ax
// CHECK: movw %cs, (%eax)
// CHECK: encoding: [0x67,0x8c,0x08]
mov %cs, (%eax)
// CHECK: movw %cs, (%eax)
// CHECK: encoding: [0x67,0x8c,0x08]
movw %cs, (%eax)
// CHECK: movw %ax, %cs
// CHECK: encoding: [0x8e,0xc8]
movl %eax, %cs
// CHECK: movw %ax, %cs
// CHECK: encoding: [0x8e,0xc8]
mov %eax, %cs
// CHECK: movw %ax, %cs
// CHECK: encoding: [0x8e,0xc8]
movw %ax, %cs
// CHECK: movw %ax, %cs
// CHECK: encoding: [0x8e,0xc8]
mov %ax, %cs
// CHECK: movw (%eax), %cs
// CHECK: encoding: [0x67,0x8e,0x08]
mov (%eax), %cs
// CHECK: movw (%eax), %cs
// CHECK: encoding: [0x67,0x8e,0x08]
movw (%eax), %cs
// CHECK: movl %cr0, %eax
// CHECK: encoding: [0x0f,0x20,0xc0]
movl %cr0,%eax
// CHECK: movl %cr1, %eax
// CHECK: encoding: [0x0f,0x20,0xc8]
movl %cr1,%eax
// CHECK: movl %cr2, %eax
// CHECK: encoding: [0x0f,0x20,0xd0]
movl %cr2,%eax
// CHECK: movl %cr3, %eax
// CHECK: encoding: [0x0f,0x20,0xd8]
movl %cr3,%eax
// CHECK: movl %cr4, %eax
// CHECK: encoding: [0x0f,0x20,0xe0]
movl %cr4,%eax
// CHECK: movl %dr0, %eax
// CHECK: encoding: [0x0f,0x21,0xc0]
movl %dr0,%eax
// CHECK: movl %dr1, %eax
// CHECK: encoding: [0x0f,0x21,0xc8]
movl %dr1,%eax
// CHECK: movl %dr1, %eax
// CHECK: encoding: [0x0f,0x21,0xc8]
movl %dr1,%eax
// CHECK: movl %dr2, %eax
// CHECK: encoding: [0x0f,0x21,0xd0]
movl %dr2,%eax
// CHECK: movl %dr3, %eax
// CHECK: encoding: [0x0f,0x21,0xd8]
movl %dr3,%eax
// CHECK: movl %dr4, %eax
// CHECK: encoding: [0x0f,0x21,0xe0]
movl %dr4,%eax
// CHECK: movl %dr5, %eax
// CHECK: encoding: [0x0f,0x21,0xe8]
movl %dr5,%eax
// CHECK: movl %dr6, %eax
// CHECK: encoding: [0x0f,0x21,0xf0]
movl %dr6,%eax
// CHECK: movl %dr7, %eax
// CHECK: encoding: [0x0f,0x21,0xf8]
movl %dr7,%eax
// CHECK: wait
// CHECK: encoding: [0x9b]
fwait
// CHECK: [0x66,0x65,0xa1,0x7c,0x00]
movl %gs:124, %eax
// CHECK: pusha
// CHECK: encoding: [0x60]
pusha
// CHECK: popa
// CHECK: encoding: [0x61]
popa
// CHECK: pushaw
// CHECK: encoding: [0x60]
pushaw
// CHECK: popaw
// CHECK: encoding: [0x61]
popaw
// CHECK: pushal
// CHECK: encoding: [0x66,0x60]
pushal
// CHECK: popal
// CHECK: encoding: [0x66,0x61]
popal
// CHECK: jmpw *8(%eax)
// CHECK: encoding: [0x67,0xff,0x60,0x08]
jmp *8(%eax)
// CHECK: jmpl *8(%eax)
// CHECK: encoding: [0x67,0x66,0xff,0x60,0x08]
jmpl *8(%eax)
// CHECK: lcalll $2, $4660
// CHECK: encoding: [0x66,0x9a,0x34,0x12,0x00,0x00,0x02,0x00]
lcalll $0x2, $0x1234
L1:
jcxz L1
// CHECK: jcxz L1
// CHECK: encoding: [0xe3,A]
jecxz L1
// CHECK: jecxz L1
// CHECK: encoding: [0x67,0xe3,A]
iret
// CHECK: iretw
// CHECK: encoding: [0xcf]
iretw
// CHECK: iretw
// CHECK: encoding: [0xcf]
iretl
// CHECK: iretl
// CHECK: encoding: [0x66,0xcf]
sysret
// CHECK: sysretl
// CHECK: encoding: [0x0f,0x07]
sysretl
// CHECK: sysretl
// CHECK: encoding: [0x0f,0x07]
testl %ecx, -24(%ebp)
// CHECK: testl %ecx, -24(%ebp)
testl -24(%ebp), %ecx
// CHECK: testl %ecx, -24(%ebp)
push %cs
// CHECK: pushw %cs
// CHECK: encoding: [0x0e]
push %ds
// CHECK: pushw %ds
// CHECK: encoding: [0x1e]
push %ss
// CHECK: pushw %ss
// CHECK: encoding: [0x16]
push %es
// CHECK: pushw %es
// CHECK: encoding: [0x06]
push %fs
// CHECK: pushw %fs
// CHECK: encoding: [0x0f,0xa0]
push %gs
// CHECK: pushw %gs
// CHECK: encoding: [0x0f,0xa8]
pushw %cs
// CHECK: pushw %cs
// CHECK: encoding: [0x0e]
pushw %ds
// CHECK: pushw %ds
// CHECK: encoding: [0x1e]
pushw %ss
// CHECK: pushw %ss
// CHECK: encoding: [0x16]
pushw %es
// CHECK: pushw %es
// CHECK: encoding: [0x06]
pushw %fs
// CHECK: pushw %fs
// CHECK: encoding: [0x0f,0xa0]
pushw %gs
// CHECK: pushw %gs
// CHECK: encoding: [0x0f,0xa8]
pushl %cs
// CHECK: pushl %cs
// CHECK: encoding: [0x66,0x0e]
pushl %ds
// CHECK: pushl %ds
// CHECK: encoding: [0x66,0x1e]
pushl %ss
// CHECK: pushl %ss
// CHECK: encoding: [0x66,0x16]
pushl %es
// CHECK: pushl %es
// CHECK: encoding: [0x66,0x06]
pushl %fs
// CHECK: pushl %fs
// CHECK: encoding: [0x66,0x0f,0xa0]
pushl %gs
// CHECK: pushl %gs
// CHECK: encoding: [0x66,0x0f,0xa8]
pop %ss
// CHECK: popw %ss
// CHECK: encoding: [0x17]
pop %ds
// CHECK: popw %ds
// CHECK: encoding: [0x1f]
pop %es
// CHECK: popw %es
// CHECK: encoding: [0x07]
popl %ss
// CHECK: popl %ss
// CHECK: encoding: [0x66,0x17]
popl %ds
// CHECK: popl %ds
// CHECK: encoding: [0x66,0x1f]
popl %es
// CHECK: popl %es
// CHECK: encoding: [0x66,0x07]
pushfd
// CHECK: pushfl
popfd
// CHECK: popfl
pushfl
// CHECK: pushfl
popfl
// CHECK: popfl
setc %bl
setnae %bl
setnb %bl
setnc %bl
setna %bl
setnbe %bl
setpe %bl
setpo %bl
setnge %bl
setnl %bl
setng %bl
setnle %bl
setneb %cl // CHECK: setne %cl
setcb %bl // CHECK: setb %bl
setnaeb %bl // CHECK: setb %bl
// CHECK: lcalll $31438, $31438
// CHECK: lcalll $31438, $31438
// CHECK: ljmpl $31438, $31438
// CHECK: ljmpl $31438, $31438
calll $0x7ace,$0x7ace
lcalll $0x7ace,$0x7ace
jmpl $0x7ace,$0x7ace
ljmpl $0x7ace,$0x7ace
// CHECK: lcallw $31438, $31438
// CHECK: lcallw $31438, $31438
// CHECK: ljmpw $31438, $31438
// CHECK: ljmpw $31438, $31438
callw $0x7ace,$0x7ace
lcallw $0x7ace,$0x7ace
jmpw $0x7ace,$0x7ace
ljmpw $0x7ace,$0x7ace
// CHECK: lcallw $31438, $31438
// CHECK: lcallw $31438, $31438
// CHECK: ljmpw $31438, $31438
// CHECK: ljmpw $31438, $31438
call $0x7ace,$0x7ace
lcall $0x7ace,$0x7ace
jmp $0x7ace,$0x7ace
ljmp $0x7ace,$0x7ace
// CHECK: calll a
calll a
// CHECK: incb %al # encoding: [0xfe,0xc0]
incb %al
// CHECK: incw %ax # encoding: [0x40]
incw %ax
// CHECK: incl %eax # encoding: [0x66,0x40]
incl %eax
// CHECK: decb %al # encoding: [0xfe,0xc8]
decb %al
// CHECK: decw %ax # encoding: [0x48]
decw %ax
// CHECK: decl %eax # encoding: [0x66,0x48]
decl %eax
// CHECK: pshufw $14, %mm4, %mm0 # encoding: [0x0f,0x70,0xc4,0x0e]
pshufw $14, %mm4, %mm0
// CHECK: pshufw $90, %mm4, %mm0 # encoding: [0x0f,0x70,0xc4,0x5a]
pshufw $90, %mm4, %mm0
// CHECK: aaa
// CHECK: encoding: [0x37]
aaa
// CHECK: aad $1
// CHECK: encoding: [0xd5,0x01]
aad $1
// CHECK: aad
// CHECK: encoding: [0xd5,0x0a]
aad $0xA
// CHECK: aad
// CHECK: encoding: [0xd5,0x0a]
aad
// CHECK: aam $2
// CHECK: encoding: [0xd4,0x02]
aam $2
// CHECK: aam
// CHECK: encoding: [0xd4,0x0a]
aam $0xA
// CHECK: aam
// CHECK: encoding: [0xd4,0x0a]
aam
// CHECK: aas
// CHECK: encoding: [0x3f]
aas
// CHECK: daa
// CHECK: encoding: [0x27]
daa
// CHECK: das
// CHECK: encoding: [0x2f]
das
// CHECK: retw $31438
// CHECK: encoding: [0xc2,0xce,0x7a]
retw $0x7ace
// CHECK: lretw $31438
// CHECK: encoding: [0xca,0xce,0x7a]
lretw $0x7ace
// CHECK: retw $31438
// CHECK: encoding: [0xc2,0xce,0x7a]
ret $0x7ace
// CHECK: lretw $31438
// CHECK: encoding: [0xca,0xce,0x7a]
lret $0x7ace
// CHECK: retl $31438
// CHECK: encoding: [0x66,0xc2,0xce,0x7a]
retl $0x7ace
// CHECK: lretl $31438
// CHECK: encoding: [0x66,0xca,0xce,0x7a]
lretl $0x7ace
// CHECK: bound %bx, 2(%eax)
// CHECK: encoding: [0x67,0x62,0x58,0x02]
bound %bx,2(%eax)
// CHECK: bound %ecx, 4(%ebx)
// CHECK: encoding: [0x67,0x66,0x62,0x4b,0x04]
bound %ecx,4(%ebx)
// CHECK: arpl %bx, %bx
// CHECK: encoding: [0x63,0xdb]
arpl %bx,%bx
// CHECK: arpl %bx, 6(%ecx)
// CHECK: encoding: [0x67,0x63,0x59,0x06]
arpl %bx,6(%ecx)
// CHECK: lgdtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x50,0x04]
lgdtw 4(%eax)
// CHECK: lgdtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x50,0x04]
lgdt 4(%eax)
// CHECK: lgdtl 4(%eax)
// CHECK: encoding: [0x67,0x66,0x0f,0x01,0x50,0x04]
lgdtl 4(%eax)
// CHECK: lidtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x58,0x04]
lidtw 4(%eax)
// CHECK: lidtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x58,0x04]
lidt 4(%eax)
// CHECK: lidtl 4(%eax)
// CHECK: encoding: [0x67,0x66,0x0f,0x01,0x58,0x04]
lidtl 4(%eax)
// CHECK: sgdtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x40,0x04]
sgdtw 4(%eax)
// CHECK: sgdtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x40,0x04]
sgdt 4(%eax)
// CHECK: sgdtl 4(%eax)
// CHECK: encoding: [0x67,0x66,0x0f,0x01,0x40,0x04]
sgdtl 4(%eax)
// CHECK: sidtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x48,0x04]
sidtw 4(%eax)
// CHECK: sidtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x48,0x04]
sidt 4(%eax)
// CHECK: sidtl 4(%eax)
// CHECK: encoding: [0x67,0x66,0x0f,0x01,0x48,0x04]
sidtl 4(%eax)
// CHECK: fcompi %st(2)
// CHECK: encoding: [0xdf,0xf2]
fcompi %st(2), %st
// CHECK: fcompi %st(2)
// CHECK: encoding: [0xdf,0xf2]
fcompi %st(2)
// CHECK: fcompi
// CHECK: encoding: [0xdf,0xf1]
fcompi
// CHECK: fucompi %st(2)
// CHECK: encoding: [0xdf,0xea]
fucompi %st(2),%st
// CHECK: fucompi %st(2)
// CHECK: encoding: [0xdf,0xea]
fucompi %st(2)
// CHECK: fucompi
// CHECK: encoding: [0xdf,0xe9]
fucompi
// CHECK: fldcw 32493
// CHECK: encoding: [0xd9,0x2e,0xed,0x7e]
fldcww 0x7eed
// CHECK: fldcw 32493
// CHECK: encoding: [0xd9,0x2e,0xed,0x7e]
fldcw 0x7eed
// CHECK: fnstcw 32493
// CHECK: encoding: [0xd9,0x3e,0xed,0x7e]
fnstcww 0x7eed
// CHECK: fnstcw 32493
// CHECK: encoding: [0xd9,0x3e,0xed,0x7e]
fnstcw 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fstcww 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fstcw 0x7eed
// CHECK: fnstsw 32493
// CHECK: encoding: [0xdd,0x3e,0xed,0x7e]
fnstsww 0x7eed
// CHECK: fnstsw 32493
// CHECK: encoding: [0xdd,0x3e,0xed,0x7e]
fnstsw 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fstsww 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fstsw 0x7eed
// CHECK: verr 32493
// CHECK: encoding: [0x0f,0x00,0x26,0xed,0x7e]
verrw 0x7eed
// CHECK: verr 32493
// CHECK: encoding: [0x0f,0x00,0x26,0xed,0x7e]
verr 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fclex
// CHECK: fnclex
// CHECK: encoding: [0xdb,0xe2]
fnclex
// CHECK: ud2
// CHECK: encoding: [0x0f,0x0b]
ud2
// CHECK: ud2
// CHECK: encoding: [0x0f,0x0b]
ud2a
// CHECK: ud2b
// CHECK: encoding: [0x0f,0xb9]
ud2b
// CHECK: loope 0
// CHECK: encoding: [0xe1,A]
loopz 0
// CHECK: loopne 0
// CHECK: encoding: [0xe0,A]
loopnz 0
// CHECK: outsb (%si), %dx # encoding: [0x6e]
// CHECK: outsb
// CHECK: outsb
outsb
outsb %ds:(%si), %dx
outsb (%si), %dx
// CHECK: outsw (%si), %dx # encoding: [0x6f]
// CHECK: outsw
// CHECK: outsw
outsw
outsw %ds:(%si), %dx
outsw (%si), %dx
// CHECK: outsl (%si), %dx # encoding: [0x66,0x6f]
// CHECK: outsl
outsl
outsl %ds:(%si), %dx
outsl (%si), %dx
// CHECK: insb %dx, %es:(%di) # encoding: [0x6c]
// CHECK: insb
insb
insb %dx, %es:(%di)
// CHECK: insw %dx, %es:(%di) # encoding: [0x6d]
// CHECK: insw
insw
insw %dx, %es:(%di)
// CHECK: insl %dx, %es:(%di) # encoding: [0x66,0x6d]
// CHECK: insl
insl
insl %dx, %es:(%di)
// CHECK: movsb (%si), %es:(%di) # encoding: [0xa4]
// CHECK: movsb
// CHECK: movsb
movsb
movsb %ds:(%si), %es:(%di)
movsb (%si), %es:(%di)
// CHECK: movsw (%si), %es:(%di) # encoding: [0xa5]
// CHECK: movsw
// CHECK: movsw
movsw
movsw %ds:(%si), %es:(%di)
movsw (%si), %es:(%di)
// CHECK: movsl (%si), %es:(%di) # encoding: [0x66,0xa5]
// CHECK: movsl
// CHECK: movsl
movsl
movsl %ds:(%si), %es:(%di)
movsl (%si), %es:(%di)
// CHECK: lodsb (%si), %al # encoding: [0xac]
// CHECK: lodsb
// CHECK: lodsb
// CHECK: lodsb
// CHECK: lodsb
lodsb
lodsb %ds:(%si), %al
lodsb (%si), %al
lods %ds:(%si), %al
lods (%si), %al
// CHECK: lodsw (%si), %ax # encoding: [0xad]
// CHECK: lodsw
// CHECK: lodsw
// CHECK: lodsw
// CHECK: lodsw
lodsw
lodsw %ds:(%si), %ax
lodsw (%si), %ax
lods %ds:(%si), %ax
lods (%si), %ax
// CHECK: lodsl (%si), %eax # encoding: [0x66,0xad]
// CHECK: lodsl
// CHECK: lodsl
// CHECK: lodsl
// CHECK: lodsl
lodsl
lodsl %ds:(%si), %eax
lodsl (%si), %eax
lods %ds:(%si), %eax
lods (%si), %eax
// CHECK: stosb %al, %es:(%di) # encoding: [0xaa]
// CHECK: stosb
// CHECK: stosb
stosb
stosb %al, %es:(%di)
stos %al, %es:(%di)
// CHECK: stosw %ax, %es:(%di) # encoding: [0xab]
// CHECK: stosw
// CHECK: stosw
stosw
stosw %ax, %es:(%di)
stos %ax, %es:(%di)
// CHECK: stosl %eax, %es:(%di) # encoding: [0x66,0xab]
// CHECK: stosl
// CHECK: stosl
stosl
stosl %eax, %es:(%di)
stos %eax, %es:(%di)
// CHECK: strw
// CHECK: encoding: [0x0f,0x00,0xc8]
str %ax
// CHECK: strl
// CHECK: encoding: [0x66,0x0f,0x00,0xc8]
str %eax
-// CHECK: fsubp
+// CHECK: fsubp %st, %st(1)
// CHECK: encoding: [0xde,0xe1]
fsubp %st,%st(1)
-// CHECK: fsubp %st(2)
+// CHECK: fsubp %st, %st(2)
// CHECK: encoding: [0xde,0xe2]
fsubp %st, %st(2)
// CHECK: xchgl %eax, %eax
// CHECK: encoding: [0x66,0x90]
xchgl %eax, %eax
// CHECK: xchgw %ax, %ax
// CHECK: encoding: [0x90]
xchgw %ax, %ax
// CHECK: xchgl %ecx, %eax
// CHECK: encoding: [0x66,0x91]
xchgl %ecx, %eax
// CHECK: xchgl %ecx, %eax
// CHECK: encoding: [0x66,0x91]
xchgl %eax, %ecx
// CHECK: retw
// CHECK: encoding: [0xc3]
retw
// CHECK: retl
// CHECK: encoding: [0x66,0xc3]
retl
// CHECK: lretw
// CHECK: encoding: [0xcb]
lretw
// CHECK: lretl
// CHECK: encoding: [0x66,0xcb]
lretl
// CHECK: data32
// CHECK: encoding: [0x66]
data32
// CHECK: data32
// CHECK: encoding: [0x66]
// CHECK: lgdtw 4(%eax)
// CHECK: encoding: [0x67,0x0f,0x01,0x50,0x04]
data32 lgdt 4(%eax)
// CHECK: wbnoinvd
// CHECK: encoding: [0xf3,0x0f,0x09]
wbnoinvd
// CHECK: umonitor %ax
// CHECK: encoding: [0xf3,0x0f,0xae,0xf0]
umonitor %ax
// CHECK: umonitor %eax
// CHECK: encoding: [0x67,0xf3,0x0f,0xae,0xf0]
umonitor %eax
// CHECK: movdir64b (%esi), %eax
// CHECK: encoding: [0x67,0x66,0x0f,0x38,0xf8,0x06]
movdir64b (%esi), %eax
// CHECK: movdir64b (%si), %ax
// CHECK: encoding: [0x66,0x0f,0x38,0xf8,0x04]
movdir64b (%si), %ax
Index: vendor/llvm/dist-release_80/test/MC/X86/x86-32-coverage.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/x86-32-coverage.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/x86-32-coverage.s (revision 344166)
@@ -1,10810 +1,10810 @@
// RUN: llvm-mc -triple i386-unknown-unknown %s --show-encoding | FileCheck %s
// RUN: llvm-mc -triple i386-unknown-unknown -output-asm-variant=1 %s | FileCheck --check-prefix=INTEL %s
// CHECK: flds (%edi)
// CHECK: encoding: [0xd9,0x07]
flds (%edi)
// CHECK: filds (%edi)
// CHECK: encoding: [0xdf,0x07]
filds (%edi)
// CHECK: movb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc6,0x84,0xcb,0xef,0xbe,0xad,0xde,0x7f]
movb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movb $127, 69
// CHECK: encoding: [0xc6,0x05,0x45,0x00,0x00,0x00,0x7f]
movb $0x7f,0x45
// CHECK: movb $127, 32493
// CHECK: encoding: [0xc6,0x05,0xed,0x7e,0x00,0x00,0x7f]
movb $0x7f,0x7eed
// CHECK: movb $127, 3133065982
// CHECK: encoding: [0xc6,0x05,0xfe,0xca,0xbe,0xba,0x7f]
movb $0x7f,0xbabecafe
// CHECK: movb $127, 305419896
// CHECK: encoding: [0xc6,0x05,0x78,0x56,0x34,0x12,0x7f]
movb $0x7f,0x12345678
// CHECK: movw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0xc7,0x84,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
movw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movw $31438, 69
// CHECK: encoding: [0x66,0xc7,0x05,0x45,0x00,0x00,0x00,0xce,0x7a]
movw $0x7ace,0x45
// CHECK: movw $31438, 32493
// CHECK: encoding: [0x66,0xc7,0x05,0xed,0x7e,0x00,0x00,0xce,0x7a]
movw $0x7ace,0x7eed
// CHECK: movw $31438, 3133065982
// CHECK: encoding: [0x66,0xc7,0x05,0xfe,0xca,0xbe,0xba,0xce,0x7a]
movw $0x7ace,0xbabecafe
// CHECK: movw $31438, 305419896
// CHECK: encoding: [0x66,0xc7,0x05,0x78,0x56,0x34,0x12,0xce,0x7a]
movw $0x7ace,0x12345678
// CHECK: movl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc7,0x84,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
movl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movl $2063514302, 69
// CHECK: encoding: [0xc7,0x05,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
movl $0x7afebabe,0x45
// CHECK: movl $2063514302, 32493
// CHECK: encoding: [0xc7,0x05,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
movl $0x7afebabe,0x7eed
// CHECK: movl $2063514302, 3133065982
// CHECK: encoding: [0xc7,0x05,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
movl $0x7afebabe,0xbabecafe
// CHECK: movl $2063514302, 305419896
// CHECK: encoding: [0xc7,0x05,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
movl $0x7afebabe,0x12345678
// CHECK: movl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc7,0x84,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
movl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movl $324478056, 69
// CHECK: encoding: [0xc7,0x05,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
movl $0x13572468,0x45
// CHECK: movl $324478056, 32493
// CHECK: encoding: [0xc7,0x05,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
movl $0x13572468,0x7eed
// CHECK: movl $324478056, 3133065982
// CHECK: encoding: [0xc7,0x05,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
movl $0x13572468,0xbabecafe
// CHECK: movl $324478056, 305419896
// CHECK: encoding: [0xc7,0x05,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
movl $0x13572468,0x12345678
// CHECK: movsbl 3735928559(%ebx,%ecx,8), %ecx
// CHECK: encoding: [0x0f,0xbe,0x8c,0xcb,0xef,0xbe,0xad,0xde]
movsbl 0xdeadbeef(%ebx,%ecx,8),%ecx
// CHECK: movsbl 69, %ecx
// CHECK: encoding: [0x0f,0xbe,0x0d,0x45,0x00,0x00,0x00]
movsbl 0x45,%ecx
// CHECK: movsbl 32493, %ecx
// CHECK: encoding: [0x0f,0xbe,0x0d,0xed,0x7e,0x00,0x00]
movsbl 0x7eed,%ecx
// CHECK: movsbl 3133065982, %ecx
// CHECK: encoding: [0x0f,0xbe,0x0d,0xfe,0xca,0xbe,0xba]
movsbl 0xbabecafe,%ecx
// CHECK: movsbl 305419896, %ecx
// CHECK: encoding: [0x0f,0xbe,0x0d,0x78,0x56,0x34,0x12]
movsbl 0x12345678,%ecx
// CHECK: movsbw 3735928559(%ebx,%ecx,8), %bx
// CHECK: encoding: [0x66,0x0f,0xbe,0x9c,0xcb,0xef,0xbe,0xad,0xde]
movsbw 0xdeadbeef(%ebx,%ecx,8),%bx
// CHECK: movsbw 69, %bx
// CHECK: encoding: [0x66,0x0f,0xbe,0x1d,0x45,0x00,0x00,0x00]
movsbw 0x45,%bx
// CHECK: movsbw 32493, %bx
// CHECK: encoding: [0x66,0x0f,0xbe,0x1d,0xed,0x7e,0x00,0x00]
movsbw 0x7eed,%bx
// CHECK: movsbw 3133065982, %bx
// CHECK: encoding: [0x66,0x0f,0xbe,0x1d,0xfe,0xca,0xbe,0xba]
movsbw 0xbabecafe,%bx
// CHECK: movsbw 305419896, %bx
// CHECK: encoding: [0x66,0x0f,0xbe,0x1d,0x78,0x56,0x34,0x12]
movsbw 0x12345678,%bx
// CHECK: movswl 3735928559(%ebx,%ecx,8), %ecx
// CHECK: encoding: [0x0f,0xbf,0x8c,0xcb,0xef,0xbe,0xad,0xde]
movswl 0xdeadbeef(%ebx,%ecx,8),%ecx
// CHECK: movswl 69, %ecx
// CHECK: encoding: [0x0f,0xbf,0x0d,0x45,0x00,0x00,0x00]
movswl 0x45,%ecx
// CHECK: movswl 32493, %ecx
// CHECK: encoding: [0x0f,0xbf,0x0d,0xed,0x7e,0x00,0x00]
movswl 0x7eed,%ecx
// CHECK: movswl 3133065982, %ecx
// CHECK: encoding: [0x0f,0xbf,0x0d,0xfe,0xca,0xbe,0xba]
movswl 0xbabecafe,%ecx
// CHECK: movswl 305419896, %ecx
// CHECK: encoding: [0x0f,0xbf,0x0d,0x78,0x56,0x34,0x12]
movswl 0x12345678,%ecx
// CHECK: movzbl 3735928559(%ebx,%ecx,8), %ecx
// CHECK: encoding: [0x0f,0xb6,0x8c,0xcb,0xef,0xbe,0xad,0xde]
movzbl 0xdeadbeef(%ebx,%ecx,8),%ecx
// CHECK: movzbl 69, %ecx
// CHECK: encoding: [0x0f,0xb6,0x0d,0x45,0x00,0x00,0x00]
movzbl 0x45,%ecx
// CHECK: movzbl 32493, %ecx
// CHECK: encoding: [0x0f,0xb6,0x0d,0xed,0x7e,0x00,0x00]
movzbl 0x7eed,%ecx
// CHECK: movzbl 3133065982, %ecx
// CHECK: encoding: [0x0f,0xb6,0x0d,0xfe,0xca,0xbe,0xba]
movzbl 0xbabecafe,%ecx
// CHECK: movzbl 305419896, %ecx
// CHECK: encoding: [0x0f,0xb6,0x0d,0x78,0x56,0x34,0x12]
movzbl 0x12345678,%ecx
// CHECK: movzbw 3735928559(%ebx,%ecx,8), %bx
// CHECK: encoding: [0x66,0x0f,0xb6,0x9c,0xcb,0xef,0xbe,0xad,0xde]
movzbw 0xdeadbeef(%ebx,%ecx,8),%bx
// CHECK: movzbw 69, %bx
// CHECK: encoding: [0x66,0x0f,0xb6,0x1d,0x45,0x00,0x00,0x00]
movzbw 0x45,%bx
// CHECK: movzbw 32493, %bx
// CHECK: encoding: [0x66,0x0f,0xb6,0x1d,0xed,0x7e,0x00,0x00]
movzbw 0x7eed,%bx
// CHECK: movzbw 3133065982, %bx
// CHECK: encoding: [0x66,0x0f,0xb6,0x1d,0xfe,0xca,0xbe,0xba]
movzbw 0xbabecafe,%bx
// CHECK: movzbw 305419896, %bx
// CHECK: encoding: [0x66,0x0f,0xb6,0x1d,0x78,0x56,0x34,0x12]
movzbw 0x12345678,%bx
// CHECK: movzwl 3735928559(%ebx,%ecx,8), %ecx
// CHECK: encoding: [0x0f,0xb7,0x8c,0xcb,0xef,0xbe,0xad,0xde]
movzwl 0xdeadbeef(%ebx,%ecx,8),%ecx
// CHECK: movzwl 69, %ecx
// CHECK: encoding: [0x0f,0xb7,0x0d,0x45,0x00,0x00,0x00]
movzwl 0x45,%ecx
// CHECK: movzwl 32493, %ecx
// CHECK: encoding: [0x0f,0xb7,0x0d,0xed,0x7e,0x00,0x00]
movzwl 0x7eed,%ecx
// CHECK: movzwl 3133065982, %ecx
// CHECK: encoding: [0x0f,0xb7,0x0d,0xfe,0xca,0xbe,0xba]
movzwl 0xbabecafe,%ecx
// CHECK: movzwl 305419896, %ecx
// CHECK: encoding: [0x0f,0xb7,0x0d,0x78,0x56,0x34,0x12]
movzwl 0x12345678,%ecx
// CHECK: pushl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xff,0xb4,0xcb,0xef,0xbe,0xad,0xde]
pushl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: pushw 32493
// CHECK: encoding: [0x66,0xff,0x35,0xed,0x7e,0x00,0x00]
pushw 0x7eed
// CHECK: pushl 3133065982
// CHECK: encoding: [0xff,0x35,0xfe,0xca,0xbe,0xba]
pushl 0xbabecafe
// CHECK: pushl 305419896
// CHECK: encoding: [0xff,0x35,0x78,0x56,0x34,0x12]
pushl 0x12345678
// CHECK: popl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x8f,0x84,0xcb,0xef,0xbe,0xad,0xde]
popl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: popw 32493
// CHECK: encoding: [0x66,0x8f,0x05,0xed,0x7e,0x00,0x00]
popw 0x7eed
// CHECK: popl 3133065982
// CHECK: encoding: [0x8f,0x05,0xfe,0xca,0xbe,0xba]
popl 0xbabecafe
// CHECK: popl 305419896
// CHECK: encoding: [0x8f,0x05,0x78,0x56,0x34,0x12]
popl 0x12345678
// CHECK: clc
// CHECK: encoding: [0xf8]
clc
// CHECK: cld
// CHECK: encoding: [0xfc]
cld
// CHECK: cli
// CHECK: encoding: [0xfa]
cli
// CHECK: clts
// CHECK: encoding: [0x0f,0x06]
clts
// CHECK: cmc
// CHECK: encoding: [0xf5]
cmc
// CHECK: lahf
// CHECK: encoding: [0x9f]
lahf
// CHECK: sahf
// CHECK: encoding: [0x9e]
sahf
// CHECK: stc
// CHECK: encoding: [0xf9]
stc
// CHECK: std
// CHECK: encoding: [0xfd]
std
// CHECK: sti
// CHECK: encoding: [0xfb]
sti
// CHECK: salc
// CHECK: encoding: [0xd6]
salc
// CHECK: addb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x84,0xcb,0xef,0xbe,0xad,0xde,0xfe]
addb $0xfe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: addb $254, 69
// CHECK: encoding: [0x80,0x05,0x45,0x00,0x00,0x00,0xfe]
addb $0xfe,0x45
// CHECK: addb $254, 32493
// CHECK: encoding: [0x80,0x05,0xed,0x7e,0x00,0x00,0xfe]
addb $0xfe,0x7eed
// CHECK: addb $254, 3133065982
// CHECK: encoding: [0x80,0x05,0xfe,0xca,0xbe,0xba,0xfe]
addb $0xfe,0xbabecafe
// CHECK: addb $254, 305419896
// CHECK: encoding: [0x80,0x05,0x78,0x56,0x34,0x12,0xfe]
addb $0xfe,0x12345678
// CHECK: addb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x84,0xcb,0xef,0xbe,0xad,0xde,0x7f]
addb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: addb $127, 69
// CHECK: encoding: [0x80,0x05,0x45,0x00,0x00,0x00,0x7f]
addb $0x7f,0x45
// CHECK: addb $127, 32493
// CHECK: encoding: [0x80,0x05,0xed,0x7e,0x00,0x00,0x7f]
addb $0x7f,0x7eed
// CHECK: addb $127, 3133065982
// CHECK: encoding: [0x80,0x05,0xfe,0xca,0xbe,0xba,0x7f]
addb $0x7f,0xbabecafe
// CHECK: addb $127, 305419896
// CHECK: encoding: [0x80,0x05,0x78,0x56,0x34,0x12,0x7f]
addb $0x7f,0x12345678
// CHECK: addw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x81,0x84,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
addw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: addw $31438, 69
// CHECK: encoding: [0x66,0x81,0x05,0x45,0x00,0x00,0x00,0xce,0x7a]
addw $0x7ace,0x45
// CHECK: addw $31438, 32493
// CHECK: encoding: [0x66,0x81,0x05,0xed,0x7e,0x00,0x00,0xce,0x7a]
addw $0x7ace,0x7eed
// CHECK: addw $31438, 3133065982
// CHECK: encoding: [0x66,0x81,0x05,0xfe,0xca,0xbe,0xba,0xce,0x7a]
addw $0x7ace,0xbabecafe
// CHECK: addw $31438, 305419896
// CHECK: encoding: [0x66,0x81,0x05,0x78,0x56,0x34,0x12,0xce,0x7a]
addw $0x7ace,0x12345678
// CHECK: addl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0x84,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
addl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: addl $2063514302, 69
// CHECK: encoding: [0x81,0x05,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
addl $0x7afebabe,0x45
// CHECK: addl $2063514302, 32493
// CHECK: encoding: [0x81,0x05,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
addl $0x7afebabe,0x7eed
// CHECK: addl $2063514302, 3133065982
// CHECK: encoding: [0x81,0x05,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
addl $0x7afebabe,0xbabecafe
// CHECK: addl $2063514302, 305419896
// CHECK: encoding: [0x81,0x05,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
addl $0x7afebabe,0x12345678
// CHECK: addl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0x84,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
addl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: addl $324478056, 69
// CHECK: encoding: [0x81,0x05,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
addl $0x13572468,0x45
// CHECK: addl $324478056, 32493
// CHECK: encoding: [0x81,0x05,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
addl $0x13572468,0x7eed
// CHECK: addl $324478056, 3133065982
// CHECK: encoding: [0x81,0x05,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
addl $0x13572468,0xbabecafe
// CHECK: addl $324478056, 305419896
// CHECK: encoding: [0x81,0x05,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
addl $0x13572468,0x12345678
// CHECK: incl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xff,0x84,0xcb,0xef,0xbe,0xad,0xde]
incl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: incw 32493
// CHECK: encoding: [0x66,0xff,0x05,0xed,0x7e,0x00,0x00]
incw 0x7eed
// CHECK: incl 3133065982
// CHECK: encoding: [0xff,0x05,0xfe,0xca,0xbe,0xba]
incl 0xbabecafe
// CHECK: incl 305419896
// CHECK: encoding: [0xff,0x05,0x78,0x56,0x34,0x12]
incl 0x12345678
// CHECK: subb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0xac,0xcb,0xef,0xbe,0xad,0xde,0xfe]
subb $0xfe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: subb $254, 69
// CHECK: encoding: [0x80,0x2d,0x45,0x00,0x00,0x00,0xfe]
subb $0xfe,0x45
// CHECK: subb $254, 32493
// CHECK: encoding: [0x80,0x2d,0xed,0x7e,0x00,0x00,0xfe]
subb $0xfe,0x7eed
// CHECK: subb $254, 3133065982
// CHECK: encoding: [0x80,0x2d,0xfe,0xca,0xbe,0xba,0xfe]
subb $0xfe,0xbabecafe
// CHECK: subb $254, 305419896
// CHECK: encoding: [0x80,0x2d,0x78,0x56,0x34,0x12,0xfe]
subb $0xfe,0x12345678
// CHECK: subb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0xac,0xcb,0xef,0xbe,0xad,0xde,0x7f]
subb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: subb $127, 69
// CHECK: encoding: [0x80,0x2d,0x45,0x00,0x00,0x00,0x7f]
subb $0x7f,0x45
// CHECK: subb $127, 32493
// CHECK: encoding: [0x80,0x2d,0xed,0x7e,0x00,0x00,0x7f]
subb $0x7f,0x7eed
// CHECK: subb $127, 3133065982
// CHECK: encoding: [0x80,0x2d,0xfe,0xca,0xbe,0xba,0x7f]
subb $0x7f,0xbabecafe
// CHECK: subb $127, 305419896
// CHECK: encoding: [0x80,0x2d,0x78,0x56,0x34,0x12,0x7f]
subb $0x7f,0x12345678
// CHECK: subw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x81,0xac,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
subw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: subw $31438, 69
// CHECK: encoding: [0x66,0x81,0x2d,0x45,0x00,0x00,0x00,0xce,0x7a]
subw $0x7ace,0x45
// CHECK: subw $31438, 32493
// CHECK: encoding: [0x66,0x81,0x2d,0xed,0x7e,0x00,0x00,0xce,0x7a]
subw $0x7ace,0x7eed
// CHECK: subw $31438, 3133065982
// CHECK: encoding: [0x66,0x81,0x2d,0xfe,0xca,0xbe,0xba,0xce,0x7a]
subw $0x7ace,0xbabecafe
// CHECK: subw $31438, 305419896
// CHECK: encoding: [0x66,0x81,0x2d,0x78,0x56,0x34,0x12,0xce,0x7a]
subw $0x7ace,0x12345678
// CHECK: subl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0xac,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
subl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: subl $2063514302, 69
// CHECK: encoding: [0x81,0x2d,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
subl $0x7afebabe,0x45
// CHECK: subl $2063514302, 32493
// CHECK: encoding: [0x81,0x2d,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
subl $0x7afebabe,0x7eed
// CHECK: subl $2063514302, 3133065982
// CHECK: encoding: [0x81,0x2d,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
subl $0x7afebabe,0xbabecafe
// CHECK: subl $2063514302, 305419896
// CHECK: encoding: [0x81,0x2d,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
subl $0x7afebabe,0x12345678
// CHECK: subl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0xac,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
subl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: subl $324478056, 69
// CHECK: encoding: [0x81,0x2d,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
subl $0x13572468,0x45
// CHECK: subl $324478056, 32493
// CHECK: encoding: [0x81,0x2d,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
subl $0x13572468,0x7eed
// CHECK: subl $324478056, 3133065982
// CHECK: encoding: [0x81,0x2d,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
subl $0x13572468,0xbabecafe
// CHECK: subl $324478056, 305419896
// CHECK: encoding: [0x81,0x2d,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
subl $0x13572468,0x12345678
// CHECK: decl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xff,0x8c,0xcb,0xef,0xbe,0xad,0xde]
decl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: decw 32493
// CHECK: encoding: [0x66,0xff,0x0d,0xed,0x7e,0x00,0x00]
decw 0x7eed
// CHECK: decl 3133065982
// CHECK: encoding: [0xff,0x0d,0xfe,0xca,0xbe,0xba]
decl 0xbabecafe
// CHECK: decl 305419896
// CHECK: encoding: [0xff,0x0d,0x78,0x56,0x34,0x12]
decl 0x12345678
// CHECK: sbbb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x9c,0xcb,0xef,0xbe,0xad,0xde,0xfe]
sbbb $0xfe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: sbbb $254, 69
// CHECK: encoding: [0x80,0x1d,0x45,0x00,0x00,0x00,0xfe]
sbbb $0xfe,0x45
// CHECK: sbbb $254, 32493
// CHECK: encoding: [0x80,0x1d,0xed,0x7e,0x00,0x00,0xfe]
sbbb $0xfe,0x7eed
// CHECK: sbbb $254, 3133065982
// CHECK: encoding: [0x80,0x1d,0xfe,0xca,0xbe,0xba,0xfe]
sbbb $0xfe,0xbabecafe
// CHECK: sbbb $254, 305419896
// CHECK: encoding: [0x80,0x1d,0x78,0x56,0x34,0x12,0xfe]
sbbb $0xfe,0x12345678
// CHECK: sbbb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x9c,0xcb,0xef,0xbe,0xad,0xde,0x7f]
sbbb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: sbbb $127, 69
// CHECK: encoding: [0x80,0x1d,0x45,0x00,0x00,0x00,0x7f]
sbbb $0x7f,0x45
// CHECK: sbbb $127, 32493
// CHECK: encoding: [0x80,0x1d,0xed,0x7e,0x00,0x00,0x7f]
sbbb $0x7f,0x7eed
// CHECK: sbbb $127, 3133065982
// CHECK: encoding: [0x80,0x1d,0xfe,0xca,0xbe,0xba,0x7f]
sbbb $0x7f,0xbabecafe
// CHECK: sbbb $127, 305419896
// CHECK: encoding: [0x80,0x1d,0x78,0x56,0x34,0x12,0x7f]
sbbb $0x7f,0x12345678
// CHECK: sbbw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x81,0x9c,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
sbbw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: sbbw $31438, 69
// CHECK: encoding: [0x66,0x81,0x1d,0x45,0x00,0x00,0x00,0xce,0x7a]
sbbw $0x7ace,0x45
// CHECK: sbbw $31438, 32493
// CHECK: encoding: [0x66,0x81,0x1d,0xed,0x7e,0x00,0x00,0xce,0x7a]
sbbw $0x7ace,0x7eed
// CHECK: sbbw $31438, 3133065982
// CHECK: encoding: [0x66,0x81,0x1d,0xfe,0xca,0xbe,0xba,0xce,0x7a]
sbbw $0x7ace,0xbabecafe
// CHECK: sbbw $31438, 305419896
// CHECK: encoding: [0x66,0x81,0x1d,0x78,0x56,0x34,0x12,0xce,0x7a]
sbbw $0x7ace,0x12345678
// CHECK: sbbl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0x9c,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
sbbl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: sbbl $2063514302, 69
// CHECK: encoding: [0x81,0x1d,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
sbbl $0x7afebabe,0x45
// CHECK: sbbl $2063514302, 32493
// CHECK: encoding: [0x81,0x1d,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
sbbl $0x7afebabe,0x7eed
// CHECK: sbbl $2063514302, 3133065982
// CHECK: encoding: [0x81,0x1d,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
sbbl $0x7afebabe,0xbabecafe
// CHECK: sbbl $2063514302, 305419896
// CHECK: encoding: [0x81,0x1d,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
sbbl $0x7afebabe,0x12345678
// CHECK: sbbl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0x9c,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
sbbl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: sbbl $324478056, 69
// CHECK: encoding: [0x81,0x1d,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
sbbl $0x13572468,0x45
// CHECK: sbbl $324478056, 32493
// CHECK: encoding: [0x81,0x1d,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
sbbl $0x13572468,0x7eed
// CHECK: sbbl $324478056, 3133065982
// CHECK: encoding: [0x81,0x1d,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
sbbl $0x13572468,0xbabecafe
// CHECK: sbbl $324478056, 305419896
// CHECK: encoding: [0x81,0x1d,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
sbbl $0x13572468,0x12345678
// CHECK: cmpb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0xbc,0xcb,0xef,0xbe,0xad,0xde,0xfe]
cmpb $0xfe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: cmpb $254, 69
// CHECK: encoding: [0x80,0x3d,0x45,0x00,0x00,0x00,0xfe]
cmpb $0xfe,0x45
// CHECK: cmpb $254, 32493
// CHECK: encoding: [0x80,0x3d,0xed,0x7e,0x00,0x00,0xfe]
cmpb $0xfe,0x7eed
// CHECK: cmpb $254, 3133065982
// CHECK: encoding: [0x80,0x3d,0xfe,0xca,0xbe,0xba,0xfe]
cmpb $0xfe,0xbabecafe
// CHECK: cmpb $254, 305419896
// CHECK: encoding: [0x80,0x3d,0x78,0x56,0x34,0x12,0xfe]
cmpb $0xfe,0x12345678
// CHECK: cmpb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0xbc,0xcb,0xef,0xbe,0xad,0xde,0x7f]
cmpb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: cmpb $127, 69
// CHECK: encoding: [0x80,0x3d,0x45,0x00,0x00,0x00,0x7f]
cmpb $0x7f,0x45
// CHECK: cmpb $127, 32493
// CHECK: encoding: [0x80,0x3d,0xed,0x7e,0x00,0x00,0x7f]
cmpb $0x7f,0x7eed
// CHECK: cmpb $127, 3133065982
// CHECK: encoding: [0x80,0x3d,0xfe,0xca,0xbe,0xba,0x7f]
cmpb $0x7f,0xbabecafe
// CHECK: cmpb $127, 305419896
// CHECK: encoding: [0x80,0x3d,0x78,0x56,0x34,0x12,0x7f]
cmpb $0x7f,0x12345678
// CHECK: cmpw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x81,0xbc,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
cmpw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: cmpw $31438, 69
// CHECK: encoding: [0x66,0x81,0x3d,0x45,0x00,0x00,0x00,0xce,0x7a]
cmpw $0x7ace,0x45
// CHECK: cmpw $31438, 32493
// CHECK: encoding: [0x66,0x81,0x3d,0xed,0x7e,0x00,0x00,0xce,0x7a]
cmpw $0x7ace,0x7eed
// CHECK: cmpw $31438, 3133065982
// CHECK: encoding: [0x66,0x81,0x3d,0xfe,0xca,0xbe,0xba,0xce,0x7a]
cmpw $0x7ace,0xbabecafe
// CHECK: cmpw $31438, 305419896
// CHECK: encoding: [0x66,0x81,0x3d,0x78,0x56,0x34,0x12,0xce,0x7a]
cmpw $0x7ace,0x12345678
// CHECK: cmpl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0xbc,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
cmpl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: cmpl $2063514302, 69
// CHECK: encoding: [0x81,0x3d,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
cmpl $0x7afebabe,0x45
// CHECK: cmpl $2063514302, 32493
// CHECK: encoding: [0x81,0x3d,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
cmpl $0x7afebabe,0x7eed
// CHECK: cmpl $2063514302, 3133065982
// CHECK: encoding: [0x81,0x3d,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
cmpl $0x7afebabe,0xbabecafe
// CHECK: cmpl $2063514302, 305419896
// CHECK: encoding: [0x81,0x3d,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
cmpl $0x7afebabe,0x12345678
// CHECK: cmpl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0xbc,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
cmpl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: cmpl $324478056, 69
// CHECK: encoding: [0x81,0x3d,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
cmpl $0x13572468,0x45
// CHECK: cmpl $324478056, 32493
// CHECK: encoding: [0x81,0x3d,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
cmpl $0x13572468,0x7eed
// CHECK: cmpl $324478056, 3133065982
// CHECK: encoding: [0x81,0x3d,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
cmpl $0x13572468,0xbabecafe
// CHECK: cmpl $324478056, 305419896
// CHECK: encoding: [0x81,0x3d,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
cmpl $0x13572468,0x12345678
// CHECK: testb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf6,0x84,0xcb,0xef,0xbe,0xad,0xde,0x7f]
testb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: testb $127, 69
// CHECK: encoding: [0xf6,0x05,0x45,0x00,0x00,0x00,0x7f]
testb $0x7f,0x45
// CHECK: testb $127, 32493
// CHECK: encoding: [0xf6,0x05,0xed,0x7e,0x00,0x00,0x7f]
testb $0x7f,0x7eed
// CHECK: testb $127, 3133065982
// CHECK: encoding: [0xf6,0x05,0xfe,0xca,0xbe,0xba,0x7f]
testb $0x7f,0xbabecafe
// CHECK: testb $127, 305419896
// CHECK: encoding: [0xf6,0x05,0x78,0x56,0x34,0x12,0x7f]
testb $0x7f,0x12345678
// CHECK: testw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0xf7,0x84,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
testw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: testw $31438, 69
// CHECK: encoding: [0x66,0xf7,0x05,0x45,0x00,0x00,0x00,0xce,0x7a]
testw $0x7ace,0x45
// CHECK: testw $31438, 32493
// CHECK: encoding: [0x66,0xf7,0x05,0xed,0x7e,0x00,0x00,0xce,0x7a]
testw $0x7ace,0x7eed
// CHECK: testw $31438, 3133065982
// CHECK: encoding: [0x66,0xf7,0x05,0xfe,0xca,0xbe,0xba,0xce,0x7a]
testw $0x7ace,0xbabecafe
// CHECK: testw $31438, 305419896
// CHECK: encoding: [0x66,0xf7,0x05,0x78,0x56,0x34,0x12,0xce,0x7a]
testw $0x7ace,0x12345678
// CHECK: testl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf7,0x84,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
testl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: testl $2063514302, 69
// CHECK: encoding: [0xf7,0x05,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
testl $0x7afebabe,0x45
// CHECK: testl $2063514302, 32493
// CHECK: encoding: [0xf7,0x05,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
testl $0x7afebabe,0x7eed
// CHECK: testl $2063514302, 3133065982
// CHECK: encoding: [0xf7,0x05,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
testl $0x7afebabe,0xbabecafe
// CHECK: testl $2063514302, 305419896
// CHECK: encoding: [0xf7,0x05,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
testl $0x7afebabe,0x12345678
// CHECK: testl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf7,0x84,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
testl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: testl $324478056, 69
// CHECK: encoding: [0xf7,0x05,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
testl $0x13572468,0x45
// CHECK: testl $324478056, 32493
// CHECK: encoding: [0xf7,0x05,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
testl $0x13572468,0x7eed
// CHECK: testl $324478056, 3133065982
// CHECK: encoding: [0xf7,0x05,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
testl $0x13572468,0xbabecafe
// CHECK: testl $324478056, 305419896
// CHECK: encoding: [0xf7,0x05,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
testl $0x13572468,0x12345678
// CHECK: andb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0xa4,0xcb,0xef,0xbe,0xad,0xde,0xfe]
andb $0xfe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: andb $254, 69
// CHECK: encoding: [0x80,0x25,0x45,0x00,0x00,0x00,0xfe]
andb $0xfe,0x45
// CHECK: andb $254, 32493
// CHECK: encoding: [0x80,0x25,0xed,0x7e,0x00,0x00,0xfe]
andb $0xfe,0x7eed
// CHECK: andb $254, 3133065982
// CHECK: encoding: [0x80,0x25,0xfe,0xca,0xbe,0xba,0xfe]
andb $0xfe,0xbabecafe
// CHECK: andb $254, 305419896
// CHECK: encoding: [0x80,0x25,0x78,0x56,0x34,0x12,0xfe]
andb $0xfe,0x12345678
// CHECK: andb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0xa4,0xcb,0xef,0xbe,0xad,0xde,0x7f]
andb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: andb $127, 69
// CHECK: encoding: [0x80,0x25,0x45,0x00,0x00,0x00,0x7f]
andb $0x7f,0x45
// CHECK: andb $127, 32493
// CHECK: encoding: [0x80,0x25,0xed,0x7e,0x00,0x00,0x7f]
andb $0x7f,0x7eed
// CHECK: andb $127, 3133065982
// CHECK: encoding: [0x80,0x25,0xfe,0xca,0xbe,0xba,0x7f]
andb $0x7f,0xbabecafe
// CHECK: andb $127, 305419896
// CHECK: encoding: [0x80,0x25,0x78,0x56,0x34,0x12,0x7f]
andb $0x7f,0x12345678
// CHECK: andw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x81,0xa4,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
andw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: andw $31438, 69
// CHECK: encoding: [0x66,0x81,0x25,0x45,0x00,0x00,0x00,0xce,0x7a]
andw $0x7ace,0x45
// CHECK: andw $31438, 32493
// CHECK: encoding: [0x66,0x81,0x25,0xed,0x7e,0x00,0x00,0xce,0x7a]
andw $0x7ace,0x7eed
// CHECK: andw $31438, 3133065982
// CHECK: encoding: [0x66,0x81,0x25,0xfe,0xca,0xbe,0xba,0xce,0x7a]
andw $0x7ace,0xbabecafe
// CHECK: andw $31438, 305419896
// CHECK: encoding: [0x66,0x81,0x25,0x78,0x56,0x34,0x12,0xce,0x7a]
andw $0x7ace,0x12345678
// CHECK: andl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0xa4,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
andl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: andl $2063514302, 69
// CHECK: encoding: [0x81,0x25,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
andl $0x7afebabe,0x45
// CHECK: andl $2063514302, 32493
// CHECK: encoding: [0x81,0x25,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
andl $0x7afebabe,0x7eed
// CHECK: andl $2063514302, 3133065982
// CHECK: encoding: [0x81,0x25,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
andl $0x7afebabe,0xbabecafe
// CHECK: andl $2063514302, 305419896
// CHECK: encoding: [0x81,0x25,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
andl $0x7afebabe,0x12345678
// CHECK: andl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0xa4,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
andl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: andl $324478056, 69
// CHECK: encoding: [0x81,0x25,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
andl $0x13572468,0x45
// CHECK: andl $324478056, 32493
// CHECK: encoding: [0x81,0x25,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
andl $0x13572468,0x7eed
// CHECK: andl $324478056, 3133065982
// CHECK: encoding: [0x81,0x25,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
andl $0x13572468,0xbabecafe
// CHECK: andl $324478056, 305419896
// CHECK: encoding: [0x81,0x25,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
andl $0x13572468,0x12345678
// CHECK: orb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x8c,0xcb,0xef,0xbe,0xad,0xde,0xfe]
orb $0xfe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: orb $254, 69
// CHECK: encoding: [0x80,0x0d,0x45,0x00,0x00,0x00,0xfe]
orb $0xfe,0x45
// CHECK: orb $254, 32493
// CHECK: encoding: [0x80,0x0d,0xed,0x7e,0x00,0x00,0xfe]
orb $0xfe,0x7eed
// CHECK: orb $254, 3133065982
// CHECK: encoding: [0x80,0x0d,0xfe,0xca,0xbe,0xba,0xfe]
orb $0xfe,0xbabecafe
// CHECK: orb $254, 305419896
// CHECK: encoding: [0x80,0x0d,0x78,0x56,0x34,0x12,0xfe]
orb $0xfe,0x12345678
// CHECK: orb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x8c,0xcb,0xef,0xbe,0xad,0xde,0x7f]
orb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: orb $127, 69
// CHECK: encoding: [0x80,0x0d,0x45,0x00,0x00,0x00,0x7f]
orb $0x7f,0x45
// CHECK: orb $127, 32493
// CHECK: encoding: [0x80,0x0d,0xed,0x7e,0x00,0x00,0x7f]
orb $0x7f,0x7eed
// CHECK: orb $127, 3133065982
// CHECK: encoding: [0x80,0x0d,0xfe,0xca,0xbe,0xba,0x7f]
orb $0x7f,0xbabecafe
// CHECK: orb $127, 305419896
// CHECK: encoding: [0x80,0x0d,0x78,0x56,0x34,0x12,0x7f]
orb $0x7f,0x12345678
// CHECK: orw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x81,0x8c,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
orw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: orw $31438, 69
// CHECK: encoding: [0x66,0x81,0x0d,0x45,0x00,0x00,0x00,0xce,0x7a]
orw $0x7ace,0x45
// CHECK: orw $31438, 32493
// CHECK: encoding: [0x66,0x81,0x0d,0xed,0x7e,0x00,0x00,0xce,0x7a]
orw $0x7ace,0x7eed
// CHECK: orw $31438, 3133065982
// CHECK: encoding: [0x66,0x81,0x0d,0xfe,0xca,0xbe,0xba,0xce,0x7a]
orw $0x7ace,0xbabecafe
// CHECK: orw $31438, 305419896
// CHECK: encoding: [0x66,0x81,0x0d,0x78,0x56,0x34,0x12,0xce,0x7a]
orw $0x7ace,0x12345678
// CHECK: orl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0x8c,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
orl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: orl $2063514302, 69
// CHECK: encoding: [0x81,0x0d,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
orl $0x7afebabe,0x45
// CHECK: orl $2063514302, 32493
// CHECK: encoding: [0x81,0x0d,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
orl $0x7afebabe,0x7eed
// CHECK: orl $2063514302, 3133065982
// CHECK: encoding: [0x81,0x0d,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
orl $0x7afebabe,0xbabecafe
// CHECK: orl $2063514302, 305419896
// CHECK: encoding: [0x81,0x0d,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
orl $0x7afebabe,0x12345678
// CHECK: orl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0x8c,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
orl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: orl $324478056, 69
// CHECK: encoding: [0x81,0x0d,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
orl $0x13572468,0x45
// CHECK: orl $324478056, 32493
// CHECK: encoding: [0x81,0x0d,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
orl $0x13572468,0x7eed
// CHECK: orl $324478056, 3133065982
// CHECK: encoding: [0x81,0x0d,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
orl $0x13572468,0xbabecafe
// CHECK: orl $324478056, 305419896
// CHECK: encoding: [0x81,0x0d,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
orl $0x13572468,0x12345678
// CHECK: xorb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0xb4,0xcb,0xef,0xbe,0xad,0xde,0xfe]
xorb $0xfe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: xorb $254, 69
// CHECK: encoding: [0x80,0x35,0x45,0x00,0x00,0x00,0xfe]
xorb $0xfe,0x45
// CHECK: xorb $254, 32493
// CHECK: encoding: [0x80,0x35,0xed,0x7e,0x00,0x00,0xfe]
xorb $0xfe,0x7eed
// CHECK: xorb $254, 3133065982
// CHECK: encoding: [0x80,0x35,0xfe,0xca,0xbe,0xba,0xfe]
xorb $0xfe,0xbabecafe
// CHECK: xorb $254, 305419896
// CHECK: encoding: [0x80,0x35,0x78,0x56,0x34,0x12,0xfe]
xorb $0xfe,0x12345678
// CHECK: xorb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0xb4,0xcb,0xef,0xbe,0xad,0xde,0x7f]
xorb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: xorb $127, 69
// CHECK: encoding: [0x80,0x35,0x45,0x00,0x00,0x00,0x7f]
xorb $0x7f,0x45
// CHECK: xorb $127, 32493
// CHECK: encoding: [0x80,0x35,0xed,0x7e,0x00,0x00,0x7f]
xorb $0x7f,0x7eed
// CHECK: xorb $127, 3133065982
// CHECK: encoding: [0x80,0x35,0xfe,0xca,0xbe,0xba,0x7f]
xorb $0x7f,0xbabecafe
// CHECK: xorb $127, 305419896
// CHECK: encoding: [0x80,0x35,0x78,0x56,0x34,0x12,0x7f]
xorb $0x7f,0x12345678
// CHECK: xorw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x81,0xb4,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
xorw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: xorw $31438, 69
// CHECK: encoding: [0x66,0x81,0x35,0x45,0x00,0x00,0x00,0xce,0x7a]
xorw $0x7ace,0x45
// CHECK: xorw $31438, 32493
// CHECK: encoding: [0x66,0x81,0x35,0xed,0x7e,0x00,0x00,0xce,0x7a]
xorw $0x7ace,0x7eed
// CHECK: xorw $31438, 3133065982
// CHECK: encoding: [0x66,0x81,0x35,0xfe,0xca,0xbe,0xba,0xce,0x7a]
xorw $0x7ace,0xbabecafe
// CHECK: xorw $31438, 305419896
// CHECK: encoding: [0x66,0x81,0x35,0x78,0x56,0x34,0x12,0xce,0x7a]
xorw $0x7ace,0x12345678
// CHECK: xorl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0xb4,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
xorl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: xorl $2063514302, 69
// CHECK: encoding: [0x81,0x35,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
xorl $0x7afebabe,0x45
// CHECK: xorl $2063514302, 32493
// CHECK: encoding: [0x81,0x35,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
xorl $0x7afebabe,0x7eed
// CHECK: xorl $2063514302, 3133065982
// CHECK: encoding: [0x81,0x35,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
xorl $0x7afebabe,0xbabecafe
// CHECK: xorl $2063514302, 305419896
// CHECK: encoding: [0x81,0x35,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
xorl $0x7afebabe,0x12345678
// CHECK: xorl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0xb4,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
xorl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: xorl $324478056, 69
// CHECK: encoding: [0x81,0x35,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
xorl $0x13572468,0x45
// CHECK: xorl $324478056, 32493
// CHECK: encoding: [0x81,0x35,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
xorl $0x13572468,0x7eed
// CHECK: xorl $324478056, 3133065982
// CHECK: encoding: [0x81,0x35,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
xorl $0x13572468,0xbabecafe
// CHECK: xorl $324478056, 305419896
// CHECK: encoding: [0x81,0x35,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
xorl $0x13572468,0x12345678
// CHECK: adcb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x94,0xcb,0xef,0xbe,0xad,0xde,0xfe]
adcb $0xfe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: adcb $254, 69
// CHECK: encoding: [0x80,0x15,0x45,0x00,0x00,0x00,0xfe]
adcb $0xfe,0x45
// CHECK: adcb $254, 32493
// CHECK: encoding: [0x80,0x15,0xed,0x7e,0x00,0x00,0xfe]
adcb $0xfe,0x7eed
// CHECK: adcb $254, 3133065982
// CHECK: encoding: [0x80,0x15,0xfe,0xca,0xbe,0xba,0xfe]
adcb $0xfe,0xbabecafe
// CHECK: adcb $254, 305419896
// CHECK: encoding: [0x80,0x15,0x78,0x56,0x34,0x12,0xfe]
adcb $0xfe,0x12345678
// CHECK: adcb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x94,0xcb,0xef,0xbe,0xad,0xde,0x7f]
adcb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: adcb $127, 69
// CHECK: encoding: [0x80,0x15,0x45,0x00,0x00,0x00,0x7f]
adcb $0x7f,0x45
// CHECK: adcb $127, 32493
// CHECK: encoding: [0x80,0x15,0xed,0x7e,0x00,0x00,0x7f]
adcb $0x7f,0x7eed
// CHECK: adcb $127, 3133065982
// CHECK: encoding: [0x80,0x15,0xfe,0xca,0xbe,0xba,0x7f]
adcb $0x7f,0xbabecafe
// CHECK: adcb $127, 305419896
// CHECK: encoding: [0x80,0x15,0x78,0x56,0x34,0x12,0x7f]
adcb $0x7f,0x12345678
// CHECK: adcw $31438, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x81,0x94,0xcb,0xef,0xbe,0xad,0xde,0xce,0x7a]
adcw $0x7ace,0xdeadbeef(%ebx,%ecx,8)
// CHECK: adcw $31438, 69
// CHECK: encoding: [0x66,0x81,0x15,0x45,0x00,0x00,0x00,0xce,0x7a]
adcw $0x7ace,0x45
// CHECK: adcw $31438, 32493
// CHECK: encoding: [0x66,0x81,0x15,0xed,0x7e,0x00,0x00,0xce,0x7a]
adcw $0x7ace,0x7eed
// CHECK: adcw $31438, 3133065982
// CHECK: encoding: [0x66,0x81,0x15,0xfe,0xca,0xbe,0xba,0xce,0x7a]
adcw $0x7ace,0xbabecafe
// CHECK: adcw $31438, 305419896
// CHECK: encoding: [0x66,0x81,0x15,0x78,0x56,0x34,0x12,0xce,0x7a]
adcw $0x7ace,0x12345678
// CHECK: adcl $2063514302, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0x94,0xcb,0xef,0xbe,0xad,0xde,0xbe,0xba,0xfe,0x7a]
adcl $0x7afebabe,0xdeadbeef(%ebx,%ecx,8)
// CHECK: adcl $2063514302, 69
// CHECK: encoding: [0x81,0x15,0x45,0x00,0x00,0x00,0xbe,0xba,0xfe,0x7a]
adcl $0x7afebabe,0x45
// CHECK: adcl $2063514302, 32493
// CHECK: encoding: [0x81,0x15,0xed,0x7e,0x00,0x00,0xbe,0xba,0xfe,0x7a]
adcl $0x7afebabe,0x7eed
// CHECK: adcl $2063514302, 3133065982
// CHECK: encoding: [0x81,0x15,0xfe,0xca,0xbe,0xba,0xbe,0xba,0xfe,0x7a]
adcl $0x7afebabe,0xbabecafe
// CHECK: adcl $2063514302, 305419896
// CHECK: encoding: [0x81,0x15,0x78,0x56,0x34,0x12,0xbe,0xba,0xfe,0x7a]
adcl $0x7afebabe,0x12345678
// CHECK: adcl $324478056, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x81,0x94,0xcb,0xef,0xbe,0xad,0xde,0x68,0x24,0x57,0x13]
adcl $0x13572468,0xdeadbeef(%ebx,%ecx,8)
// CHECK: adcl $324478056, 69
// CHECK: encoding: [0x81,0x15,0x45,0x00,0x00,0x00,0x68,0x24,0x57,0x13]
adcl $0x13572468,0x45
// CHECK: adcl $324478056, 32493
// CHECK: encoding: [0x81,0x15,0xed,0x7e,0x00,0x00,0x68,0x24,0x57,0x13]
adcl $0x13572468,0x7eed
// CHECK: adcl $324478056, 3133065982
// CHECK: encoding: [0x81,0x15,0xfe,0xca,0xbe,0xba,0x68,0x24,0x57,0x13]
adcl $0x13572468,0xbabecafe
// CHECK: adcl $324478056, 305419896
// CHECK: encoding: [0x81,0x15,0x78,0x56,0x34,0x12,0x68,0x24,0x57,0x13]
adcl $0x13572468,0x12345678
// CHECK: negl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf7,0x9c,0xcb,0xef,0xbe,0xad,0xde]
negl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: negw 32493
// CHECK: encoding: [0x66,0xf7,0x1d,0xed,0x7e,0x00,0x00]
negw 0x7eed
// CHECK: negl 3133065982
// CHECK: encoding: [0xf7,0x1d,0xfe,0xca,0xbe,0xba]
negl 0xbabecafe
// CHECK: negl 305419896
// CHECK: encoding: [0xf7,0x1d,0x78,0x56,0x34,0x12]
negl 0x12345678
// CHECK: notl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf7,0x94,0xcb,0xef,0xbe,0xad,0xde]
notl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: notw 32493
// CHECK: encoding: [0x66,0xf7,0x15,0xed,0x7e,0x00,0x00]
notw 0x7eed
// CHECK: notl 3133065982
// CHECK: encoding: [0xf7,0x15,0xfe,0xca,0xbe,0xba]
notl 0xbabecafe
// CHECK: notl 305419896
// CHECK: encoding: [0xf7,0x15,0x78,0x56,0x34,0x12]
notl 0x12345678
// CHECK: cbtw
// CHECK: encoding: [0x66,0x98]
cbtw
// CHECK: cwtl
// CHECK: encoding: [0x98]
cwtl
// CHECK: cwtd
// CHECK: encoding: [0x66,0x99]
cwtd
// CHECK: cltd
// CHECK: encoding: [0x99]
cltd
// CHECK: mull 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf7,0xa4,0xcb,0xef,0xbe,0xad,0xde]
mull 0xdeadbeef(%ebx,%ecx,8)
// CHECK: mulw 32493
// CHECK: encoding: [0x66,0xf7,0x25,0xed,0x7e,0x00,0x00]
mulw 0x7eed
// CHECK: mull 3133065982
// CHECK: encoding: [0xf7,0x25,0xfe,0xca,0xbe,0xba]
mull 0xbabecafe
// CHECK: mull 305419896
// CHECK: encoding: [0xf7,0x25,0x78,0x56,0x34,0x12]
mull 0x12345678
// CHECK: imull 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf7,0xac,0xcb,0xef,0xbe,0xad,0xde]
imull 0xdeadbeef(%ebx,%ecx,8)
// CHECK: imulw 32493
// CHECK: encoding: [0x66,0xf7,0x2d,0xed,0x7e,0x00,0x00]
imulw 0x7eed
// CHECK: imull 3133065982
// CHECK: encoding: [0xf7,0x2d,0xfe,0xca,0xbe,0xba]
imull 0xbabecafe
// CHECK: imull 305419896
// CHECK: encoding: [0xf7,0x2d,0x78,0x56,0x34,0x12]
imull 0x12345678
// CHECK: divl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf7,0xb4,0xcb,0xef,0xbe,0xad,0xde]
divl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: divw 32493
// CHECK: encoding: [0x66,0xf7,0x35,0xed,0x7e,0x00,0x00]
divw 0x7eed
// CHECK: divl 3133065982
// CHECK: encoding: [0xf7,0x35,0xfe,0xca,0xbe,0xba]
divl 0xbabecafe
// CHECK: divl 305419896
// CHECK: encoding: [0xf7,0x35,0x78,0x56,0x34,0x12]
divl 0x12345678
// CHECK: idivl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf7,0xbc,0xcb,0xef,0xbe,0xad,0xde]
idivl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: idivw 32493
// CHECK: encoding: [0x66,0xf7,0x3d,0xed,0x7e,0x00,0x00]
idivw 0x7eed
// CHECK: idivl 3133065982
// CHECK: encoding: [0xf7,0x3d,0xfe,0xca,0xbe,0xba]
idivl 0xbabecafe
// CHECK: idivl 305419896
// CHECK: encoding: [0xf7,0x3d,0x78,0x56,0x34,0x12]
idivl 0x12345678
// CHECK: roll $0, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc1,0x84,0xcb,0xef,0xbe,0xad,0xde,0x00]
roll $0,0xdeadbeef(%ebx,%ecx,8)
// CHECK: roll $0, 69
// CHECK: encoding: [0xc1,0x05,0x45,0x00,0x00,0x00,0x00]
roll $0,0x45
// CHECK: roll $0, 32493
// CHECK: encoding: [0xc1,0x05,0xed,0x7e,0x00,0x00,0x00]
roll $0,0x7eed
// CHECK: roll $0, 3133065982
// CHECK: encoding: [0xc1,0x05,0xfe,0xca,0xbe,0xba,0x00]
roll $0,0xbabecafe
// CHECK: roll $0, 305419896
// CHECK: encoding: [0xc1,0x05,0x78,0x56,0x34,0x12,0x00]
roll $0,0x12345678
// CHECK: rolb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc0,0x84,0xcb,0xef,0xbe,0xad,0xde,0x7f]
rolb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: rolb $127, 69
// CHECK: encoding: [0xc0,0x05,0x45,0x00,0x00,0x00,0x7f]
rolb $0x7f,0x45
// CHECK: rolb $127, 32493
// CHECK: encoding: [0xc0,0x05,0xed,0x7e,0x00,0x00,0x7f]
rolb $0x7f,0x7eed
// CHECK: rolb $127, 3133065982
// CHECK: encoding: [0xc0,0x05,0xfe,0xca,0xbe,0xba,0x7f]
rolb $0x7f,0xbabecafe
// CHECK: rolb $127, 305419896
// CHECK: encoding: [0xc0,0x05,0x78,0x56,0x34,0x12,0x7f]
rolb $0x7f,0x12345678
// CHECK: roll 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xd1,0x84,0xcb,0xef,0xbe,0xad,0xde]
roll 0xdeadbeef(%ebx,%ecx,8)
// CHECK: rolw 32493
// CHECK: encoding: [0x66,0xd1,0x05,0xed,0x7e,0x00,0x00]
rolw 0x7eed
// CHECK: roll 3133065982
// CHECK: encoding: [0xd1,0x05,0xfe,0xca,0xbe,0xba]
roll 0xbabecafe
// CHECK: roll 305419896
// CHECK: encoding: [0xd1,0x05,0x78,0x56,0x34,0x12]
roll 0x12345678
// CHECK: rorl $0, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc1,0x8c,0xcb,0xef,0xbe,0xad,0xde,0x00]
rorl $0,0xdeadbeef(%ebx,%ecx,8)
// CHECK: rorl $0, 69
// CHECK: encoding: [0xc1,0x0d,0x45,0x00,0x00,0x00,0x00]
rorl $0,0x45
// CHECK: rorl $0, 32493
// CHECK: encoding: [0xc1,0x0d,0xed,0x7e,0x00,0x00,0x00]
rorl $0,0x7eed
// CHECK: rorl $0, 3133065982
// CHECK: encoding: [0xc1,0x0d,0xfe,0xca,0xbe,0xba,0x00]
rorl $0,0xbabecafe
// CHECK: rorl $0, 305419896
// CHECK: encoding: [0xc1,0x0d,0x78,0x56,0x34,0x12,0x00]
rorl $0,0x12345678
// CHECK: rorb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc0,0x8c,0xcb,0xef,0xbe,0xad,0xde,0x7f]
rorb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: rorb $127, 69
// CHECK: encoding: [0xc0,0x0d,0x45,0x00,0x00,0x00,0x7f]
rorb $0x7f,0x45
// CHECK: rorb $127, 32493
// CHECK: encoding: [0xc0,0x0d,0xed,0x7e,0x00,0x00,0x7f]
rorb $0x7f,0x7eed
// CHECK: rorb $127, 3133065982
// CHECK: encoding: [0xc0,0x0d,0xfe,0xca,0xbe,0xba,0x7f]
rorb $0x7f,0xbabecafe
// CHECK: rorb $127, 305419896
// CHECK: encoding: [0xc0,0x0d,0x78,0x56,0x34,0x12,0x7f]
rorb $0x7f,0x12345678
// CHECK: rorl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xd1,0x8c,0xcb,0xef,0xbe,0xad,0xde]
rorl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: rorw 32493
// CHECK: encoding: [0x66,0xd1,0x0d,0xed,0x7e,0x00,0x00]
rorw 0x7eed
// CHECK: rorl 3133065982
// CHECK: encoding: [0xd1,0x0d,0xfe,0xca,0xbe,0xba]
rorl 0xbabecafe
// CHECK: rorl 305419896
// CHECK: encoding: [0xd1,0x0d,0x78,0x56,0x34,0x12]
rorl 0x12345678
// CHECK: rorl $foo, (%ebx)
// INTEL: ror dword ptr [ebx], foo
// CHECK: encoding: [0xc1,0x0b,A]
// CHECK: fixup A - offset: 2, value: foo, kind: FK_Data_1
rorl $foo, (%ebx)
// CHECK: shll $0, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc1,0xa4,0xcb,0xef,0xbe,0xad,0xde,0x00]
sall $0,0xdeadbeef(%ebx,%ecx,8)
// CHECK: shll $0, 69
// CHECK: encoding: [0xc1,0x25,0x45,0x00,0x00,0x00,0x00]
sall $0,0x45
// CHECK: shll $0, 32493
// CHECK: encoding: [0xc1,0x25,0xed,0x7e,0x00,0x00,0x00]
sall $0,0x7eed
// CHECK: shll $0, 3133065982
// CHECK: encoding: [0xc1,0x25,0xfe,0xca,0xbe,0xba,0x00]
sall $0,0xbabecafe
// CHECK: shll $0, 305419896
// CHECK: encoding: [0xc1,0x25,0x78,0x56,0x34,0x12,0x00]
sall $0,0x12345678
// CHECK: shlb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc0,0xa4,0xcb,0xef,0xbe,0xad,0xde,0x7f]
salb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: shlb $127, 69
// CHECK: encoding: [0xc0,0x25,0x45,0x00,0x00,0x00,0x7f]
salb $0x7f,0x45
// CHECK: shlb $127, 32493
// CHECK: encoding: [0xc0,0x25,0xed,0x7e,0x00,0x00,0x7f]
salb $0x7f,0x7eed
// CHECK: shlb $127, 3133065982
// CHECK: encoding: [0xc0,0x25,0xfe,0xca,0xbe,0xba,0x7f]
salb $0x7f,0xbabecafe
// CHECK: shlb $127, 305419896
// CHECK: encoding: [0xc0,0x25,0x78,0x56,0x34,0x12,0x7f]
salb $0x7f,0x12345678
// CHECK: shll 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xd1,0xa4,0xcb,0xef,0xbe,0xad,0xde]
sall 0xdeadbeef(%ebx,%ecx,8)
// CHECK: shlw 32493
// CHECK: encoding: [0x66,0xd1,0x25,0xed,0x7e,0x00,0x00]
salw 0x7eed
// CHECK: shll 3133065982
// CHECK: encoding: [0xd1,0x25,0xfe,0xca,0xbe,0xba]
sall 0xbabecafe
// CHECK: shll 305419896
// CHECK: encoding: [0xd1,0x25,0x78,0x56,0x34,0x12]
sall 0x12345678
// CHECK: shll $0, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc1,0xa4,0xcb,0xef,0xbe,0xad,0xde,0x00]
shll $0,0xdeadbeef(%ebx,%ecx,8)
// CHECK: shll $0, 69
// CHECK: encoding: [0xc1,0x25,0x45,0x00,0x00,0x00,0x00]
shll $0,0x45
// CHECK: shll $0, 32493
// CHECK: encoding: [0xc1,0x25,0xed,0x7e,0x00,0x00,0x00]
shll $0,0x7eed
// CHECK: shll $0, 3133065982
// CHECK: encoding: [0xc1,0x25,0xfe,0xca,0xbe,0xba,0x00]
shll $0,0xbabecafe
// CHECK: shll $0, 305419896
// CHECK: encoding: [0xc1,0x25,0x78,0x56,0x34,0x12,0x00]
shll $0,0x12345678
// CHECK: shlb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc0,0xa4,0xcb,0xef,0xbe,0xad,0xde,0x7f]
shlb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: shlb $127, 69
// CHECK: encoding: [0xc0,0x25,0x45,0x00,0x00,0x00,0x7f]
shlb $0x7f,0x45
// CHECK: shlb $127, 32493
// CHECK: encoding: [0xc0,0x25,0xed,0x7e,0x00,0x00,0x7f]
shlb $0x7f,0x7eed
// CHECK: shlb $127, 3133065982
// CHECK: encoding: [0xc0,0x25,0xfe,0xca,0xbe,0xba,0x7f]
shlb $0x7f,0xbabecafe
// CHECK: shlb $127, 305419896
// CHECK: encoding: [0xc0,0x25,0x78,0x56,0x34,0x12,0x7f]
shlb $0x7f,0x12345678
// CHECK: shll 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xd1,0xa4,0xcb,0xef,0xbe,0xad,0xde]
shll 0xdeadbeef(%ebx,%ecx,8)
// CHECK: shlw 32493
// CHECK: encoding: [0x66,0xd1,0x25,0xed,0x7e,0x00,0x00]
shlw 0x7eed
// CHECK: shll 3133065982
// CHECK: encoding: [0xd1,0x25,0xfe,0xca,0xbe,0xba]
shll 0xbabecafe
// CHECK: shll 305419896
// CHECK: encoding: [0xd1,0x25,0x78,0x56,0x34,0x12]
shll 0x12345678
// CHECK: shrl $0, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc1,0xac,0xcb,0xef,0xbe,0xad,0xde,0x00]
shrl $0,0xdeadbeef(%ebx,%ecx,8)
// CHECK: shrl $0, 69
// CHECK: encoding: [0xc1,0x2d,0x45,0x00,0x00,0x00,0x00]
shrl $0,0x45
// CHECK: shrl $0, 32493
// CHECK: encoding: [0xc1,0x2d,0xed,0x7e,0x00,0x00,0x00]
shrl $0,0x7eed
// CHECK: shrl $0, 3133065982
// CHECK: encoding: [0xc1,0x2d,0xfe,0xca,0xbe,0xba,0x00]
shrl $0,0xbabecafe
// CHECK: shrl $0, 305419896
// CHECK: encoding: [0xc1,0x2d,0x78,0x56,0x34,0x12,0x00]
shrl $0,0x12345678
// CHECK: shrb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc0,0xac,0xcb,0xef,0xbe,0xad,0xde,0x7f]
shrb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: shrb $127, 69
// CHECK: encoding: [0xc0,0x2d,0x45,0x00,0x00,0x00,0x7f]
shrb $0x7f,0x45
// CHECK: shrb $127, 32493
// CHECK: encoding: [0xc0,0x2d,0xed,0x7e,0x00,0x00,0x7f]
shrb $0x7f,0x7eed
// CHECK: shrb $127, 3133065982
// CHECK: encoding: [0xc0,0x2d,0xfe,0xca,0xbe,0xba,0x7f]
shrb $0x7f,0xbabecafe
// CHECK: shrb $127, 305419896
// CHECK: encoding: [0xc0,0x2d,0x78,0x56,0x34,0x12,0x7f]
shrb $0x7f,0x12345678
// CHECK: shrl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xd1,0xac,0xcb,0xef,0xbe,0xad,0xde]
shrl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: shrw 32493
// CHECK: encoding: [0x66,0xd1,0x2d,0xed,0x7e,0x00,0x00]
shrw 0x7eed
// CHECK: shrl 3133065982
// CHECK: encoding: [0xd1,0x2d,0xfe,0xca,0xbe,0xba]
shrl 0xbabecafe
// CHECK: shrl 305419896
// CHECK: encoding: [0xd1,0x2d,0x78,0x56,0x34,0x12]
shrl 0x12345678
// CHECK: sarl $0, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc1,0xbc,0xcb,0xef,0xbe,0xad,0xde,0x00]
sarl $0,0xdeadbeef(%ebx,%ecx,8)
// CHECK: sarl $0, 69
// CHECK: encoding: [0xc1,0x3d,0x45,0x00,0x00,0x00,0x00]
sarl $0,0x45
// CHECK: sarl $0, 32493
// CHECK: encoding: [0xc1,0x3d,0xed,0x7e,0x00,0x00,0x00]
sarl $0,0x7eed
// CHECK: sarl $0, 3133065982
// CHECK: encoding: [0xc1,0x3d,0xfe,0xca,0xbe,0xba,0x00]
sarl $0,0xbabecafe
// CHECK: sarl $0, 305419896
// CHECK: encoding: [0xc1,0x3d,0x78,0x56,0x34,0x12,0x00]
sarl $0,0x12345678
// CHECK: sarb $127, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xc0,0xbc,0xcb,0xef,0xbe,0xad,0xde,0x7f]
sarb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: sarb $127, 69
// CHECK: encoding: [0xc0,0x3d,0x45,0x00,0x00,0x00,0x7f]
sarb $0x7f,0x45
// CHECK: sarb $127, 32493
// CHECK: encoding: [0xc0,0x3d,0xed,0x7e,0x00,0x00,0x7f]
sarb $0x7f,0x7eed
// CHECK: sarb $127, 3133065982
// CHECK: encoding: [0xc0,0x3d,0xfe,0xca,0xbe,0xba,0x7f]
sarb $0x7f,0xbabecafe
// CHECK: sarb $127, 305419896
// CHECK: encoding: [0xc0,0x3d,0x78,0x56,0x34,0x12,0x7f]
sarb $0x7f,0x12345678
// CHECK: sarl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xd1,0xbc,0xcb,0xef,0xbe,0xad,0xde]
sarl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: sarw 32493
// CHECK: encoding: [0x66,0xd1,0x3d,0xed,0x7e,0x00,0x00]
sarw 0x7eed
// CHECK: sarl 3133065982
// CHECK: encoding: [0xd1,0x3d,0xfe,0xca,0xbe,0xba]
sarl 0xbabecafe
// CHECK: sarl 305419896
// CHECK: encoding: [0xd1,0x3d,0x78,0x56,0x34,0x12]
sarl 0x12345678
// CHECK: calll *%ecx
// CHECK: encoding: [0xff,0xd1]
call *%ecx
// CHECK: notrack calll *%ecx
// CHECK: encoding: [0x3e,0xff,0xd1]
notrack call *%ecx
// CHECK: calll *3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xff,0x94,0xcb,0xef,0xbe,0xad,0xde]
call *0xdeadbeef(%ebx,%ecx,8)
// CHECK: notrack calll *3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x3e,0xff,0x94,0xcb,0xef,0xbe,0xad,0xde]
notrack call *0xdeadbeef(%ebx,%ecx,8)
// CHECK: calll *3135175374
// CHECK: encoding: [0xff,0x15,0xce,0xfa,0xde,0xba]
call *0xbadeface
// CHECK: calll *3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xff,0x94,0xcb,0xef,0xbe,0xad,0xde]
call *0xdeadbeef(%ebx,%ecx,8)
// CHECK: calll *3135175374
// CHECK: encoding: [0xff,0x15,0xce,0xfa,0xde,0xba]
call *0xbadeface
// CHECK: lcallw *32493
// CHECK: encoding: [0x66,0xff,0x1d,0xed,0x7e,0x00,0x00]
lcallw *0x7eed
// CHECK: jmpl *3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xff,0xa4,0xcb,0xef,0xbe,0xad,0xde]
jmp *0xdeadbeef(%ebx,%ecx,8)
// CHECK: jmpl *3135175374
// CHECK: encoding: [0xff,0x25,0xce,0xfa,0xde,0xba]
jmp *0xbadeface
// CHECK: jmpl *3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xff,0xa4,0xcb,0xef,0xbe,0xad,0xde]
jmp *0xdeadbeef(%ebx,%ecx,8)
// CHECK: notrack jmpl *3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x3e,0xff,0xa4,0xcb,0xef,0xbe,0xad,0xde]
notrack jmp *0xdeadbeef(%ebx,%ecx,8)
// CHECK: jmpl *3135175374
// CHECK: encoding: [0xff,0x25,0xce,0xfa,0xde,0xba]
jmp *0xbadeface
// CHECK: ljmpl *3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xff,0xac,0xcb,0xef,0xbe,0xad,0xde]
ljmpl *0xdeadbeef(%ebx,%ecx,8)
// CHECK: ljmpw *32493
// CHECK: encoding: [0x66,0xff,0x2d,0xed,0x7e,0x00,0x00]
ljmpw *0x7eed
// CHECK: ljmpl *3133065982
// CHECK: encoding: [0xff,0x2d,0xfe,0xca,0xbe,0xba]
ljmpl *0xbabecafe
// CHECK: ljmpl *305419896
// CHECK: encoding: [0xff,0x2d,0x78,0x56,0x34,0x12]
ljmpl *0x12345678
// CHECK: ret
// CHECK: encoding: [0xc3]
ret
// CHECK: lret
// CHECK: encoding: [0xcb]
lret
// CHECK: leave
// CHECK: encoding: [0xc9]
leave
// CHECK: leave
// CHECK: encoding: [0xc9]
leavel
// CHECK: seto %bl
// CHECK: encoding: [0x0f,0x90,0xc3]
seto %bl
// CHECK: seto 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x90,0x84,0xcb,0xef,0xbe,0xad,0xde]
seto 0xdeadbeef(%ebx,%ecx,8)
// CHECK: seto 32493
// CHECK: encoding: [0x0f,0x90,0x05,0xed,0x7e,0x00,0x00]
seto 0x7eed
// CHECK: seto 3133065982
// CHECK: encoding: [0x0f,0x90,0x05,0xfe,0xca,0xbe,0xba]
seto 0xbabecafe
// CHECK: seto 305419896
// CHECK: encoding: [0x0f,0x90,0x05,0x78,0x56,0x34,0x12]
seto 0x12345678
// CHECK: setno %bl
// CHECK: encoding: [0x0f,0x91,0xc3]
setno %bl
// CHECK: setno 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x91,0x84,0xcb,0xef,0xbe,0xad,0xde]
setno 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setno 32493
// CHECK: encoding: [0x0f,0x91,0x05,0xed,0x7e,0x00,0x00]
setno 0x7eed
// CHECK: setno 3133065982
// CHECK: encoding: [0x0f,0x91,0x05,0xfe,0xca,0xbe,0xba]
setno 0xbabecafe
// CHECK: setno 305419896
// CHECK: encoding: [0x0f,0x91,0x05,0x78,0x56,0x34,0x12]
setno 0x12345678
// CHECK: setb %bl
// CHECK: encoding: [0x0f,0x92,0xc3]
setb %bl
// CHECK: setb 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x92,0x84,0xcb,0xef,0xbe,0xad,0xde]
setb 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setb 32493
// CHECK: encoding: [0x0f,0x92,0x05,0xed,0x7e,0x00,0x00]
setb 0x7eed
// CHECK: setb 3133065982
// CHECK: encoding: [0x0f,0x92,0x05,0xfe,0xca,0xbe,0xba]
setb 0xbabecafe
// CHECK: setb 305419896
// CHECK: encoding: [0x0f,0x92,0x05,0x78,0x56,0x34,0x12]
setb 0x12345678
// CHECK: setae %bl
// CHECK: encoding: [0x0f,0x93,0xc3]
setae %bl
// CHECK: setae 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x93,0x84,0xcb,0xef,0xbe,0xad,0xde]
setae 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setae 32493
// CHECK: encoding: [0x0f,0x93,0x05,0xed,0x7e,0x00,0x00]
setae 0x7eed
// CHECK: setae 3133065982
// CHECK: encoding: [0x0f,0x93,0x05,0xfe,0xca,0xbe,0xba]
setae 0xbabecafe
// CHECK: setae 305419896
// CHECK: encoding: [0x0f,0x93,0x05,0x78,0x56,0x34,0x12]
setae 0x12345678
// CHECK: sete %bl
// CHECK: encoding: [0x0f,0x94,0xc3]
sete %bl
// CHECK: sete 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x94,0x84,0xcb,0xef,0xbe,0xad,0xde]
sete 0xdeadbeef(%ebx,%ecx,8)
// CHECK: sete 32493
// CHECK: encoding: [0x0f,0x94,0x05,0xed,0x7e,0x00,0x00]
sete 0x7eed
// CHECK: sete 3133065982
// CHECK: encoding: [0x0f,0x94,0x05,0xfe,0xca,0xbe,0xba]
sete 0xbabecafe
// CHECK: sete 305419896
// CHECK: encoding: [0x0f,0x94,0x05,0x78,0x56,0x34,0x12]
sete 0x12345678
// CHECK: setne %bl
// CHECK: encoding: [0x0f,0x95,0xc3]
setne %bl
// CHECK: setne 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x95,0x84,0xcb,0xef,0xbe,0xad,0xde]
setne 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setne 32493
// CHECK: encoding: [0x0f,0x95,0x05,0xed,0x7e,0x00,0x00]
setne 0x7eed
// CHECK: setne 3133065982
// CHECK: encoding: [0x0f,0x95,0x05,0xfe,0xca,0xbe,0xba]
setne 0xbabecafe
// CHECK: setne 305419896
// CHECK: encoding: [0x0f,0x95,0x05,0x78,0x56,0x34,0x12]
setne 0x12345678
// CHECK: setbe %bl
// CHECK: encoding: [0x0f,0x96,0xc3]
setbe %bl
// CHECK: setbe 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x96,0x84,0xcb,0xef,0xbe,0xad,0xde]
setbe 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setbe 32493
// CHECK: encoding: [0x0f,0x96,0x05,0xed,0x7e,0x00,0x00]
setbe 0x7eed
// CHECK: setbe 3133065982
// CHECK: encoding: [0x0f,0x96,0x05,0xfe,0xca,0xbe,0xba]
setbe 0xbabecafe
// CHECK: setbe 305419896
// CHECK: encoding: [0x0f,0x96,0x05,0x78,0x56,0x34,0x12]
setbe 0x12345678
// CHECK: seta %bl
// CHECK: encoding: [0x0f,0x97,0xc3]
seta %bl
// CHECK: seta 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x97,0x84,0xcb,0xef,0xbe,0xad,0xde]
seta 0xdeadbeef(%ebx,%ecx,8)
// CHECK: seta 32493
// CHECK: encoding: [0x0f,0x97,0x05,0xed,0x7e,0x00,0x00]
seta 0x7eed
// CHECK: seta 3133065982
// CHECK: encoding: [0x0f,0x97,0x05,0xfe,0xca,0xbe,0xba]
seta 0xbabecafe
// CHECK: seta 305419896
// CHECK: encoding: [0x0f,0x97,0x05,0x78,0x56,0x34,0x12]
seta 0x12345678
// CHECK: sets %bl
// CHECK: encoding: [0x0f,0x98,0xc3]
sets %bl
// CHECK: sets 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x98,0x84,0xcb,0xef,0xbe,0xad,0xde]
sets 0xdeadbeef(%ebx,%ecx,8)
// CHECK: sets 32493
// CHECK: encoding: [0x0f,0x98,0x05,0xed,0x7e,0x00,0x00]
sets 0x7eed
// CHECK: sets 3133065982
// CHECK: encoding: [0x0f,0x98,0x05,0xfe,0xca,0xbe,0xba]
sets 0xbabecafe
// CHECK: sets 305419896
// CHECK: encoding: [0x0f,0x98,0x05,0x78,0x56,0x34,0x12]
sets 0x12345678
// CHECK: setns %bl
// CHECK: encoding: [0x0f,0x99,0xc3]
setns %bl
// CHECK: setns 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x99,0x84,0xcb,0xef,0xbe,0xad,0xde]
setns 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setns 32493
// CHECK: encoding: [0x0f,0x99,0x05,0xed,0x7e,0x00,0x00]
setns 0x7eed
// CHECK: setns 3133065982
// CHECK: encoding: [0x0f,0x99,0x05,0xfe,0xca,0xbe,0xba]
setns 0xbabecafe
// CHECK: setns 305419896
// CHECK: encoding: [0x0f,0x99,0x05,0x78,0x56,0x34,0x12]
setns 0x12345678
// CHECK: setp %bl
// CHECK: encoding: [0x0f,0x9a,0xc3]
setp %bl
// CHECK: setp 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x9a,0x84,0xcb,0xef,0xbe,0xad,0xde]
setp 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setp 32493
// CHECK: encoding: [0x0f,0x9a,0x05,0xed,0x7e,0x00,0x00]
setp 0x7eed
// CHECK: setp 3133065982
// CHECK: encoding: [0x0f,0x9a,0x05,0xfe,0xca,0xbe,0xba]
setp 0xbabecafe
// CHECK: setp 305419896
// CHECK: encoding: [0x0f,0x9a,0x05,0x78,0x56,0x34,0x12]
setp 0x12345678
// CHECK: setnp %bl
// CHECK: encoding: [0x0f,0x9b,0xc3]
setnp %bl
// CHECK: setnp 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x9b,0x84,0xcb,0xef,0xbe,0xad,0xde]
setnp 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setnp 32493
// CHECK: encoding: [0x0f,0x9b,0x05,0xed,0x7e,0x00,0x00]
setnp 0x7eed
// CHECK: setnp 3133065982
// CHECK: encoding: [0x0f,0x9b,0x05,0xfe,0xca,0xbe,0xba]
setnp 0xbabecafe
// CHECK: setnp 305419896
// CHECK: encoding: [0x0f,0x9b,0x05,0x78,0x56,0x34,0x12]
setnp 0x12345678
// CHECK: setl %bl
// CHECK: encoding: [0x0f,0x9c,0xc3]
setl %bl
// CHECK: setl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x9c,0x84,0xcb,0xef,0xbe,0xad,0xde]
setl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setl 32493
// CHECK: encoding: [0x0f,0x9c,0x05,0xed,0x7e,0x00,0x00]
setl 0x7eed
// CHECK: setl 3133065982
// CHECK: encoding: [0x0f,0x9c,0x05,0xfe,0xca,0xbe,0xba]
setl 0xbabecafe
// CHECK: setl 305419896
// CHECK: encoding: [0x0f,0x9c,0x05,0x78,0x56,0x34,0x12]
setl 0x12345678
// CHECK: setge %bl
// CHECK: encoding: [0x0f,0x9d,0xc3]
setge %bl
// CHECK: setge 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x9d,0x84,0xcb,0xef,0xbe,0xad,0xde]
setge 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setge 32493
// CHECK: encoding: [0x0f,0x9d,0x05,0xed,0x7e,0x00,0x00]
setge 0x7eed
// CHECK: setge 3133065982
// CHECK: encoding: [0x0f,0x9d,0x05,0xfe,0xca,0xbe,0xba]
setge 0xbabecafe
// CHECK: setge 305419896
// CHECK: encoding: [0x0f,0x9d,0x05,0x78,0x56,0x34,0x12]
setge 0x12345678
// CHECK: setle %bl
// CHECK: encoding: [0x0f,0x9e,0xc3]
setle %bl
// CHECK: setle 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x9e,0x84,0xcb,0xef,0xbe,0xad,0xde]
setle 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setle 32493
// CHECK: encoding: [0x0f,0x9e,0x05,0xed,0x7e,0x00,0x00]
setle 0x7eed
// CHECK: setle 3133065982
// CHECK: encoding: [0x0f,0x9e,0x05,0xfe,0xca,0xbe,0xba]
setle 0xbabecafe
// CHECK: setle 305419896
// CHECK: encoding: [0x0f,0x9e,0x05,0x78,0x56,0x34,0x12]
setle 0x12345678
// CHECK: setg %bl
// CHECK: encoding: [0x0f,0x9f,0xc3]
setg %bl
// CHECK: setg 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x9f,0x84,0xcb,0xef,0xbe,0xad,0xde]
setg 0xdeadbeef(%ebx,%ecx,8)
// CHECK: setg 32493
// CHECK: encoding: [0x0f,0x9f,0x05,0xed,0x7e,0x00,0x00]
setg 0x7eed
// CHECK: setg 3133065982
// CHECK: encoding: [0x0f,0x9f,0x05,0xfe,0xca,0xbe,0xba]
setg 0xbabecafe
// CHECK: setg 305419896
// CHECK: encoding: [0x0f,0x9f,0x05,0x78,0x56,0x34,0x12]
setg 0x12345678
// CHECK: rsm
// CHECK: encoding: [0x0f,0xaa]
rsm
// CHECK: hlt
// CHECK: encoding: [0xf4]
hlt
// CHECK: nopl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x1f,0x84,0xcb,0xef,0xbe,0xad,0xde]
nopl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: nopw 32493
// CHECK: encoding: [0x66,0x0f,0x1f,0x05,0xed,0x7e,0x00,0x00]
nopw 0x7eed
// CHECK: nopl 3133065982
// CHECK: encoding: [0x0f,0x1f,0x05,0xfe,0xca,0xbe,0xba]
nopl 0xbabecafe
// CHECK: nopl 305419896
// CHECK: encoding: [0x0f,0x1f,0x05,0x78,0x56,0x34,0x12]
nopl 0x12345678
// CHECK: nopw %ax
// CHECK: encoding: [0x66,0x0f,0x1f,0xc0]
nopw %ax
// CHECK: nopl %eax
// CHECK: encoding: [0x0f,0x1f,0xc0]
nopl %eax
// CHECK: nop
// CHECK: encoding: [0x90]
nop
// CHECK: lldtw 32493
// CHECK: encoding: [0x0f,0x00,0x15,0xed,0x7e,0x00,0x00]
lldtw 0x7eed
// CHECK: lmsww 32493
// CHECK: encoding: [0x0f,0x01,0x35,0xed,0x7e,0x00,0x00]
lmsww 0x7eed
// CHECK: ltrw 32493
// CHECK: encoding: [0x0f,0x00,0x1d,0xed,0x7e,0x00,0x00]
ltrw 0x7eed
// CHECK: sldtw 32493
// CHECK: encoding: [0x0f,0x00,0x05,0xed,0x7e,0x00,0x00]
sldtw 0x7eed
// CHECK: smsww 32493
// CHECK: encoding: [0x0f,0x01,0x25,0xed,0x7e,0x00,0x00]
smsww 0x7eed
// CHECK: strw 32493
// CHECK: encoding: [0x0f,0x00,0x0d,0xed,0x7e,0x00,0x00]
strw 0x7eed
// CHECK: verr %bx
// CHECK: encoding: [0x0f,0x00,0xe3]
verr %bx
// CHECK: verr 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x00,0xa4,0xcb,0xef,0xbe,0xad,0xde]
verr 0xdeadbeef(%ebx,%ecx,8)
// CHECK: verr 3133065982
// CHECK: encoding: [0x0f,0x00,0x25,0xfe,0xca,0xbe,0xba]
verr 0xbabecafe
// CHECK: verr 305419896
// CHECK: encoding: [0x0f,0x00,0x25,0x78,0x56,0x34,0x12]
verr 0x12345678
// CHECK: verw %bx
// CHECK: encoding: [0x0f,0x00,0xeb]
verw %bx
// CHECK: verw 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x00,0xac,0xcb,0xef,0xbe,0xad,0xde]
verw 0xdeadbeef(%ebx,%ecx,8)
// CHECK: verw 3133065982
// CHECK: encoding: [0x0f,0x00,0x2d,0xfe,0xca,0xbe,0xba]
verw 0xbabecafe
// CHECK: verw 305419896
// CHECK: encoding: [0x0f,0x00,0x2d,0x78,0x56,0x34,0x12]
verw 0x12345678
// CHECK: fld %st(2)
// CHECK: encoding: [0xd9,0xc2]
fld %st(2)
// CHECK: fldl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdd,0x84,0xcb,0xef,0xbe,0xad,0xde]
fldl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fldl 3133065982
// CHECK: encoding: [0xdd,0x05,0xfe,0xca,0xbe,0xba]
fldl 0xbabecafe
// CHECK: fldl 305419896
// CHECK: encoding: [0xdd,0x05,0x78,0x56,0x34,0x12]
fldl 0x12345678
// CHECK: fld %st(2)
// CHECK: encoding: [0xd9,0xc2]
fld %st(2)
// CHECK: fildl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdb,0x84,0xcb,0xef,0xbe,0xad,0xde]
fildl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fildl 3133065982
// CHECK: encoding: [0xdb,0x05,0xfe,0xca,0xbe,0xba]
fildl 0xbabecafe
// CHECK: fildl 305419896
// CHECK: encoding: [0xdb,0x05,0x78,0x56,0x34,0x12]
fildl 0x12345678
// CHECK: fildll 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdf,0xac,0xcb,0xef,0xbe,0xad,0xde]
fildll 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fildll 32493
// CHECK: encoding: [0xdf,0x2d,0xed,0x7e,0x00,0x00]
fildll 0x7eed
// CHECK: fildll 3133065982
// CHECK: encoding: [0xdf,0x2d,0xfe,0xca,0xbe,0xba]
fildll 0xbabecafe
// CHECK: fildll 305419896
// CHECK: encoding: [0xdf,0x2d,0x78,0x56,0x34,0x12]
fildll 0x12345678
// CHECK: fldt 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdb,0xac,0xcb,0xef,0xbe,0xad,0xde]
fldt 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fldt 32493
// CHECK: encoding: [0xdb,0x2d,0xed,0x7e,0x00,0x00]
fldt 0x7eed
// CHECK: fldt 3133065982
// CHECK: encoding: [0xdb,0x2d,0xfe,0xca,0xbe,0xba]
fldt 0xbabecafe
// CHECK: fldt 305419896
// CHECK: encoding: [0xdb,0x2d,0x78,0x56,0x34,0x12]
fldt 0x12345678
// CHECK: fbld 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdf,0xa4,0xcb,0xef,0xbe,0xad,0xde]
fbld 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fbld 32493
// CHECK: encoding: [0xdf,0x25,0xed,0x7e,0x00,0x00]
fbld 0x7eed
// CHECK: fbld 3133065982
// CHECK: encoding: [0xdf,0x25,0xfe,0xca,0xbe,0xba]
fbld 0xbabecafe
// CHECK: fbld 305419896
// CHECK: encoding: [0xdf,0x25,0x78,0x56,0x34,0x12]
fbld 0x12345678
// CHECK: fst %st(2)
// CHECK: encoding: [0xdd,0xd2]
fst %st(2)
// CHECK: fstl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdd,0x94,0xcb,0xef,0xbe,0xad,0xde]
fstl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fstl 3133065982
// CHECK: encoding: [0xdd,0x15,0xfe,0xca,0xbe,0xba]
fstl 0xbabecafe
// CHECK: fstl 305419896
// CHECK: encoding: [0xdd,0x15,0x78,0x56,0x34,0x12]
fstl 0x12345678
// CHECK: fst %st(2)
// CHECK: encoding: [0xdd,0xd2]
fst %st(2)
// CHECK: fistl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdb,0x94,0xcb,0xef,0xbe,0xad,0xde]
fistl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fistl 3133065982
// CHECK: encoding: [0xdb,0x15,0xfe,0xca,0xbe,0xba]
fistl 0xbabecafe
// CHECK: fistl 305419896
// CHECK: encoding: [0xdb,0x15,0x78,0x56,0x34,0x12]
fistl 0x12345678
// CHECK: fstp %st(2)
// CHECK: encoding: [0xdd,0xda]
fstp %st(2)
// CHECK: fstpl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdd,0x9c,0xcb,0xef,0xbe,0xad,0xde]
fstpl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fstpl 3133065982
// CHECK: encoding: [0xdd,0x1d,0xfe,0xca,0xbe,0xba]
fstpl 0xbabecafe
// CHECK: fstpl 305419896
// CHECK: encoding: [0xdd,0x1d,0x78,0x56,0x34,0x12]
fstpl 0x12345678
// CHECK: fstp %st(2)
// CHECK: encoding: [0xdd,0xda]
fstp %st(2)
// CHECK: fistpl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdb,0x9c,0xcb,0xef,0xbe,0xad,0xde]
fistpl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fistpl 3133065982
// CHECK: encoding: [0xdb,0x1d,0xfe,0xca,0xbe,0xba]
fistpl 0xbabecafe
// CHECK: fistpl 305419896
// CHECK: encoding: [0xdb,0x1d,0x78,0x56,0x34,0x12]
fistpl 0x12345678
// CHECK: fistpll 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdf,0xbc,0xcb,0xef,0xbe,0xad,0xde]
fistpll 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fistpll 32493
// CHECK: encoding: [0xdf,0x3d,0xed,0x7e,0x00,0x00]
fistpll 0x7eed
// CHECK: fistpll 3133065982
// CHECK: encoding: [0xdf,0x3d,0xfe,0xca,0xbe,0xba]
fistpll 0xbabecafe
// CHECK: fistpll 305419896
// CHECK: encoding: [0xdf,0x3d,0x78,0x56,0x34,0x12]
fistpll 0x12345678
// CHECK: fstpt 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdb,0xbc,0xcb,0xef,0xbe,0xad,0xde]
fstpt 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fstpt 32493
// CHECK: encoding: [0xdb,0x3d,0xed,0x7e,0x00,0x00]
fstpt 0x7eed
// CHECK: fstpt 3133065982
// CHECK: encoding: [0xdb,0x3d,0xfe,0xca,0xbe,0xba]
fstpt 0xbabecafe
// CHECK: fstpt 305419896
// CHECK: encoding: [0xdb,0x3d,0x78,0x56,0x34,0x12]
fstpt 0x12345678
// CHECK: fbstp 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdf,0xb4,0xcb,0xef,0xbe,0xad,0xde]
fbstp 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fbstp 32493
// CHECK: encoding: [0xdf,0x35,0xed,0x7e,0x00,0x00]
fbstp 0x7eed
// CHECK: fbstp 3133065982
// CHECK: encoding: [0xdf,0x35,0xfe,0xca,0xbe,0xba]
fbstp 0xbabecafe
// CHECK: fbstp 305419896
// CHECK: encoding: [0xdf,0x35,0x78,0x56,0x34,0x12]
fbstp 0x12345678
// CHECK: fxch %st(2)
// CHECK: encoding: [0xd9,0xca]
fxch %st(2)
// CHECK: fcom %st(1)
// CHECK: encoding: [0xd8,0xd1]
fcom
// CHECK: fcom %st(2)
// CHECK: encoding: [0xd8,0xd2]
fcom %st(2)
// CHECK: fcom %st(2)
// CHECK: encoding: [0xd8,0xd2]
fcom %st(2)
// CHECK: ficoml 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0x94,0xcb,0xef,0xbe,0xad,0xde]
ficoml 0xdeadbeef(%ebx,%ecx,8)
// CHECK: ficoml 3133065982
// CHECK: encoding: [0xda,0x15,0xfe,0xca,0xbe,0xba]
ficoml 0xbabecafe
// CHECK: ficoml 305419896
// CHECK: encoding: [0xda,0x15,0x78,0x56,0x34,0x12]
ficoml 0x12345678
// CHECK: fcomp %st(1)
// CHECK: encoding: [0xd8,0xd9]
fcomp
// CHECK: fcomp %st(2)
// CHECK: encoding: [0xd8,0xda]
fcomp %st(2)
// CHECK: fcomp %st(2)
// CHECK: encoding: [0xd8,0xda]
fcomp %st(2)
// CHECK: ficompl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0x9c,0xcb,0xef,0xbe,0xad,0xde]
ficompl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: ficompl 3133065982
// CHECK: encoding: [0xda,0x1d,0xfe,0xca,0xbe,0xba]
ficompl 0xbabecafe
// CHECK: ficompl 305419896
// CHECK: encoding: [0xda,0x1d,0x78,0x56,0x34,0x12]
ficompl 0x12345678
// CHECK: fcompp
// CHECK: encoding: [0xde,0xd9]
fcompp
// CHECK: fucom %st(2)
// CHECK: encoding: [0xdd,0xe2]
fucom %st(2)
// CHECK: fucomp %st(2)
// CHECK: encoding: [0xdd,0xea]
fucomp %st(2)
// CHECK: fucompp
// CHECK: encoding: [0xda,0xe9]
fucompp
// CHECK: ftst
// CHECK: encoding: [0xd9,0xe4]
ftst
// CHECK: fxam
// CHECK: encoding: [0xd9,0xe5]
fxam
// CHECK: fld1
// CHECK: encoding: [0xd9,0xe8]
fld1
// CHECK: fldl2t
// CHECK: encoding: [0xd9,0xe9]
fldl2t
// CHECK: fldl2e
// CHECK: encoding: [0xd9,0xea]
fldl2e
// CHECK: fldpi
// CHECK: encoding: [0xd9,0xeb]
fldpi
// CHECK: fldlg2
// CHECK: encoding: [0xd9,0xec]
fldlg2
// CHECK: fldln2
// CHECK: encoding: [0xd9,0xed]
fldln2
// CHECK: fldz
// CHECK: encoding: [0xd9,0xee]
fldz
// CHECK: fadd %st(2)
// CHECK: encoding: [0xd8,0xc2]
fadd %st(2)
// CHECK: faddl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdc,0x84,0xcb,0xef,0xbe,0xad,0xde]
faddl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: faddl 3133065982
// CHECK: encoding: [0xdc,0x05,0xfe,0xca,0xbe,0xba]
faddl 0xbabecafe
// CHECK: faddl 305419896
// CHECK: encoding: [0xdc,0x05,0x78,0x56,0x34,0x12]
faddl 0x12345678
// CHECK: fiaddl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0x84,0xcb,0xef,0xbe,0xad,0xde]
fiaddl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fiaddl 3133065982
// CHECK: encoding: [0xda,0x05,0xfe,0xca,0xbe,0xba]
fiaddl 0xbabecafe
// CHECK: fiaddl 305419896
// CHECK: encoding: [0xda,0x05,0x78,0x56,0x34,0x12]
fiaddl 0x12345678
-// CHECK: faddp %st(2)
+// CHECK: faddp %st, %st(2)
// CHECK: encoding: [0xde,0xc2]
faddp %st(2)
-// CHECK: fsub %st(2)
+// CHECK: fsub %st(2), %st
// CHECK: encoding: [0xd8,0xe2]
fsub %st(2)
// CHECK: fsubl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdc,0xa4,0xcb,0xef,0xbe,0xad,0xde]
fsubl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fsubl 3133065982
// CHECK: encoding: [0xdc,0x25,0xfe,0xca,0xbe,0xba]
fsubl 0xbabecafe
// CHECK: fsubl 305419896
// CHECK: encoding: [0xdc,0x25,0x78,0x56,0x34,0x12]
fsubl 0x12345678
// CHECK: fisubl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0xa4,0xcb,0xef,0xbe,0xad,0xde]
fisubl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fisubl 3133065982
// CHECK: encoding: [0xda,0x25,0xfe,0xca,0xbe,0xba]
fisubl 0xbabecafe
// CHECK: fisubl 305419896
// CHECK: encoding: [0xda,0x25,0x78,0x56,0x34,0x12]
fisubl 0x12345678
-// CHECK: fsubp %st(2)
+// CHECK: fsubp %st, %st(2)
// CHECK: encoding: [0xde,0xe2]
fsubp %st(2)
-// CHECK: fsubr %st(2)
+// CHECK: fsubr %st(2), %st
// CHECK: encoding: [0xd8,0xea]
fsubr %st(2)
// CHECK: fsubrl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdc,0xac,0xcb,0xef,0xbe,0xad,0xde]
fsubrl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fsubrl 3133065982
// CHECK: encoding: [0xdc,0x2d,0xfe,0xca,0xbe,0xba]
fsubrl 0xbabecafe
// CHECK: fsubrl 305419896
// CHECK: encoding: [0xdc,0x2d,0x78,0x56,0x34,0x12]
fsubrl 0x12345678
// CHECK: fisubrl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0xac,0xcb,0xef,0xbe,0xad,0xde]
fisubrl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fisubrl 3133065982
// CHECK: encoding: [0xda,0x2d,0xfe,0xca,0xbe,0xba]
fisubrl 0xbabecafe
// CHECK: fisubrl 305419896
// CHECK: encoding: [0xda,0x2d,0x78,0x56,0x34,0x12]
fisubrl 0x12345678
-// CHECK: fsubrp %st(2)
+// CHECK: fsubrp %st, %st(2)
// CHECK: encoding: [0xde,0xea]
fsubrp %st(2)
-// CHECK: fmul %st(2)
+// CHECK: fmul %st(2), %st
// CHECK: encoding: [0xd8,0xca]
fmul %st(2)
// CHECK: fmull 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdc,0x8c,0xcb,0xef,0xbe,0xad,0xde]
fmull 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fmull 3133065982
// CHECK: encoding: [0xdc,0x0d,0xfe,0xca,0xbe,0xba]
fmull 0xbabecafe
// CHECK: fmull 305419896
// CHECK: encoding: [0xdc,0x0d,0x78,0x56,0x34,0x12]
fmull 0x12345678
// CHECK: fimull 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0x8c,0xcb,0xef,0xbe,0xad,0xde]
fimull 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fimull 3133065982
// CHECK: encoding: [0xda,0x0d,0xfe,0xca,0xbe,0xba]
fimull 0xbabecafe
// CHECK: fimull 305419896
// CHECK: encoding: [0xda,0x0d,0x78,0x56,0x34,0x12]
fimull 0x12345678
-// CHECK: fmulp %st(2)
+// CHECK: fmulp %st, %st(2)
// CHECK: encoding: [0xde,0xca]
fmulp %st(2)
-// CHECK: fdiv %st(2)
+// CHECK: fdiv %st(2), %st
// CHECK: encoding: [0xd8,0xf2]
fdiv %st(2)
// CHECK: fdivl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdc,0xb4,0xcb,0xef,0xbe,0xad,0xde]
fdivl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fdivl 3133065982
// CHECK: encoding: [0xdc,0x35,0xfe,0xca,0xbe,0xba]
fdivl 0xbabecafe
// CHECK: fdivl 305419896
// CHECK: encoding: [0xdc,0x35,0x78,0x56,0x34,0x12]
fdivl 0x12345678
// CHECK: fidivl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0xb4,0xcb,0xef,0xbe,0xad,0xde]
fidivl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fidivl 3133065982
// CHECK: encoding: [0xda,0x35,0xfe,0xca,0xbe,0xba]
fidivl 0xbabecafe
// CHECK: fidivl 305419896
// CHECK: encoding: [0xda,0x35,0x78,0x56,0x34,0x12]
fidivl 0x12345678
-// CHECK: fdivp %st(2)
+// CHECK: fdivp %st, %st(2)
// CHECK: encoding: [0xde,0xf2]
fdivp %st(2)
-// CHECK: fdivr %st(2)
+// CHECK: fdivr %st(2), %st
// CHECK: encoding: [0xd8,0xfa]
fdivr %st(2)
// CHECK: fdivrl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdc,0xbc,0xcb,0xef,0xbe,0xad,0xde]
fdivrl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fdivrl 3133065982
// CHECK: encoding: [0xdc,0x3d,0xfe,0xca,0xbe,0xba]
fdivrl 0xbabecafe
// CHECK: fdivrl 305419896
// CHECK: encoding: [0xdc,0x3d,0x78,0x56,0x34,0x12]
fdivrl 0x12345678
// CHECK: fidivrl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0xbc,0xcb,0xef,0xbe,0xad,0xde]
fidivrl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fidivrl 3133065982
// CHECK: encoding: [0xda,0x3d,0xfe,0xca,0xbe,0xba]
fidivrl 0xbabecafe
// CHECK: fidivrl 305419896
// CHECK: encoding: [0xda,0x3d,0x78,0x56,0x34,0x12]
fidivrl 0x12345678
-// CHECK: fdivrp %st(2)
+// CHECK: fdivrp %st, %st(2)
// CHECK: encoding: [0xde,0xfa]
fdivrp %st(2)
// CHECK: f2xm1
// CHECK: encoding: [0xd9,0xf0]
f2xm1
// CHECK: fyl2x
// CHECK: encoding: [0xd9,0xf1]
fyl2x
// CHECK: fptan
// CHECK: encoding: [0xd9,0xf2]
fptan
// CHECK: fpatan
// CHECK: encoding: [0xd9,0xf3]
fpatan
// CHECK: fxtract
// CHECK: encoding: [0xd9,0xf4]
fxtract
// CHECK: fprem1
// CHECK: encoding: [0xd9,0xf5]
fprem1
// CHECK: fdecstp
// CHECK: encoding: [0xd9,0xf6]
fdecstp
// CHECK: fincstp
// CHECK: encoding: [0xd9,0xf7]
fincstp
// CHECK: fprem
// CHECK: encoding: [0xd9,0xf8]
fprem
// CHECK: fyl2xp1
// CHECK: encoding: [0xd9,0xf9]
fyl2xp1
// CHECK: fsqrt
// CHECK: encoding: [0xd9,0xfa]
fsqrt
// CHECK: fsincos
// CHECK: encoding: [0xd9,0xfb]
fsincos
// CHECK: frndint
// CHECK: encoding: [0xd9,0xfc]
frndint
// CHECK: fscale
// CHECK: encoding: [0xd9,0xfd]
fscale
// CHECK: fsin
// CHECK: encoding: [0xd9,0xfe]
fsin
// CHECK: fcos
// CHECK: encoding: [0xd9,0xff]
fcos
// CHECK: fchs
// CHECK: encoding: [0xd9,0xe0]
fchs
// CHECK: fabs
// CHECK: encoding: [0xd9,0xe1]
fabs
// CHECK: fninit
// CHECK: encoding: [0xdb,0xe3]
fninit
// CHECK: fldcw 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xd9,0xac,0xcb,0xef,0xbe,0xad,0xde]
fldcw 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fldcw 3133065982
// CHECK: encoding: [0xd9,0x2d,0xfe,0xca,0xbe,0xba]
fldcw 0xbabecafe
// CHECK: fldcw 305419896
// CHECK: encoding: [0xd9,0x2d,0x78,0x56,0x34,0x12]
fldcw 0x12345678
// CHECK: fnstcw 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xd9,0xbc,0xcb,0xef,0xbe,0xad,0xde]
fnstcw 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fnstcw 3133065982
// CHECK: encoding: [0xd9,0x3d,0xfe,0xca,0xbe,0xba]
fnstcw 0xbabecafe
// CHECK: fnstcw 305419896
// CHECK: encoding: [0xd9,0x3d,0x78,0x56,0x34,0x12]
fnstcw 0x12345678
// CHECK: fnstsw 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdd,0xbc,0xcb,0xef,0xbe,0xad,0xde]
fnstsw 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fnstsw 3133065982
// CHECK: encoding: [0xdd,0x3d,0xfe,0xca,0xbe,0xba]
fnstsw 0xbabecafe
// CHECK: fnstsw 305419896
// CHECK: encoding: [0xdd,0x3d,0x78,0x56,0x34,0x12]
fnstsw 0x12345678
// CHECK: fnclex
// CHECK: encoding: [0xdb,0xe2]
fnclex
// CHECK: fnstenv 32493
// CHECK: encoding: [0xd9,0x35,0xed,0x7e,0x00,0x00]
fnstenv 0x7eed
// CHECK: fldenv 32493
// CHECK: encoding: [0xd9,0x25,0xed,0x7e,0x00,0x00]
fldenv 0x7eed
// CHECK: fnsave 32493
// CHECK: encoding: [0xdd,0x35,0xed,0x7e,0x00,0x00]
fnsave 0x7eed
// CHECK: frstor 32493
// CHECK: encoding: [0xdd,0x25,0xed,0x7e,0x00,0x00]
frstor 0x7eed
// CHECK: ffree %st(2)
// CHECK: encoding: [0xdd,0xc2]
ffree %st(2)
// CHECK: ffreep %st(2)
// CHECK: encoding: [0xdf,0xc2]
ffreep %st(2)
// CHECK: fnop
// CHECK: encoding: [0xd9,0xd0]
fnop
// CHECK: invd
// CHECK: encoding: [0x0f,0x08]
invd
// CHECK: wbinvd
// CHECK: encoding: [0x0f,0x09]
wbinvd
// CHECK: wbnoinvd
// CHECK: encoding: [0xf3,0x0f,0x09]
wbnoinvd
// CHECK: cpuid
// CHECK: encoding: [0x0f,0xa2]
cpuid
// CHECK: wrmsr
// CHECK: encoding: [0x0f,0x30]
wrmsr
// CHECK: rdtsc
// CHECK: encoding: [0x0f,0x31]
rdtsc
// CHECK: rdmsr
// CHECK: encoding: [0x0f,0x32]
rdmsr
// CHECK: cmpxchg8b 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xc7,0x8c,0xcb,0xef,0xbe,0xad,0xde]
cmpxchg8b 0xdeadbeef(%ebx,%ecx,8)
// CHECK: cmpxchg8b 32493
// CHECK: encoding: [0x0f,0xc7,0x0d,0xed,0x7e,0x00,0x00]
cmpxchg8b 0x7eed
// CHECK: cmpxchg8b 3133065982
// CHECK: encoding: [0x0f,0xc7,0x0d,0xfe,0xca,0xbe,0xba]
cmpxchg8b 0xbabecafe
// CHECK: cmpxchg8b 305419896
// CHECK: encoding: [0x0f,0xc7,0x0d,0x78,0x56,0x34,0x12]
cmpxchg8b 0x12345678
// CHECK: sysenter
// CHECK: encoding: [0x0f,0x34]
sysenter
// CHECK: sysexit
// CHECK: encoding: [0x0f,0x35]
sysexit
// CHECK: sysexitl
// CHECK: encoding: [0x0f,0x35]
sysexitl
// CHECK: fxsave 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0x84,0xcb,0xef,0xbe,0xad,0xde]
fxsave 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fxsave 32493
// CHECK: encoding: [0x0f,0xae,0x05,0xed,0x7e,0x00,0x00]
fxsave 0x7eed
// CHECK: fxsave 3133065982
// CHECK: encoding: [0x0f,0xae,0x05,0xfe,0xca,0xbe,0xba]
fxsave 0xbabecafe
// CHECK: fxsave 305419896
// CHECK: encoding: [0x0f,0xae,0x05,0x78,0x56,0x34,0x12]
fxsave 0x12345678
// CHECK: fxrstor 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0x8c,0xcb,0xef,0xbe,0xad,0xde]
fxrstor 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fxrstor 32493
// CHECK: encoding: [0x0f,0xae,0x0d,0xed,0x7e,0x00,0x00]
fxrstor 0x7eed
// CHECK: fxrstor 3133065982
// CHECK: encoding: [0x0f,0xae,0x0d,0xfe,0xca,0xbe,0xba]
fxrstor 0xbabecafe
// CHECK: fxrstor 305419896
// CHECK: encoding: [0x0f,0xae,0x0d,0x78,0x56,0x34,0x12]
fxrstor 0x12345678
// CHECK: rdpmc
// CHECK: encoding: [0x0f,0x33]
rdpmc
// CHECK: ud2
// CHECK: encoding: [0x0f,0x0b]
ud2
-// CHECK: fcmovb %st(2), %st(0)
+// CHECK: fcmovb %st(2), %st
// CHECK: encoding: [0xda,0xc2]
fcmovb %st(2),%st
-// CHECK: fcmove %st(2), %st(0)
+// CHECK: fcmove %st(2), %st
// CHECK: encoding: [0xda,0xca]
fcmove %st(2),%st
-// CHECK: fcmovbe %st(2), %st(0)
+// CHECK: fcmovbe %st(2), %st
// CHECK: encoding: [0xda,0xd2]
fcmovbe %st(2),%st
-// CHECK: fcmovu %st(2), %st(0)
+// CHECK: fcmovu %st(2), %st
// CHECK: encoding: [0xda,0xda]
fcmovu %st(2),%st
-// CHECK: fcmovnb %st(2), %st(0)
+// CHECK: fcmovnb %st(2), %st
// CHECK: encoding: [0xdb,0xc2]
fcmovnb %st(2),%st
-// CHECK: fcmovne %st(2), %st(0)
+// CHECK: fcmovne %st(2), %st
// CHECK: encoding: [0xdb,0xca]
fcmovne %st(2),%st
-// CHECK: fcmovnbe %st(2), %st(0)
+// CHECK: fcmovnbe %st(2), %st
// CHECK: encoding: [0xdb,0xd2]
fcmovnbe %st(2),%st
-// CHECK: fcmovnu %st(2), %st(0)
+// CHECK: fcmovnu %st(2), %st
// CHECK: encoding: [0xdb,0xda]
fcmovnu %st(2),%st
// CHECK: fcomi %st(2)
// CHECK: encoding: [0xdb,0xf2]
fcomi %st(2),%st
// CHECK: fucomi %st(2)
// CHECK: encoding: [0xdb,0xea]
fucomi %st(2),%st
// CHECK: fcompi %st(2)
// CHECK: encoding: [0xdf,0xf2]
fcomip %st(2),%st
// CHECK: fucompi %st(2)
// CHECK: encoding: [0xdf,0xea]
fucomip %st(2),%st
// CHECK: movntil %ecx, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xc3,0x8c,0xcb,0xef,0xbe,0xad,0xde]
movnti %ecx,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movntil %ecx, 69
// CHECK: encoding: [0x0f,0xc3,0x0d,0x45,0x00,0x00,0x00]
movnti %ecx,0x45
// CHECK: movntil %ecx, 32493
// CHECK: encoding: [0x0f,0xc3,0x0d,0xed,0x7e,0x00,0x00]
movnti %ecx,0x7eed
// CHECK: movntil %ecx, 3133065982
// CHECK: encoding: [0x0f,0xc3,0x0d,0xfe,0xca,0xbe,0xba]
movnti %ecx,0xbabecafe
// CHECK: movntil %ecx, 305419896
// CHECK: encoding: [0x0f,0xc3,0x0d,0x78,0x56,0x34,0x12]
movnti %ecx,0x12345678
// CHECK: clflush 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0xbc,0xcb,0xef,0xbe,0xad,0xde]
clflush 0xdeadbeef(%ebx,%ecx,8)
// CHECK: clflush 32493
// CHECK: encoding: [0x0f,0xae,0x3d,0xed,0x7e,0x00,0x00]
clflush 0x7eed
// CHECK: clflush 3133065982
// CHECK: encoding: [0x0f,0xae,0x3d,0xfe,0xca,0xbe,0xba]
clflush 0xbabecafe
// CHECK: clflush 305419896
// CHECK: encoding: [0x0f,0xae,0x3d,0x78,0x56,0x34,0x12]
clflush 0x12345678
// CHECK: emms
// CHECK: encoding: [0x0f,0x77]
emms
// CHECK: movd %ecx, %mm3
// CHECK: encoding: [0x0f,0x6e,0xd9]
movd %ecx,%mm3
// CHECK: movd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x6e,0x9c,0xcb,0xef,0xbe,0xad,0xde]
movd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: movd 69, %mm3
// CHECK: encoding: [0x0f,0x6e,0x1d,0x45,0x00,0x00,0x00]
movd 0x45,%mm3
// CHECK: movd 32493, %mm3
// CHECK: encoding: [0x0f,0x6e,0x1d,0xed,0x7e,0x00,0x00]
movd 0x7eed,%mm3
// CHECK: movd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x6e,0x1d,0xfe,0xca,0xbe,0xba]
movd 0xbabecafe,%mm3
// CHECK: movd 305419896, %mm3
// CHECK: encoding: [0x0f,0x6e,0x1d,0x78,0x56,0x34,0x12]
movd 0x12345678,%mm3
// CHECK: movd %mm3, %ecx
// CHECK: encoding: [0x0f,0x7e,0xd9]
movd %mm3,%ecx
// CHECK: movd %mm3, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x7e,0x9c,0xcb,0xef,0xbe,0xad,0xde]
movd %mm3,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movd %mm3, 69
// CHECK: encoding: [0x0f,0x7e,0x1d,0x45,0x00,0x00,0x00]
movd %mm3,0x45
// CHECK: movd %mm3, 32493
// CHECK: encoding: [0x0f,0x7e,0x1d,0xed,0x7e,0x00,0x00]
movd %mm3,0x7eed
// CHECK: movd %mm3, 3133065982
// CHECK: encoding: [0x0f,0x7e,0x1d,0xfe,0xca,0xbe,0xba]
movd %mm3,0xbabecafe
// CHECK: movd %mm3, 305419896
// CHECK: encoding: [0x0f,0x7e,0x1d,0x78,0x56,0x34,0x12]
movd %mm3,0x12345678
// CHECK: movd %ecx, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6e,0xe9]
movd %ecx,%xmm5
// CHECK: movd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x6e,0xac,0xcb,0xef,0xbe,0xad,0xde]
movd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6e,0x2d,0x45,0x00,0x00,0x00]
movd 0x45,%xmm5
// CHECK: movd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6e,0x2d,0xed,0x7e,0x00,0x00]
movd 0x7eed,%xmm5
// CHECK: movd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6e,0x2d,0xfe,0xca,0xbe,0xba]
movd 0xbabecafe,%xmm5
// CHECK: movd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6e,0x2d,0x78,0x56,0x34,0x12]
movd 0x12345678,%xmm5
// CHECK: movd %xmm5, %ecx
// CHECK: encoding: [0x66,0x0f,0x7e,0xe9]
movd %xmm5,%ecx
// CHECK: movd %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0x7e,0xac,0xcb,0xef,0xbe,0xad,0xde]
movd %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movd %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0x7e,0x2d,0x45,0x00,0x00,0x00]
movd %xmm5,0x45
// CHECK: movd %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0x7e,0x2d,0xed,0x7e,0x00,0x00]
movd %xmm5,0x7eed
// CHECK: movd %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0x7e,0x2d,0xfe,0xca,0xbe,0xba]
movd %xmm5,0xbabecafe
// CHECK: movd %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0x7e,0x2d,0x78,0x56,0x34,0x12]
movd %xmm5,0x12345678
// CHECK: movq 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x6f,0x9c,0xcb,0xef,0xbe,0xad,0xde]
movq 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: movq 69, %mm3
// CHECK: encoding: [0x0f,0x6f,0x1d,0x45,0x00,0x00,0x00]
movq 0x45,%mm3
// CHECK: movq 32493, %mm3
// CHECK: encoding: [0x0f,0x6f,0x1d,0xed,0x7e,0x00,0x00]
movq 0x7eed,%mm3
// CHECK: movq 3133065982, %mm3
// CHECK: encoding: [0x0f,0x6f,0x1d,0xfe,0xca,0xbe,0xba]
movq 0xbabecafe,%mm3
// CHECK: movq 305419896, %mm3
// CHECK: encoding: [0x0f,0x6f,0x1d,0x78,0x56,0x34,0x12]
movq 0x12345678,%mm3
// CHECK: movq %mm3, %mm3
// CHECK: encoding: [0x0f,0x6f,0xdb]
movq %mm3,%mm3
// CHECK: movq %mm3, %mm3
// CHECK: encoding: [0x0f,0x6f,0xdb]
movq %mm3,%mm3
// CHECK: movq %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x7e,0xed]
movq %xmm5,%xmm5
// CHECK: movq %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0xd6,0xac,0xcb,0xef,0xbe,0xad,0xde]
movq %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movq %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0xd6,0x2d,0x45,0x00,0x00,0x00]
movq %xmm5,0x45
// CHECK: movq %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0xd6,0x2d,0xed,0x7e,0x00,0x00]
movq %xmm5,0x7eed
// CHECK: movq %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0xd6,0x2d,0xfe,0xca,0xbe,0xba]
movq %xmm5,0xbabecafe
// CHECK: movq %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0xd6,0x2d,0x78,0x56,0x34,0x12]
movq %xmm5,0x12345678
// CHECK: packssdw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x6b,0x9c,0xcb,0xef,0xbe,0xad,0xde]
packssdw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: packssdw 69, %mm3
// CHECK: encoding: [0x0f,0x6b,0x1d,0x45,0x00,0x00,0x00]
packssdw 0x45,%mm3
// CHECK: packssdw 32493, %mm3
// CHECK: encoding: [0x0f,0x6b,0x1d,0xed,0x7e,0x00,0x00]
packssdw 0x7eed,%mm3
// CHECK: packssdw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x6b,0x1d,0xfe,0xca,0xbe,0xba]
packssdw 0xbabecafe,%mm3
// CHECK: packssdw 305419896, %mm3
// CHECK: encoding: [0x0f,0x6b,0x1d,0x78,0x56,0x34,0x12]
packssdw 0x12345678,%mm3
// CHECK: packssdw %mm3, %mm3
// CHECK: encoding: [0x0f,0x6b,0xdb]
packssdw %mm3,%mm3
// CHECK: packssdw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x6b,0xac,0xcb,0xef,0xbe,0xad,0xde]
packssdw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: packssdw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6b,0x2d,0x45,0x00,0x00,0x00]
packssdw 0x45,%xmm5
// CHECK: packssdw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6b,0x2d,0xed,0x7e,0x00,0x00]
packssdw 0x7eed,%xmm5
// CHECK: packssdw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6b,0x2d,0xfe,0xca,0xbe,0xba]
packssdw 0xbabecafe,%xmm5
// CHECK: packssdw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6b,0x2d,0x78,0x56,0x34,0x12]
packssdw 0x12345678,%xmm5
// CHECK: packssdw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6b,0xed]
packssdw %xmm5,%xmm5
// CHECK: packsswb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x63,0x9c,0xcb,0xef,0xbe,0xad,0xde]
packsswb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: packsswb 69, %mm3
// CHECK: encoding: [0x0f,0x63,0x1d,0x45,0x00,0x00,0x00]
packsswb 0x45,%mm3
// CHECK: packsswb 32493, %mm3
// CHECK: encoding: [0x0f,0x63,0x1d,0xed,0x7e,0x00,0x00]
packsswb 0x7eed,%mm3
// CHECK: packsswb 3133065982, %mm3
// CHECK: encoding: [0x0f,0x63,0x1d,0xfe,0xca,0xbe,0xba]
packsswb 0xbabecafe,%mm3
// CHECK: packsswb 305419896, %mm3
// CHECK: encoding: [0x0f,0x63,0x1d,0x78,0x56,0x34,0x12]
packsswb 0x12345678,%mm3
// CHECK: packsswb %mm3, %mm3
// CHECK: encoding: [0x0f,0x63,0xdb]
packsswb %mm3,%mm3
// CHECK: packsswb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x63,0xac,0xcb,0xef,0xbe,0xad,0xde]
packsswb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: packsswb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x63,0x2d,0x45,0x00,0x00,0x00]
packsswb 0x45,%xmm5
// CHECK: packsswb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x63,0x2d,0xed,0x7e,0x00,0x00]
packsswb 0x7eed,%xmm5
// CHECK: packsswb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x63,0x2d,0xfe,0xca,0xbe,0xba]
packsswb 0xbabecafe,%xmm5
// CHECK: packsswb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x63,0x2d,0x78,0x56,0x34,0x12]
packsswb 0x12345678,%xmm5
// CHECK: packsswb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x63,0xed]
packsswb %xmm5,%xmm5
// CHECK: packuswb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x67,0x9c,0xcb,0xef,0xbe,0xad,0xde]
packuswb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: packuswb 69, %mm3
// CHECK: encoding: [0x0f,0x67,0x1d,0x45,0x00,0x00,0x00]
packuswb 0x45,%mm3
// CHECK: packuswb 32493, %mm3
// CHECK: encoding: [0x0f,0x67,0x1d,0xed,0x7e,0x00,0x00]
packuswb 0x7eed,%mm3
// CHECK: packuswb 3133065982, %mm3
// CHECK: encoding: [0x0f,0x67,0x1d,0xfe,0xca,0xbe,0xba]
packuswb 0xbabecafe,%mm3
// CHECK: packuswb 305419896, %mm3
// CHECK: encoding: [0x0f,0x67,0x1d,0x78,0x56,0x34,0x12]
packuswb 0x12345678,%mm3
// CHECK: packuswb %mm3, %mm3
// CHECK: encoding: [0x0f,0x67,0xdb]
packuswb %mm3,%mm3
// CHECK: packuswb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x67,0xac,0xcb,0xef,0xbe,0xad,0xde]
packuswb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: packuswb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x67,0x2d,0x45,0x00,0x00,0x00]
packuswb 0x45,%xmm5
// CHECK: packuswb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x67,0x2d,0xed,0x7e,0x00,0x00]
packuswb 0x7eed,%xmm5
// CHECK: packuswb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x67,0x2d,0xfe,0xca,0xbe,0xba]
packuswb 0xbabecafe,%xmm5
// CHECK: packuswb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x67,0x2d,0x78,0x56,0x34,0x12]
packuswb 0x12345678,%xmm5
// CHECK: packuswb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x67,0xed]
packuswb %xmm5,%xmm5
// CHECK: paddb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xfc,0x9c,0xcb,0xef,0xbe,0xad,0xde]
paddb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: paddb 69, %mm3
// CHECK: encoding: [0x0f,0xfc,0x1d,0x45,0x00,0x00,0x00]
paddb 0x45,%mm3
// CHECK: paddb 32493, %mm3
// CHECK: encoding: [0x0f,0xfc,0x1d,0xed,0x7e,0x00,0x00]
paddb 0x7eed,%mm3
// CHECK: paddb 3133065982, %mm3
// CHECK: encoding: [0x0f,0xfc,0x1d,0xfe,0xca,0xbe,0xba]
paddb 0xbabecafe,%mm3
// CHECK: paddb 305419896, %mm3
// CHECK: encoding: [0x0f,0xfc,0x1d,0x78,0x56,0x34,0x12]
paddb 0x12345678,%mm3
// CHECK: paddb %mm3, %mm3
// CHECK: encoding: [0x0f,0xfc,0xdb]
paddb %mm3,%mm3
// CHECK: paddb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xfc,0xac,0xcb,0xef,0xbe,0xad,0xde]
paddb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: paddb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfc,0x2d,0x45,0x00,0x00,0x00]
paddb 0x45,%xmm5
// CHECK: paddb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfc,0x2d,0xed,0x7e,0x00,0x00]
paddb 0x7eed,%xmm5
// CHECK: paddb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfc,0x2d,0xfe,0xca,0xbe,0xba]
paddb 0xbabecafe,%xmm5
// CHECK: paddb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfc,0x2d,0x78,0x56,0x34,0x12]
paddb 0x12345678,%xmm5
// CHECK: paddb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfc,0xed]
paddb %xmm5,%xmm5
// CHECK: paddw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xfd,0x9c,0xcb,0xef,0xbe,0xad,0xde]
paddw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: paddw 69, %mm3
// CHECK: encoding: [0x0f,0xfd,0x1d,0x45,0x00,0x00,0x00]
paddw 0x45,%mm3
// CHECK: paddw 32493, %mm3
// CHECK: encoding: [0x0f,0xfd,0x1d,0xed,0x7e,0x00,0x00]
paddw 0x7eed,%mm3
// CHECK: paddw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xfd,0x1d,0xfe,0xca,0xbe,0xba]
paddw 0xbabecafe,%mm3
// CHECK: paddw 305419896, %mm3
// CHECK: encoding: [0x0f,0xfd,0x1d,0x78,0x56,0x34,0x12]
paddw 0x12345678,%mm3
// CHECK: paddw %mm3, %mm3
// CHECK: encoding: [0x0f,0xfd,0xdb]
paddw %mm3,%mm3
// CHECK: paddw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xfd,0xac,0xcb,0xef,0xbe,0xad,0xde]
paddw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: paddw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfd,0x2d,0x45,0x00,0x00,0x00]
paddw 0x45,%xmm5
// CHECK: paddw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfd,0x2d,0xed,0x7e,0x00,0x00]
paddw 0x7eed,%xmm5
// CHECK: paddw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfd,0x2d,0xfe,0xca,0xbe,0xba]
paddw 0xbabecafe,%xmm5
// CHECK: paddw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfd,0x2d,0x78,0x56,0x34,0x12]
paddw 0x12345678,%xmm5
// CHECK: paddw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfd,0xed]
paddw %xmm5,%xmm5
// CHECK: paddd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xfe,0x9c,0xcb,0xef,0xbe,0xad,0xde]
paddd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: paddd 69, %mm3
// CHECK: encoding: [0x0f,0xfe,0x1d,0x45,0x00,0x00,0x00]
paddd 0x45,%mm3
// CHECK: paddd 32493, %mm3
// CHECK: encoding: [0x0f,0xfe,0x1d,0xed,0x7e,0x00,0x00]
paddd 0x7eed,%mm3
// CHECK: paddd 3133065982, %mm3
// CHECK: encoding: [0x0f,0xfe,0x1d,0xfe,0xca,0xbe,0xba]
paddd 0xbabecafe,%mm3
// CHECK: paddd 305419896, %mm3
// CHECK: encoding: [0x0f,0xfe,0x1d,0x78,0x56,0x34,0x12]
paddd 0x12345678,%mm3
// CHECK: paddd %mm3, %mm3
// CHECK: encoding: [0x0f,0xfe,0xdb]
paddd %mm3,%mm3
// CHECK: paddd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xfe,0xac,0xcb,0xef,0xbe,0xad,0xde]
paddd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: paddd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfe,0x2d,0x45,0x00,0x00,0x00]
paddd 0x45,%xmm5
// CHECK: paddd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfe,0x2d,0xed,0x7e,0x00,0x00]
paddd 0x7eed,%xmm5
// CHECK: paddd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfe,0x2d,0xfe,0xca,0xbe,0xba]
paddd 0xbabecafe,%xmm5
// CHECK: paddd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfe,0x2d,0x78,0x56,0x34,0x12]
paddd 0x12345678,%xmm5
// CHECK: paddd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfe,0xed]
paddd %xmm5,%xmm5
// CHECK: paddq 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xd4,0x9c,0xcb,0xef,0xbe,0xad,0xde]
paddq 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: paddq 69, %mm3
// CHECK: encoding: [0x0f,0xd4,0x1d,0x45,0x00,0x00,0x00]
paddq 0x45,%mm3
// CHECK: paddq 32493, %mm3
// CHECK: encoding: [0x0f,0xd4,0x1d,0xed,0x7e,0x00,0x00]
paddq 0x7eed,%mm3
// CHECK: paddq 3133065982, %mm3
// CHECK: encoding: [0x0f,0xd4,0x1d,0xfe,0xca,0xbe,0xba]
paddq 0xbabecafe,%mm3
// CHECK: paddq 305419896, %mm3
// CHECK: encoding: [0x0f,0xd4,0x1d,0x78,0x56,0x34,0x12]
paddq 0x12345678,%mm3
// CHECK: paddq %mm3, %mm3
// CHECK: encoding: [0x0f,0xd4,0xdb]
paddq %mm3,%mm3
// CHECK: paddq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xd4,0xac,0xcb,0xef,0xbe,0xad,0xde]
paddq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: paddq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd4,0x2d,0x45,0x00,0x00,0x00]
paddq 0x45,%xmm5
// CHECK: paddq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd4,0x2d,0xed,0x7e,0x00,0x00]
paddq 0x7eed,%xmm5
// CHECK: paddq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd4,0x2d,0xfe,0xca,0xbe,0xba]
paddq 0xbabecafe,%xmm5
// CHECK: paddq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd4,0x2d,0x78,0x56,0x34,0x12]
paddq 0x12345678,%xmm5
// CHECK: paddq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd4,0xed]
paddq %xmm5,%xmm5
// CHECK: paddsb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xec,0x9c,0xcb,0xef,0xbe,0xad,0xde]
paddsb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: paddsb 69, %mm3
// CHECK: encoding: [0x0f,0xec,0x1d,0x45,0x00,0x00,0x00]
paddsb 0x45,%mm3
// CHECK: paddsb 32493, %mm3
// CHECK: encoding: [0x0f,0xec,0x1d,0xed,0x7e,0x00,0x00]
paddsb 0x7eed,%mm3
// CHECK: paddsb 3133065982, %mm3
// CHECK: encoding: [0x0f,0xec,0x1d,0xfe,0xca,0xbe,0xba]
paddsb 0xbabecafe,%mm3
// CHECK: paddsb 305419896, %mm3
// CHECK: encoding: [0x0f,0xec,0x1d,0x78,0x56,0x34,0x12]
paddsb 0x12345678,%mm3
// CHECK: paddsb %mm3, %mm3
// CHECK: encoding: [0x0f,0xec,0xdb]
paddsb %mm3,%mm3
// CHECK: paddsb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xec,0xac,0xcb,0xef,0xbe,0xad,0xde]
paddsb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: paddsb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xec,0x2d,0x45,0x00,0x00,0x00]
paddsb 0x45,%xmm5
// CHECK: paddsb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xec,0x2d,0xed,0x7e,0x00,0x00]
paddsb 0x7eed,%xmm5
// CHECK: paddsb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xec,0x2d,0xfe,0xca,0xbe,0xba]
paddsb 0xbabecafe,%xmm5
// CHECK: paddsb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xec,0x2d,0x78,0x56,0x34,0x12]
paddsb 0x12345678,%xmm5
// CHECK: paddsb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xec,0xed]
paddsb %xmm5,%xmm5
// CHECK: paddsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xed,0x9c,0xcb,0xef,0xbe,0xad,0xde]
paddsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: paddsw 69, %mm3
// CHECK: encoding: [0x0f,0xed,0x1d,0x45,0x00,0x00,0x00]
paddsw 0x45,%mm3
// CHECK: paddsw 32493, %mm3
// CHECK: encoding: [0x0f,0xed,0x1d,0xed,0x7e,0x00,0x00]
paddsw 0x7eed,%mm3
// CHECK: paddsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xed,0x1d,0xfe,0xca,0xbe,0xba]
paddsw 0xbabecafe,%mm3
// CHECK: paddsw 305419896, %mm3
// CHECK: encoding: [0x0f,0xed,0x1d,0x78,0x56,0x34,0x12]
paddsw 0x12345678,%mm3
// CHECK: paddsw %mm3, %mm3
// CHECK: encoding: [0x0f,0xed,0xdb]
paddsw %mm3,%mm3
// CHECK: paddsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xed,0xac,0xcb,0xef,0xbe,0xad,0xde]
paddsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: paddsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xed,0x2d,0x45,0x00,0x00,0x00]
paddsw 0x45,%xmm5
// CHECK: paddsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xed,0x2d,0xed,0x7e,0x00,0x00]
paddsw 0x7eed,%xmm5
// CHECK: paddsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xed,0x2d,0xfe,0xca,0xbe,0xba]
paddsw 0xbabecafe,%xmm5
// CHECK: paddsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xed,0x2d,0x78,0x56,0x34,0x12]
paddsw 0x12345678,%xmm5
// CHECK: paddsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xed,0xed]
paddsw %xmm5,%xmm5
// CHECK: paddusb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xdc,0x9c,0xcb,0xef,0xbe,0xad,0xde]
paddusb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: paddusb 69, %mm3
// CHECK: encoding: [0x0f,0xdc,0x1d,0x45,0x00,0x00,0x00]
paddusb 0x45,%mm3
// CHECK: paddusb 32493, %mm3
// CHECK: encoding: [0x0f,0xdc,0x1d,0xed,0x7e,0x00,0x00]
paddusb 0x7eed,%mm3
// CHECK: paddusb 3133065982, %mm3
// CHECK: encoding: [0x0f,0xdc,0x1d,0xfe,0xca,0xbe,0xba]
paddusb 0xbabecafe,%mm3
// CHECK: paddusb 305419896, %mm3
// CHECK: encoding: [0x0f,0xdc,0x1d,0x78,0x56,0x34,0x12]
paddusb 0x12345678,%mm3
// CHECK: paddusb %mm3, %mm3
// CHECK: encoding: [0x0f,0xdc,0xdb]
paddusb %mm3,%mm3
// CHECK: paddusb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xdc,0xac,0xcb,0xef,0xbe,0xad,0xde]
paddusb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: paddusb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdc,0x2d,0x45,0x00,0x00,0x00]
paddusb 0x45,%xmm5
// CHECK: paddusb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdc,0x2d,0xed,0x7e,0x00,0x00]
paddusb 0x7eed,%xmm5
// CHECK: paddusb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdc,0x2d,0xfe,0xca,0xbe,0xba]
paddusb 0xbabecafe,%xmm5
// CHECK: paddusb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdc,0x2d,0x78,0x56,0x34,0x12]
paddusb 0x12345678,%xmm5
// CHECK: paddusb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdc,0xed]
paddusb %xmm5,%xmm5
// CHECK: paddusw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xdd,0x9c,0xcb,0xef,0xbe,0xad,0xde]
paddusw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: paddusw 69, %mm3
// CHECK: encoding: [0x0f,0xdd,0x1d,0x45,0x00,0x00,0x00]
paddusw 0x45,%mm3
// CHECK: paddusw 32493, %mm3
// CHECK: encoding: [0x0f,0xdd,0x1d,0xed,0x7e,0x00,0x00]
paddusw 0x7eed,%mm3
// CHECK: paddusw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xdd,0x1d,0xfe,0xca,0xbe,0xba]
paddusw 0xbabecafe,%mm3
// CHECK: paddusw 305419896, %mm3
// CHECK: encoding: [0x0f,0xdd,0x1d,0x78,0x56,0x34,0x12]
paddusw 0x12345678,%mm3
// CHECK: paddusw %mm3, %mm3
// CHECK: encoding: [0x0f,0xdd,0xdb]
paddusw %mm3,%mm3
// CHECK: paddusw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xdd,0xac,0xcb,0xef,0xbe,0xad,0xde]
paddusw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: paddusw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdd,0x2d,0x45,0x00,0x00,0x00]
paddusw 0x45,%xmm5
// CHECK: paddusw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdd,0x2d,0xed,0x7e,0x00,0x00]
paddusw 0x7eed,%xmm5
// CHECK: paddusw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdd,0x2d,0xfe,0xca,0xbe,0xba]
paddusw 0xbabecafe,%xmm5
// CHECK: paddusw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdd,0x2d,0x78,0x56,0x34,0x12]
paddusw 0x12345678,%xmm5
// CHECK: paddusw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdd,0xed]
paddusw %xmm5,%xmm5
// CHECK: pand 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xdb,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pand 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pand 69, %mm3
// CHECK: encoding: [0x0f,0xdb,0x1d,0x45,0x00,0x00,0x00]
pand 0x45,%mm3
// CHECK: pand 32493, %mm3
// CHECK: encoding: [0x0f,0xdb,0x1d,0xed,0x7e,0x00,0x00]
pand 0x7eed,%mm3
// CHECK: pand 3133065982, %mm3
// CHECK: encoding: [0x0f,0xdb,0x1d,0xfe,0xca,0xbe,0xba]
pand 0xbabecafe,%mm3
// CHECK: pand 305419896, %mm3
// CHECK: encoding: [0x0f,0xdb,0x1d,0x78,0x56,0x34,0x12]
pand 0x12345678,%mm3
// CHECK: pand %mm3, %mm3
// CHECK: encoding: [0x0f,0xdb,0xdb]
pand %mm3,%mm3
// CHECK: pand 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xdb,0xac,0xcb,0xef,0xbe,0xad,0xde]
pand 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pand 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdb,0x2d,0x45,0x00,0x00,0x00]
pand 0x45,%xmm5
// CHECK: pand 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdb,0x2d,0xed,0x7e,0x00,0x00]
pand 0x7eed,%xmm5
// CHECK: pand 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdb,0x2d,0xfe,0xca,0xbe,0xba]
pand 0xbabecafe,%xmm5
// CHECK: pand 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdb,0x2d,0x78,0x56,0x34,0x12]
pand 0x12345678,%xmm5
// CHECK: pand %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdb,0xed]
pand %xmm5,%xmm5
// CHECK: pandn 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xdf,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pandn 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pandn 69, %mm3
// CHECK: encoding: [0x0f,0xdf,0x1d,0x45,0x00,0x00,0x00]
pandn 0x45,%mm3
// CHECK: pandn 32493, %mm3
// CHECK: encoding: [0x0f,0xdf,0x1d,0xed,0x7e,0x00,0x00]
pandn 0x7eed,%mm3
// CHECK: pandn 3133065982, %mm3
// CHECK: encoding: [0x0f,0xdf,0x1d,0xfe,0xca,0xbe,0xba]
pandn 0xbabecafe,%mm3
// CHECK: pandn 305419896, %mm3
// CHECK: encoding: [0x0f,0xdf,0x1d,0x78,0x56,0x34,0x12]
pandn 0x12345678,%mm3
// CHECK: pandn %mm3, %mm3
// CHECK: encoding: [0x0f,0xdf,0xdb]
pandn %mm3,%mm3
// CHECK: pandn 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xdf,0xac,0xcb,0xef,0xbe,0xad,0xde]
pandn 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pandn 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdf,0x2d,0x45,0x00,0x00,0x00]
pandn 0x45,%xmm5
// CHECK: pandn 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdf,0x2d,0xed,0x7e,0x00,0x00]
pandn 0x7eed,%xmm5
// CHECK: pandn 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdf,0x2d,0xfe,0xca,0xbe,0xba]
pandn 0xbabecafe,%xmm5
// CHECK: pandn 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdf,0x2d,0x78,0x56,0x34,0x12]
pandn 0x12345678,%xmm5
// CHECK: pandn %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xdf,0xed]
pandn %xmm5,%xmm5
// CHECK: pcmpeqb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x74,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pcmpeqb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pcmpeqb 69, %mm3
// CHECK: encoding: [0x0f,0x74,0x1d,0x45,0x00,0x00,0x00]
pcmpeqb 0x45,%mm3
// CHECK: pcmpeqb 32493, %mm3
// CHECK: encoding: [0x0f,0x74,0x1d,0xed,0x7e,0x00,0x00]
pcmpeqb 0x7eed,%mm3
// CHECK: pcmpeqb 3133065982, %mm3
// CHECK: encoding: [0x0f,0x74,0x1d,0xfe,0xca,0xbe,0xba]
pcmpeqb 0xbabecafe,%mm3
// CHECK: pcmpeqb 305419896, %mm3
// CHECK: encoding: [0x0f,0x74,0x1d,0x78,0x56,0x34,0x12]
pcmpeqb 0x12345678,%mm3
// CHECK: pcmpeqb %mm3, %mm3
// CHECK: encoding: [0x0f,0x74,0xdb]
pcmpeqb %mm3,%mm3
// CHECK: pcmpeqb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x74,0xac,0xcb,0xef,0xbe,0xad,0xde]
pcmpeqb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pcmpeqb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x74,0x2d,0x45,0x00,0x00,0x00]
pcmpeqb 0x45,%xmm5
// CHECK: pcmpeqb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x74,0x2d,0xed,0x7e,0x00,0x00]
pcmpeqb 0x7eed,%xmm5
// CHECK: pcmpeqb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x74,0x2d,0xfe,0xca,0xbe,0xba]
pcmpeqb 0xbabecafe,%xmm5
// CHECK: pcmpeqb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x74,0x2d,0x78,0x56,0x34,0x12]
pcmpeqb 0x12345678,%xmm5
// CHECK: pcmpeqb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x74,0xed]
pcmpeqb %xmm5,%xmm5
// CHECK: pcmpeqw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x75,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pcmpeqw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pcmpeqw 69, %mm3
// CHECK: encoding: [0x0f,0x75,0x1d,0x45,0x00,0x00,0x00]
pcmpeqw 0x45,%mm3
// CHECK: pcmpeqw 32493, %mm3
// CHECK: encoding: [0x0f,0x75,0x1d,0xed,0x7e,0x00,0x00]
pcmpeqw 0x7eed,%mm3
// CHECK: pcmpeqw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x75,0x1d,0xfe,0xca,0xbe,0xba]
pcmpeqw 0xbabecafe,%mm3
// CHECK: pcmpeqw 305419896, %mm3
// CHECK: encoding: [0x0f,0x75,0x1d,0x78,0x56,0x34,0x12]
pcmpeqw 0x12345678,%mm3
// CHECK: pcmpeqw %mm3, %mm3
// CHECK: encoding: [0x0f,0x75,0xdb]
pcmpeqw %mm3,%mm3
// CHECK: pcmpeqw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x75,0xac,0xcb,0xef,0xbe,0xad,0xde]
pcmpeqw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pcmpeqw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x75,0x2d,0x45,0x00,0x00,0x00]
pcmpeqw 0x45,%xmm5
// CHECK: pcmpeqw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x75,0x2d,0xed,0x7e,0x00,0x00]
pcmpeqw 0x7eed,%xmm5
// CHECK: pcmpeqw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x75,0x2d,0xfe,0xca,0xbe,0xba]
pcmpeqw 0xbabecafe,%xmm5
// CHECK: pcmpeqw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x75,0x2d,0x78,0x56,0x34,0x12]
pcmpeqw 0x12345678,%xmm5
// CHECK: pcmpeqw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x75,0xed]
pcmpeqw %xmm5,%xmm5
// CHECK: pcmpeqd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x76,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pcmpeqd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pcmpeqd 69, %mm3
// CHECK: encoding: [0x0f,0x76,0x1d,0x45,0x00,0x00,0x00]
pcmpeqd 0x45,%mm3
// CHECK: pcmpeqd 32493, %mm3
// CHECK: encoding: [0x0f,0x76,0x1d,0xed,0x7e,0x00,0x00]
pcmpeqd 0x7eed,%mm3
// CHECK: pcmpeqd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x76,0x1d,0xfe,0xca,0xbe,0xba]
pcmpeqd 0xbabecafe,%mm3
// CHECK: pcmpeqd 305419896, %mm3
// CHECK: encoding: [0x0f,0x76,0x1d,0x78,0x56,0x34,0x12]
pcmpeqd 0x12345678,%mm3
// CHECK: pcmpeqd %mm3, %mm3
// CHECK: encoding: [0x0f,0x76,0xdb]
pcmpeqd %mm3,%mm3
// CHECK: pcmpeqd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x76,0xac,0xcb,0xef,0xbe,0xad,0xde]
pcmpeqd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pcmpeqd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x76,0x2d,0x45,0x00,0x00,0x00]
pcmpeqd 0x45,%xmm5
// CHECK: pcmpeqd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x76,0x2d,0xed,0x7e,0x00,0x00]
pcmpeqd 0x7eed,%xmm5
// CHECK: pcmpeqd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x76,0x2d,0xfe,0xca,0xbe,0xba]
pcmpeqd 0xbabecafe,%xmm5
// CHECK: pcmpeqd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x76,0x2d,0x78,0x56,0x34,0x12]
pcmpeqd 0x12345678,%xmm5
// CHECK: pcmpeqd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x76,0xed]
pcmpeqd %xmm5,%xmm5
// CHECK: pcmpgtb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x64,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pcmpgtb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pcmpgtb 69, %mm3
// CHECK: encoding: [0x0f,0x64,0x1d,0x45,0x00,0x00,0x00]
pcmpgtb 0x45,%mm3
// CHECK: pcmpgtb 32493, %mm3
// CHECK: encoding: [0x0f,0x64,0x1d,0xed,0x7e,0x00,0x00]
pcmpgtb 0x7eed,%mm3
// CHECK: pcmpgtb 3133065982, %mm3
// CHECK: encoding: [0x0f,0x64,0x1d,0xfe,0xca,0xbe,0xba]
pcmpgtb 0xbabecafe,%mm3
// CHECK: pcmpgtb 305419896, %mm3
// CHECK: encoding: [0x0f,0x64,0x1d,0x78,0x56,0x34,0x12]
pcmpgtb 0x12345678,%mm3
// CHECK: pcmpgtb %mm3, %mm3
// CHECK: encoding: [0x0f,0x64,0xdb]
pcmpgtb %mm3,%mm3
// CHECK: pcmpgtb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x64,0xac,0xcb,0xef,0xbe,0xad,0xde]
pcmpgtb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pcmpgtb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x64,0x2d,0x45,0x00,0x00,0x00]
pcmpgtb 0x45,%xmm5
// CHECK: pcmpgtb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x64,0x2d,0xed,0x7e,0x00,0x00]
pcmpgtb 0x7eed,%xmm5
// CHECK: pcmpgtb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x64,0x2d,0xfe,0xca,0xbe,0xba]
pcmpgtb 0xbabecafe,%xmm5
// CHECK: pcmpgtb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x64,0x2d,0x78,0x56,0x34,0x12]
pcmpgtb 0x12345678,%xmm5
// CHECK: pcmpgtb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x64,0xed]
pcmpgtb %xmm5,%xmm5
// CHECK: pcmpgtw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x65,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pcmpgtw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pcmpgtw 69, %mm3
// CHECK: encoding: [0x0f,0x65,0x1d,0x45,0x00,0x00,0x00]
pcmpgtw 0x45,%mm3
// CHECK: pcmpgtw 32493, %mm3
// CHECK: encoding: [0x0f,0x65,0x1d,0xed,0x7e,0x00,0x00]
pcmpgtw 0x7eed,%mm3
// CHECK: pcmpgtw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x65,0x1d,0xfe,0xca,0xbe,0xba]
pcmpgtw 0xbabecafe,%mm3
// CHECK: pcmpgtw 305419896, %mm3
// CHECK: encoding: [0x0f,0x65,0x1d,0x78,0x56,0x34,0x12]
pcmpgtw 0x12345678,%mm3
// CHECK: pcmpgtw %mm3, %mm3
// CHECK: encoding: [0x0f,0x65,0xdb]
pcmpgtw %mm3,%mm3
// CHECK: pcmpgtw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x65,0xac,0xcb,0xef,0xbe,0xad,0xde]
pcmpgtw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pcmpgtw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x65,0x2d,0x45,0x00,0x00,0x00]
pcmpgtw 0x45,%xmm5
// CHECK: pcmpgtw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x65,0x2d,0xed,0x7e,0x00,0x00]
pcmpgtw 0x7eed,%xmm5
// CHECK: pcmpgtw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x65,0x2d,0xfe,0xca,0xbe,0xba]
pcmpgtw 0xbabecafe,%xmm5
// CHECK: pcmpgtw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x65,0x2d,0x78,0x56,0x34,0x12]
pcmpgtw 0x12345678,%xmm5
// CHECK: pcmpgtw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x65,0xed]
pcmpgtw %xmm5,%xmm5
// CHECK: pcmpgtd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x66,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pcmpgtd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pcmpgtd 69, %mm3
// CHECK: encoding: [0x0f,0x66,0x1d,0x45,0x00,0x00,0x00]
pcmpgtd 0x45,%mm3
// CHECK: pcmpgtd 32493, %mm3
// CHECK: encoding: [0x0f,0x66,0x1d,0xed,0x7e,0x00,0x00]
pcmpgtd 0x7eed,%mm3
// CHECK: pcmpgtd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x66,0x1d,0xfe,0xca,0xbe,0xba]
pcmpgtd 0xbabecafe,%mm3
// CHECK: pcmpgtd 305419896, %mm3
// CHECK: encoding: [0x0f,0x66,0x1d,0x78,0x56,0x34,0x12]
pcmpgtd 0x12345678,%mm3
// CHECK: pcmpgtd %mm3, %mm3
// CHECK: encoding: [0x0f,0x66,0xdb]
pcmpgtd %mm3,%mm3
// CHECK: pcmpgtd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x66,0xac,0xcb,0xef,0xbe,0xad,0xde]
pcmpgtd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pcmpgtd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x66,0x2d,0x45,0x00,0x00,0x00]
pcmpgtd 0x45,%xmm5
// CHECK: pcmpgtd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x66,0x2d,0xed,0x7e,0x00,0x00]
pcmpgtd 0x7eed,%xmm5
// CHECK: pcmpgtd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x66,0x2d,0xfe,0xca,0xbe,0xba]
pcmpgtd 0xbabecafe,%xmm5
// CHECK: pcmpgtd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x66,0x2d,0x78,0x56,0x34,0x12]
pcmpgtd 0x12345678,%xmm5
// CHECK: pcmpgtd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x66,0xed]
pcmpgtd %xmm5,%xmm5
// CHECK: pmaddwd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xf5,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmaddwd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmaddwd 69, %mm3
// CHECK: encoding: [0x0f,0xf5,0x1d,0x45,0x00,0x00,0x00]
pmaddwd 0x45,%mm3
// CHECK: pmaddwd 32493, %mm3
// CHECK: encoding: [0x0f,0xf5,0x1d,0xed,0x7e,0x00,0x00]
pmaddwd 0x7eed,%mm3
// CHECK: pmaddwd 3133065982, %mm3
// CHECK: encoding: [0x0f,0xf5,0x1d,0xfe,0xca,0xbe,0xba]
pmaddwd 0xbabecafe,%mm3
// CHECK: pmaddwd 305419896, %mm3
// CHECK: encoding: [0x0f,0xf5,0x1d,0x78,0x56,0x34,0x12]
pmaddwd 0x12345678,%mm3
// CHECK: pmaddwd %mm3, %mm3
// CHECK: encoding: [0x0f,0xf5,0xdb]
pmaddwd %mm3,%mm3
// CHECK: pmaddwd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xf5,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmaddwd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmaddwd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf5,0x2d,0x45,0x00,0x00,0x00]
pmaddwd 0x45,%xmm5
// CHECK: pmaddwd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf5,0x2d,0xed,0x7e,0x00,0x00]
pmaddwd 0x7eed,%xmm5
// CHECK: pmaddwd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf5,0x2d,0xfe,0xca,0xbe,0xba]
pmaddwd 0xbabecafe,%xmm5
// CHECK: pmaddwd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf5,0x2d,0x78,0x56,0x34,0x12]
pmaddwd 0x12345678,%xmm5
// CHECK: pmaddwd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf5,0xed]
pmaddwd %xmm5,%xmm5
// CHECK: pmulhw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xe5,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmulhw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmulhw 69, %mm3
// CHECK: encoding: [0x0f,0xe5,0x1d,0x45,0x00,0x00,0x00]
pmulhw 0x45,%mm3
// CHECK: pmulhw 32493, %mm3
// CHECK: encoding: [0x0f,0xe5,0x1d,0xed,0x7e,0x00,0x00]
pmulhw 0x7eed,%mm3
// CHECK: pmulhw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xe5,0x1d,0xfe,0xca,0xbe,0xba]
pmulhw 0xbabecafe,%mm3
// CHECK: pmulhw 305419896, %mm3
// CHECK: encoding: [0x0f,0xe5,0x1d,0x78,0x56,0x34,0x12]
pmulhw 0x12345678,%mm3
// CHECK: pmulhw %mm3, %mm3
// CHECK: encoding: [0x0f,0xe5,0xdb]
pmulhw %mm3,%mm3
// CHECK: pmulhw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xe5,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmulhw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmulhw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe5,0x2d,0x45,0x00,0x00,0x00]
pmulhw 0x45,%xmm5
// CHECK: pmulhw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe5,0x2d,0xed,0x7e,0x00,0x00]
pmulhw 0x7eed,%xmm5
// CHECK: pmulhw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe5,0x2d,0xfe,0xca,0xbe,0xba]
pmulhw 0xbabecafe,%xmm5
// CHECK: pmulhw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe5,0x2d,0x78,0x56,0x34,0x12]
pmulhw 0x12345678,%xmm5
// CHECK: pmulhw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe5,0xed]
pmulhw %xmm5,%xmm5
// CHECK: pmullw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xd5,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmullw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmullw 69, %mm3
// CHECK: encoding: [0x0f,0xd5,0x1d,0x45,0x00,0x00,0x00]
pmullw 0x45,%mm3
// CHECK: pmullw 32493, %mm3
// CHECK: encoding: [0x0f,0xd5,0x1d,0xed,0x7e,0x00,0x00]
pmullw 0x7eed,%mm3
// CHECK: pmullw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xd5,0x1d,0xfe,0xca,0xbe,0xba]
pmullw 0xbabecafe,%mm3
// CHECK: pmullw 305419896, %mm3
// CHECK: encoding: [0x0f,0xd5,0x1d,0x78,0x56,0x34,0x12]
pmullw 0x12345678,%mm3
// CHECK: pmullw %mm3, %mm3
// CHECK: encoding: [0x0f,0xd5,0xdb]
pmullw %mm3,%mm3
// CHECK: pmullw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xd5,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmullw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmullw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd5,0x2d,0x45,0x00,0x00,0x00]
pmullw 0x45,%xmm5
// CHECK: pmullw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd5,0x2d,0xed,0x7e,0x00,0x00]
pmullw 0x7eed,%xmm5
// CHECK: pmullw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd5,0x2d,0xfe,0xca,0xbe,0xba]
pmullw 0xbabecafe,%xmm5
// CHECK: pmullw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd5,0x2d,0x78,0x56,0x34,0x12]
pmullw 0x12345678,%xmm5
// CHECK: pmullw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd5,0xed]
pmullw %xmm5,%xmm5
// CHECK: por 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xeb,0x9c,0xcb,0xef,0xbe,0xad,0xde]
por 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: por 69, %mm3
// CHECK: encoding: [0x0f,0xeb,0x1d,0x45,0x00,0x00,0x00]
por 0x45,%mm3
// CHECK: por 32493, %mm3
// CHECK: encoding: [0x0f,0xeb,0x1d,0xed,0x7e,0x00,0x00]
por 0x7eed,%mm3
// CHECK: por 3133065982, %mm3
// CHECK: encoding: [0x0f,0xeb,0x1d,0xfe,0xca,0xbe,0xba]
por 0xbabecafe,%mm3
// CHECK: por 305419896, %mm3
// CHECK: encoding: [0x0f,0xeb,0x1d,0x78,0x56,0x34,0x12]
por 0x12345678,%mm3
// CHECK: por %mm3, %mm3
// CHECK: encoding: [0x0f,0xeb,0xdb]
por %mm3,%mm3
// CHECK: por 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xeb,0xac,0xcb,0xef,0xbe,0xad,0xde]
por 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: por 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xeb,0x2d,0x45,0x00,0x00,0x00]
por 0x45,%xmm5
// CHECK: por 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xeb,0x2d,0xed,0x7e,0x00,0x00]
por 0x7eed,%xmm5
// CHECK: por 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xeb,0x2d,0xfe,0xca,0xbe,0xba]
por 0xbabecafe,%xmm5
// CHECK: por 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xeb,0x2d,0x78,0x56,0x34,0x12]
por 0x12345678,%xmm5
// CHECK: por %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xeb,0xed]
por %xmm5,%xmm5
// CHECK: psllw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xf1,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psllw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psllw 69, %mm3
// CHECK: encoding: [0x0f,0xf1,0x1d,0x45,0x00,0x00,0x00]
psllw 0x45,%mm3
// CHECK: psllw 32493, %mm3
// CHECK: encoding: [0x0f,0xf1,0x1d,0xed,0x7e,0x00,0x00]
psllw 0x7eed,%mm3
// CHECK: psllw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xf1,0x1d,0xfe,0xca,0xbe,0xba]
psllw 0xbabecafe,%mm3
// CHECK: psllw 305419896, %mm3
// CHECK: encoding: [0x0f,0xf1,0x1d,0x78,0x56,0x34,0x12]
psllw 0x12345678,%mm3
// CHECK: psllw %mm3, %mm3
// CHECK: encoding: [0x0f,0xf1,0xdb]
psllw %mm3,%mm3
// CHECK: psllw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xf1,0xac,0xcb,0xef,0xbe,0xad,0xde]
psllw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psllw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf1,0x2d,0x45,0x00,0x00,0x00]
psllw 0x45,%xmm5
// CHECK: psllw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf1,0x2d,0xed,0x7e,0x00,0x00]
psllw 0x7eed,%xmm5
// CHECK: psllw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf1,0x2d,0xfe,0xca,0xbe,0xba]
psllw 0xbabecafe,%xmm5
// CHECK: psllw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf1,0x2d,0x78,0x56,0x34,0x12]
psllw 0x12345678,%xmm5
// CHECK: psllw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf1,0xed]
psllw %xmm5,%xmm5
// CHECK: psllw $127, %mm3
// CHECK: encoding: [0x0f,0x71,0xf3,0x7f]
psllw $0x7f,%mm3
// CHECK: psllw $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x71,0xf5,0x7f]
psllw $0x7f,%xmm5
// CHECK: pslld 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xf2,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pslld 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pslld 69, %mm3
// CHECK: encoding: [0x0f,0xf2,0x1d,0x45,0x00,0x00,0x00]
pslld 0x45,%mm3
// CHECK: pslld 32493, %mm3
// CHECK: encoding: [0x0f,0xf2,0x1d,0xed,0x7e,0x00,0x00]
pslld 0x7eed,%mm3
// CHECK: pslld 3133065982, %mm3
// CHECK: encoding: [0x0f,0xf2,0x1d,0xfe,0xca,0xbe,0xba]
pslld 0xbabecafe,%mm3
// CHECK: pslld 305419896, %mm3
// CHECK: encoding: [0x0f,0xf2,0x1d,0x78,0x56,0x34,0x12]
pslld 0x12345678,%mm3
// CHECK: pslld %mm3, %mm3
// CHECK: encoding: [0x0f,0xf2,0xdb]
pslld %mm3,%mm3
// CHECK: pslld 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xf2,0xac,0xcb,0xef,0xbe,0xad,0xde]
pslld 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pslld 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf2,0x2d,0x45,0x00,0x00,0x00]
pslld 0x45,%xmm5
// CHECK: pslld 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf2,0x2d,0xed,0x7e,0x00,0x00]
pslld 0x7eed,%xmm5
// CHECK: pslld 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf2,0x2d,0xfe,0xca,0xbe,0xba]
pslld 0xbabecafe,%xmm5
// CHECK: pslld 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf2,0x2d,0x78,0x56,0x34,0x12]
pslld 0x12345678,%xmm5
// CHECK: pslld %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf2,0xed]
pslld %xmm5,%xmm5
// CHECK: pslld $127, %mm3
// CHECK: encoding: [0x0f,0x72,0xf3,0x7f]
pslld $0x7f,%mm3
// CHECK: pslld $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x72,0xf5,0x7f]
pslld $0x7f,%xmm5
// CHECK: psllq 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xf3,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psllq 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psllq 69, %mm3
// CHECK: encoding: [0x0f,0xf3,0x1d,0x45,0x00,0x00,0x00]
psllq 0x45,%mm3
// CHECK: psllq 32493, %mm3
// CHECK: encoding: [0x0f,0xf3,0x1d,0xed,0x7e,0x00,0x00]
psllq 0x7eed,%mm3
// CHECK: psllq 3133065982, %mm3
// CHECK: encoding: [0x0f,0xf3,0x1d,0xfe,0xca,0xbe,0xba]
psllq 0xbabecafe,%mm3
// CHECK: psllq 305419896, %mm3
// CHECK: encoding: [0x0f,0xf3,0x1d,0x78,0x56,0x34,0x12]
psllq 0x12345678,%mm3
// CHECK: psllq %mm3, %mm3
// CHECK: encoding: [0x0f,0xf3,0xdb]
psllq %mm3,%mm3
// CHECK: psllq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xf3,0xac,0xcb,0xef,0xbe,0xad,0xde]
psllq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psllq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf3,0x2d,0x45,0x00,0x00,0x00]
psllq 0x45,%xmm5
// CHECK: psllq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf3,0x2d,0xed,0x7e,0x00,0x00]
psllq 0x7eed,%xmm5
// CHECK: psllq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf3,0x2d,0xfe,0xca,0xbe,0xba]
psllq 0xbabecafe,%xmm5
// CHECK: psllq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf3,0x2d,0x78,0x56,0x34,0x12]
psllq 0x12345678,%xmm5
// CHECK: psllq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf3,0xed]
psllq %xmm5,%xmm5
// CHECK: psllq $127, %mm3
// CHECK: encoding: [0x0f,0x73,0xf3,0x7f]
psllq $0x7f,%mm3
// CHECK: psllq $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x73,0xf5,0x7f]
psllq $0x7f,%xmm5
// CHECK: psraw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xe1,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psraw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psraw 69, %mm3
// CHECK: encoding: [0x0f,0xe1,0x1d,0x45,0x00,0x00,0x00]
psraw 0x45,%mm3
// CHECK: psraw 32493, %mm3
// CHECK: encoding: [0x0f,0xe1,0x1d,0xed,0x7e,0x00,0x00]
psraw 0x7eed,%mm3
// CHECK: psraw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xe1,0x1d,0xfe,0xca,0xbe,0xba]
psraw 0xbabecafe,%mm3
// CHECK: psraw 305419896, %mm3
// CHECK: encoding: [0x0f,0xe1,0x1d,0x78,0x56,0x34,0x12]
psraw 0x12345678,%mm3
// CHECK: psraw %mm3, %mm3
// CHECK: encoding: [0x0f,0xe1,0xdb]
psraw %mm3,%mm3
// CHECK: psraw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xe1,0xac,0xcb,0xef,0xbe,0xad,0xde]
psraw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psraw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe1,0x2d,0x45,0x00,0x00,0x00]
psraw 0x45,%xmm5
// CHECK: psraw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe1,0x2d,0xed,0x7e,0x00,0x00]
psraw 0x7eed,%xmm5
// CHECK: psraw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe1,0x2d,0xfe,0xca,0xbe,0xba]
psraw 0xbabecafe,%xmm5
// CHECK: psraw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe1,0x2d,0x78,0x56,0x34,0x12]
psraw 0x12345678,%xmm5
// CHECK: psraw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe1,0xed]
psraw %xmm5,%xmm5
// CHECK: psraw $127, %mm3
// CHECK: encoding: [0x0f,0x71,0xe3,0x7f]
psraw $0x7f,%mm3
// CHECK: psraw $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x71,0xe5,0x7f]
psraw $0x7f,%xmm5
// CHECK: psrad 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xe2,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psrad 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psrad 69, %mm3
// CHECK: encoding: [0x0f,0xe2,0x1d,0x45,0x00,0x00,0x00]
psrad 0x45,%mm3
// CHECK: psrad 32493, %mm3
// CHECK: encoding: [0x0f,0xe2,0x1d,0xed,0x7e,0x00,0x00]
psrad 0x7eed,%mm3
// CHECK: psrad 3133065982, %mm3
// CHECK: encoding: [0x0f,0xe2,0x1d,0xfe,0xca,0xbe,0xba]
psrad 0xbabecafe,%mm3
// CHECK: psrad 305419896, %mm3
// CHECK: encoding: [0x0f,0xe2,0x1d,0x78,0x56,0x34,0x12]
psrad 0x12345678,%mm3
// CHECK: psrad %mm3, %mm3
// CHECK: encoding: [0x0f,0xe2,0xdb]
psrad %mm3,%mm3
// CHECK: psrad 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xe2,0xac,0xcb,0xef,0xbe,0xad,0xde]
psrad 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psrad 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe2,0x2d,0x45,0x00,0x00,0x00]
psrad 0x45,%xmm5
// CHECK: psrad 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe2,0x2d,0xed,0x7e,0x00,0x00]
psrad 0x7eed,%xmm5
// CHECK: psrad 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe2,0x2d,0xfe,0xca,0xbe,0xba]
psrad 0xbabecafe,%xmm5
// CHECK: psrad 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe2,0x2d,0x78,0x56,0x34,0x12]
psrad 0x12345678,%xmm5
// CHECK: psrad %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe2,0xed]
psrad %xmm5,%xmm5
// CHECK: psrad $127, %mm3
// CHECK: encoding: [0x0f,0x72,0xe3,0x7f]
psrad $0x7f,%mm3
// CHECK: psrad $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x72,0xe5,0x7f]
psrad $0x7f,%xmm5
// CHECK: psrlw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xd1,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psrlw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psrlw 69, %mm3
// CHECK: encoding: [0x0f,0xd1,0x1d,0x45,0x00,0x00,0x00]
psrlw 0x45,%mm3
// CHECK: psrlw 32493, %mm3
// CHECK: encoding: [0x0f,0xd1,0x1d,0xed,0x7e,0x00,0x00]
psrlw 0x7eed,%mm3
// CHECK: psrlw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xd1,0x1d,0xfe,0xca,0xbe,0xba]
psrlw 0xbabecafe,%mm3
// CHECK: psrlw 305419896, %mm3
// CHECK: encoding: [0x0f,0xd1,0x1d,0x78,0x56,0x34,0x12]
psrlw 0x12345678,%mm3
// CHECK: psrlw %mm3, %mm3
// CHECK: encoding: [0x0f,0xd1,0xdb]
psrlw %mm3,%mm3
// CHECK: psrlw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xd1,0xac,0xcb,0xef,0xbe,0xad,0xde]
psrlw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psrlw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd1,0x2d,0x45,0x00,0x00,0x00]
psrlw 0x45,%xmm5
// CHECK: psrlw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd1,0x2d,0xed,0x7e,0x00,0x00]
psrlw 0x7eed,%xmm5
// CHECK: psrlw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd1,0x2d,0xfe,0xca,0xbe,0xba]
psrlw 0xbabecafe,%xmm5
// CHECK: psrlw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd1,0x2d,0x78,0x56,0x34,0x12]
psrlw 0x12345678,%xmm5
// CHECK: psrlw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd1,0xed]
psrlw %xmm5,%xmm5
// CHECK: psrlw $127, %mm3
// CHECK: encoding: [0x0f,0x71,0xd3,0x7f]
psrlw $0x7f,%mm3
// CHECK: psrlw $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x71,0xd5,0x7f]
psrlw $0x7f,%xmm5
// CHECK: psrld 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xd2,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psrld 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psrld 69, %mm3
// CHECK: encoding: [0x0f,0xd2,0x1d,0x45,0x00,0x00,0x00]
psrld 0x45,%mm3
// CHECK: psrld 32493, %mm3
// CHECK: encoding: [0x0f,0xd2,0x1d,0xed,0x7e,0x00,0x00]
psrld 0x7eed,%mm3
// CHECK: psrld 3133065982, %mm3
// CHECK: encoding: [0x0f,0xd2,0x1d,0xfe,0xca,0xbe,0xba]
psrld 0xbabecafe,%mm3
// CHECK: psrld 305419896, %mm3
// CHECK: encoding: [0x0f,0xd2,0x1d,0x78,0x56,0x34,0x12]
psrld 0x12345678,%mm3
// CHECK: psrld %mm3, %mm3
// CHECK: encoding: [0x0f,0xd2,0xdb]
psrld %mm3,%mm3
// CHECK: psrld 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xd2,0xac,0xcb,0xef,0xbe,0xad,0xde]
psrld 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psrld 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd2,0x2d,0x45,0x00,0x00,0x00]
psrld 0x45,%xmm5
// CHECK: psrld 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd2,0x2d,0xed,0x7e,0x00,0x00]
psrld 0x7eed,%xmm5
// CHECK: psrld 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd2,0x2d,0xfe,0xca,0xbe,0xba]
psrld 0xbabecafe,%xmm5
// CHECK: psrld 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd2,0x2d,0x78,0x56,0x34,0x12]
psrld 0x12345678,%xmm5
// CHECK: psrld %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd2,0xed]
psrld %xmm5,%xmm5
// CHECK: psrld $127, %mm3
// CHECK: encoding: [0x0f,0x72,0xd3,0x7f]
psrld $0x7f,%mm3
// CHECK: psrld $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x72,0xd5,0x7f]
psrld $0x7f,%xmm5
// CHECK: psrlq 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xd3,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psrlq 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psrlq 69, %mm3
// CHECK: encoding: [0x0f,0xd3,0x1d,0x45,0x00,0x00,0x00]
psrlq 0x45,%mm3
// CHECK: psrlq 32493, %mm3
// CHECK: encoding: [0x0f,0xd3,0x1d,0xed,0x7e,0x00,0x00]
psrlq 0x7eed,%mm3
// CHECK: psrlq 3133065982, %mm3
// CHECK: encoding: [0x0f,0xd3,0x1d,0xfe,0xca,0xbe,0xba]
psrlq 0xbabecafe,%mm3
// CHECK: psrlq 305419896, %mm3
// CHECK: encoding: [0x0f,0xd3,0x1d,0x78,0x56,0x34,0x12]
psrlq 0x12345678,%mm3
// CHECK: psrlq %mm3, %mm3
// CHECK: encoding: [0x0f,0xd3,0xdb]
psrlq %mm3,%mm3
// CHECK: psrlq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xd3,0xac,0xcb,0xef,0xbe,0xad,0xde]
psrlq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psrlq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd3,0x2d,0x45,0x00,0x00,0x00]
psrlq 0x45,%xmm5
// CHECK: psrlq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd3,0x2d,0xed,0x7e,0x00,0x00]
psrlq 0x7eed,%xmm5
// CHECK: psrlq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd3,0x2d,0xfe,0xca,0xbe,0xba]
psrlq 0xbabecafe,%xmm5
// CHECK: psrlq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd3,0x2d,0x78,0x56,0x34,0x12]
psrlq 0x12345678,%xmm5
// CHECK: psrlq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd3,0xed]
psrlq %xmm5,%xmm5
// CHECK: psrlq $127, %mm3
// CHECK: encoding: [0x0f,0x73,0xd3,0x7f]
psrlq $0x7f,%mm3
// CHECK: psrlq $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x73,0xd5,0x7f]
psrlq $0x7f,%xmm5
// CHECK: psubb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xf8,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psubb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psubb 69, %mm3
// CHECK: encoding: [0x0f,0xf8,0x1d,0x45,0x00,0x00,0x00]
psubb 0x45,%mm3
// CHECK: psubb 32493, %mm3
// CHECK: encoding: [0x0f,0xf8,0x1d,0xed,0x7e,0x00,0x00]
psubb 0x7eed,%mm3
// CHECK: psubb 3133065982, %mm3
// CHECK: encoding: [0x0f,0xf8,0x1d,0xfe,0xca,0xbe,0xba]
psubb 0xbabecafe,%mm3
// CHECK: psubb 305419896, %mm3
// CHECK: encoding: [0x0f,0xf8,0x1d,0x78,0x56,0x34,0x12]
psubb 0x12345678,%mm3
// CHECK: psubb %mm3, %mm3
// CHECK: encoding: [0x0f,0xf8,0xdb]
psubb %mm3,%mm3
// CHECK: psubb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xf8,0xac,0xcb,0xef,0xbe,0xad,0xde]
psubb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psubb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf8,0x2d,0x45,0x00,0x00,0x00]
psubb 0x45,%xmm5
// CHECK: psubb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf8,0x2d,0xed,0x7e,0x00,0x00]
psubb 0x7eed,%xmm5
// CHECK: psubb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf8,0x2d,0xfe,0xca,0xbe,0xba]
psubb 0xbabecafe,%xmm5
// CHECK: psubb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf8,0x2d,0x78,0x56,0x34,0x12]
psubb 0x12345678,%xmm5
// CHECK: psubb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf8,0xed]
psubb %xmm5,%xmm5
// CHECK: psubw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xf9,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psubw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psubw 69, %mm3
// CHECK: encoding: [0x0f,0xf9,0x1d,0x45,0x00,0x00,0x00]
psubw 0x45,%mm3
// CHECK: psubw 32493, %mm3
// CHECK: encoding: [0x0f,0xf9,0x1d,0xed,0x7e,0x00,0x00]
psubw 0x7eed,%mm3
// CHECK: psubw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xf9,0x1d,0xfe,0xca,0xbe,0xba]
psubw 0xbabecafe,%mm3
// CHECK: psubw 305419896, %mm3
// CHECK: encoding: [0x0f,0xf9,0x1d,0x78,0x56,0x34,0x12]
psubw 0x12345678,%mm3
// CHECK: psubw %mm3, %mm3
// CHECK: encoding: [0x0f,0xf9,0xdb]
psubw %mm3,%mm3
// CHECK: psubw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xf9,0xac,0xcb,0xef,0xbe,0xad,0xde]
psubw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psubw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf9,0x2d,0x45,0x00,0x00,0x00]
psubw 0x45,%xmm5
// CHECK: psubw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf9,0x2d,0xed,0x7e,0x00,0x00]
psubw 0x7eed,%xmm5
// CHECK: psubw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf9,0x2d,0xfe,0xca,0xbe,0xba]
psubw 0xbabecafe,%xmm5
// CHECK: psubw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf9,0x2d,0x78,0x56,0x34,0x12]
psubw 0x12345678,%xmm5
// CHECK: psubw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf9,0xed]
psubw %xmm5,%xmm5
// CHECK: psubd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xfa,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psubd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psubd 69, %mm3
// CHECK: encoding: [0x0f,0xfa,0x1d,0x45,0x00,0x00,0x00]
psubd 0x45,%mm3
// CHECK: psubd 32493, %mm3
// CHECK: encoding: [0x0f,0xfa,0x1d,0xed,0x7e,0x00,0x00]
psubd 0x7eed,%mm3
// CHECK: psubd 3133065982, %mm3
// CHECK: encoding: [0x0f,0xfa,0x1d,0xfe,0xca,0xbe,0xba]
psubd 0xbabecafe,%mm3
// CHECK: psubd 305419896, %mm3
// CHECK: encoding: [0x0f,0xfa,0x1d,0x78,0x56,0x34,0x12]
psubd 0x12345678,%mm3
// CHECK: psubd %mm3, %mm3
// CHECK: encoding: [0x0f,0xfa,0xdb]
psubd %mm3,%mm3
// CHECK: psubd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xfa,0xac,0xcb,0xef,0xbe,0xad,0xde]
psubd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psubd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfa,0x2d,0x45,0x00,0x00,0x00]
psubd 0x45,%xmm5
// CHECK: psubd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfa,0x2d,0xed,0x7e,0x00,0x00]
psubd 0x7eed,%xmm5
// CHECK: psubd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfa,0x2d,0xfe,0xca,0xbe,0xba]
psubd 0xbabecafe,%xmm5
// CHECK: psubd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfa,0x2d,0x78,0x56,0x34,0x12]
psubd 0x12345678,%xmm5
// CHECK: psubd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfa,0xed]
psubd %xmm5,%xmm5
// CHECK: psubq 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xfb,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psubq 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psubq 69, %mm3
// CHECK: encoding: [0x0f,0xfb,0x1d,0x45,0x00,0x00,0x00]
psubq 0x45,%mm3
// CHECK: psubq 32493, %mm3
// CHECK: encoding: [0x0f,0xfb,0x1d,0xed,0x7e,0x00,0x00]
psubq 0x7eed,%mm3
// CHECK: psubq 3133065982, %mm3
// CHECK: encoding: [0x0f,0xfb,0x1d,0xfe,0xca,0xbe,0xba]
psubq 0xbabecafe,%mm3
// CHECK: psubq 305419896, %mm3
// CHECK: encoding: [0x0f,0xfb,0x1d,0x78,0x56,0x34,0x12]
psubq 0x12345678,%mm3
// CHECK: psubq %mm3, %mm3
// CHECK: encoding: [0x0f,0xfb,0xdb]
psubq %mm3,%mm3
// CHECK: psubq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xfb,0xac,0xcb,0xef,0xbe,0xad,0xde]
psubq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psubq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfb,0x2d,0x45,0x00,0x00,0x00]
psubq 0x45,%xmm5
// CHECK: psubq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfb,0x2d,0xed,0x7e,0x00,0x00]
psubq 0x7eed,%xmm5
// CHECK: psubq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfb,0x2d,0xfe,0xca,0xbe,0xba]
psubq 0xbabecafe,%xmm5
// CHECK: psubq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfb,0x2d,0x78,0x56,0x34,0x12]
psubq 0x12345678,%xmm5
// CHECK: psubq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xfb,0xed]
psubq %xmm5,%xmm5
// CHECK: psubsb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xe8,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psubsb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psubsb 69, %mm3
// CHECK: encoding: [0x0f,0xe8,0x1d,0x45,0x00,0x00,0x00]
psubsb 0x45,%mm3
// CHECK: psubsb 32493, %mm3
// CHECK: encoding: [0x0f,0xe8,0x1d,0xed,0x7e,0x00,0x00]
psubsb 0x7eed,%mm3
// CHECK: psubsb 3133065982, %mm3
// CHECK: encoding: [0x0f,0xe8,0x1d,0xfe,0xca,0xbe,0xba]
psubsb 0xbabecafe,%mm3
// CHECK: psubsb 305419896, %mm3
// CHECK: encoding: [0x0f,0xe8,0x1d,0x78,0x56,0x34,0x12]
psubsb 0x12345678,%mm3
// CHECK: psubsb %mm3, %mm3
// CHECK: encoding: [0x0f,0xe8,0xdb]
psubsb %mm3,%mm3
// CHECK: psubsb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xe8,0xac,0xcb,0xef,0xbe,0xad,0xde]
psubsb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psubsb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe8,0x2d,0x45,0x00,0x00,0x00]
psubsb 0x45,%xmm5
// CHECK: psubsb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe8,0x2d,0xed,0x7e,0x00,0x00]
psubsb 0x7eed,%xmm5
// CHECK: psubsb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe8,0x2d,0xfe,0xca,0xbe,0xba]
psubsb 0xbabecafe,%xmm5
// CHECK: psubsb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe8,0x2d,0x78,0x56,0x34,0x12]
psubsb 0x12345678,%xmm5
// CHECK: psubsb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe8,0xed]
psubsb %xmm5,%xmm5
// CHECK: psubsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xe9,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psubsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psubsw 69, %mm3
// CHECK: encoding: [0x0f,0xe9,0x1d,0x45,0x00,0x00,0x00]
psubsw 0x45,%mm3
// CHECK: psubsw 32493, %mm3
// CHECK: encoding: [0x0f,0xe9,0x1d,0xed,0x7e,0x00,0x00]
psubsw 0x7eed,%mm3
// CHECK: psubsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xe9,0x1d,0xfe,0xca,0xbe,0xba]
psubsw 0xbabecafe,%mm3
// CHECK: psubsw 305419896, %mm3
// CHECK: encoding: [0x0f,0xe9,0x1d,0x78,0x56,0x34,0x12]
psubsw 0x12345678,%mm3
// CHECK: psubsw %mm3, %mm3
// CHECK: encoding: [0x0f,0xe9,0xdb]
psubsw %mm3,%mm3
// CHECK: psubsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xe9,0xac,0xcb,0xef,0xbe,0xad,0xde]
psubsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psubsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe9,0x2d,0x45,0x00,0x00,0x00]
psubsw 0x45,%xmm5
// CHECK: psubsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe9,0x2d,0xed,0x7e,0x00,0x00]
psubsw 0x7eed,%xmm5
// CHECK: psubsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe9,0x2d,0xfe,0xca,0xbe,0xba]
psubsw 0xbabecafe,%xmm5
// CHECK: psubsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe9,0x2d,0x78,0x56,0x34,0x12]
psubsw 0x12345678,%xmm5
// CHECK: psubsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe9,0xed]
psubsw %xmm5,%xmm5
// CHECK: psubusb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xd8,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psubusb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psubusb 69, %mm3
// CHECK: encoding: [0x0f,0xd8,0x1d,0x45,0x00,0x00,0x00]
psubusb 0x45,%mm3
// CHECK: psubusb 32493, %mm3
// CHECK: encoding: [0x0f,0xd8,0x1d,0xed,0x7e,0x00,0x00]
psubusb 0x7eed,%mm3
// CHECK: psubusb 3133065982, %mm3
// CHECK: encoding: [0x0f,0xd8,0x1d,0xfe,0xca,0xbe,0xba]
psubusb 0xbabecafe,%mm3
// CHECK: psubusb 305419896, %mm3
// CHECK: encoding: [0x0f,0xd8,0x1d,0x78,0x56,0x34,0x12]
psubusb 0x12345678,%mm3
// CHECK: psubusb %mm3, %mm3
// CHECK: encoding: [0x0f,0xd8,0xdb]
psubusb %mm3,%mm3
// CHECK: psubusb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xd8,0xac,0xcb,0xef,0xbe,0xad,0xde]
psubusb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psubusb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd8,0x2d,0x45,0x00,0x00,0x00]
psubusb 0x45,%xmm5
// CHECK: psubusb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd8,0x2d,0xed,0x7e,0x00,0x00]
psubusb 0x7eed,%xmm5
// CHECK: psubusb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd8,0x2d,0xfe,0xca,0xbe,0xba]
psubusb 0xbabecafe,%xmm5
// CHECK: psubusb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd8,0x2d,0x78,0x56,0x34,0x12]
psubusb 0x12345678,%xmm5
// CHECK: psubusb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd8,0xed]
psubusb %xmm5,%xmm5
// CHECK: psubusw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xd9,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psubusw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psubusw 69, %mm3
// CHECK: encoding: [0x0f,0xd9,0x1d,0x45,0x00,0x00,0x00]
psubusw 0x45,%mm3
// CHECK: psubusw 32493, %mm3
// CHECK: encoding: [0x0f,0xd9,0x1d,0xed,0x7e,0x00,0x00]
psubusw 0x7eed,%mm3
// CHECK: psubusw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xd9,0x1d,0xfe,0xca,0xbe,0xba]
psubusw 0xbabecafe,%mm3
// CHECK: psubusw 305419896, %mm3
// CHECK: encoding: [0x0f,0xd9,0x1d,0x78,0x56,0x34,0x12]
psubusw 0x12345678,%mm3
// CHECK: psubusw %mm3, %mm3
// CHECK: encoding: [0x0f,0xd9,0xdb]
psubusw %mm3,%mm3
// CHECK: psubusw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xd9,0xac,0xcb,0xef,0xbe,0xad,0xde]
psubusw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psubusw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd9,0x2d,0x45,0x00,0x00,0x00]
psubusw 0x45,%xmm5
// CHECK: psubusw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd9,0x2d,0xed,0x7e,0x00,0x00]
psubusw 0x7eed,%xmm5
// CHECK: psubusw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd9,0x2d,0xfe,0xca,0xbe,0xba]
psubusw 0xbabecafe,%xmm5
// CHECK: psubusw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd9,0x2d,0x78,0x56,0x34,0x12]
psubusw 0x12345678,%xmm5
// CHECK: psubusw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd9,0xed]
psubusw %xmm5,%xmm5
// CHECK: punpckhbw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x68,0x9c,0xcb,0xef,0xbe,0xad,0xde]
punpckhbw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: punpckhbw 69, %mm3
// CHECK: encoding: [0x0f,0x68,0x1d,0x45,0x00,0x00,0x00]
punpckhbw 0x45,%mm3
// CHECK: punpckhbw 32493, %mm3
// CHECK: encoding: [0x0f,0x68,0x1d,0xed,0x7e,0x00,0x00]
punpckhbw 0x7eed,%mm3
// CHECK: punpckhbw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x68,0x1d,0xfe,0xca,0xbe,0xba]
punpckhbw 0xbabecafe,%mm3
// CHECK: punpckhbw 305419896, %mm3
// CHECK: encoding: [0x0f,0x68,0x1d,0x78,0x56,0x34,0x12]
punpckhbw 0x12345678,%mm3
// CHECK: punpckhbw %mm3, %mm3
// CHECK: encoding: [0x0f,0x68,0xdb]
punpckhbw %mm3,%mm3
// CHECK: punpckhbw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x68,0xac,0xcb,0xef,0xbe,0xad,0xde]
punpckhbw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: punpckhbw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x68,0x2d,0x45,0x00,0x00,0x00]
punpckhbw 0x45,%xmm5
// CHECK: punpckhbw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x68,0x2d,0xed,0x7e,0x00,0x00]
punpckhbw 0x7eed,%xmm5
// CHECK: punpckhbw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x68,0x2d,0xfe,0xca,0xbe,0xba]
punpckhbw 0xbabecafe,%xmm5
// CHECK: punpckhbw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x68,0x2d,0x78,0x56,0x34,0x12]
punpckhbw 0x12345678,%xmm5
// CHECK: punpckhbw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x68,0xed]
punpckhbw %xmm5,%xmm5
// CHECK: punpckhwd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x69,0x9c,0xcb,0xef,0xbe,0xad,0xde]
punpckhwd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: punpckhwd 69, %mm3
// CHECK: encoding: [0x0f,0x69,0x1d,0x45,0x00,0x00,0x00]
punpckhwd 0x45,%mm3
// CHECK: punpckhwd 32493, %mm3
// CHECK: encoding: [0x0f,0x69,0x1d,0xed,0x7e,0x00,0x00]
punpckhwd 0x7eed,%mm3
// CHECK: punpckhwd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x69,0x1d,0xfe,0xca,0xbe,0xba]
punpckhwd 0xbabecafe,%mm3
// CHECK: punpckhwd 305419896, %mm3
// CHECK: encoding: [0x0f,0x69,0x1d,0x78,0x56,0x34,0x12]
punpckhwd 0x12345678,%mm3
// CHECK: punpckhwd %mm3, %mm3
// CHECK: encoding: [0x0f,0x69,0xdb]
punpckhwd %mm3,%mm3
// CHECK: punpckhwd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x69,0xac,0xcb,0xef,0xbe,0xad,0xde]
punpckhwd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: punpckhwd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x69,0x2d,0x45,0x00,0x00,0x00]
punpckhwd 0x45,%xmm5
// CHECK: punpckhwd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x69,0x2d,0xed,0x7e,0x00,0x00]
punpckhwd 0x7eed,%xmm5
// CHECK: punpckhwd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x69,0x2d,0xfe,0xca,0xbe,0xba]
punpckhwd 0xbabecafe,%xmm5
// CHECK: punpckhwd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x69,0x2d,0x78,0x56,0x34,0x12]
punpckhwd 0x12345678,%xmm5
// CHECK: punpckhwd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x69,0xed]
punpckhwd %xmm5,%xmm5
// CHECK: punpckhdq 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x6a,0x9c,0xcb,0xef,0xbe,0xad,0xde]
punpckhdq 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: punpckhdq 69, %mm3
// CHECK: encoding: [0x0f,0x6a,0x1d,0x45,0x00,0x00,0x00]
punpckhdq 0x45,%mm3
// CHECK: punpckhdq 32493, %mm3
// CHECK: encoding: [0x0f,0x6a,0x1d,0xed,0x7e,0x00,0x00]
punpckhdq 0x7eed,%mm3
// CHECK: punpckhdq 3133065982, %mm3
// CHECK: encoding: [0x0f,0x6a,0x1d,0xfe,0xca,0xbe,0xba]
punpckhdq 0xbabecafe,%mm3
// CHECK: punpckhdq 305419896, %mm3
// CHECK: encoding: [0x0f,0x6a,0x1d,0x78,0x56,0x34,0x12]
punpckhdq 0x12345678,%mm3
// CHECK: punpckhdq %mm3, %mm3
// CHECK: encoding: [0x0f,0x6a,0xdb]
punpckhdq %mm3,%mm3
// CHECK: punpckhdq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x6a,0xac,0xcb,0xef,0xbe,0xad,0xde]
punpckhdq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: punpckhdq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6a,0x2d,0x45,0x00,0x00,0x00]
punpckhdq 0x45,%xmm5
// CHECK: punpckhdq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6a,0x2d,0xed,0x7e,0x00,0x00]
punpckhdq 0x7eed,%xmm5
// CHECK: punpckhdq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6a,0x2d,0xfe,0xca,0xbe,0xba]
punpckhdq 0xbabecafe,%xmm5
// CHECK: punpckhdq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6a,0x2d,0x78,0x56,0x34,0x12]
punpckhdq 0x12345678,%xmm5
// CHECK: punpckhdq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6a,0xed]
punpckhdq %xmm5,%xmm5
// CHECK: punpcklbw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x60,0x9c,0xcb,0xef,0xbe,0xad,0xde]
punpcklbw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: punpcklbw 69, %mm3
// CHECK: encoding: [0x0f,0x60,0x1d,0x45,0x00,0x00,0x00]
punpcklbw 0x45,%mm3
// CHECK: punpcklbw 32493, %mm3
// CHECK: encoding: [0x0f,0x60,0x1d,0xed,0x7e,0x00,0x00]
punpcklbw 0x7eed,%mm3
// CHECK: punpcklbw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x60,0x1d,0xfe,0xca,0xbe,0xba]
punpcklbw 0xbabecafe,%mm3
// CHECK: punpcklbw 305419896, %mm3
// CHECK: encoding: [0x0f,0x60,0x1d,0x78,0x56,0x34,0x12]
punpcklbw 0x12345678,%mm3
// CHECK: punpcklbw %mm3, %mm3
// CHECK: encoding: [0x0f,0x60,0xdb]
punpcklbw %mm3,%mm3
// CHECK: punpcklbw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x60,0xac,0xcb,0xef,0xbe,0xad,0xde]
punpcklbw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: punpcklbw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x60,0x2d,0x45,0x00,0x00,0x00]
punpcklbw 0x45,%xmm5
// CHECK: punpcklbw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x60,0x2d,0xed,0x7e,0x00,0x00]
punpcklbw 0x7eed,%xmm5
// CHECK: punpcklbw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x60,0x2d,0xfe,0xca,0xbe,0xba]
punpcklbw 0xbabecafe,%xmm5
// CHECK: punpcklbw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x60,0x2d,0x78,0x56,0x34,0x12]
punpcklbw 0x12345678,%xmm5
// CHECK: punpcklbw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x60,0xed]
punpcklbw %xmm5,%xmm5
// CHECK: punpcklwd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x61,0x9c,0xcb,0xef,0xbe,0xad,0xde]
punpcklwd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: punpcklwd 69, %mm3
// CHECK: encoding: [0x0f,0x61,0x1d,0x45,0x00,0x00,0x00]
punpcklwd 0x45,%mm3
// CHECK: punpcklwd 32493, %mm3
// CHECK: encoding: [0x0f,0x61,0x1d,0xed,0x7e,0x00,0x00]
punpcklwd 0x7eed,%mm3
// CHECK: punpcklwd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x61,0x1d,0xfe,0xca,0xbe,0xba]
punpcklwd 0xbabecafe,%mm3
// CHECK: punpcklwd 305419896, %mm3
// CHECK: encoding: [0x0f,0x61,0x1d,0x78,0x56,0x34,0x12]
punpcklwd 0x12345678,%mm3
// CHECK: punpcklwd %mm3, %mm3
// CHECK: encoding: [0x0f,0x61,0xdb]
punpcklwd %mm3,%mm3
// CHECK: punpcklwd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x61,0xac,0xcb,0xef,0xbe,0xad,0xde]
punpcklwd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: punpcklwd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x61,0x2d,0x45,0x00,0x00,0x00]
punpcklwd 0x45,%xmm5
// CHECK: punpcklwd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x61,0x2d,0xed,0x7e,0x00,0x00]
punpcklwd 0x7eed,%xmm5
// CHECK: punpcklwd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x61,0x2d,0xfe,0xca,0xbe,0xba]
punpcklwd 0xbabecafe,%xmm5
// CHECK: punpcklwd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x61,0x2d,0x78,0x56,0x34,0x12]
punpcklwd 0x12345678,%xmm5
// CHECK: punpcklwd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x61,0xed]
punpcklwd %xmm5,%xmm5
// CHECK: punpckldq 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x62,0x9c,0xcb,0xef,0xbe,0xad,0xde]
punpckldq 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: punpckldq 69, %mm3
// CHECK: encoding: [0x0f,0x62,0x1d,0x45,0x00,0x00,0x00]
punpckldq 0x45,%mm3
// CHECK: punpckldq 32493, %mm3
// CHECK: encoding: [0x0f,0x62,0x1d,0xed,0x7e,0x00,0x00]
punpckldq 0x7eed,%mm3
// CHECK: punpckldq 3133065982, %mm3
// CHECK: encoding: [0x0f,0x62,0x1d,0xfe,0xca,0xbe,0xba]
punpckldq 0xbabecafe,%mm3
// CHECK: punpckldq 305419896, %mm3
// CHECK: encoding: [0x0f,0x62,0x1d,0x78,0x56,0x34,0x12]
punpckldq 0x12345678,%mm3
// CHECK: punpckldq %mm3, %mm3
// CHECK: encoding: [0x0f,0x62,0xdb]
punpckldq %mm3,%mm3
// CHECK: punpckldq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x62,0xac,0xcb,0xef,0xbe,0xad,0xde]
punpckldq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: punpckldq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x62,0x2d,0x45,0x00,0x00,0x00]
punpckldq 0x45,%xmm5
// CHECK: punpckldq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x62,0x2d,0xed,0x7e,0x00,0x00]
punpckldq 0x7eed,%xmm5
// CHECK: punpckldq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x62,0x2d,0xfe,0xca,0xbe,0xba]
punpckldq 0xbabecafe,%xmm5
// CHECK: punpckldq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x62,0x2d,0x78,0x56,0x34,0x12]
punpckldq 0x12345678,%xmm5
// CHECK: punpckldq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x62,0xed]
punpckldq %xmm5,%xmm5
// CHECK: pxor 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xef,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pxor 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pxor 69, %mm3
// CHECK: encoding: [0x0f,0xef,0x1d,0x45,0x00,0x00,0x00]
pxor 0x45,%mm3
// CHECK: pxor 32493, %mm3
// CHECK: encoding: [0x0f,0xef,0x1d,0xed,0x7e,0x00,0x00]
pxor 0x7eed,%mm3
// CHECK: pxor 3133065982, %mm3
// CHECK: encoding: [0x0f,0xef,0x1d,0xfe,0xca,0xbe,0xba]
pxor 0xbabecafe,%mm3
// CHECK: pxor 305419896, %mm3
// CHECK: encoding: [0x0f,0xef,0x1d,0x78,0x56,0x34,0x12]
pxor 0x12345678,%mm3
// CHECK: pxor %mm3, %mm3
// CHECK: encoding: [0x0f,0xef,0xdb]
pxor %mm3,%mm3
// CHECK: pxor 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xef,0xac,0xcb,0xef,0xbe,0xad,0xde]
pxor 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pxor 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xef,0x2d,0x45,0x00,0x00,0x00]
pxor 0x45,%xmm5
// CHECK: pxor 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xef,0x2d,0xed,0x7e,0x00,0x00]
pxor 0x7eed,%xmm5
// CHECK: pxor 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xef,0x2d,0xfe,0xca,0xbe,0xba]
pxor 0xbabecafe,%xmm5
// CHECK: pxor 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xef,0x2d,0x78,0x56,0x34,0x12]
pxor 0x12345678,%xmm5
// CHECK: pxor %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xef,0xed]
pxor %xmm5,%xmm5
// CHECK: addps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde]
addps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: addps 69, %xmm5
// CHECK: encoding: [0x0f,0x58,0x2d,0x45,0x00,0x00,0x00]
addps 0x45,%xmm5
// CHECK: addps 32493, %xmm5
// CHECK: encoding: [0x0f,0x58,0x2d,0xed,0x7e,0x00,0x00]
addps 0x7eed,%xmm5
// CHECK: addps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x58,0x2d,0xfe,0xca,0xbe,0xba]
addps 0xbabecafe,%xmm5
// CHECK: addps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x58,0x2d,0x78,0x56,0x34,0x12]
addps 0x12345678,%xmm5
// CHECK: addps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x58,0xed]
addps %xmm5,%xmm5
// CHECK: addss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde]
addss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: addss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x58,0x2d,0x45,0x00,0x00,0x00]
addss 0x45,%xmm5
// CHECK: addss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x58,0x2d,0xed,0x7e,0x00,0x00]
addss 0x7eed,%xmm5
// CHECK: addss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x58,0x2d,0xfe,0xca,0xbe,0xba]
addss 0xbabecafe,%xmm5
// CHECK: addss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x58,0x2d,0x78,0x56,0x34,0x12]
addss 0x12345678,%xmm5
// CHECK: addss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x58,0xed]
addss %xmm5,%xmm5
// CHECK: andnps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x55,0xac,0xcb,0xef,0xbe,0xad,0xde]
andnps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: andnps 69, %xmm5
// CHECK: encoding: [0x0f,0x55,0x2d,0x45,0x00,0x00,0x00]
andnps 0x45,%xmm5
// CHECK: andnps 32493, %xmm5
// CHECK: encoding: [0x0f,0x55,0x2d,0xed,0x7e,0x00,0x00]
andnps 0x7eed,%xmm5
// CHECK: andnps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x55,0x2d,0xfe,0xca,0xbe,0xba]
andnps 0xbabecafe,%xmm5
// CHECK: andnps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x55,0x2d,0x78,0x56,0x34,0x12]
andnps 0x12345678,%xmm5
// CHECK: andnps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x55,0xed]
andnps %xmm5,%xmm5
// CHECK: andps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x54,0xac,0xcb,0xef,0xbe,0xad,0xde]
andps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: andps 69, %xmm5
// CHECK: encoding: [0x0f,0x54,0x2d,0x45,0x00,0x00,0x00]
andps 0x45,%xmm5
// CHECK: andps 32493, %xmm5
// CHECK: encoding: [0x0f,0x54,0x2d,0xed,0x7e,0x00,0x00]
andps 0x7eed,%xmm5
// CHECK: andps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x54,0x2d,0xfe,0xca,0xbe,0xba]
andps 0xbabecafe,%xmm5
// CHECK: andps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x54,0x2d,0x78,0x56,0x34,0x12]
andps 0x12345678,%xmm5
// CHECK: andps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x54,0xed]
andps %xmm5,%xmm5
// CHECK: comiss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x2f,0xac,0xcb,0xef,0xbe,0xad,0xde]
comiss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: comiss 69, %xmm5
// CHECK: encoding: [0x0f,0x2f,0x2d,0x45,0x00,0x00,0x00]
comiss 0x45,%xmm5
// CHECK: comiss 32493, %xmm5
// CHECK: encoding: [0x0f,0x2f,0x2d,0xed,0x7e,0x00,0x00]
comiss 0x7eed,%xmm5
// CHECK: comiss 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x2f,0x2d,0xfe,0xca,0xbe,0xba]
comiss 0xbabecafe,%xmm5
// CHECK: comiss 305419896, %xmm5
// CHECK: encoding: [0x0f,0x2f,0x2d,0x78,0x56,0x34,0x12]
comiss 0x12345678,%xmm5
// CHECK: comiss %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x2f,0xed]
comiss %xmm5,%xmm5
// CHECK: cvtpi2ps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x2a,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtpi2ps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtpi2ps 69, %xmm5
// CHECK: encoding: [0x0f,0x2a,0x2d,0x45,0x00,0x00,0x00]
cvtpi2ps 0x45,%xmm5
// CHECK: cvtpi2ps 32493, %xmm5
// CHECK: encoding: [0x0f,0x2a,0x2d,0xed,0x7e,0x00,0x00]
cvtpi2ps 0x7eed,%xmm5
// CHECK: cvtpi2ps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x2a,0x2d,0xfe,0xca,0xbe,0xba]
cvtpi2ps 0xbabecafe,%xmm5
// CHECK: cvtpi2ps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x2a,0x2d,0x78,0x56,0x34,0x12]
cvtpi2ps 0x12345678,%xmm5
// CHECK: cvtpi2ps %mm3, %xmm5
// CHECK: encoding: [0x0f,0x2a,0xeb]
cvtpi2ps %mm3,%xmm5
// CHECK: cvtps2pi 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x2d,0x9c,0xcb,0xef,0xbe,0xad,0xde]
cvtps2pi 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: cvtps2pi 69, %mm3
// CHECK: encoding: [0x0f,0x2d,0x1d,0x45,0x00,0x00,0x00]
cvtps2pi 0x45,%mm3
// CHECK: cvtps2pi 32493, %mm3
// CHECK: encoding: [0x0f,0x2d,0x1d,0xed,0x7e,0x00,0x00]
cvtps2pi 0x7eed,%mm3
// CHECK: cvtps2pi 3133065982, %mm3
// CHECK: encoding: [0x0f,0x2d,0x1d,0xfe,0xca,0xbe,0xba]
cvtps2pi 0xbabecafe,%mm3
// CHECK: cvtps2pi 305419896, %mm3
// CHECK: encoding: [0x0f,0x2d,0x1d,0x78,0x56,0x34,0x12]
cvtps2pi 0x12345678,%mm3
// CHECK: cvtps2pi %xmm5, %mm3
// CHECK: encoding: [0x0f,0x2d,0xdd]
cvtps2pi %xmm5,%mm3
// CHECK: cvtsi2ssl %ecx, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0xe9]
cvtsi2ssl %ecx,%xmm5
// CHECK: cvtsi2ssl 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtsi2ssl 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtsi2ssl 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0x2d,0x45,0x00,0x00,0x00]
cvtsi2ssl 0x45,%xmm5
// CHECK: cvtsi2ssl 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0x2d,0xed,0x7e,0x00,0x00]
cvtsi2ssl 0x7eed,%xmm5
// CHECK: cvtsi2ssl 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0x2d,0xfe,0xca,0xbe,0xba]
cvtsi2ssl 0xbabecafe,%xmm5
// CHECK: cvtsi2ssl 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0x2d,0x78,0x56,0x34,0x12]
cvtsi2ssl 0x12345678,%xmm5
// CHECK: cvttps2pi 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x2c,0x9c,0xcb,0xef,0xbe,0xad,0xde]
cvttps2pi 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: cvttps2pi 69, %mm3
// CHECK: encoding: [0x0f,0x2c,0x1d,0x45,0x00,0x00,0x00]
cvttps2pi 0x45,%mm3
// CHECK: cvttps2pi 32493, %mm3
// CHECK: encoding: [0x0f,0x2c,0x1d,0xed,0x7e,0x00,0x00]
cvttps2pi 0x7eed,%mm3
// CHECK: cvttps2pi 3133065982, %mm3
// CHECK: encoding: [0x0f,0x2c,0x1d,0xfe,0xca,0xbe,0xba]
cvttps2pi 0xbabecafe,%mm3
// CHECK: cvttps2pi 305419896, %mm3
// CHECK: encoding: [0x0f,0x2c,0x1d,0x78,0x56,0x34,0x12]
cvttps2pi 0x12345678,%mm3
// CHECK: cvttps2pi %xmm5, %mm3
// CHECK: encoding: [0x0f,0x2c,0xdd]
cvttps2pi %xmm5,%mm3
// CHECK: cvttss2si 3735928559(%ebx,%ecx,8), %ecx
// CHECK: encoding: [0xf3,0x0f,0x2c,0x8c,0xcb,0xef,0xbe,0xad,0xde]
cvttss2si 0xdeadbeef(%ebx,%ecx,8),%ecx
// CHECK: cvttss2si 69, %ecx
// CHECK: encoding: [0xf3,0x0f,0x2c,0x0d,0x45,0x00,0x00,0x00]
cvttss2si 0x45,%ecx
// CHECK: cvttss2si 32493, %ecx
// CHECK: encoding: [0xf3,0x0f,0x2c,0x0d,0xed,0x7e,0x00,0x00]
cvttss2si 0x7eed,%ecx
// CHECK: cvttss2si 3133065982, %ecx
// CHECK: encoding: [0xf3,0x0f,0x2c,0x0d,0xfe,0xca,0xbe,0xba]
cvttss2si 0xbabecafe,%ecx
// CHECK: cvttss2si 305419896, %ecx
// CHECK: encoding: [0xf3,0x0f,0x2c,0x0d,0x78,0x56,0x34,0x12]
cvttss2si 0x12345678,%ecx
// CHECK: cvttss2si %xmm5, %ecx
// CHECK: encoding: [0xf3,0x0f,0x2c,0xcd]
cvttss2si %xmm5,%ecx
// CHECK: divps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
divps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: divps 69, %xmm5
// CHECK: encoding: [0x0f,0x5e,0x2d,0x45,0x00,0x00,0x00]
divps 0x45,%xmm5
// CHECK: divps 32493, %xmm5
// CHECK: encoding: [0x0f,0x5e,0x2d,0xed,0x7e,0x00,0x00]
divps 0x7eed,%xmm5
// CHECK: divps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x5e,0x2d,0xfe,0xca,0xbe,0xba]
divps 0xbabecafe,%xmm5
// CHECK: divps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x5e,0x2d,0x78,0x56,0x34,0x12]
divps 0x12345678,%xmm5
// CHECK: divps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x5e,0xed]
divps %xmm5,%xmm5
// CHECK: divss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
divss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: divss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5e,0x2d,0x45,0x00,0x00,0x00]
divss 0x45,%xmm5
// CHECK: divss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5e,0x2d,0xed,0x7e,0x00,0x00]
divss 0x7eed,%xmm5
// CHECK: divss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5e,0x2d,0xfe,0xca,0xbe,0xba]
divss 0xbabecafe,%xmm5
// CHECK: divss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5e,0x2d,0x78,0x56,0x34,0x12]
divss 0x12345678,%xmm5
// CHECK: divss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5e,0xed]
divss %xmm5,%xmm5
// CHECK: ldmxcsr 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0x94,0xcb,0xef,0xbe,0xad,0xde]
ldmxcsr 0xdeadbeef(%ebx,%ecx,8)
// CHECK: ldmxcsr 32493
// CHECK: encoding: [0x0f,0xae,0x15,0xed,0x7e,0x00,0x00]
ldmxcsr 0x7eed
// CHECK: ldmxcsr 3133065982
// CHECK: encoding: [0x0f,0xae,0x15,0xfe,0xca,0xbe,0xba]
ldmxcsr 0xbabecafe
// CHECK: ldmxcsr 305419896
// CHECK: encoding: [0x0f,0xae,0x15,0x78,0x56,0x34,0x12]
ldmxcsr 0x12345678
// CHECK: maskmovq %mm3, %mm3
// CHECK: encoding: [0x0f,0xf7,0xdb]
maskmovq %mm3,%mm3
// CHECK: maxps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x5f,0xac,0xcb,0xef,0xbe,0xad,0xde]
maxps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: maxps 69, %xmm5
// CHECK: encoding: [0x0f,0x5f,0x2d,0x45,0x00,0x00,0x00]
maxps 0x45,%xmm5
// CHECK: maxps 32493, %xmm5
// CHECK: encoding: [0x0f,0x5f,0x2d,0xed,0x7e,0x00,0x00]
maxps 0x7eed,%xmm5
// CHECK: maxps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x5f,0x2d,0xfe,0xca,0xbe,0xba]
maxps 0xbabecafe,%xmm5
// CHECK: maxps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x5f,0x2d,0x78,0x56,0x34,0x12]
maxps 0x12345678,%xmm5
// CHECK: maxps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x5f,0xed]
maxps %xmm5,%xmm5
// CHECK: maxss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5f,0xac,0xcb,0xef,0xbe,0xad,0xde]
maxss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: maxss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5f,0x2d,0x45,0x00,0x00,0x00]
maxss 0x45,%xmm5
// CHECK: maxss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5f,0x2d,0xed,0x7e,0x00,0x00]
maxss 0x7eed,%xmm5
// CHECK: maxss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5f,0x2d,0xfe,0xca,0xbe,0xba]
maxss 0xbabecafe,%xmm5
// CHECK: maxss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5f,0x2d,0x78,0x56,0x34,0x12]
maxss 0x12345678,%xmm5
// CHECK: maxss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5f,0xed]
maxss %xmm5,%xmm5
// CHECK: minps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x5d,0xac,0xcb,0xef,0xbe,0xad,0xde]
minps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: minps 69, %xmm5
// CHECK: encoding: [0x0f,0x5d,0x2d,0x45,0x00,0x00,0x00]
minps 0x45,%xmm5
// CHECK: minps 32493, %xmm5
// CHECK: encoding: [0x0f,0x5d,0x2d,0xed,0x7e,0x00,0x00]
minps 0x7eed,%xmm5
// CHECK: minps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x5d,0x2d,0xfe,0xca,0xbe,0xba]
minps 0xbabecafe,%xmm5
// CHECK: minps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x5d,0x2d,0x78,0x56,0x34,0x12]
minps 0x12345678,%xmm5
// CHECK: minps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x5d,0xed]
minps %xmm5,%xmm5
// CHECK: minss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5d,0xac,0xcb,0xef,0xbe,0xad,0xde]
minss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: minss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5d,0x2d,0x45,0x00,0x00,0x00]
minss 0x45,%xmm5
// CHECK: minss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5d,0x2d,0xed,0x7e,0x00,0x00]
minss 0x7eed,%xmm5
// CHECK: minss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5d,0x2d,0xfe,0xca,0xbe,0xba]
minss 0xbabecafe,%xmm5
// CHECK: minss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5d,0x2d,0x78,0x56,0x34,0x12]
minss 0x12345678,%xmm5
// CHECK: minss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5d,0xed]
minss %xmm5,%xmm5
// CHECK: movaps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x28,0xac,0xcb,0xef,0xbe,0xad,0xde]
movaps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movaps 69, %xmm5
// CHECK: encoding: [0x0f,0x28,0x2d,0x45,0x00,0x00,0x00]
movaps 0x45,%xmm5
// CHECK: movaps 32493, %xmm5
// CHECK: encoding: [0x0f,0x28,0x2d,0xed,0x7e,0x00,0x00]
movaps 0x7eed,%xmm5
// CHECK: movaps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x28,0x2d,0xfe,0xca,0xbe,0xba]
movaps 0xbabecafe,%xmm5
// CHECK: movaps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x28,0x2d,0x78,0x56,0x34,0x12]
movaps 0x12345678,%xmm5
// CHECK: movaps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x28,0xed]
movaps %xmm5,%xmm5
// CHECK: movaps %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x29,0xac,0xcb,0xef,0xbe,0xad,0xde]
movaps %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movaps %xmm5, 69
// CHECK: encoding: [0x0f,0x29,0x2d,0x45,0x00,0x00,0x00]
movaps %xmm5,0x45
// CHECK: movaps %xmm5, 32493
// CHECK: encoding: [0x0f,0x29,0x2d,0xed,0x7e,0x00,0x00]
movaps %xmm5,0x7eed
// CHECK: movaps %xmm5, 3133065982
// CHECK: encoding: [0x0f,0x29,0x2d,0xfe,0xca,0xbe,0xba]
movaps %xmm5,0xbabecafe
// CHECK: movaps %xmm5, 305419896
// CHECK: encoding: [0x0f,0x29,0x2d,0x78,0x56,0x34,0x12]
movaps %xmm5,0x12345678
// CHECK: movhlps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x12,0xed]
movhlps %xmm5,%xmm5
// CHECK: movhps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x16,0xac,0xcb,0xef,0xbe,0xad,0xde]
movhps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movhps 69, %xmm5
// CHECK: encoding: [0x0f,0x16,0x2d,0x45,0x00,0x00,0x00]
movhps 0x45,%xmm5
// CHECK: movhps 32493, %xmm5
// CHECK: encoding: [0x0f,0x16,0x2d,0xed,0x7e,0x00,0x00]
movhps 0x7eed,%xmm5
// CHECK: movhps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x16,0x2d,0xfe,0xca,0xbe,0xba]
movhps 0xbabecafe,%xmm5
// CHECK: movhps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x16,0x2d,0x78,0x56,0x34,0x12]
movhps 0x12345678,%xmm5
// CHECK: movhps %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x17,0xac,0xcb,0xef,0xbe,0xad,0xde]
movhps %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movhps %xmm5, 69
// CHECK: encoding: [0x0f,0x17,0x2d,0x45,0x00,0x00,0x00]
movhps %xmm5,0x45
// CHECK: movhps %xmm5, 32493
// CHECK: encoding: [0x0f,0x17,0x2d,0xed,0x7e,0x00,0x00]
movhps %xmm5,0x7eed
// CHECK: movhps %xmm5, 3133065982
// CHECK: encoding: [0x0f,0x17,0x2d,0xfe,0xca,0xbe,0xba]
movhps %xmm5,0xbabecafe
// CHECK: movhps %xmm5, 305419896
// CHECK: encoding: [0x0f,0x17,0x2d,0x78,0x56,0x34,0x12]
movhps %xmm5,0x12345678
// CHECK: movlhps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x16,0xed]
movlhps %xmm5,%xmm5
// CHECK: movlps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x12,0xac,0xcb,0xef,0xbe,0xad,0xde]
movlps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movlps 69, %xmm5
// CHECK: encoding: [0x0f,0x12,0x2d,0x45,0x00,0x00,0x00]
movlps 0x45,%xmm5
// CHECK: movlps 32493, %xmm5
// CHECK: encoding: [0x0f,0x12,0x2d,0xed,0x7e,0x00,0x00]
movlps 0x7eed,%xmm5
// CHECK: movlps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x12,0x2d,0xfe,0xca,0xbe,0xba]
movlps 0xbabecafe,%xmm5
// CHECK: movlps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x12,0x2d,0x78,0x56,0x34,0x12]
movlps 0x12345678,%xmm5
// CHECK: movlps %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x13,0xac,0xcb,0xef,0xbe,0xad,0xde]
movlps %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movlps %xmm5, 69
// CHECK: encoding: [0x0f,0x13,0x2d,0x45,0x00,0x00,0x00]
movlps %xmm5,0x45
// CHECK: movlps %xmm5, 32493
// CHECK: encoding: [0x0f,0x13,0x2d,0xed,0x7e,0x00,0x00]
movlps %xmm5,0x7eed
// CHECK: movlps %xmm5, 3133065982
// CHECK: encoding: [0x0f,0x13,0x2d,0xfe,0xca,0xbe,0xba]
movlps %xmm5,0xbabecafe
// CHECK: movlps %xmm5, 305419896
// CHECK: encoding: [0x0f,0x13,0x2d,0x78,0x56,0x34,0x12]
movlps %xmm5,0x12345678
// CHECK: movmskps %xmm5, %ecx
// CHECK: encoding: [0x0f,0x50,0xcd]
movmskps %xmm5,%ecx
// CHECK: movntps %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x2b,0xac,0xcb,0xef,0xbe,0xad,0xde]
movntps %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movntps %xmm5, 69
// CHECK: encoding: [0x0f,0x2b,0x2d,0x45,0x00,0x00,0x00]
movntps %xmm5,0x45
// CHECK: movntps %xmm5, 32493
// CHECK: encoding: [0x0f,0x2b,0x2d,0xed,0x7e,0x00,0x00]
movntps %xmm5,0x7eed
// CHECK: movntps %xmm5, 3133065982
// CHECK: encoding: [0x0f,0x2b,0x2d,0xfe,0xca,0xbe,0xba]
movntps %xmm5,0xbabecafe
// CHECK: movntps %xmm5, 305419896
// CHECK: encoding: [0x0f,0x2b,0x2d,0x78,0x56,0x34,0x12]
movntps %xmm5,0x12345678
// CHECK: movntq %mm3, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xe7,0x9c,0xcb,0xef,0xbe,0xad,0xde]
movntq %mm3,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movntq %mm3, 69
// CHECK: encoding: [0x0f,0xe7,0x1d,0x45,0x00,0x00,0x00]
movntq %mm3,0x45
// CHECK: movntq %mm3, 32493
// CHECK: encoding: [0x0f,0xe7,0x1d,0xed,0x7e,0x00,0x00]
movntq %mm3,0x7eed
// CHECK: movntq %mm3, 3133065982
// CHECK: encoding: [0x0f,0xe7,0x1d,0xfe,0xca,0xbe,0xba]
movntq %mm3,0xbabecafe
// CHECK: movntq %mm3, 305419896
// CHECK: encoding: [0x0f,0xe7,0x1d,0x78,0x56,0x34,0x12]
movntq %mm3,0x12345678
// CHECK: movntdq %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0xe7,0xac,0xcb,0xef,0xbe,0xad,0xde]
movntdq %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movntdq %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0xe7,0x2d,0x45,0x00,0x00,0x00]
movntdq %xmm5,0x45
// CHECK: movntdq %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0xe7,0x2d,0xed,0x7e,0x00,0x00]
movntdq %xmm5,0x7eed
// CHECK: movntdq %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0xe7,0x2d,0xfe,0xca,0xbe,0xba]
movntdq %xmm5,0xbabecafe
// CHECK: movntdq %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0xe7,0x2d,0x78,0x56,0x34,0x12]
movntdq %xmm5,0x12345678
// CHECK: movss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x10,0xac,0xcb,0xef,0xbe,0xad,0xde]
movss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x10,0x2d,0x45,0x00,0x00,0x00]
movss 0x45,%xmm5
// CHECK: movss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x10,0x2d,0xed,0x7e,0x00,0x00]
movss 0x7eed,%xmm5
// CHECK: movss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x10,0x2d,0xfe,0xca,0xbe,0xba]
movss 0xbabecafe,%xmm5
// CHECK: movss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x10,0x2d,0x78,0x56,0x34,0x12]
movss 0x12345678,%xmm5
// CHECK: movss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x10,0xed]
movss %xmm5,%xmm5
// CHECK: movss %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf3,0x0f,0x11,0xac,0xcb,0xef,0xbe,0xad,0xde]
movss %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movss %xmm5, 69
// CHECK: encoding: [0xf3,0x0f,0x11,0x2d,0x45,0x00,0x00,0x00]
movss %xmm5,0x45
// CHECK: movss %xmm5, 32493
// CHECK: encoding: [0xf3,0x0f,0x11,0x2d,0xed,0x7e,0x00,0x00]
movss %xmm5,0x7eed
// CHECK: movss %xmm5, 3133065982
// CHECK: encoding: [0xf3,0x0f,0x11,0x2d,0xfe,0xca,0xbe,0xba]
movss %xmm5,0xbabecafe
// CHECK: movss %xmm5, 305419896
// CHECK: encoding: [0xf3,0x0f,0x11,0x2d,0x78,0x56,0x34,0x12]
movss %xmm5,0x12345678
// CHECK: movss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x10,0xed]
movss %xmm5,%xmm5
// CHECK: movups 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x10,0xac,0xcb,0xef,0xbe,0xad,0xde]
movups 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movups 69, %xmm5
// CHECK: encoding: [0x0f,0x10,0x2d,0x45,0x00,0x00,0x00]
movups 0x45,%xmm5
// CHECK: movups 32493, %xmm5
// CHECK: encoding: [0x0f,0x10,0x2d,0xed,0x7e,0x00,0x00]
movups 0x7eed,%xmm5
// CHECK: movups 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x10,0x2d,0xfe,0xca,0xbe,0xba]
movups 0xbabecafe,%xmm5
// CHECK: movups 305419896, %xmm5
// CHECK: encoding: [0x0f,0x10,0x2d,0x78,0x56,0x34,0x12]
movups 0x12345678,%xmm5
// CHECK: movups %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x10,0xed]
movups %xmm5,%xmm5
// CHECK: movups %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x11,0xac,0xcb,0xef,0xbe,0xad,0xde]
movups %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movups %xmm5, 69
// CHECK: encoding: [0x0f,0x11,0x2d,0x45,0x00,0x00,0x00]
movups %xmm5,0x45
// CHECK: movups %xmm5, 32493
// CHECK: encoding: [0x0f,0x11,0x2d,0xed,0x7e,0x00,0x00]
movups %xmm5,0x7eed
// CHECK: movups %xmm5, 3133065982
// CHECK: encoding: [0x0f,0x11,0x2d,0xfe,0xca,0xbe,0xba]
movups %xmm5,0xbabecafe
// CHECK: movups %xmm5, 305419896
// CHECK: encoding: [0x0f,0x11,0x2d,0x78,0x56,0x34,0x12]
movups %xmm5,0x12345678
// CHECK: movups %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x10,0xed]
movups %xmm5,%xmm5
// CHECK: mulps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde]
mulps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: mulps 69, %xmm5
// CHECK: encoding: [0x0f,0x59,0x2d,0x45,0x00,0x00,0x00]
mulps 0x45,%xmm5
// CHECK: mulps 32493, %xmm5
// CHECK: encoding: [0x0f,0x59,0x2d,0xed,0x7e,0x00,0x00]
mulps 0x7eed,%xmm5
// CHECK: mulps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x59,0x2d,0xfe,0xca,0xbe,0xba]
mulps 0xbabecafe,%xmm5
// CHECK: mulps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x59,0x2d,0x78,0x56,0x34,0x12]
mulps 0x12345678,%xmm5
// CHECK: mulps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x59,0xed]
mulps %xmm5,%xmm5
// CHECK: mulss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde]
mulss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: mulss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x59,0x2d,0x45,0x00,0x00,0x00]
mulss 0x45,%xmm5
// CHECK: mulss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x59,0x2d,0xed,0x7e,0x00,0x00]
mulss 0x7eed,%xmm5
// CHECK: mulss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x59,0x2d,0xfe,0xca,0xbe,0xba]
mulss 0xbabecafe,%xmm5
// CHECK: mulss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x59,0x2d,0x78,0x56,0x34,0x12]
mulss 0x12345678,%xmm5
// CHECK: mulss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x59,0xed]
mulss %xmm5,%xmm5
// CHECK: orps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x56,0xac,0xcb,0xef,0xbe,0xad,0xde]
orps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: orps 69, %xmm5
// CHECK: encoding: [0x0f,0x56,0x2d,0x45,0x00,0x00,0x00]
orps 0x45,%xmm5
// CHECK: orps 32493, %xmm5
// CHECK: encoding: [0x0f,0x56,0x2d,0xed,0x7e,0x00,0x00]
orps 0x7eed,%xmm5
// CHECK: orps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x56,0x2d,0xfe,0xca,0xbe,0xba]
orps 0xbabecafe,%xmm5
// CHECK: orps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x56,0x2d,0x78,0x56,0x34,0x12]
orps 0x12345678,%xmm5
// CHECK: orps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x56,0xed]
orps %xmm5,%xmm5
// CHECK: pavgb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xe0,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pavgb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pavgb 69, %mm3
// CHECK: encoding: [0x0f,0xe0,0x1d,0x45,0x00,0x00,0x00]
pavgb 0x45,%mm3
// CHECK: pavgb 32493, %mm3
// CHECK: encoding: [0x0f,0xe0,0x1d,0xed,0x7e,0x00,0x00]
pavgb 0x7eed,%mm3
// CHECK: pavgb 3133065982, %mm3
// CHECK: encoding: [0x0f,0xe0,0x1d,0xfe,0xca,0xbe,0xba]
pavgb 0xbabecafe,%mm3
// CHECK: pavgb 305419896, %mm3
// CHECK: encoding: [0x0f,0xe0,0x1d,0x78,0x56,0x34,0x12]
pavgb 0x12345678,%mm3
// CHECK: pavgb %mm3, %mm3
// CHECK: encoding: [0x0f,0xe0,0xdb]
pavgb %mm3,%mm3
// CHECK: pavgb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xe0,0xac,0xcb,0xef,0xbe,0xad,0xde]
pavgb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pavgb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe0,0x2d,0x45,0x00,0x00,0x00]
pavgb 0x45,%xmm5
// CHECK: pavgb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe0,0x2d,0xed,0x7e,0x00,0x00]
pavgb 0x7eed,%xmm5
// CHECK: pavgb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe0,0x2d,0xfe,0xca,0xbe,0xba]
pavgb 0xbabecafe,%xmm5
// CHECK: pavgb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe0,0x2d,0x78,0x56,0x34,0x12]
pavgb 0x12345678,%xmm5
// CHECK: pavgb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe0,0xed]
pavgb %xmm5,%xmm5
// CHECK: pavgw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xe3,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pavgw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pavgw 69, %mm3
// CHECK: encoding: [0x0f,0xe3,0x1d,0x45,0x00,0x00,0x00]
pavgw 0x45,%mm3
// CHECK: pavgw 32493, %mm3
// CHECK: encoding: [0x0f,0xe3,0x1d,0xed,0x7e,0x00,0x00]
pavgw 0x7eed,%mm3
// CHECK: pavgw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xe3,0x1d,0xfe,0xca,0xbe,0xba]
pavgw 0xbabecafe,%mm3
// CHECK: pavgw 305419896, %mm3
// CHECK: encoding: [0x0f,0xe3,0x1d,0x78,0x56,0x34,0x12]
pavgw 0x12345678,%mm3
// CHECK: pavgw %mm3, %mm3
// CHECK: encoding: [0x0f,0xe3,0xdb]
pavgw %mm3,%mm3
// CHECK: pavgw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xe3,0xac,0xcb,0xef,0xbe,0xad,0xde]
pavgw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pavgw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe3,0x2d,0x45,0x00,0x00,0x00]
pavgw 0x45,%xmm5
// CHECK: pavgw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe3,0x2d,0xed,0x7e,0x00,0x00]
pavgw 0x7eed,%xmm5
// CHECK: pavgw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe3,0x2d,0xfe,0xca,0xbe,0xba]
pavgw 0xbabecafe,%xmm5
// CHECK: pavgw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe3,0x2d,0x78,0x56,0x34,0x12]
pavgw 0x12345678,%xmm5
// CHECK: pavgw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe3,0xed]
pavgw %xmm5,%xmm5
// CHECK: pmaxsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xee,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmaxsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmaxsw 69, %mm3
// CHECK: encoding: [0x0f,0xee,0x1d,0x45,0x00,0x00,0x00]
pmaxsw 0x45,%mm3
// CHECK: pmaxsw 32493, %mm3
// CHECK: encoding: [0x0f,0xee,0x1d,0xed,0x7e,0x00,0x00]
pmaxsw 0x7eed,%mm3
// CHECK: pmaxsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xee,0x1d,0xfe,0xca,0xbe,0xba]
pmaxsw 0xbabecafe,%mm3
// CHECK: pmaxsw 305419896, %mm3
// CHECK: encoding: [0x0f,0xee,0x1d,0x78,0x56,0x34,0x12]
pmaxsw 0x12345678,%mm3
// CHECK: pmaxsw %mm3, %mm3
// CHECK: encoding: [0x0f,0xee,0xdb]
pmaxsw %mm3,%mm3
// CHECK: pmaxsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xee,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmaxsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmaxsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xee,0x2d,0x45,0x00,0x00,0x00]
pmaxsw 0x45,%xmm5
// CHECK: pmaxsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xee,0x2d,0xed,0x7e,0x00,0x00]
pmaxsw 0x7eed,%xmm5
// CHECK: pmaxsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xee,0x2d,0xfe,0xca,0xbe,0xba]
pmaxsw 0xbabecafe,%xmm5
// CHECK: pmaxsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xee,0x2d,0x78,0x56,0x34,0x12]
pmaxsw 0x12345678,%xmm5
// CHECK: pmaxsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xee,0xed]
pmaxsw %xmm5,%xmm5
// CHECK: pmaxub 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xde,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmaxub 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmaxub 69, %mm3
// CHECK: encoding: [0x0f,0xde,0x1d,0x45,0x00,0x00,0x00]
pmaxub 0x45,%mm3
// CHECK: pmaxub 32493, %mm3
// CHECK: encoding: [0x0f,0xde,0x1d,0xed,0x7e,0x00,0x00]
pmaxub 0x7eed,%mm3
// CHECK: pmaxub 3133065982, %mm3
// CHECK: encoding: [0x0f,0xde,0x1d,0xfe,0xca,0xbe,0xba]
pmaxub 0xbabecafe,%mm3
// CHECK: pmaxub 305419896, %mm3
// CHECK: encoding: [0x0f,0xde,0x1d,0x78,0x56,0x34,0x12]
pmaxub 0x12345678,%mm3
// CHECK: pmaxub %mm3, %mm3
// CHECK: encoding: [0x0f,0xde,0xdb]
pmaxub %mm3,%mm3
// CHECK: pmaxub 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xde,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmaxub 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmaxub 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xde,0x2d,0x45,0x00,0x00,0x00]
pmaxub 0x45,%xmm5
// CHECK: pmaxub 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xde,0x2d,0xed,0x7e,0x00,0x00]
pmaxub 0x7eed,%xmm5
// CHECK: pmaxub 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xde,0x2d,0xfe,0xca,0xbe,0xba]
pmaxub 0xbabecafe,%xmm5
// CHECK: pmaxub 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xde,0x2d,0x78,0x56,0x34,0x12]
pmaxub 0x12345678,%xmm5
// CHECK: pmaxub %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xde,0xed]
pmaxub %xmm5,%xmm5
// CHECK: pminsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xea,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pminsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pminsw 69, %mm3
// CHECK: encoding: [0x0f,0xea,0x1d,0x45,0x00,0x00,0x00]
pminsw 0x45,%mm3
// CHECK: pminsw 32493, %mm3
// CHECK: encoding: [0x0f,0xea,0x1d,0xed,0x7e,0x00,0x00]
pminsw 0x7eed,%mm3
// CHECK: pminsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xea,0x1d,0xfe,0xca,0xbe,0xba]
pminsw 0xbabecafe,%mm3
// CHECK: pminsw 305419896, %mm3
// CHECK: encoding: [0x0f,0xea,0x1d,0x78,0x56,0x34,0x12]
pminsw 0x12345678,%mm3
// CHECK: pminsw %mm3, %mm3
// CHECK: encoding: [0x0f,0xea,0xdb]
pminsw %mm3,%mm3
// CHECK: pminsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xea,0xac,0xcb,0xef,0xbe,0xad,0xde]
pminsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pminsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xea,0x2d,0x45,0x00,0x00,0x00]
pminsw 0x45,%xmm5
// CHECK: pminsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xea,0x2d,0xed,0x7e,0x00,0x00]
pminsw 0x7eed,%xmm5
// CHECK: pminsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xea,0x2d,0xfe,0xca,0xbe,0xba]
pminsw 0xbabecafe,%xmm5
// CHECK: pminsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xea,0x2d,0x78,0x56,0x34,0x12]
pminsw 0x12345678,%xmm5
// CHECK: pminsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xea,0xed]
pminsw %xmm5,%xmm5
// CHECK: pminub 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xda,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pminub 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pminub 69, %mm3
// CHECK: encoding: [0x0f,0xda,0x1d,0x45,0x00,0x00,0x00]
pminub 0x45,%mm3
// CHECK: pminub 32493, %mm3
// CHECK: encoding: [0x0f,0xda,0x1d,0xed,0x7e,0x00,0x00]
pminub 0x7eed,%mm3
// CHECK: pminub 3133065982, %mm3
// CHECK: encoding: [0x0f,0xda,0x1d,0xfe,0xca,0xbe,0xba]
pminub 0xbabecafe,%mm3
// CHECK: pminub 305419896, %mm3
// CHECK: encoding: [0x0f,0xda,0x1d,0x78,0x56,0x34,0x12]
pminub 0x12345678,%mm3
// CHECK: pminub %mm3, %mm3
// CHECK: encoding: [0x0f,0xda,0xdb]
pminub %mm3,%mm3
// CHECK: pminub 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xda,0xac,0xcb,0xef,0xbe,0xad,0xde]
pminub 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pminub 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xda,0x2d,0x45,0x00,0x00,0x00]
pminub 0x45,%xmm5
// CHECK: pminub 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xda,0x2d,0xed,0x7e,0x00,0x00]
pminub 0x7eed,%xmm5
// CHECK: pminub 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xda,0x2d,0xfe,0xca,0xbe,0xba]
pminub 0xbabecafe,%xmm5
// CHECK: pminub 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xda,0x2d,0x78,0x56,0x34,0x12]
pminub 0x12345678,%xmm5
// CHECK: pminub %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xda,0xed]
pminub %xmm5,%xmm5
// CHECK: pmovmskb %mm3, %ecx
// CHECK: encoding: [0x0f,0xd7,0xcb]
pmovmskb %mm3,%ecx
// CHECK: pmovmskb %xmm5, %ecx
// CHECK: encoding: [0x66,0x0f,0xd7,0xcd]
pmovmskb %xmm5,%ecx
// CHECK: pmulhuw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xe4,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmulhuw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmulhuw 69, %mm3
// CHECK: encoding: [0x0f,0xe4,0x1d,0x45,0x00,0x00,0x00]
pmulhuw 0x45,%mm3
// CHECK: pmulhuw 32493, %mm3
// CHECK: encoding: [0x0f,0xe4,0x1d,0xed,0x7e,0x00,0x00]
pmulhuw 0x7eed,%mm3
// CHECK: pmulhuw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xe4,0x1d,0xfe,0xca,0xbe,0xba]
pmulhuw 0xbabecafe,%mm3
// CHECK: pmulhuw 305419896, %mm3
// CHECK: encoding: [0x0f,0xe4,0x1d,0x78,0x56,0x34,0x12]
pmulhuw 0x12345678,%mm3
// CHECK: pmulhuw %mm3, %mm3
// CHECK: encoding: [0x0f,0xe4,0xdb]
pmulhuw %mm3,%mm3
// CHECK: pmulhuw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xe4,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmulhuw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmulhuw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe4,0x2d,0x45,0x00,0x00,0x00]
pmulhuw 0x45,%xmm5
// CHECK: pmulhuw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe4,0x2d,0xed,0x7e,0x00,0x00]
pmulhuw 0x7eed,%xmm5
// CHECK: pmulhuw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe4,0x2d,0xfe,0xca,0xbe,0xba]
pmulhuw 0xbabecafe,%xmm5
// CHECK: pmulhuw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe4,0x2d,0x78,0x56,0x34,0x12]
pmulhuw 0x12345678,%xmm5
// CHECK: pmulhuw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xe4,0xed]
pmulhuw %xmm5,%xmm5
// CHECK: prefetchnta 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x18,0x84,0xcb,0xef,0xbe,0xad,0xde]
prefetchnta 0xdeadbeef(%ebx,%ecx,8)
// CHECK: prefetchnta 32493
// CHECK: encoding: [0x0f,0x18,0x05,0xed,0x7e,0x00,0x00]
prefetchnta 0x7eed
// CHECK: prefetchnta 3133065982
// CHECK: encoding: [0x0f,0x18,0x05,0xfe,0xca,0xbe,0xba]
prefetchnta 0xbabecafe
// CHECK: prefetchnta 305419896
// CHECK: encoding: [0x0f,0x18,0x05,0x78,0x56,0x34,0x12]
prefetchnta 0x12345678
// CHECK: prefetcht0 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x18,0x8c,0xcb,0xef,0xbe,0xad,0xde]
prefetcht0 0xdeadbeef(%ebx,%ecx,8)
// CHECK: prefetcht0 32493
// CHECK: encoding: [0x0f,0x18,0x0d,0xed,0x7e,0x00,0x00]
prefetcht0 0x7eed
// CHECK: prefetcht0 3133065982
// CHECK: encoding: [0x0f,0x18,0x0d,0xfe,0xca,0xbe,0xba]
prefetcht0 0xbabecafe
// CHECK: prefetcht0 305419896
// CHECK: encoding: [0x0f,0x18,0x0d,0x78,0x56,0x34,0x12]
prefetcht0 0x12345678
// CHECK: prefetcht1 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x18,0x94,0xcb,0xef,0xbe,0xad,0xde]
prefetcht1 0xdeadbeef(%ebx,%ecx,8)
// CHECK: prefetcht1 32493
// CHECK: encoding: [0x0f,0x18,0x15,0xed,0x7e,0x00,0x00]
prefetcht1 0x7eed
// CHECK: prefetcht1 3133065982
// CHECK: encoding: [0x0f,0x18,0x15,0xfe,0xca,0xbe,0xba]
prefetcht1 0xbabecafe
// CHECK: prefetcht1 305419896
// CHECK: encoding: [0x0f,0x18,0x15,0x78,0x56,0x34,0x12]
prefetcht1 0x12345678
// CHECK: prefetcht2 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x18,0x9c,0xcb,0xef,0xbe,0xad,0xde]
prefetcht2 0xdeadbeef(%ebx,%ecx,8)
// CHECK: prefetcht2 32493
// CHECK: encoding: [0x0f,0x18,0x1d,0xed,0x7e,0x00,0x00]
prefetcht2 0x7eed
// CHECK: prefetcht2 3133065982
// CHECK: encoding: [0x0f,0x18,0x1d,0xfe,0xca,0xbe,0xba]
prefetcht2 0xbabecafe
// CHECK: prefetcht2 305419896
// CHECK: encoding: [0x0f,0x18,0x1d,0x78,0x56,0x34,0x12]
prefetcht2 0x12345678
// CHECK: psadbw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xf6,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psadbw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psadbw 69, %mm3
// CHECK: encoding: [0x0f,0xf6,0x1d,0x45,0x00,0x00,0x00]
psadbw 0x45,%mm3
// CHECK: psadbw 32493, %mm3
// CHECK: encoding: [0x0f,0xf6,0x1d,0xed,0x7e,0x00,0x00]
psadbw 0x7eed,%mm3
// CHECK: psadbw 3133065982, %mm3
// CHECK: encoding: [0x0f,0xf6,0x1d,0xfe,0xca,0xbe,0xba]
psadbw 0xbabecafe,%mm3
// CHECK: psadbw 305419896, %mm3
// CHECK: encoding: [0x0f,0xf6,0x1d,0x78,0x56,0x34,0x12]
psadbw 0x12345678,%mm3
// CHECK: psadbw %mm3, %mm3
// CHECK: encoding: [0x0f,0xf6,0xdb]
psadbw %mm3,%mm3
// CHECK: psadbw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xf6,0xac,0xcb,0xef,0xbe,0xad,0xde]
psadbw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psadbw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf6,0x2d,0x45,0x00,0x00,0x00]
psadbw 0x45,%xmm5
// CHECK: psadbw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf6,0x2d,0xed,0x7e,0x00,0x00]
psadbw 0x7eed,%xmm5
// CHECK: psadbw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf6,0x2d,0xfe,0xca,0xbe,0xba]
psadbw 0xbabecafe,%xmm5
// CHECK: psadbw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf6,0x2d,0x78,0x56,0x34,0x12]
psadbw 0x12345678,%xmm5
// CHECK: psadbw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf6,0xed]
psadbw %xmm5,%xmm5
// CHECK: rcpps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x53,0xac,0xcb,0xef,0xbe,0xad,0xde]
rcpps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: rcpps 69, %xmm5
// CHECK: encoding: [0x0f,0x53,0x2d,0x45,0x00,0x00,0x00]
rcpps 0x45,%xmm5
// CHECK: rcpps 32493, %xmm5
// CHECK: encoding: [0x0f,0x53,0x2d,0xed,0x7e,0x00,0x00]
rcpps 0x7eed,%xmm5
// CHECK: rcpps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x53,0x2d,0xfe,0xca,0xbe,0xba]
rcpps 0xbabecafe,%xmm5
// CHECK: rcpps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x53,0x2d,0x78,0x56,0x34,0x12]
rcpps 0x12345678,%xmm5
// CHECK: rcpps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x53,0xed]
rcpps %xmm5,%xmm5
// CHECK: rcpss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x53,0xac,0xcb,0xef,0xbe,0xad,0xde]
rcpss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: rcpss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x53,0x2d,0x45,0x00,0x00,0x00]
rcpss 0x45,%xmm5
// CHECK: rcpss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x53,0x2d,0xed,0x7e,0x00,0x00]
rcpss 0x7eed,%xmm5
// CHECK: rcpss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x53,0x2d,0xfe,0xca,0xbe,0xba]
rcpss 0xbabecafe,%xmm5
// CHECK: rcpss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x53,0x2d,0x78,0x56,0x34,0x12]
rcpss 0x12345678,%xmm5
// CHECK: rcpss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x53,0xed]
rcpss %xmm5,%xmm5
// CHECK: rsqrtps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x52,0xac,0xcb,0xef,0xbe,0xad,0xde]
rsqrtps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: rsqrtps 69, %xmm5
// CHECK: encoding: [0x0f,0x52,0x2d,0x45,0x00,0x00,0x00]
rsqrtps 0x45,%xmm5
// CHECK: rsqrtps 32493, %xmm5
// CHECK: encoding: [0x0f,0x52,0x2d,0xed,0x7e,0x00,0x00]
rsqrtps 0x7eed,%xmm5
// CHECK: rsqrtps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x52,0x2d,0xfe,0xca,0xbe,0xba]
rsqrtps 0xbabecafe,%xmm5
// CHECK: rsqrtps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x52,0x2d,0x78,0x56,0x34,0x12]
rsqrtps 0x12345678,%xmm5
// CHECK: rsqrtps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x52,0xed]
rsqrtps %xmm5,%xmm5
// CHECK: rsqrtss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x52,0xac,0xcb,0xef,0xbe,0xad,0xde]
rsqrtss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: rsqrtss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x52,0x2d,0x45,0x00,0x00,0x00]
rsqrtss 0x45,%xmm5
// CHECK: rsqrtss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x52,0x2d,0xed,0x7e,0x00,0x00]
rsqrtss 0x7eed,%xmm5
// CHECK: rsqrtss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x52,0x2d,0xfe,0xca,0xbe,0xba]
rsqrtss 0xbabecafe,%xmm5
// CHECK: rsqrtss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x52,0x2d,0x78,0x56,0x34,0x12]
rsqrtss 0x12345678,%xmm5
// CHECK: rsqrtss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x52,0xed]
rsqrtss %xmm5,%xmm5
// CHECK: sqrtps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x51,0xac,0xcb,0xef,0xbe,0xad,0xde]
sqrtps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: sqrtps 69, %xmm5
// CHECK: encoding: [0x0f,0x51,0x2d,0x45,0x00,0x00,0x00]
sqrtps 0x45,%xmm5
// CHECK: sqrtps 32493, %xmm5
// CHECK: encoding: [0x0f,0x51,0x2d,0xed,0x7e,0x00,0x00]
sqrtps 0x7eed,%xmm5
// CHECK: sqrtps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x51,0x2d,0xfe,0xca,0xbe,0xba]
sqrtps 0xbabecafe,%xmm5
// CHECK: sqrtps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x51,0x2d,0x78,0x56,0x34,0x12]
sqrtps 0x12345678,%xmm5
// CHECK: sqrtps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x51,0xed]
sqrtps %xmm5,%xmm5
// CHECK: sqrtss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x51,0xac,0xcb,0xef,0xbe,0xad,0xde]
sqrtss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: sqrtss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x51,0x2d,0x45,0x00,0x00,0x00]
sqrtss 0x45,%xmm5
// CHECK: sqrtss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x51,0x2d,0xed,0x7e,0x00,0x00]
sqrtss 0x7eed,%xmm5
// CHECK: sqrtss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x51,0x2d,0xfe,0xca,0xbe,0xba]
sqrtss 0xbabecafe,%xmm5
// CHECK: sqrtss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x51,0x2d,0x78,0x56,0x34,0x12]
sqrtss 0x12345678,%xmm5
// CHECK: sqrtss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x51,0xed]
sqrtss %xmm5,%xmm5
// CHECK: stmxcsr 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0x9c,0xcb,0xef,0xbe,0xad,0xde]
stmxcsr 0xdeadbeef(%ebx,%ecx,8)
// CHECK: stmxcsr 32493
// CHECK: encoding: [0x0f,0xae,0x1d,0xed,0x7e,0x00,0x00]
stmxcsr 0x7eed
// CHECK: stmxcsr 3133065982
// CHECK: encoding: [0x0f,0xae,0x1d,0xfe,0xca,0xbe,0xba]
stmxcsr 0xbabecafe
// CHECK: stmxcsr 305419896
// CHECK: encoding: [0x0f,0xae,0x1d,0x78,0x56,0x34,0x12]
stmxcsr 0x12345678
// CHECK: subps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde]
subps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: subps 69, %xmm5
// CHECK: encoding: [0x0f,0x5c,0x2d,0x45,0x00,0x00,0x00]
subps 0x45,%xmm5
// CHECK: subps 32493, %xmm5
// CHECK: encoding: [0x0f,0x5c,0x2d,0xed,0x7e,0x00,0x00]
subps 0x7eed,%xmm5
// CHECK: subps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x5c,0x2d,0xfe,0xca,0xbe,0xba]
subps 0xbabecafe,%xmm5
// CHECK: subps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x5c,0x2d,0x78,0x56,0x34,0x12]
subps 0x12345678,%xmm5
// CHECK: subps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x5c,0xed]
subps %xmm5,%xmm5
// CHECK: subss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde]
subss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: subss 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5c,0x2d,0x45,0x00,0x00,0x00]
subss 0x45,%xmm5
// CHECK: subss 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5c,0x2d,0xed,0x7e,0x00,0x00]
subss 0x7eed,%xmm5
// CHECK: subss 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5c,0x2d,0xfe,0xca,0xbe,0xba]
subss 0xbabecafe,%xmm5
// CHECK: subss 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5c,0x2d,0x78,0x56,0x34,0x12]
subss 0x12345678,%xmm5
// CHECK: subss %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5c,0xed]
subss %xmm5,%xmm5
// CHECK: ucomiss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x2e,0xac,0xcb,0xef,0xbe,0xad,0xde]
ucomiss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: ucomiss 69, %xmm5
// CHECK: encoding: [0x0f,0x2e,0x2d,0x45,0x00,0x00,0x00]
ucomiss 0x45,%xmm5
// CHECK: ucomiss 32493, %xmm5
// CHECK: encoding: [0x0f,0x2e,0x2d,0xed,0x7e,0x00,0x00]
ucomiss 0x7eed,%xmm5
// CHECK: ucomiss 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x2e,0x2d,0xfe,0xca,0xbe,0xba]
ucomiss 0xbabecafe,%xmm5
// CHECK: ucomiss 305419896, %xmm5
// CHECK: encoding: [0x0f,0x2e,0x2d,0x78,0x56,0x34,0x12]
ucomiss 0x12345678,%xmm5
// CHECK: ucomiss %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x2e,0xed]
ucomiss %xmm5,%xmm5
// CHECK: unpckhps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x15,0xac,0xcb,0xef,0xbe,0xad,0xde]
unpckhps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: unpckhps 69, %xmm5
// CHECK: encoding: [0x0f,0x15,0x2d,0x45,0x00,0x00,0x00]
unpckhps 0x45,%xmm5
// CHECK: unpckhps 32493, %xmm5
// CHECK: encoding: [0x0f,0x15,0x2d,0xed,0x7e,0x00,0x00]
unpckhps 0x7eed,%xmm5
// CHECK: unpckhps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x15,0x2d,0xfe,0xca,0xbe,0xba]
unpckhps 0xbabecafe,%xmm5
// CHECK: unpckhps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x15,0x2d,0x78,0x56,0x34,0x12]
unpckhps 0x12345678,%xmm5
// CHECK: unpckhps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x15,0xed]
unpckhps %xmm5,%xmm5
// CHECK: unpcklps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x14,0xac,0xcb,0xef,0xbe,0xad,0xde]
unpcklps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: unpcklps 69, %xmm5
// CHECK: encoding: [0x0f,0x14,0x2d,0x45,0x00,0x00,0x00]
unpcklps 0x45,%xmm5
// CHECK: unpcklps 32493, %xmm5
// CHECK: encoding: [0x0f,0x14,0x2d,0xed,0x7e,0x00,0x00]
unpcklps 0x7eed,%xmm5
// CHECK: unpcklps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x14,0x2d,0xfe,0xca,0xbe,0xba]
unpcklps 0xbabecafe,%xmm5
// CHECK: unpcklps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x14,0x2d,0x78,0x56,0x34,0x12]
unpcklps 0x12345678,%xmm5
// CHECK: unpcklps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x14,0xed]
unpcklps %xmm5,%xmm5
// CHECK: xorps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x57,0xac,0xcb,0xef,0xbe,0xad,0xde]
xorps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: xorps 69, %xmm5
// CHECK: encoding: [0x0f,0x57,0x2d,0x45,0x00,0x00,0x00]
xorps 0x45,%xmm5
// CHECK: xorps 32493, %xmm5
// CHECK: encoding: [0x0f,0x57,0x2d,0xed,0x7e,0x00,0x00]
xorps 0x7eed,%xmm5
// CHECK: xorps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x57,0x2d,0xfe,0xca,0xbe,0xba]
xorps 0xbabecafe,%xmm5
// CHECK: xorps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x57,0x2d,0x78,0x56,0x34,0x12]
xorps 0x12345678,%xmm5
// CHECK: xorps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x57,0xed]
xorps %xmm5,%xmm5
// CHECK: addpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde]
addpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: addpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x58,0x2d,0x45,0x00,0x00,0x00]
addpd 0x45,%xmm5
// CHECK: addpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x58,0x2d,0xed,0x7e,0x00,0x00]
addpd 0x7eed,%xmm5
// CHECK: addpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x58,0x2d,0xfe,0xca,0xbe,0xba]
addpd 0xbabecafe,%xmm5
// CHECK: addpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x58,0x2d,0x78,0x56,0x34,0x12]
addpd 0x12345678,%xmm5
// CHECK: addpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x58,0xed]
addpd %xmm5,%xmm5
// CHECK: addsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde]
addsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: addsd 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x58,0x2d,0x45,0x00,0x00,0x00]
addsd 0x45,%xmm5
// CHECK: addsd 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x58,0x2d,0xed,0x7e,0x00,0x00]
addsd 0x7eed,%xmm5
// CHECK: addsd 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x58,0x2d,0xfe,0xca,0xbe,0xba]
addsd 0xbabecafe,%xmm5
// CHECK: addsd 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x58,0x2d,0x78,0x56,0x34,0x12]
addsd 0x12345678,%xmm5
// CHECK: addsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x58,0xed]
addsd %xmm5,%xmm5
// CHECK: andnpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x55,0xac,0xcb,0xef,0xbe,0xad,0xde]
andnpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: andnpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x55,0x2d,0x45,0x00,0x00,0x00]
andnpd 0x45,%xmm5
// CHECK: andnpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x55,0x2d,0xed,0x7e,0x00,0x00]
andnpd 0x7eed,%xmm5
// CHECK: andnpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x55,0x2d,0xfe,0xca,0xbe,0xba]
andnpd 0xbabecafe,%xmm5
// CHECK: andnpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x55,0x2d,0x78,0x56,0x34,0x12]
andnpd 0x12345678,%xmm5
// CHECK: andnpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x55,0xed]
andnpd %xmm5,%xmm5
// CHECK: andpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x54,0xac,0xcb,0xef,0xbe,0xad,0xde]
andpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: andpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x54,0x2d,0x45,0x00,0x00,0x00]
andpd 0x45,%xmm5
// CHECK: andpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x54,0x2d,0xed,0x7e,0x00,0x00]
andpd 0x7eed,%xmm5
// CHECK: andpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x54,0x2d,0xfe,0xca,0xbe,0xba]
andpd 0xbabecafe,%xmm5
// CHECK: andpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x54,0x2d,0x78,0x56,0x34,0x12]
andpd 0x12345678,%xmm5
// CHECK: andpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x54,0xed]
andpd %xmm5,%xmm5
// CHECK: comisd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x2f,0xac,0xcb,0xef,0xbe,0xad,0xde]
comisd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: comisd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2f,0x2d,0x45,0x00,0x00,0x00]
comisd 0x45,%xmm5
// CHECK: comisd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2f,0x2d,0xed,0x7e,0x00,0x00]
comisd 0x7eed,%xmm5
// CHECK: comisd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2f,0x2d,0xfe,0xca,0xbe,0xba]
comisd 0xbabecafe,%xmm5
// CHECK: comisd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2f,0x2d,0x78,0x56,0x34,0x12]
comisd 0x12345678,%xmm5
// CHECK: comisd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2f,0xed]
comisd %xmm5,%xmm5
// CHECK: cvtpi2pd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x2a,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtpi2pd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtpi2pd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2a,0x2d,0x45,0x00,0x00,0x00]
cvtpi2pd 0x45,%xmm5
// CHECK: cvtpi2pd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2a,0x2d,0xed,0x7e,0x00,0x00]
cvtpi2pd 0x7eed,%xmm5
// CHECK: cvtpi2pd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2a,0x2d,0xfe,0xca,0xbe,0xba]
cvtpi2pd 0xbabecafe,%xmm5
// CHECK: cvtpi2pd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2a,0x2d,0x78,0x56,0x34,0x12]
cvtpi2pd 0x12345678,%xmm5
// CHECK: cvtpi2pd %mm3, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2a,0xeb]
cvtpi2pd %mm3,%xmm5
// CHECK: cvtsi2sdl %ecx, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0xe9]
cvtsi2sdl %ecx,%xmm5
// CHECK: cvtsi2sdl 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtsi2sdl 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtsi2sdl 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0x2d,0x45,0x00,0x00,0x00]
cvtsi2sdl 0x45,%xmm5
// CHECK: cvtsi2sdl 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0x2d,0xed,0x7e,0x00,0x00]
cvtsi2sdl 0x7eed,%xmm5
// CHECK: cvtsi2sdl 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0x2d,0xfe,0xca,0xbe,0xba]
cvtsi2sdl 0xbabecafe,%xmm5
// CHECK: cvtsi2sdl 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0x2d,0x78,0x56,0x34,0x12]
cvtsi2sdl 0x12345678,%xmm5
// CHECK: divpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
divpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: divpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5e,0x2d,0x45,0x00,0x00,0x00]
divpd 0x45,%xmm5
// CHECK: divpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5e,0x2d,0xed,0x7e,0x00,0x00]
divpd 0x7eed,%xmm5
// CHECK: divpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5e,0x2d,0xfe,0xca,0xbe,0xba]
divpd 0xbabecafe,%xmm5
// CHECK: divpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5e,0x2d,0x78,0x56,0x34,0x12]
divpd 0x12345678,%xmm5
// CHECK: divpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5e,0xed]
divpd %xmm5,%xmm5
// CHECK: divsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
divsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: divsd 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5e,0x2d,0x45,0x00,0x00,0x00]
divsd 0x45,%xmm5
// CHECK: divsd 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5e,0x2d,0xed,0x7e,0x00,0x00]
divsd 0x7eed,%xmm5
// CHECK: divsd 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5e,0x2d,0xfe,0xca,0xbe,0xba]
divsd 0xbabecafe,%xmm5
// CHECK: divsd 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5e,0x2d,0x78,0x56,0x34,0x12]
divsd 0x12345678,%xmm5
// CHECK: divsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5e,0xed]
divsd %xmm5,%xmm5
// CHECK: maxpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x5f,0xac,0xcb,0xef,0xbe,0xad,0xde]
maxpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: maxpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5f,0x2d,0x45,0x00,0x00,0x00]
maxpd 0x45,%xmm5
// CHECK: maxpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5f,0x2d,0xed,0x7e,0x00,0x00]
maxpd 0x7eed,%xmm5
// CHECK: maxpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5f,0x2d,0xfe,0xca,0xbe,0xba]
maxpd 0xbabecafe,%xmm5
// CHECK: maxpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5f,0x2d,0x78,0x56,0x34,0x12]
maxpd 0x12345678,%xmm5
// CHECK: maxpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5f,0xed]
maxpd %xmm5,%xmm5
// CHECK: maxsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5f,0xac,0xcb,0xef,0xbe,0xad,0xde]
maxsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: maxsd 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5f,0x2d,0x45,0x00,0x00,0x00]
maxsd 0x45,%xmm5
// CHECK: maxsd 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5f,0x2d,0xed,0x7e,0x00,0x00]
maxsd 0x7eed,%xmm5
// CHECK: maxsd 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5f,0x2d,0xfe,0xca,0xbe,0xba]
maxsd 0xbabecafe,%xmm5
// CHECK: maxsd 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5f,0x2d,0x78,0x56,0x34,0x12]
maxsd 0x12345678,%xmm5
// CHECK: maxsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5f,0xed]
maxsd %xmm5,%xmm5
// CHECK: minpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x5d,0xac,0xcb,0xef,0xbe,0xad,0xde]
minpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: minpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5d,0x2d,0x45,0x00,0x00,0x00]
minpd 0x45,%xmm5
// CHECK: minpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5d,0x2d,0xed,0x7e,0x00,0x00]
minpd 0x7eed,%xmm5
// CHECK: minpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5d,0x2d,0xfe,0xca,0xbe,0xba]
minpd 0xbabecafe,%xmm5
// CHECK: minpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5d,0x2d,0x78,0x56,0x34,0x12]
minpd 0x12345678,%xmm5
// CHECK: minpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5d,0xed]
minpd %xmm5,%xmm5
// CHECK: minsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5d,0xac,0xcb,0xef,0xbe,0xad,0xde]
minsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: minsd 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5d,0x2d,0x45,0x00,0x00,0x00]
minsd 0x45,%xmm5
// CHECK: minsd 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5d,0x2d,0xed,0x7e,0x00,0x00]
minsd 0x7eed,%xmm5
// CHECK: minsd 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5d,0x2d,0xfe,0xca,0xbe,0xba]
minsd 0xbabecafe,%xmm5
// CHECK: minsd 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5d,0x2d,0x78,0x56,0x34,0x12]
minsd 0x12345678,%xmm5
// CHECK: minsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5d,0xed]
minsd %xmm5,%xmm5
// CHECK: movapd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x28,0xac,0xcb,0xef,0xbe,0xad,0xde]
movapd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movapd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x28,0x2d,0x45,0x00,0x00,0x00]
movapd 0x45,%xmm5
// CHECK: movapd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x28,0x2d,0xed,0x7e,0x00,0x00]
movapd 0x7eed,%xmm5
// CHECK: movapd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x28,0x2d,0xfe,0xca,0xbe,0xba]
movapd 0xbabecafe,%xmm5
// CHECK: movapd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x28,0x2d,0x78,0x56,0x34,0x12]
movapd 0x12345678,%xmm5
// CHECK: movapd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x28,0xed]
movapd %xmm5,%xmm5
// CHECK: movapd %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0x29,0xac,0xcb,0xef,0xbe,0xad,0xde]
movapd %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movapd %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0x29,0x2d,0x45,0x00,0x00,0x00]
movapd %xmm5,0x45
// CHECK: movapd %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0x29,0x2d,0xed,0x7e,0x00,0x00]
movapd %xmm5,0x7eed
// CHECK: movapd %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0x29,0x2d,0xfe,0xca,0xbe,0xba]
movapd %xmm5,0xbabecafe
// CHECK: movapd %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0x29,0x2d,0x78,0x56,0x34,0x12]
movapd %xmm5,0x12345678
// CHECK: movapd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x28,0xed]
movapd %xmm5,%xmm5
// CHECK: movhpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x16,0xac,0xcb,0xef,0xbe,0xad,0xde]
movhpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movhpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x16,0x2d,0x45,0x00,0x00,0x00]
movhpd 0x45,%xmm5
// CHECK: movhpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x16,0x2d,0xed,0x7e,0x00,0x00]
movhpd 0x7eed,%xmm5
// CHECK: movhpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x16,0x2d,0xfe,0xca,0xbe,0xba]
movhpd 0xbabecafe,%xmm5
// CHECK: movhpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x16,0x2d,0x78,0x56,0x34,0x12]
movhpd 0x12345678,%xmm5
// CHECK: movhpd %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0x17,0xac,0xcb,0xef,0xbe,0xad,0xde]
movhpd %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movhpd %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0x17,0x2d,0x45,0x00,0x00,0x00]
movhpd %xmm5,0x45
// CHECK: movhpd %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0x17,0x2d,0xed,0x7e,0x00,0x00]
movhpd %xmm5,0x7eed
// CHECK: movhpd %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0x17,0x2d,0xfe,0xca,0xbe,0xba]
movhpd %xmm5,0xbabecafe
// CHECK: movhpd %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0x17,0x2d,0x78,0x56,0x34,0x12]
movhpd %xmm5,0x12345678
// CHECK: movlpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x12,0xac,0xcb,0xef,0xbe,0xad,0xde]
movlpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movlpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x12,0x2d,0x45,0x00,0x00,0x00]
movlpd 0x45,%xmm5
// CHECK: movlpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x12,0x2d,0xed,0x7e,0x00,0x00]
movlpd 0x7eed,%xmm5
// CHECK: movlpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x12,0x2d,0xfe,0xca,0xbe,0xba]
movlpd 0xbabecafe,%xmm5
// CHECK: movlpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x12,0x2d,0x78,0x56,0x34,0x12]
movlpd 0x12345678,%xmm5
// CHECK: movlpd %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0x13,0xac,0xcb,0xef,0xbe,0xad,0xde]
movlpd %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movlpd %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0x13,0x2d,0x45,0x00,0x00,0x00]
movlpd %xmm5,0x45
// CHECK: movlpd %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0x13,0x2d,0xed,0x7e,0x00,0x00]
movlpd %xmm5,0x7eed
// CHECK: movlpd %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0x13,0x2d,0xfe,0xca,0xbe,0xba]
movlpd %xmm5,0xbabecafe
// CHECK: movlpd %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0x13,0x2d,0x78,0x56,0x34,0x12]
movlpd %xmm5,0x12345678
// CHECK: movmskpd %xmm5, %ecx
// CHECK: encoding: [0x66,0x0f,0x50,0xcd]
movmskpd %xmm5,%ecx
// CHECK: movntpd %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0x2b,0xac,0xcb,0xef,0xbe,0xad,0xde]
movntpd %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movntpd %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0x2b,0x2d,0x45,0x00,0x00,0x00]
movntpd %xmm5,0x45
// CHECK: movntpd %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0x2b,0x2d,0xed,0x7e,0x00,0x00]
movntpd %xmm5,0x7eed
// CHECK: movntpd %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0x2b,0x2d,0xfe,0xca,0xbe,0xba]
movntpd %xmm5,0xbabecafe
// CHECK: movntpd %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0x2b,0x2d,0x78,0x56,0x34,0x12]
movntpd %xmm5,0x12345678
// CHECK: movsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x10,0xac,0xcb,0xef,0xbe,0xad,0xde]
movsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movsd 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x10,0x2d,0x45,0x00,0x00,0x00]
movsd 0x45,%xmm5
// CHECK: movsd 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x10,0x2d,0xed,0x7e,0x00,0x00]
movsd 0x7eed,%xmm5
// CHECK: movsd 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x10,0x2d,0xfe,0xca,0xbe,0xba]
movsd 0xbabecafe,%xmm5
// CHECK: movsd 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x10,0x2d,0x78,0x56,0x34,0x12]
movsd 0x12345678,%xmm5
// CHECK: movsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x10,0xed]
movsd %xmm5,%xmm5
// CHECK: movsd %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf2,0x0f,0x11,0xac,0xcb,0xef,0xbe,0xad,0xde]
movsd %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movsd %xmm5, 69
// CHECK: encoding: [0xf2,0x0f,0x11,0x2d,0x45,0x00,0x00,0x00]
movsd %xmm5,0x45
// CHECK: movsd %xmm5, 32493
// CHECK: encoding: [0xf2,0x0f,0x11,0x2d,0xed,0x7e,0x00,0x00]
movsd %xmm5,0x7eed
// CHECK: movsd %xmm5, 3133065982
// CHECK: encoding: [0xf2,0x0f,0x11,0x2d,0xfe,0xca,0xbe,0xba]
movsd %xmm5,0xbabecafe
// CHECK: movsd %xmm5, 305419896
// CHECK: encoding: [0xf2,0x0f,0x11,0x2d,0x78,0x56,0x34,0x12]
movsd %xmm5,0x12345678
// CHECK: movsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x10,0xed]
movsd %xmm5,%xmm5
// CHECK: movupd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x10,0xac,0xcb,0xef,0xbe,0xad,0xde]
movupd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movupd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x10,0x2d,0x45,0x00,0x00,0x00]
movupd 0x45,%xmm5
// CHECK: movupd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x10,0x2d,0xed,0x7e,0x00,0x00]
movupd 0x7eed,%xmm5
// CHECK: movupd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x10,0x2d,0xfe,0xca,0xbe,0xba]
movupd 0xbabecafe,%xmm5
// CHECK: movupd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x10,0x2d,0x78,0x56,0x34,0x12]
movupd 0x12345678,%xmm5
// CHECK: movupd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x10,0xed]
movupd %xmm5,%xmm5
// CHECK: movupd %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0x11,0xac,0xcb,0xef,0xbe,0xad,0xde]
movupd %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movupd %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0x11,0x2d,0x45,0x00,0x00,0x00]
movupd %xmm5,0x45
// CHECK: movupd %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0x11,0x2d,0xed,0x7e,0x00,0x00]
movupd %xmm5,0x7eed
// CHECK: movupd %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0x11,0x2d,0xfe,0xca,0xbe,0xba]
movupd %xmm5,0xbabecafe
// CHECK: movupd %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0x11,0x2d,0x78,0x56,0x34,0x12]
movupd %xmm5,0x12345678
// CHECK: movupd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x10,0xed]
movupd %xmm5,%xmm5
// CHECK: mulpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde]
mulpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: mulpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x59,0x2d,0x45,0x00,0x00,0x00]
mulpd 0x45,%xmm5
// CHECK: mulpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x59,0x2d,0xed,0x7e,0x00,0x00]
mulpd 0x7eed,%xmm5
// CHECK: mulpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x59,0x2d,0xfe,0xca,0xbe,0xba]
mulpd 0xbabecafe,%xmm5
// CHECK: mulpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x59,0x2d,0x78,0x56,0x34,0x12]
mulpd 0x12345678,%xmm5
// CHECK: mulpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x59,0xed]
mulpd %xmm5,%xmm5
// CHECK: mulsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde]
mulsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: mulsd 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x59,0x2d,0x45,0x00,0x00,0x00]
mulsd 0x45,%xmm5
// CHECK: mulsd 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x59,0x2d,0xed,0x7e,0x00,0x00]
mulsd 0x7eed,%xmm5
// CHECK: mulsd 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x59,0x2d,0xfe,0xca,0xbe,0xba]
mulsd 0xbabecafe,%xmm5
// CHECK: mulsd 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x59,0x2d,0x78,0x56,0x34,0x12]
mulsd 0x12345678,%xmm5
// CHECK: mulsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x59,0xed]
mulsd %xmm5,%xmm5
// CHECK: orpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x56,0xac,0xcb,0xef,0xbe,0xad,0xde]
orpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: orpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x56,0x2d,0x45,0x00,0x00,0x00]
orpd 0x45,%xmm5
// CHECK: orpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x56,0x2d,0xed,0x7e,0x00,0x00]
orpd 0x7eed,%xmm5
// CHECK: orpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x56,0x2d,0xfe,0xca,0xbe,0xba]
orpd 0xbabecafe,%xmm5
// CHECK: orpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x56,0x2d,0x78,0x56,0x34,0x12]
orpd 0x12345678,%xmm5
// CHECK: orpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x56,0xed]
orpd %xmm5,%xmm5
// CHECK: sqrtpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x51,0xac,0xcb,0xef,0xbe,0xad,0xde]
sqrtpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: sqrtpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x51,0x2d,0x45,0x00,0x00,0x00]
sqrtpd 0x45,%xmm5
// CHECK: sqrtpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x51,0x2d,0xed,0x7e,0x00,0x00]
sqrtpd 0x7eed,%xmm5
// CHECK: sqrtpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x51,0x2d,0xfe,0xca,0xbe,0xba]
sqrtpd 0xbabecafe,%xmm5
// CHECK: sqrtpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x51,0x2d,0x78,0x56,0x34,0x12]
sqrtpd 0x12345678,%xmm5
// CHECK: sqrtpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x51,0xed]
sqrtpd %xmm5,%xmm5
// CHECK: sqrtsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x51,0xac,0xcb,0xef,0xbe,0xad,0xde]
sqrtsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: sqrtsd 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x51,0x2d,0x45,0x00,0x00,0x00]
sqrtsd 0x45,%xmm5
// CHECK: sqrtsd 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x51,0x2d,0xed,0x7e,0x00,0x00]
sqrtsd 0x7eed,%xmm5
// CHECK: sqrtsd 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x51,0x2d,0xfe,0xca,0xbe,0xba]
sqrtsd 0xbabecafe,%xmm5
// CHECK: sqrtsd 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x51,0x2d,0x78,0x56,0x34,0x12]
sqrtsd 0x12345678,%xmm5
// CHECK: sqrtsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x51,0xed]
sqrtsd %xmm5,%xmm5
// CHECK: subpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde]
subpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: subpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5c,0x2d,0x45,0x00,0x00,0x00]
subpd 0x45,%xmm5
// CHECK: subpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5c,0x2d,0xed,0x7e,0x00,0x00]
subpd 0x7eed,%xmm5
// CHECK: subpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5c,0x2d,0xfe,0xca,0xbe,0xba]
subpd 0xbabecafe,%xmm5
// CHECK: subpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5c,0x2d,0x78,0x56,0x34,0x12]
subpd 0x12345678,%xmm5
// CHECK: subpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5c,0xed]
subpd %xmm5,%xmm5
// CHECK: subsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde]
subsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: subsd 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5c,0x2d,0x45,0x00,0x00,0x00]
subsd 0x45,%xmm5
// CHECK: subsd 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5c,0x2d,0xed,0x7e,0x00,0x00]
subsd 0x7eed,%xmm5
// CHECK: subsd 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5c,0x2d,0xfe,0xca,0xbe,0xba]
subsd 0xbabecafe,%xmm5
// CHECK: subsd 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5c,0x2d,0x78,0x56,0x34,0x12]
subsd 0x12345678,%xmm5
// CHECK: subsd %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5c,0xed]
subsd %xmm5,%xmm5
// CHECK: ucomisd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x2e,0xac,0xcb,0xef,0xbe,0xad,0xde]
ucomisd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: ucomisd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2e,0x2d,0x45,0x00,0x00,0x00]
ucomisd 0x45,%xmm5
// CHECK: ucomisd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2e,0x2d,0xed,0x7e,0x00,0x00]
ucomisd 0x7eed,%xmm5
// CHECK: ucomisd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2e,0x2d,0xfe,0xca,0xbe,0xba]
ucomisd 0xbabecafe,%xmm5
// CHECK: ucomisd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2e,0x2d,0x78,0x56,0x34,0x12]
ucomisd 0x12345678,%xmm5
// CHECK: ucomisd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x2e,0xed]
ucomisd %xmm5,%xmm5
// CHECK: unpckhpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x15,0xac,0xcb,0xef,0xbe,0xad,0xde]
unpckhpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: unpckhpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x15,0x2d,0x45,0x00,0x00,0x00]
unpckhpd 0x45,%xmm5
// CHECK: unpckhpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x15,0x2d,0xed,0x7e,0x00,0x00]
unpckhpd 0x7eed,%xmm5
// CHECK: unpckhpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x15,0x2d,0xfe,0xca,0xbe,0xba]
unpckhpd 0xbabecafe,%xmm5
// CHECK: unpckhpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x15,0x2d,0x78,0x56,0x34,0x12]
unpckhpd 0x12345678,%xmm5
// CHECK: unpckhpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x15,0xed]
unpckhpd %xmm5,%xmm5
// CHECK: unpcklpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x14,0xac,0xcb,0xef,0xbe,0xad,0xde]
unpcklpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: unpcklpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x14,0x2d,0x45,0x00,0x00,0x00]
unpcklpd 0x45,%xmm5
// CHECK: unpcklpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x14,0x2d,0xed,0x7e,0x00,0x00]
unpcklpd 0x7eed,%xmm5
// CHECK: unpcklpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x14,0x2d,0xfe,0xca,0xbe,0xba]
unpcklpd 0xbabecafe,%xmm5
// CHECK: unpcklpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x14,0x2d,0x78,0x56,0x34,0x12]
unpcklpd 0x12345678,%xmm5
// CHECK: unpcklpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x14,0xed]
unpcklpd %xmm5,%xmm5
// CHECK: xorpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x57,0xac,0xcb,0xef,0xbe,0xad,0xde]
xorpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: xorpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x57,0x2d,0x45,0x00,0x00,0x00]
xorpd 0x45,%xmm5
// CHECK: xorpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x57,0x2d,0xed,0x7e,0x00,0x00]
xorpd 0x7eed,%xmm5
// CHECK: xorpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x57,0x2d,0xfe,0xca,0xbe,0xba]
xorpd 0xbabecafe,%xmm5
// CHECK: xorpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x57,0x2d,0x78,0x56,0x34,0x12]
xorpd 0x12345678,%xmm5
// CHECK: xorpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x57,0xed]
xorpd %xmm5,%xmm5
// CHECK: cvtdq2pd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0xe6,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtdq2pd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtdq2pd 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0xe6,0x2d,0x45,0x00,0x00,0x00]
cvtdq2pd 0x45,%xmm5
// CHECK: cvtdq2pd 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0xe6,0x2d,0xed,0x7e,0x00,0x00]
cvtdq2pd 0x7eed,%xmm5
// CHECK: cvtdq2pd 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0xe6,0x2d,0xfe,0xca,0xbe,0xba]
cvtdq2pd 0xbabecafe,%xmm5
// CHECK: cvtdq2pd 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0xe6,0x2d,0x78,0x56,0x34,0x12]
cvtdq2pd 0x12345678,%xmm5
// CHECK: cvtdq2pd %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0xe6,0xed]
cvtdq2pd %xmm5,%xmm5
// CHECK: cvtpd2dq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0xe6,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtpd2dq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtpd2dq 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xe6,0x2d,0x45,0x00,0x00,0x00]
cvtpd2dq 0x45,%xmm5
// CHECK: cvtpd2dq 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xe6,0x2d,0xed,0x7e,0x00,0x00]
cvtpd2dq 0x7eed,%xmm5
// CHECK: cvtpd2dq 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xe6,0x2d,0xfe,0xca,0xbe,0xba]
cvtpd2dq 0xbabecafe,%xmm5
// CHECK: cvtpd2dq 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xe6,0x2d,0x78,0x56,0x34,0x12]
cvtpd2dq 0x12345678,%xmm5
// CHECK: cvtpd2dq %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xe6,0xed]
cvtpd2dq %xmm5,%xmm5
// CHECK: cvtdq2ps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x5b,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtdq2ps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtdq2ps 69, %xmm5
// CHECK: encoding: [0x0f,0x5b,0x2d,0x45,0x00,0x00,0x00]
cvtdq2ps 0x45,%xmm5
// CHECK: cvtdq2ps 32493, %xmm5
// CHECK: encoding: [0x0f,0x5b,0x2d,0xed,0x7e,0x00,0x00]
cvtdq2ps 0x7eed,%xmm5
// CHECK: cvtdq2ps 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x5b,0x2d,0xfe,0xca,0xbe,0xba]
cvtdq2ps 0xbabecafe,%xmm5
// CHECK: cvtdq2ps 305419896, %xmm5
// CHECK: encoding: [0x0f,0x5b,0x2d,0x78,0x56,0x34,0x12]
cvtdq2ps 0x12345678,%xmm5
// CHECK: cvtdq2ps %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x5b,0xed]
cvtdq2ps %xmm5,%xmm5
// CHECK: cvtpd2pi 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x66,0x0f,0x2d,0x9c,0xcb,0xef,0xbe,0xad,0xde]
cvtpd2pi 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: cvtpd2pi 69, %mm3
// CHECK: encoding: [0x66,0x0f,0x2d,0x1d,0x45,0x00,0x00,0x00]
cvtpd2pi 0x45,%mm3
// CHECK: cvtpd2pi 32493, %mm3
// CHECK: encoding: [0x66,0x0f,0x2d,0x1d,0xed,0x7e,0x00,0x00]
cvtpd2pi 0x7eed,%mm3
// CHECK: cvtpd2pi 3133065982, %mm3
// CHECK: encoding: [0x66,0x0f,0x2d,0x1d,0xfe,0xca,0xbe,0xba]
cvtpd2pi 0xbabecafe,%mm3
// CHECK: cvtpd2pi 305419896, %mm3
// CHECK: encoding: [0x66,0x0f,0x2d,0x1d,0x78,0x56,0x34,0x12]
cvtpd2pi 0x12345678,%mm3
// CHECK: cvtpd2pi %xmm5, %mm3
// CHECK: encoding: [0x66,0x0f,0x2d,0xdd]
cvtpd2pi %xmm5,%mm3
// CHECK: cvtpd2ps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x5a,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtpd2ps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtpd2ps 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5a,0x2d,0x45,0x00,0x00,0x00]
cvtpd2ps 0x45,%xmm5
// CHECK: cvtpd2ps 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5a,0x2d,0xed,0x7e,0x00,0x00]
cvtpd2ps 0x7eed,%xmm5
// CHECK: cvtpd2ps 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5a,0x2d,0xfe,0xca,0xbe,0xba]
cvtpd2ps 0xbabecafe,%xmm5
// CHECK: cvtpd2ps 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5a,0x2d,0x78,0x56,0x34,0x12]
cvtpd2ps 0x12345678,%xmm5
// CHECK: cvtpd2ps %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5a,0xed]
cvtpd2ps %xmm5,%xmm5
// CHECK: cvtps2pd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x0f,0x5a,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtps2pd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtps2pd 69, %xmm5
// CHECK: encoding: [0x0f,0x5a,0x2d,0x45,0x00,0x00,0x00]
cvtps2pd 0x45,%xmm5
// CHECK: cvtps2pd 32493, %xmm5
// CHECK: encoding: [0x0f,0x5a,0x2d,0xed,0x7e,0x00,0x00]
cvtps2pd 0x7eed,%xmm5
// CHECK: cvtps2pd 3133065982, %xmm5
// CHECK: encoding: [0x0f,0x5a,0x2d,0xfe,0xca,0xbe,0xba]
cvtps2pd 0xbabecafe,%xmm5
// CHECK: cvtps2pd 305419896, %xmm5
// CHECK: encoding: [0x0f,0x5a,0x2d,0x78,0x56,0x34,0x12]
cvtps2pd 0x12345678,%xmm5
// CHECK: cvtps2pd %xmm5, %xmm5
// CHECK: encoding: [0x0f,0x5a,0xed]
cvtps2pd %xmm5,%xmm5
// CHECK: cvtps2dq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x5b,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtps2dq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtps2dq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5b,0x2d,0x45,0x00,0x00,0x00]
cvtps2dq 0x45,%xmm5
// CHECK: cvtps2dq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5b,0x2d,0xed,0x7e,0x00,0x00]
cvtps2dq 0x7eed,%xmm5
// CHECK: cvtps2dq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5b,0x2d,0xfe,0xca,0xbe,0xba]
cvtps2dq 0xbabecafe,%xmm5
// CHECK: cvtps2dq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5b,0x2d,0x78,0x56,0x34,0x12]
cvtps2dq 0x12345678,%xmm5
// CHECK: cvtps2dq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x5b,0xed]
cvtps2dq %xmm5,%xmm5
// CHECK: cvtsd2ss 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5a,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtsd2ss 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtsd2ss 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5a,0x2d,0x45,0x00,0x00,0x00]
cvtsd2ss 0x45,%xmm5
// CHECK: cvtsd2ss 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5a,0x2d,0xed,0x7e,0x00,0x00]
cvtsd2ss 0x7eed,%xmm5
// CHECK: cvtsd2ss 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5a,0x2d,0xfe,0xca,0xbe,0xba]
cvtsd2ss 0xbabecafe,%xmm5
// CHECK: cvtsd2ss 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5a,0x2d,0x78,0x56,0x34,0x12]
cvtsd2ss 0x12345678,%xmm5
// CHECK: cvtsd2ss %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x5a,0xed]
cvtsd2ss %xmm5,%xmm5
// CHECK: cvtss2sd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5a,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvtss2sd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvtss2sd 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5a,0x2d,0x45,0x00,0x00,0x00]
cvtss2sd 0x45,%xmm5
// CHECK: cvtss2sd 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5a,0x2d,0xed,0x7e,0x00,0x00]
cvtss2sd 0x7eed,%xmm5
// CHECK: cvtss2sd 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5a,0x2d,0xfe,0xca,0xbe,0xba]
cvtss2sd 0xbabecafe,%xmm5
// CHECK: cvtss2sd 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5a,0x2d,0x78,0x56,0x34,0x12]
cvtss2sd 0x12345678,%xmm5
// CHECK: cvtss2sd %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5a,0xed]
cvtss2sd %xmm5,%xmm5
// CHECK: cvttpd2pi 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x66,0x0f,0x2c,0x9c,0xcb,0xef,0xbe,0xad,0xde]
cvttpd2pi 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: cvttpd2pi 69, %mm3
// CHECK: encoding: [0x66,0x0f,0x2c,0x1d,0x45,0x00,0x00,0x00]
cvttpd2pi 0x45,%mm3
// CHECK: cvttpd2pi 32493, %mm3
// CHECK: encoding: [0x66,0x0f,0x2c,0x1d,0xed,0x7e,0x00,0x00]
cvttpd2pi 0x7eed,%mm3
// CHECK: cvttpd2pi 3133065982, %mm3
// CHECK: encoding: [0x66,0x0f,0x2c,0x1d,0xfe,0xca,0xbe,0xba]
cvttpd2pi 0xbabecafe,%mm3
// CHECK: cvttpd2pi 305419896, %mm3
// CHECK: encoding: [0x66,0x0f,0x2c,0x1d,0x78,0x56,0x34,0x12]
cvttpd2pi 0x12345678,%mm3
// CHECK: cvttpd2pi %xmm5, %mm3
// CHECK: encoding: [0x66,0x0f,0x2c,0xdd]
cvttpd2pi %xmm5,%mm3
// CHECK: cvttsd2si 3735928559(%ebx,%ecx,8), %ecx
// CHECK: encoding: [0xf2,0x0f,0x2c,0x8c,0xcb,0xef,0xbe,0xad,0xde]
cvttsd2si 0xdeadbeef(%ebx,%ecx,8),%ecx
// CHECK: cvttsd2si 69, %ecx
// CHECK: encoding: [0xf2,0x0f,0x2c,0x0d,0x45,0x00,0x00,0x00]
cvttsd2si 0x45,%ecx
// CHECK: cvttsd2si 32493, %ecx
// CHECK: encoding: [0xf2,0x0f,0x2c,0x0d,0xed,0x7e,0x00,0x00]
cvttsd2si 0x7eed,%ecx
// CHECK: cvttsd2si 3133065982, %ecx
// CHECK: encoding: [0xf2,0x0f,0x2c,0x0d,0xfe,0xca,0xbe,0xba]
cvttsd2si 0xbabecafe,%ecx
// CHECK: cvttsd2si 305419896, %ecx
// CHECK: encoding: [0xf2,0x0f,0x2c,0x0d,0x78,0x56,0x34,0x12]
cvttsd2si 0x12345678,%ecx
// CHECK: cvttsd2si %xmm5, %ecx
// CHECK: encoding: [0xf2,0x0f,0x2c,0xcd]
cvttsd2si %xmm5,%ecx
// CHECK: cvttps2dq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5b,0xac,0xcb,0xef,0xbe,0xad,0xde]
cvttps2dq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvttps2dq 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5b,0x2d,0x45,0x00,0x00,0x00]
cvttps2dq 0x45,%xmm5
// CHECK: cvttps2dq 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5b,0x2d,0xed,0x7e,0x00,0x00]
cvttps2dq 0x7eed,%xmm5
// CHECK: cvttps2dq 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5b,0x2d,0xfe,0xca,0xbe,0xba]
cvttps2dq 0xbabecafe,%xmm5
// CHECK: cvttps2dq 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5b,0x2d,0x78,0x56,0x34,0x12]
cvttps2dq 0x12345678,%xmm5
// CHECK: cvttps2dq %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x5b,0xed]
cvttps2dq %xmm5,%xmm5
// CHECK: maskmovdqu %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf7,0xed]
maskmovdqu %xmm5,%xmm5
// CHECK: movdqa 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x6f,0xac,0xcb,0xef,0xbe,0xad,0xde]
movdqa 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movdqa 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6f,0x2d,0x45,0x00,0x00,0x00]
movdqa 0x45,%xmm5
// CHECK: movdqa 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6f,0x2d,0xed,0x7e,0x00,0x00]
movdqa 0x7eed,%xmm5
// CHECK: movdqa 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6f,0x2d,0xfe,0xca,0xbe,0xba]
movdqa 0xbabecafe,%xmm5
// CHECK: movdqa 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6f,0x2d,0x78,0x56,0x34,0x12]
movdqa 0x12345678,%xmm5
// CHECK: movdqa %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6f,0xed]
movdqa %xmm5,%xmm5
// CHECK: movdqa %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0x7f,0xac,0xcb,0xef,0xbe,0xad,0xde]
movdqa %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movdqa %xmm5, 69
// CHECK: encoding: [0x66,0x0f,0x7f,0x2d,0x45,0x00,0x00,0x00]
movdqa %xmm5,0x45
// CHECK: movdqa %xmm5, 32493
// CHECK: encoding: [0x66,0x0f,0x7f,0x2d,0xed,0x7e,0x00,0x00]
movdqa %xmm5,0x7eed
// CHECK: movdqa %xmm5, 3133065982
// CHECK: encoding: [0x66,0x0f,0x7f,0x2d,0xfe,0xca,0xbe,0xba]
movdqa %xmm5,0xbabecafe
// CHECK: movdqa %xmm5, 305419896
// CHECK: encoding: [0x66,0x0f,0x7f,0x2d,0x78,0x56,0x34,0x12]
movdqa %xmm5,0x12345678
// CHECK: movdqa %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6f,0xed]
movdqa %xmm5,%xmm5
// CHECK: movdqu 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x6f,0xac,0xcb,0xef,0xbe,0xad,0xde]
movdqu 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movdqu 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x6f,0x2d,0x45,0x00,0x00,0x00]
movdqu 0x45,%xmm5
// CHECK: movdqu 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x6f,0x2d,0xed,0x7e,0x00,0x00]
movdqu 0x7eed,%xmm5
// CHECK: movdqu 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x6f,0x2d,0xfe,0xca,0xbe,0xba]
movdqu 0xbabecafe,%xmm5
// CHECK: movdqu 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x6f,0x2d,0x78,0x56,0x34,0x12]
movdqu 0x12345678,%xmm5
// CHECK: movdqu %xmm5, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf3,0x0f,0x7f,0xac,0xcb,0xef,0xbe,0xad,0xde]
movdqu %xmm5,0xdeadbeef(%ebx,%ecx,8)
// CHECK: movdqu %xmm5, 69
// CHECK: encoding: [0xf3,0x0f,0x7f,0x2d,0x45,0x00,0x00,0x00]
movdqu %xmm5,0x45
// CHECK: movdqu %xmm5, 32493
// CHECK: encoding: [0xf3,0x0f,0x7f,0x2d,0xed,0x7e,0x00,0x00]
movdqu %xmm5,0x7eed
// CHECK: movdqu %xmm5, 3133065982
// CHECK: encoding: [0xf3,0x0f,0x7f,0x2d,0xfe,0xca,0xbe,0xba]
movdqu %xmm5,0xbabecafe
// CHECK: movdqu %xmm5, 305419896
// CHECK: encoding: [0xf3,0x0f,0x7f,0x2d,0x78,0x56,0x34,0x12]
movdqu %xmm5,0x12345678
// CHECK: movdq2q %xmm5, %mm3
// CHECK: encoding: [0xf2,0x0f,0xd6,0xdd]
movdq2q %xmm5,%mm3
// CHECK: movq2dq %mm3, %xmm5
// CHECK: encoding: [0xf3,0x0f,0xd6,0xeb]
movq2dq %mm3,%xmm5
// CHECK: pmuludq 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0xf4,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmuludq 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmuludq 69, %mm3
// CHECK: encoding: [0x0f,0xf4,0x1d,0x45,0x00,0x00,0x00]
pmuludq 0x45,%mm3
// CHECK: pmuludq 32493, %mm3
// CHECK: encoding: [0x0f,0xf4,0x1d,0xed,0x7e,0x00,0x00]
pmuludq 0x7eed,%mm3
// CHECK: pmuludq 3133065982, %mm3
// CHECK: encoding: [0x0f,0xf4,0x1d,0xfe,0xca,0xbe,0xba]
pmuludq 0xbabecafe,%mm3
// CHECK: pmuludq 305419896, %mm3
// CHECK: encoding: [0x0f,0xf4,0x1d,0x78,0x56,0x34,0x12]
pmuludq 0x12345678,%mm3
// CHECK: pmuludq %mm3, %mm3
// CHECK: encoding: [0x0f,0xf4,0xdb]
pmuludq %mm3,%mm3
// CHECK: pmuludq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xf4,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmuludq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmuludq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf4,0x2d,0x45,0x00,0x00,0x00]
pmuludq 0x45,%xmm5
// CHECK: pmuludq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf4,0x2d,0xed,0x7e,0x00,0x00]
pmuludq 0x7eed,%xmm5
// CHECK: pmuludq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf4,0x2d,0xfe,0xca,0xbe,0xba]
pmuludq 0xbabecafe,%xmm5
// CHECK: pmuludq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf4,0x2d,0x78,0x56,0x34,0x12]
pmuludq 0x12345678,%xmm5
// CHECK: pmuludq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xf4,0xed]
pmuludq %xmm5,%xmm5
// CHECK: pslldq $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x73,0xfd,0x7f]
pslldq $0x7f,%xmm5
// CHECK: psrldq $127, %xmm5
// CHECK: encoding: [0x66,0x0f,0x73,0xdd,0x7f]
psrldq $0x7f,%xmm5
// CHECK: punpckhqdq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x6d,0xac,0xcb,0xef,0xbe,0xad,0xde]
punpckhqdq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: punpckhqdq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6d,0x2d,0x45,0x00,0x00,0x00]
punpckhqdq 0x45,%xmm5
// CHECK: punpckhqdq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6d,0x2d,0xed,0x7e,0x00,0x00]
punpckhqdq 0x7eed,%xmm5
// CHECK: punpckhqdq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6d,0x2d,0xfe,0xca,0xbe,0xba]
punpckhqdq 0xbabecafe,%xmm5
// CHECK: punpckhqdq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6d,0x2d,0x78,0x56,0x34,0x12]
punpckhqdq 0x12345678,%xmm5
// CHECK: punpckhqdq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6d,0xed]
punpckhqdq %xmm5,%xmm5
// CHECK: punpcklqdq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x6c,0xac,0xcb,0xef,0xbe,0xad,0xde]
punpcklqdq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: punpcklqdq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6c,0x2d,0x45,0x00,0x00,0x00]
punpcklqdq 0x45,%xmm5
// CHECK: punpcklqdq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6c,0x2d,0xed,0x7e,0x00,0x00]
punpcklqdq 0x7eed,%xmm5
// CHECK: punpcklqdq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6c,0x2d,0xfe,0xca,0xbe,0xba]
punpcklqdq 0xbabecafe,%xmm5
// CHECK: punpcklqdq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6c,0x2d,0x78,0x56,0x34,0x12]
punpcklqdq 0x12345678,%xmm5
// CHECK: punpcklqdq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x6c,0xed]
punpcklqdq %xmm5,%xmm5
// CHECK: addsubpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0xd0,0xac,0xcb,0xef,0xbe,0xad,0xde]
addsubpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: addsubpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd0,0x2d,0x45,0x00,0x00,0x00]
addsubpd 0x45,%xmm5
// CHECK: addsubpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd0,0x2d,0xed,0x7e,0x00,0x00]
addsubpd 0x7eed,%xmm5
// CHECK: addsubpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd0,0x2d,0xfe,0xca,0xbe,0xba]
addsubpd 0xbabecafe,%xmm5
// CHECK: addsubpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd0,0x2d,0x78,0x56,0x34,0x12]
addsubpd 0x12345678,%xmm5
// CHECK: addsubpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0xd0,0xed]
addsubpd %xmm5,%xmm5
// CHECK: addsubps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0xd0,0xac,0xcb,0xef,0xbe,0xad,0xde]
addsubps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: addsubps 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xd0,0x2d,0x45,0x00,0x00,0x00]
addsubps 0x45,%xmm5
// CHECK: addsubps 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xd0,0x2d,0xed,0x7e,0x00,0x00]
addsubps 0x7eed,%xmm5
// CHECK: addsubps 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xd0,0x2d,0xfe,0xca,0xbe,0xba]
addsubps 0xbabecafe,%xmm5
// CHECK: addsubps 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xd0,0x2d,0x78,0x56,0x34,0x12]
addsubps 0x12345678,%xmm5
// CHECK: addsubps %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xd0,0xed]
addsubps %xmm5,%xmm5
// CHECK: fisttpl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdb,0x8c,0xcb,0xef,0xbe,0xad,0xde]
fisttpl 0xdeadbeef(%ebx,%ecx,8)
// CHECK: fisttpl 3133065982
// CHECK: encoding: [0xdb,0x0d,0xfe,0xca,0xbe,0xba]
fisttpl 0xbabecafe
// CHECK: fisttpl 305419896
// CHECK: encoding: [0xdb,0x0d,0x78,0x56,0x34,0x12]
fisttpl 0x12345678
// CHECK: haddpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x7c,0xac,0xcb,0xef,0xbe,0xad,0xde]
haddpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: haddpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7c,0x2d,0x45,0x00,0x00,0x00]
haddpd 0x45,%xmm5
// CHECK: haddpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7c,0x2d,0xed,0x7e,0x00,0x00]
haddpd 0x7eed,%xmm5
// CHECK: haddpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7c,0x2d,0xfe,0xca,0xbe,0xba]
haddpd 0xbabecafe,%xmm5
// CHECK: haddpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7c,0x2d,0x78,0x56,0x34,0x12]
haddpd 0x12345678,%xmm5
// CHECK: haddpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7c,0xed]
haddpd %xmm5,%xmm5
// CHECK: haddps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7c,0xac,0xcb,0xef,0xbe,0xad,0xde]
haddps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: haddps 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7c,0x2d,0x45,0x00,0x00,0x00]
haddps 0x45,%xmm5
// CHECK: haddps 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7c,0x2d,0xed,0x7e,0x00,0x00]
haddps 0x7eed,%xmm5
// CHECK: haddps 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7c,0x2d,0xfe,0xca,0xbe,0xba]
haddps 0xbabecafe,%xmm5
// CHECK: haddps 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7c,0x2d,0x78,0x56,0x34,0x12]
haddps 0x12345678,%xmm5
// CHECK: haddps %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7c,0xed]
haddps %xmm5,%xmm5
// CHECK: hsubpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x7d,0xac,0xcb,0xef,0xbe,0xad,0xde]
hsubpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: hsubpd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7d,0x2d,0x45,0x00,0x00,0x00]
hsubpd 0x45,%xmm5
// CHECK: hsubpd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7d,0x2d,0xed,0x7e,0x00,0x00]
hsubpd 0x7eed,%xmm5
// CHECK: hsubpd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7d,0x2d,0xfe,0xca,0xbe,0xba]
hsubpd 0xbabecafe,%xmm5
// CHECK: hsubpd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7d,0x2d,0x78,0x56,0x34,0x12]
hsubpd 0x12345678,%xmm5
// CHECK: hsubpd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x7d,0xed]
hsubpd %xmm5,%xmm5
// CHECK: hsubps 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7d,0xac,0xcb,0xef,0xbe,0xad,0xde]
hsubps 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: hsubps 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7d,0x2d,0x45,0x00,0x00,0x00]
hsubps 0x45,%xmm5
// CHECK: hsubps 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7d,0x2d,0xed,0x7e,0x00,0x00]
hsubps 0x7eed,%xmm5
// CHECK: hsubps 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7d,0x2d,0xfe,0xca,0xbe,0xba]
hsubps 0xbabecafe,%xmm5
// CHECK: hsubps 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7d,0x2d,0x78,0x56,0x34,0x12]
hsubps 0x12345678,%xmm5
// CHECK: hsubps %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x7d,0xed]
hsubps %xmm5,%xmm5
// CHECK: lddqu 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0xf0,0xac,0xcb,0xef,0xbe,0xad,0xde]
lddqu 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: lddqu 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xf0,0x2d,0x45,0x00,0x00,0x00]
lddqu 0x45,%xmm5
// CHECK: lddqu 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xf0,0x2d,0xed,0x7e,0x00,0x00]
lddqu 0x7eed,%xmm5
// CHECK: lddqu 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xf0,0x2d,0xfe,0xca,0xbe,0xba]
lddqu 0xbabecafe,%xmm5
// CHECK: lddqu 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0xf0,0x2d,0x78,0x56,0x34,0x12]
lddqu 0x12345678,%xmm5
// CHECK: movddup 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x12,0xac,0xcb,0xef,0xbe,0xad,0xde]
movddup 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movddup 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x12,0x2d,0x45,0x00,0x00,0x00]
movddup 0x45,%xmm5
// CHECK: movddup 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x12,0x2d,0xed,0x7e,0x00,0x00]
movddup 0x7eed,%xmm5
// CHECK: movddup 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x12,0x2d,0xfe,0xca,0xbe,0xba]
movddup 0xbabecafe,%xmm5
// CHECK: movddup 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x12,0x2d,0x78,0x56,0x34,0x12]
movddup 0x12345678,%xmm5
// CHECK: movddup %xmm5, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x12,0xed]
movddup %xmm5,%xmm5
// CHECK: movshdup 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x16,0xac,0xcb,0xef,0xbe,0xad,0xde]
movshdup 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movshdup 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x16,0x2d,0x45,0x00,0x00,0x00]
movshdup 0x45,%xmm5
// CHECK: movshdup 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x16,0x2d,0xed,0x7e,0x00,0x00]
movshdup 0x7eed,%xmm5
// CHECK: movshdup 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x16,0x2d,0xfe,0xca,0xbe,0xba]
movshdup 0xbabecafe,%xmm5
// CHECK: movshdup 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x16,0x2d,0x78,0x56,0x34,0x12]
movshdup 0x12345678,%xmm5
// CHECK: movshdup %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x16,0xed]
movshdup %xmm5,%xmm5
// CHECK: movsldup 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x12,0xac,0xcb,0xef,0xbe,0xad,0xde]
movsldup 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movsldup 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x12,0x2d,0x45,0x00,0x00,0x00]
movsldup 0x45,%xmm5
// CHECK: movsldup 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x12,0x2d,0xed,0x7e,0x00,0x00]
movsldup 0x7eed,%xmm5
// CHECK: movsldup 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x12,0x2d,0xfe,0xca,0xbe,0xba]
movsldup 0xbabecafe,%xmm5
// CHECK: movsldup 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x12,0x2d,0x78,0x56,0x34,0x12]
movsldup 0x12345678,%xmm5
// CHECK: movsldup %xmm5, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x12,0xed]
movsldup %xmm5,%xmm5
// CHECK: vmclear 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0xc7,0xb4,0xcb,0xef,0xbe,0xad,0xde]
vmclear 0xdeadbeef(%ebx,%ecx,8)
// CHECK: vmclear 32493
// CHECK: encoding: [0x66,0x0f,0xc7,0x35,0xed,0x7e,0x00,0x00]
vmclear 0x7eed
// CHECK: vmclear 3133065982
// CHECK: encoding: [0x66,0x0f,0xc7,0x35,0xfe,0xca,0xbe,0xba]
vmclear 0xbabecafe
// CHECK: vmclear 305419896
// CHECK: encoding: [0x66,0x0f,0xc7,0x35,0x78,0x56,0x34,0x12]
vmclear 0x12345678
// CHECK: vmptrld 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xc7,0xb4,0xcb,0xef,0xbe,0xad,0xde]
vmptrld 0xdeadbeef(%ebx,%ecx,8)
// CHECK: vmptrld 32493
// CHECK: encoding: [0x0f,0xc7,0x35,0xed,0x7e,0x00,0x00]
vmptrld 0x7eed
// CHECK: vmptrld 3133065982
// CHECK: encoding: [0x0f,0xc7,0x35,0xfe,0xca,0xbe,0xba]
vmptrld 0xbabecafe
// CHECK: vmptrld 305419896
// CHECK: encoding: [0x0f,0xc7,0x35,0x78,0x56,0x34,0x12]
vmptrld 0x12345678
// CHECK: vmptrst 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xc7,0xbc,0xcb,0xef,0xbe,0xad,0xde]
vmptrst 0xdeadbeef(%ebx,%ecx,8)
// CHECK: vmptrst 32493
// CHECK: encoding: [0x0f,0xc7,0x3d,0xed,0x7e,0x00,0x00]
vmptrst 0x7eed
// CHECK: vmptrst 3133065982
// CHECK: encoding: [0x0f,0xc7,0x3d,0xfe,0xca,0xbe,0xba]
vmptrst 0xbabecafe
// CHECK: vmptrst 305419896
// CHECK: encoding: [0x0f,0xc7,0x3d,0x78,0x56,0x34,0x12]
vmptrst 0x12345678
// CHECK: phaddw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x01,0x9c,0xcb,0xef,0xbe,0xad,0xde]
phaddw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: phaddw 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x01,0x1d,0x45,0x00,0x00,0x00]
phaddw 0x45,%mm3
// CHECK: phaddw 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x01,0x1d,0xed,0x7e,0x00,0x00]
phaddw 0x7eed,%mm3
// CHECK: phaddw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x01,0x1d,0xfe,0xca,0xbe,0xba]
phaddw 0xbabecafe,%mm3
// CHECK: phaddw 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x01,0x1d,0x78,0x56,0x34,0x12]
phaddw 0x12345678,%mm3
// CHECK: phaddw %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x01,0xdb]
phaddw %mm3,%mm3
// CHECK: phaddw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x01,0xac,0xcb,0xef,0xbe,0xad,0xde]
phaddw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: phaddw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x01,0x2d,0x45,0x00,0x00,0x00]
phaddw 0x45,%xmm5
// CHECK: phaddw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x01,0x2d,0xed,0x7e,0x00,0x00]
phaddw 0x7eed,%xmm5
// CHECK: phaddw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x01,0x2d,0xfe,0xca,0xbe,0xba]
phaddw 0xbabecafe,%xmm5
// CHECK: phaddw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x01,0x2d,0x78,0x56,0x34,0x12]
phaddw 0x12345678,%xmm5
// CHECK: phaddw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x01,0xed]
phaddw %xmm5,%xmm5
// CHECK: phaddd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x02,0x9c,0xcb,0xef,0xbe,0xad,0xde]
phaddd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: phaddd 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x02,0x1d,0x45,0x00,0x00,0x00]
phaddd 0x45,%mm3
// CHECK: phaddd 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x02,0x1d,0xed,0x7e,0x00,0x00]
phaddd 0x7eed,%mm3
// CHECK: phaddd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x02,0x1d,0xfe,0xca,0xbe,0xba]
phaddd 0xbabecafe,%mm3
// CHECK: phaddd 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x02,0x1d,0x78,0x56,0x34,0x12]
phaddd 0x12345678,%mm3
// CHECK: phaddd %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x02,0xdb]
phaddd %mm3,%mm3
// CHECK: phaddd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x02,0xac,0xcb,0xef,0xbe,0xad,0xde]
phaddd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: phaddd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x02,0x2d,0x45,0x00,0x00,0x00]
phaddd 0x45,%xmm5
// CHECK: phaddd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x02,0x2d,0xed,0x7e,0x00,0x00]
phaddd 0x7eed,%xmm5
// CHECK: phaddd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x02,0x2d,0xfe,0xca,0xbe,0xba]
phaddd 0xbabecafe,%xmm5
// CHECK: phaddd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x02,0x2d,0x78,0x56,0x34,0x12]
phaddd 0x12345678,%xmm5
// CHECK: phaddd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x02,0xed]
phaddd %xmm5,%xmm5
// CHECK: phaddsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x03,0x9c,0xcb,0xef,0xbe,0xad,0xde]
phaddsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: phaddsw 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x03,0x1d,0x45,0x00,0x00,0x00]
phaddsw 0x45,%mm3
// CHECK: phaddsw 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x03,0x1d,0xed,0x7e,0x00,0x00]
phaddsw 0x7eed,%mm3
// CHECK: phaddsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x03,0x1d,0xfe,0xca,0xbe,0xba]
phaddsw 0xbabecafe,%mm3
// CHECK: phaddsw 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x03,0x1d,0x78,0x56,0x34,0x12]
phaddsw 0x12345678,%mm3
// CHECK: phaddsw %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x03,0xdb]
phaddsw %mm3,%mm3
// CHECK: phaddsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x03,0xac,0xcb,0xef,0xbe,0xad,0xde]
phaddsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: phaddsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x03,0x2d,0x45,0x00,0x00,0x00]
phaddsw 0x45,%xmm5
// CHECK: phaddsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x03,0x2d,0xed,0x7e,0x00,0x00]
phaddsw 0x7eed,%xmm5
// CHECK: phaddsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x03,0x2d,0xfe,0xca,0xbe,0xba]
phaddsw 0xbabecafe,%xmm5
// CHECK: phaddsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x03,0x2d,0x78,0x56,0x34,0x12]
phaddsw 0x12345678,%xmm5
// CHECK: phaddsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x03,0xed]
phaddsw %xmm5,%xmm5
// CHECK: phsubw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x05,0x9c,0xcb,0xef,0xbe,0xad,0xde]
phsubw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: phsubw 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x05,0x1d,0x45,0x00,0x00,0x00]
phsubw 0x45,%mm3
// CHECK: phsubw 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x05,0x1d,0xed,0x7e,0x00,0x00]
phsubw 0x7eed,%mm3
// CHECK: phsubw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x05,0x1d,0xfe,0xca,0xbe,0xba]
phsubw 0xbabecafe,%mm3
// CHECK: phsubw 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x05,0x1d,0x78,0x56,0x34,0x12]
phsubw 0x12345678,%mm3
// CHECK: phsubw %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x05,0xdb]
phsubw %mm3,%mm3
// CHECK: phsubw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x05,0xac,0xcb,0xef,0xbe,0xad,0xde]
phsubw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: phsubw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x05,0x2d,0x45,0x00,0x00,0x00]
phsubw 0x45,%xmm5
// CHECK: phsubw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x05,0x2d,0xed,0x7e,0x00,0x00]
phsubw 0x7eed,%xmm5
// CHECK: phsubw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x05,0x2d,0xfe,0xca,0xbe,0xba]
phsubw 0xbabecafe,%xmm5
// CHECK: phsubw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x05,0x2d,0x78,0x56,0x34,0x12]
phsubw 0x12345678,%xmm5
// CHECK: phsubw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x05,0xed]
phsubw %xmm5,%xmm5
// CHECK: phsubd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x06,0x9c,0xcb,0xef,0xbe,0xad,0xde]
phsubd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: phsubd 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x06,0x1d,0x45,0x00,0x00,0x00]
phsubd 0x45,%mm3
// CHECK: phsubd 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x06,0x1d,0xed,0x7e,0x00,0x00]
phsubd 0x7eed,%mm3
// CHECK: phsubd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x06,0x1d,0xfe,0xca,0xbe,0xba]
phsubd 0xbabecafe,%mm3
// CHECK: phsubd 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x06,0x1d,0x78,0x56,0x34,0x12]
phsubd 0x12345678,%mm3
// CHECK: phsubd %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x06,0xdb]
phsubd %mm3,%mm3
// CHECK: phsubd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x06,0xac,0xcb,0xef,0xbe,0xad,0xde]
phsubd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: phsubd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x06,0x2d,0x45,0x00,0x00,0x00]
phsubd 0x45,%xmm5
// CHECK: phsubd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x06,0x2d,0xed,0x7e,0x00,0x00]
phsubd 0x7eed,%xmm5
// CHECK: phsubd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x06,0x2d,0xfe,0xca,0xbe,0xba]
phsubd 0xbabecafe,%xmm5
// CHECK: phsubd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x06,0x2d,0x78,0x56,0x34,0x12]
phsubd 0x12345678,%xmm5
// CHECK: phsubd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x06,0xed]
phsubd %xmm5,%xmm5
// CHECK: phsubsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x07,0x9c,0xcb,0xef,0xbe,0xad,0xde]
phsubsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: phsubsw 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x07,0x1d,0x45,0x00,0x00,0x00]
phsubsw 0x45,%mm3
// CHECK: phsubsw 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x07,0x1d,0xed,0x7e,0x00,0x00]
phsubsw 0x7eed,%mm3
// CHECK: phsubsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x07,0x1d,0xfe,0xca,0xbe,0xba]
phsubsw 0xbabecafe,%mm3
// CHECK: phsubsw 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x07,0x1d,0x78,0x56,0x34,0x12]
phsubsw 0x12345678,%mm3
// CHECK: phsubsw %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x07,0xdb]
phsubsw %mm3,%mm3
// CHECK: phsubsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x07,0xac,0xcb,0xef,0xbe,0xad,0xde]
phsubsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: phsubsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x07,0x2d,0x45,0x00,0x00,0x00]
phsubsw 0x45,%xmm5
// CHECK: phsubsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x07,0x2d,0xed,0x7e,0x00,0x00]
phsubsw 0x7eed,%xmm5
// CHECK: phsubsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x07,0x2d,0xfe,0xca,0xbe,0xba]
phsubsw 0xbabecafe,%xmm5
// CHECK: phsubsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x07,0x2d,0x78,0x56,0x34,0x12]
phsubsw 0x12345678,%xmm5
// CHECK: phsubsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x07,0xed]
phsubsw %xmm5,%xmm5
// CHECK: pmaddubsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x04,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmaddubsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmaddubsw 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x04,0x1d,0x45,0x00,0x00,0x00]
pmaddubsw 0x45,%mm3
// CHECK: pmaddubsw 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x04,0x1d,0xed,0x7e,0x00,0x00]
pmaddubsw 0x7eed,%mm3
// CHECK: pmaddubsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x04,0x1d,0xfe,0xca,0xbe,0xba]
pmaddubsw 0xbabecafe,%mm3
// CHECK: pmaddubsw 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x04,0x1d,0x78,0x56,0x34,0x12]
pmaddubsw 0x12345678,%mm3
// CHECK: pmaddubsw %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x04,0xdb]
pmaddubsw %mm3,%mm3
// CHECK: pmaddubsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x04,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmaddubsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmaddubsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x04,0x2d,0x45,0x00,0x00,0x00]
pmaddubsw 0x45,%xmm5
// CHECK: pmaddubsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x04,0x2d,0xed,0x7e,0x00,0x00]
pmaddubsw 0x7eed,%xmm5
// CHECK: pmaddubsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x04,0x2d,0xfe,0xca,0xbe,0xba]
pmaddubsw 0xbabecafe,%xmm5
// CHECK: pmaddubsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x04,0x2d,0x78,0x56,0x34,0x12]
pmaddubsw 0x12345678,%xmm5
// CHECK: pmaddubsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x04,0xed]
pmaddubsw %xmm5,%xmm5
// CHECK: pmulhrsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x0b,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pmulhrsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pmulhrsw 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x0b,0x1d,0x45,0x00,0x00,0x00]
pmulhrsw 0x45,%mm3
// CHECK: pmulhrsw 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x0b,0x1d,0xed,0x7e,0x00,0x00]
pmulhrsw 0x7eed,%mm3
// CHECK: pmulhrsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x0b,0x1d,0xfe,0xca,0xbe,0xba]
pmulhrsw 0xbabecafe,%mm3
// CHECK: pmulhrsw 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x0b,0x1d,0x78,0x56,0x34,0x12]
pmulhrsw 0x12345678,%mm3
// CHECK: pmulhrsw %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x0b,0xdb]
pmulhrsw %mm3,%mm3
// CHECK: pmulhrsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0b,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmulhrsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmulhrsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0b,0x2d,0x45,0x00,0x00,0x00]
pmulhrsw 0x45,%xmm5
// CHECK: pmulhrsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0b,0x2d,0xed,0x7e,0x00,0x00]
pmulhrsw 0x7eed,%xmm5
// CHECK: pmulhrsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0b,0x2d,0xfe,0xca,0xbe,0xba]
pmulhrsw 0xbabecafe,%xmm5
// CHECK: pmulhrsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0b,0x2d,0x78,0x56,0x34,0x12]
pmulhrsw 0x12345678,%xmm5
// CHECK: pmulhrsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0b,0xed]
pmulhrsw %xmm5,%xmm5
// CHECK: pshufb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x00,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pshufb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pshufb 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x00,0x1d,0x45,0x00,0x00,0x00]
pshufb 0x45,%mm3
// CHECK: pshufb 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x00,0x1d,0xed,0x7e,0x00,0x00]
pshufb 0x7eed,%mm3
// CHECK: pshufb 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x00,0x1d,0xfe,0xca,0xbe,0xba]
pshufb 0xbabecafe,%mm3
// CHECK: pshufb 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x00,0x1d,0x78,0x56,0x34,0x12]
pshufb 0x12345678,%mm3
// CHECK: pshufb %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x00,0xdb]
pshufb %mm3,%mm3
// CHECK: pshufb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x00,0xac,0xcb,0xef,0xbe,0xad,0xde]
pshufb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pshufb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x00,0x2d,0x45,0x00,0x00,0x00]
pshufb 0x45,%xmm5
// CHECK: pshufb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x00,0x2d,0xed,0x7e,0x00,0x00]
pshufb 0x7eed,%xmm5
// CHECK: pshufb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x00,0x2d,0xfe,0xca,0xbe,0xba]
pshufb 0xbabecafe,%xmm5
// CHECK: pshufb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x00,0x2d,0x78,0x56,0x34,0x12]
pshufb 0x12345678,%xmm5
// CHECK: pshufb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x00,0xed]
pshufb %xmm5,%xmm5
// CHECK: psignb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x08,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psignb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psignb 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x08,0x1d,0x45,0x00,0x00,0x00]
psignb 0x45,%mm3
// CHECK: psignb 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x08,0x1d,0xed,0x7e,0x00,0x00]
psignb 0x7eed,%mm3
// CHECK: psignb 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x08,0x1d,0xfe,0xca,0xbe,0xba]
psignb 0xbabecafe,%mm3
// CHECK: psignb 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x08,0x1d,0x78,0x56,0x34,0x12]
psignb 0x12345678,%mm3
// CHECK: psignb %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x08,0xdb]
psignb %mm3,%mm3
// CHECK: psignb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x08,0xac,0xcb,0xef,0xbe,0xad,0xde]
psignb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psignb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x08,0x2d,0x45,0x00,0x00,0x00]
psignb 0x45,%xmm5
// CHECK: psignb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x08,0x2d,0xed,0x7e,0x00,0x00]
psignb 0x7eed,%xmm5
// CHECK: psignb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x08,0x2d,0xfe,0xca,0xbe,0xba]
psignb 0xbabecafe,%xmm5
// CHECK: psignb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x08,0x2d,0x78,0x56,0x34,0x12]
psignb 0x12345678,%xmm5
// CHECK: psignb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x08,0xed]
psignb %xmm5,%xmm5
// CHECK: psignw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x09,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psignw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psignw 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x09,0x1d,0x45,0x00,0x00,0x00]
psignw 0x45,%mm3
// CHECK: psignw 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x09,0x1d,0xed,0x7e,0x00,0x00]
psignw 0x7eed,%mm3
// CHECK: psignw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x09,0x1d,0xfe,0xca,0xbe,0xba]
psignw 0xbabecafe,%mm3
// CHECK: psignw 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x09,0x1d,0x78,0x56,0x34,0x12]
psignw 0x12345678,%mm3
// CHECK: psignw %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x09,0xdb]
psignw %mm3,%mm3
// CHECK: psignw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x09,0xac,0xcb,0xef,0xbe,0xad,0xde]
psignw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psignw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x09,0x2d,0x45,0x00,0x00,0x00]
psignw 0x45,%xmm5
// CHECK: psignw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x09,0x2d,0xed,0x7e,0x00,0x00]
psignw 0x7eed,%xmm5
// CHECK: psignw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x09,0x2d,0xfe,0xca,0xbe,0xba]
psignw 0xbabecafe,%xmm5
// CHECK: psignw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x09,0x2d,0x78,0x56,0x34,0x12]
psignw 0x12345678,%xmm5
// CHECK: psignw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x09,0xed]
psignw %xmm5,%xmm5
// CHECK: psignd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x0a,0x9c,0xcb,0xef,0xbe,0xad,0xde]
psignd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: psignd 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x0a,0x1d,0x45,0x00,0x00,0x00]
psignd 0x45,%mm3
// CHECK: psignd 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x0a,0x1d,0xed,0x7e,0x00,0x00]
psignd 0x7eed,%mm3
// CHECK: psignd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x0a,0x1d,0xfe,0xca,0xbe,0xba]
psignd 0xbabecafe,%mm3
// CHECK: psignd 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x0a,0x1d,0x78,0x56,0x34,0x12]
psignd 0x12345678,%mm3
// CHECK: psignd %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x0a,0xdb]
psignd %mm3,%mm3
// CHECK: psignd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0a,0xac,0xcb,0xef,0xbe,0xad,0xde]
psignd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: psignd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0a,0x2d,0x45,0x00,0x00,0x00]
psignd 0x45,%xmm5
// CHECK: psignd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0a,0x2d,0xed,0x7e,0x00,0x00]
psignd 0x7eed,%xmm5
// CHECK: psignd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0a,0x2d,0xfe,0xca,0xbe,0xba]
psignd 0xbabecafe,%xmm5
// CHECK: psignd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0a,0x2d,0x78,0x56,0x34,0x12]
psignd 0x12345678,%xmm5
// CHECK: psignd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x0a,0xed]
psignd %xmm5,%xmm5
// CHECK: pabsb 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x1c,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pabsb 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pabsb 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x1c,0x1d,0x45,0x00,0x00,0x00]
pabsb 0x45,%mm3
// CHECK: pabsb 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x1c,0x1d,0xed,0x7e,0x00,0x00]
pabsb 0x7eed,%mm3
// CHECK: pabsb 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x1c,0x1d,0xfe,0xca,0xbe,0xba]
pabsb 0xbabecafe,%mm3
// CHECK: pabsb 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x1c,0x1d,0x78,0x56,0x34,0x12]
pabsb 0x12345678,%mm3
// CHECK: pabsb %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x1c,0xdb]
pabsb %mm3,%mm3
// CHECK: pabsb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1c,0xac,0xcb,0xef,0xbe,0xad,0xde]
pabsb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pabsb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1c,0x2d,0x45,0x00,0x00,0x00]
pabsb 0x45,%xmm5
// CHECK: pabsb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1c,0x2d,0xed,0x7e,0x00,0x00]
pabsb 0x7eed,%xmm5
// CHECK: pabsb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1c,0x2d,0xfe,0xca,0xbe,0xba]
pabsb 0xbabecafe,%xmm5
// CHECK: pabsb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1c,0x2d,0x78,0x56,0x34,0x12]
pabsb 0x12345678,%xmm5
// CHECK: pabsb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1c,0xed]
pabsb %xmm5,%xmm5
// CHECK: pabsw 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x1d,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pabsw 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pabsw 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x1d,0x1d,0x45,0x00,0x00,0x00]
pabsw 0x45,%mm3
// CHECK: pabsw 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x1d,0x1d,0xed,0x7e,0x00,0x00]
pabsw 0x7eed,%mm3
// CHECK: pabsw 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x1d,0x1d,0xfe,0xca,0xbe,0xba]
pabsw 0xbabecafe,%mm3
// CHECK: pabsw 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x1d,0x1d,0x78,0x56,0x34,0x12]
pabsw 0x12345678,%mm3
// CHECK: pabsw %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x1d,0xdb]
pabsw %mm3,%mm3
// CHECK: pabsw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1d,0xac,0xcb,0xef,0xbe,0xad,0xde]
pabsw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pabsw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1d,0x2d,0x45,0x00,0x00,0x00]
pabsw 0x45,%xmm5
// CHECK: pabsw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1d,0x2d,0xed,0x7e,0x00,0x00]
pabsw 0x7eed,%xmm5
// CHECK: pabsw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1d,0x2d,0xfe,0xca,0xbe,0xba]
pabsw 0xbabecafe,%xmm5
// CHECK: pabsw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1d,0x2d,0x78,0x56,0x34,0x12]
pabsw 0x12345678,%xmm5
// CHECK: pabsw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1d,0xed]
pabsw %xmm5,%xmm5
// CHECK: pabsd 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x38,0x1e,0x9c,0xcb,0xef,0xbe,0xad,0xde]
pabsd 0xdeadbeef(%ebx,%ecx,8),%mm3
// CHECK: pabsd 69, %mm3
// CHECK: encoding: [0x0f,0x38,0x1e,0x1d,0x45,0x00,0x00,0x00]
pabsd 0x45,%mm3
// CHECK: pabsd 32493, %mm3
// CHECK: encoding: [0x0f,0x38,0x1e,0x1d,0xed,0x7e,0x00,0x00]
pabsd 0x7eed,%mm3
// CHECK: pabsd 3133065982, %mm3
// CHECK: encoding: [0x0f,0x38,0x1e,0x1d,0xfe,0xca,0xbe,0xba]
pabsd 0xbabecafe,%mm3
// CHECK: pabsd 305419896, %mm3
// CHECK: encoding: [0x0f,0x38,0x1e,0x1d,0x78,0x56,0x34,0x12]
pabsd 0x12345678,%mm3
// CHECK: pabsd %mm3, %mm3
// CHECK: encoding: [0x0f,0x38,0x1e,0xdb]
pabsd %mm3,%mm3
// CHECK: pabsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1e,0xac,0xcb,0xef,0xbe,0xad,0xde]
pabsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pabsd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1e,0x2d,0x45,0x00,0x00,0x00]
pabsd 0x45,%xmm5
// CHECK: pabsd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1e,0x2d,0xed,0x7e,0x00,0x00]
pabsd 0x7eed,%xmm5
// CHECK: pabsd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1e,0x2d,0xfe,0xca,0xbe,0xba]
pabsd 0xbabecafe,%xmm5
// CHECK: pabsd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1e,0x2d,0x78,0x56,0x34,0x12]
pabsd 0x12345678,%xmm5
// CHECK: pabsd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x1e,0xed]
pabsd %xmm5,%xmm5
// CHECK: femms
// CHECK: encoding: [0x0f,0x0e]
femms
// CHECK: movntdqa 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2a,0xac,0xcb,0xef,0xbe,0xad,0xde]
movntdqa 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: movntdqa 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2a,0x2d,0x45,0x00,0x00,0x00]
movntdqa 0x45,%xmm5
// CHECK: movntdqa 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2a,0x2d,0xed,0x7e,0x00,0x00]
movntdqa 0x7eed,%xmm5
// CHECK: movntdqa 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2a,0x2d,0xfe,0xca,0xbe,0xba]
movntdqa 0xbabecafe,%xmm5
// CHECK: movntdqa 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2a,0x2d,0x78,0x56,0x34,0x12]
movntdqa 0x12345678,%xmm5
// CHECK: packusdw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2b,0xac,0xcb,0xef,0xbe,0xad,0xde]
packusdw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: packusdw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2b,0x2d,0x45,0x00,0x00,0x00]
packusdw 0x45,%xmm5
// CHECK: packusdw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2b,0x2d,0xed,0x7e,0x00,0x00]
packusdw 0x7eed,%xmm5
// CHECK: packusdw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2b,0x2d,0xfe,0xca,0xbe,0xba]
packusdw 0xbabecafe,%xmm5
// CHECK: packusdw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2b,0x2d,0x78,0x56,0x34,0x12]
packusdw 0x12345678,%xmm5
// CHECK: packusdw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x2b,0xed]
packusdw %xmm5,%xmm5
// CHECK: pcmpeqq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x29,0xac,0xcb,0xef,0xbe,0xad,0xde]
pcmpeqq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pcmpeqq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x29,0x2d,0x45,0x00,0x00,0x00]
pcmpeqq 0x45,%xmm5
// CHECK: pcmpeqq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x29,0x2d,0xed,0x7e,0x00,0x00]
pcmpeqq 0x7eed,%xmm5
// CHECK: pcmpeqq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x29,0x2d,0xfe,0xca,0xbe,0xba]
pcmpeqq 0xbabecafe,%xmm5
// CHECK: pcmpeqq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x29,0x2d,0x78,0x56,0x34,0x12]
pcmpeqq 0x12345678,%xmm5
// CHECK: pcmpeqq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x29,0xed]
pcmpeqq %xmm5,%xmm5
// CHECK: phminposuw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x41,0xac,0xcb,0xef,0xbe,0xad,0xde]
phminposuw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: phminposuw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x41,0x2d,0x45,0x00,0x00,0x00]
phminposuw 0x45,%xmm5
// CHECK: phminposuw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x41,0x2d,0xed,0x7e,0x00,0x00]
phminposuw 0x7eed,%xmm5
// CHECK: phminposuw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x41,0x2d,0xfe,0xca,0xbe,0xba]
phminposuw 0xbabecafe,%xmm5
// CHECK: phminposuw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x41,0x2d,0x78,0x56,0x34,0x12]
phminposuw 0x12345678,%xmm5
// CHECK: phminposuw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x41,0xed]
phminposuw %xmm5,%xmm5
// CHECK: pmaxsb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3c,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmaxsb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmaxsb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3c,0x2d,0x45,0x00,0x00,0x00]
pmaxsb 0x45,%xmm5
// CHECK: pmaxsb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3c,0x2d,0xed,0x7e,0x00,0x00]
pmaxsb 0x7eed,%xmm5
// CHECK: pmaxsb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3c,0x2d,0xfe,0xca,0xbe,0xba]
pmaxsb 0xbabecafe,%xmm5
// CHECK: pmaxsb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3c,0x2d,0x78,0x56,0x34,0x12]
pmaxsb 0x12345678,%xmm5
// CHECK: pmaxsb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3c,0xed]
pmaxsb %xmm5,%xmm5
// CHECK: pmaxsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3d,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmaxsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmaxsd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3d,0x2d,0x45,0x00,0x00,0x00]
pmaxsd 0x45,%xmm5
// CHECK: pmaxsd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3d,0x2d,0xed,0x7e,0x00,0x00]
pmaxsd 0x7eed,%xmm5
// CHECK: pmaxsd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3d,0x2d,0xfe,0xca,0xbe,0xba]
pmaxsd 0xbabecafe,%xmm5
// CHECK: pmaxsd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3d,0x2d,0x78,0x56,0x34,0x12]
pmaxsd 0x12345678,%xmm5
// CHECK: pmaxsd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3d,0xed]
pmaxsd %xmm5,%xmm5
// CHECK: pmaxud 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3f,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmaxud 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmaxud 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3f,0x2d,0x45,0x00,0x00,0x00]
pmaxud 0x45,%xmm5
// CHECK: pmaxud 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3f,0x2d,0xed,0x7e,0x00,0x00]
pmaxud 0x7eed,%xmm5
// CHECK: pmaxud 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3f,0x2d,0xfe,0xca,0xbe,0xba]
pmaxud 0xbabecafe,%xmm5
// CHECK: pmaxud 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3f,0x2d,0x78,0x56,0x34,0x12]
pmaxud 0x12345678,%xmm5
// CHECK: pmaxud %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3f,0xed]
pmaxud %xmm5,%xmm5
// CHECK: pmaxuw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3e,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmaxuw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmaxuw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3e,0x2d,0x45,0x00,0x00,0x00]
pmaxuw 0x45,%xmm5
// CHECK: pmaxuw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3e,0x2d,0xed,0x7e,0x00,0x00]
pmaxuw 0x7eed,%xmm5
// CHECK: pmaxuw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3e,0x2d,0xfe,0xca,0xbe,0xba]
pmaxuw 0xbabecafe,%xmm5
// CHECK: pmaxuw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3e,0x2d,0x78,0x56,0x34,0x12]
pmaxuw 0x12345678,%xmm5
// CHECK: pmaxuw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3e,0xed]
pmaxuw %xmm5,%xmm5
// CHECK: pminsb 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x38,0xac,0xcb,0xef,0xbe,0xad,0xde]
pminsb 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pminsb 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x38,0x2d,0x45,0x00,0x00,0x00]
pminsb 0x45,%xmm5
// CHECK: pminsb 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x38,0x2d,0xed,0x7e,0x00,0x00]
pminsb 0x7eed,%xmm5
// CHECK: pminsb 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x38,0x2d,0xfe,0xca,0xbe,0xba]
pminsb 0xbabecafe,%xmm5
// CHECK: pminsb 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x38,0x2d,0x78,0x56,0x34,0x12]
pminsb 0x12345678,%xmm5
// CHECK: pminsb %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x38,0xed]
pminsb %xmm5,%xmm5
// CHECK: pminsd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x39,0xac,0xcb,0xef,0xbe,0xad,0xde]
pminsd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pminsd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x39,0x2d,0x45,0x00,0x00,0x00]
pminsd 0x45,%xmm5
// CHECK: pminsd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x39,0x2d,0xed,0x7e,0x00,0x00]
pminsd 0x7eed,%xmm5
// CHECK: pminsd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x39,0x2d,0xfe,0xca,0xbe,0xba]
pminsd 0xbabecafe,%xmm5
// CHECK: pminsd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x39,0x2d,0x78,0x56,0x34,0x12]
pminsd 0x12345678,%xmm5
// CHECK: pminsd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x39,0xed]
pminsd %xmm5,%xmm5
// CHECK: pminud 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3b,0xac,0xcb,0xef,0xbe,0xad,0xde]
pminud 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pminud 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3b,0x2d,0x45,0x00,0x00,0x00]
pminud 0x45,%xmm5
// CHECK: pminud 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3b,0x2d,0xed,0x7e,0x00,0x00]
pminud 0x7eed,%xmm5
// CHECK: pminud 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3b,0x2d,0xfe,0xca,0xbe,0xba]
pminud 0xbabecafe,%xmm5
// CHECK: pminud 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3b,0x2d,0x78,0x56,0x34,0x12]
pminud 0x12345678,%xmm5
// CHECK: pminud %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3b,0xed]
pminud %xmm5,%xmm5
// CHECK: pminuw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3a,0xac,0xcb,0xef,0xbe,0xad,0xde]
pminuw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pminuw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3a,0x2d,0x45,0x00,0x00,0x00]
pminuw 0x45,%xmm5
// CHECK: pminuw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3a,0x2d,0xed,0x7e,0x00,0x00]
pminuw 0x7eed,%xmm5
// CHECK: pminuw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3a,0x2d,0xfe,0xca,0xbe,0xba]
pminuw 0xbabecafe,%xmm5
// CHECK: pminuw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3a,0x2d,0x78,0x56,0x34,0x12]
pminuw 0x12345678,%xmm5
// CHECK: pminuw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x3a,0xed]
pminuw %xmm5,%xmm5
// CHECK: pmovsxbw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x20,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovsxbw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovsxbw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x20,0x2d,0x45,0x00,0x00,0x00]
pmovsxbw 0x45,%xmm5
// CHECK: pmovsxbw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x20,0x2d,0xed,0x7e,0x00,0x00]
pmovsxbw 0x7eed,%xmm5
// CHECK: pmovsxbw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x20,0x2d,0xfe,0xca,0xbe,0xba]
pmovsxbw 0xbabecafe,%xmm5
// CHECK: pmovsxbw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x20,0x2d,0x78,0x56,0x34,0x12]
pmovsxbw 0x12345678,%xmm5
// CHECK: pmovsxbw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x20,0xed]
pmovsxbw %xmm5,%xmm5
// CHECK: pmovsxbd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x21,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovsxbd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovsxbd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x21,0x2d,0x45,0x00,0x00,0x00]
pmovsxbd 0x45,%xmm5
// CHECK: pmovsxbd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x21,0x2d,0xed,0x7e,0x00,0x00]
pmovsxbd 0x7eed,%xmm5
// CHECK: pmovsxbd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x21,0x2d,0xfe,0xca,0xbe,0xba]
pmovsxbd 0xbabecafe,%xmm5
// CHECK: pmovsxbd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x21,0x2d,0x78,0x56,0x34,0x12]
pmovsxbd 0x12345678,%xmm5
// CHECK: pmovsxbd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x21,0xed]
pmovsxbd %xmm5,%xmm5
// CHECK: pmovsxbq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x22,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovsxbq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovsxbq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x22,0x2d,0x45,0x00,0x00,0x00]
pmovsxbq 0x45,%xmm5
// CHECK: pmovsxbq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x22,0x2d,0xed,0x7e,0x00,0x00]
pmovsxbq 0x7eed,%xmm5
// CHECK: pmovsxbq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x22,0x2d,0xfe,0xca,0xbe,0xba]
pmovsxbq 0xbabecafe,%xmm5
// CHECK: pmovsxbq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x22,0x2d,0x78,0x56,0x34,0x12]
pmovsxbq 0x12345678,%xmm5
// CHECK: pmovsxbq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x22,0xed]
pmovsxbq %xmm5,%xmm5
// CHECK: pmovsxwd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x23,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovsxwd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovsxwd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x23,0x2d,0x45,0x00,0x00,0x00]
pmovsxwd 0x45,%xmm5
// CHECK: pmovsxwd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x23,0x2d,0xed,0x7e,0x00,0x00]
pmovsxwd 0x7eed,%xmm5
// CHECK: pmovsxwd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x23,0x2d,0xfe,0xca,0xbe,0xba]
pmovsxwd 0xbabecafe,%xmm5
// CHECK: pmovsxwd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x23,0x2d,0x78,0x56,0x34,0x12]
pmovsxwd 0x12345678,%xmm5
// CHECK: pmovsxwd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x23,0xed]
pmovsxwd %xmm5,%xmm5
// CHECK: pmovsxwq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x24,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovsxwq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovsxwq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x24,0x2d,0x45,0x00,0x00,0x00]
pmovsxwq 0x45,%xmm5
// CHECK: pmovsxwq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x24,0x2d,0xed,0x7e,0x00,0x00]
pmovsxwq 0x7eed,%xmm5
// CHECK: pmovsxwq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x24,0x2d,0xfe,0xca,0xbe,0xba]
pmovsxwq 0xbabecafe,%xmm5
// CHECK: pmovsxwq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x24,0x2d,0x78,0x56,0x34,0x12]
pmovsxwq 0x12345678,%xmm5
// CHECK: pmovsxwq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x24,0xed]
pmovsxwq %xmm5,%xmm5
// CHECK: pmovsxdq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x25,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovsxdq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovsxdq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x25,0x2d,0x45,0x00,0x00,0x00]
pmovsxdq 0x45,%xmm5
// CHECK: pmovsxdq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x25,0x2d,0xed,0x7e,0x00,0x00]
pmovsxdq 0x7eed,%xmm5
// CHECK: pmovsxdq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x25,0x2d,0xfe,0xca,0xbe,0xba]
pmovsxdq 0xbabecafe,%xmm5
// CHECK: pmovsxdq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x25,0x2d,0x78,0x56,0x34,0x12]
pmovsxdq 0x12345678,%xmm5
// CHECK: pmovsxdq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x25,0xed]
pmovsxdq %xmm5,%xmm5
// CHECK: pmovzxbw 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x30,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovzxbw 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovzxbw 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x30,0x2d,0x45,0x00,0x00,0x00]
pmovzxbw 0x45,%xmm5
// CHECK: pmovzxbw 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x30,0x2d,0xed,0x7e,0x00,0x00]
pmovzxbw 0x7eed,%xmm5
// CHECK: pmovzxbw 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x30,0x2d,0xfe,0xca,0xbe,0xba]
pmovzxbw 0xbabecafe,%xmm5
// CHECK: pmovzxbw 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x30,0x2d,0x78,0x56,0x34,0x12]
pmovzxbw 0x12345678,%xmm5
// CHECK: pmovzxbw %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x30,0xed]
pmovzxbw %xmm5,%xmm5
// CHECK: pmovzxbd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x31,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovzxbd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovzxbd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x31,0x2d,0x45,0x00,0x00,0x00]
pmovzxbd 0x45,%xmm5
// CHECK: pmovzxbd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x31,0x2d,0xed,0x7e,0x00,0x00]
pmovzxbd 0x7eed,%xmm5
// CHECK: pmovzxbd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x31,0x2d,0xfe,0xca,0xbe,0xba]
pmovzxbd 0xbabecafe,%xmm5
// CHECK: pmovzxbd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x31,0x2d,0x78,0x56,0x34,0x12]
pmovzxbd 0x12345678,%xmm5
// CHECK: pmovzxbd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x31,0xed]
pmovzxbd %xmm5,%xmm5
// CHECK: pmovzxbq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x32,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovzxbq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovzxbq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x32,0x2d,0x45,0x00,0x00,0x00]
pmovzxbq 0x45,%xmm5
// CHECK: pmovzxbq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x32,0x2d,0xed,0x7e,0x00,0x00]
pmovzxbq 0x7eed,%xmm5
// CHECK: pmovzxbq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x32,0x2d,0xfe,0xca,0xbe,0xba]
pmovzxbq 0xbabecafe,%xmm5
// CHECK: pmovzxbq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x32,0x2d,0x78,0x56,0x34,0x12]
pmovzxbq 0x12345678,%xmm5
// CHECK: pmovzxbq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x32,0xed]
pmovzxbq %xmm5,%xmm5
// CHECK: pmovzxwd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x33,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovzxwd 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovzxwd 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x33,0x2d,0x45,0x00,0x00,0x00]
pmovzxwd 0x45,%xmm5
// CHECK: pmovzxwd 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x33,0x2d,0xed,0x7e,0x00,0x00]
pmovzxwd 0x7eed,%xmm5
// CHECK: pmovzxwd 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x33,0x2d,0xfe,0xca,0xbe,0xba]
pmovzxwd 0xbabecafe,%xmm5
// CHECK: pmovzxwd 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x33,0x2d,0x78,0x56,0x34,0x12]
pmovzxwd 0x12345678,%xmm5
// CHECK: pmovzxwd %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x33,0xed]
pmovzxwd %xmm5,%xmm5
// CHECK: pmovzxwq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x34,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovzxwq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovzxwq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x34,0x2d,0x45,0x00,0x00,0x00]
pmovzxwq 0x45,%xmm5
// CHECK: pmovzxwq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x34,0x2d,0xed,0x7e,0x00,0x00]
pmovzxwq 0x7eed,%xmm5
// CHECK: pmovzxwq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x34,0x2d,0xfe,0xca,0xbe,0xba]
pmovzxwq 0xbabecafe,%xmm5
// CHECK: pmovzxwq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x34,0x2d,0x78,0x56,0x34,0x12]
pmovzxwq 0x12345678,%xmm5
// CHECK: pmovzxwq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x34,0xed]
pmovzxwq %xmm5,%xmm5
// CHECK: pmovzxdq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x35,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmovzxdq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmovzxdq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x35,0x2d,0x45,0x00,0x00,0x00]
pmovzxdq 0x45,%xmm5
// CHECK: pmovzxdq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x35,0x2d,0xed,0x7e,0x00,0x00]
pmovzxdq 0x7eed,%xmm5
// CHECK: pmovzxdq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x35,0x2d,0xfe,0xca,0xbe,0xba]
pmovzxdq 0xbabecafe,%xmm5
// CHECK: pmovzxdq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x35,0x2d,0x78,0x56,0x34,0x12]
pmovzxdq 0x12345678,%xmm5
// CHECK: pmovzxdq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x35,0xed]
pmovzxdq %xmm5,%xmm5
// CHECK: pmuldq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x28,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmuldq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmuldq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x28,0x2d,0x45,0x00,0x00,0x00]
pmuldq 0x45,%xmm5
// CHECK: pmuldq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x28,0x2d,0xed,0x7e,0x00,0x00]
pmuldq 0x7eed,%xmm5
// CHECK: pmuldq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x28,0x2d,0xfe,0xca,0xbe,0xba]
pmuldq 0xbabecafe,%xmm5
// CHECK: pmuldq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x28,0x2d,0x78,0x56,0x34,0x12]
pmuldq 0x12345678,%xmm5
// CHECK: pmuldq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x28,0xed]
pmuldq %xmm5,%xmm5
// CHECK: pmulld 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x40,0xac,0xcb,0xef,0xbe,0xad,0xde]
pmulld 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pmulld 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x40,0x2d,0x45,0x00,0x00,0x00]
pmulld 0x45,%xmm5
// CHECK: pmulld 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x40,0x2d,0xed,0x7e,0x00,0x00]
pmulld 0x7eed,%xmm5
// CHECK: pmulld 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x40,0x2d,0xfe,0xca,0xbe,0xba]
pmulld 0xbabecafe,%xmm5
// CHECK: pmulld 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x40,0x2d,0x78,0x56,0x34,0x12]
pmulld 0x12345678,%xmm5
// CHECK: pmulld %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x40,0xed]
pmulld %xmm5,%xmm5
// CHECK: ptest 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x17,0xac,0xcb,0xef,0xbe,0xad,0xde]
ptest 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: ptest 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x17,0x2d,0x45,0x00,0x00,0x00]
ptest 0x45,%xmm5
// CHECK: ptest 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x17,0x2d,0xed,0x7e,0x00,0x00]
ptest 0x7eed,%xmm5
// CHECK: ptest 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x17,0x2d,0xfe,0xca,0xbe,0xba]
ptest 0xbabecafe,%xmm5
// CHECK: ptest 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x17,0x2d,0x78,0x56,0x34,0x12]
ptest 0x12345678,%xmm5
// CHECK: ptest %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x17,0xed]
ptest %xmm5,%xmm5
// CHECK: pcmpgtq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x37,0xac,0xcb,0xef,0xbe,0xad,0xde]
pcmpgtq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: pcmpgtq 69, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x37,0x2d,0x45,0x00,0x00,0x00]
pcmpgtq 0x45,%xmm5
// CHECK: pcmpgtq 32493, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x37,0x2d,0xed,0x7e,0x00,0x00]
pcmpgtq 0x7eed,%xmm5
// CHECK: pcmpgtq 3133065982, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x37,0x2d,0xfe,0xca,0xbe,0xba]
pcmpgtq 0xbabecafe,%xmm5
// CHECK: pcmpgtq 305419896, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x37,0x2d,0x78,0x56,0x34,0x12]
pcmpgtq 0x12345678,%xmm5
// CHECK: pcmpgtq %xmm5, %xmm5
// CHECK: encoding: [0x66,0x0f,0x38,0x37,0xed]
pcmpgtq %xmm5,%xmm5
// CHECK: crc32b %bl, %eax
// CHECK: encoding: [0xf2,0x0f,0x38,0xf0,0xc3]
crc32b %bl, %eax
// CHECK: crc32b 4(%ebx), %eax
// CHECK: encoding: [0xf2,0x0f,0x38,0xf0,0x43,0x04]
crc32b 4(%ebx), %eax
// CHECK: crc32w %bx, %eax
// CHECK: encoding: [0x66,0xf2,0x0f,0x38,0xf1,0xc3]
crc32w %bx, %eax
// CHECK: crc32w 4(%ebx), %eax
// CHECK: encoding: [0x66,0xf2,0x0f,0x38,0xf1,0x43,0x04]
crc32w 4(%ebx), %eax
// CHECK: crc32l %ebx, %eax
// CHECK: encoding: [0xf2,0x0f,0x38,0xf1,0xc3]
crc32l %ebx, %eax
// CHECK: crc32l 4(%ebx), %eax
// CHECK: encoding: [0xf2,0x0f,0x38,0xf1,0x43,0x04]
crc32l 4(%ebx), %eax
// CHECK: crc32l 3735928559(%ebx,%ecx,8), %ecx
// CHECK: encoding: [0xf2,0x0f,0x38,0xf1,0x8c,0xcb,0xef,0xbe,0xad,0xde]
crc32l 0xdeadbeef(%ebx,%ecx,8),%ecx
// CHECK: crc32l 69, %ecx
// CHECK: encoding: [0xf2,0x0f,0x38,0xf1,0x0d,0x45,0x00,0x00,0x00]
crc32l 0x45,%ecx
// CHECK: crc32l 32493, %ecx
// CHECK: encoding: [0xf2,0x0f,0x38,0xf1,0x0d,0xed,0x7e,0x00,0x00]
crc32l 0x7eed,%ecx
// CHECK: crc32l 3133065982, %ecx
// CHECK: encoding: [0xf2,0x0f,0x38,0xf1,0x0d,0xfe,0xca,0xbe,0xba]
crc32l 0xbabecafe,%ecx
// CHECK: crc32l %ecx, %ecx
// CHECK: encoding: [0xf2,0x0f,0x38,0xf1,0xc9]
crc32l %ecx,%ecx
// CHECK: pcmpistrm $125, %xmm1, %xmm2
// CHECK: encoding: [0x66,0x0f,0x3a,0x62,0xd1,0x7d]
pcmpistrm $125, %xmm1, %xmm2
// CHECK: pcmpistrm $125, (%edx,%eax,4), %xmm2
// CHECK: encoding: [0x66,0x0f,0x3a,0x62,0x14,0x82,0x7d]
pcmpistrm $125, (%edx,%eax,4), %xmm2
// CHECK: aesimc %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0x38,0xdb,0xc8]
aesimc %xmm0,%xmm1
// CHECK: aesimc (%eax), %xmm1
// CHECK: encoding: [0x66,0x0f,0x38,0xdb,0x08]
aesimc (%eax),%xmm1
// CHECK: aesenc %xmm1, %xmm2
// CHECK: encoding: [0x66,0x0f,0x38,0xdc,0xd1]
aesenc %xmm1,%xmm2
// CHECK: aesenc 4(%ebx), %xmm2
// CHECK: encoding: [0x66,0x0f,0x38,0xdc,0x53,0x04]
aesenc 4(%ebx),%xmm2
// CHECK: aesenclast %xmm3, %xmm4
// CHECK: encoding: [0x66,0x0f,0x38,0xdd,0xe3]
aesenclast %xmm3,%xmm4
// CHECK: aesenclast 4(%edx,%edi), %xmm4
// CHECK: encoding: [0x66,0x0f,0x38,0xdd,0x64,0x3a,0x04]
aesenclast 4(%edx,%edi),%xmm4
// CHECK: aesdec %xmm5, %xmm6
// CHECK: encoding: [0x66,0x0f,0x38,0xde,0xf5]
aesdec %xmm5,%xmm6
// CHECK: aesdec 4(%ecx,%eax,8), %xmm6
// CHECK: encoding: [0x66,0x0f,0x38,0xde,0x74,0xc1,0x04]
aesdec 4(%ecx,%eax,8),%xmm6
// CHECK: aesdeclast %xmm7, %xmm0
// CHECK: encoding: [0x66,0x0f,0x38,0xdf,0xc7]
aesdeclast %xmm7,%xmm0
// CHECK: aesdeclast 3405691582, %xmm0
// CHECK: encoding: [0x66,0x0f,0x38,0xdf,0x05,0xbe,0xba,0xfe,0xca]
aesdeclast 0xcafebabe,%xmm0
// CHECK: aeskeygenassist $125, %xmm1, %xmm2
// CHECK: encoding: [0x66,0x0f,0x3a,0xdf,0xd1,0x7d]
aeskeygenassist $125, %xmm1, %xmm2
// CHECK: aeskeygenassist $125, (%edx,%eax,4), %xmm2
// CHECK: encoding: [0x66,0x0f,0x3a,0xdf,0x14,0x82,0x7d]
aeskeygenassist $125, (%edx,%eax,4), %xmm2
// rdar://8017638
// CHECK: aeskeygenassist $128, %xmm1, %xmm2
// CHECK: encoding: [0x66,0x0f,0x3a,0xdf,0xd1,0x80]
aeskeygenassist $128, %xmm1, %xmm2
// rdar://7910087
// CHECK: bsfw %bx, %bx
// CHECK: encoding: [0x66,0x0f,0xbc,0xdb]
bsfw %bx, %bx
// CHECK: bsfw 3735928559(%ebx,%ecx,8), %bx
// CHECK: encoding: [0x66,0x0f,0xbc,0x9c,0xcb,0xef,0xbe,0xad,0xde]
bsfw 3735928559(%ebx,%ecx,8), %bx
// CHECK: bsrw %bx, %bx
// CHECK: encoding: [0x66,0x0f,0xbd,0xdb]
bsrw %bx, %bx
// CHECK: bsrw 305419896, %bx
// CHECK: encoding: [0x66,0x0f,0xbd,0x1d,0x78,0x56,0x34,0x12]
bsrw 305419896, %bx
// rdar://7901779
// CHECK: pushl $127
// CHECK: encoding: [0x6a,0x7f]
pushl $127
// CHECK: pushw $254
// CHECK: encoding: [0x66,0x68,0xfe,0x00]
pushw $254
// CHECK: pushl $254
// CHECK: encoding: [0x68,0xfe,0x00,0x00,0x00]
pushl $254
// rdar://7928400
// CHECK: movq %mm3, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x7f,0x9c,0xcb,0xef,0xbe,0xad,0xde]
movq %mm3, 3735928559(%ebx,%ecx,8)
// CHECK: movd %mm3, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x7e,0x9c,0xcb,0xef,0xbe,0xad,0xde]
movd %mm3, 3735928559(%ebx,%ecx,8)
// CHECK: movq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x7e,0xac,0xcb,0xef,0xbe,0xad,0xde]
movq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: movd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x6e,0xac,0xcb,0xef,0xbe,0xad,0xde]
movd 3735928559(%ebx,%ecx,8), %xmm5
// rdar://7914715
// CHECK: fcoml 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdc,0x94,0xcb,0xef,0xbe,0xad,0xde]
fcoml 3735928559(%ebx,%ecx,8)
// CHECK: fcoms 32493
// CHECK: encoding: [0xd8,0x15,0xed,0x7e,0x00,0x00]
fcoms 32493
// CHECK: fcompl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xdc,0x9c,0xcb,0xef,0xbe,0xad,0xde]
fcompl 3735928559(%ebx,%ecx,8)
// CHECK: fcomps 32493
// CHECK: encoding: [0xd8,0x1d,0xed,0x7e,0x00,0x00]
fcomps 32493
// CHECK: ficoml 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0x94,0xcb,0xef,0xbe,0xad,0xde]
ficoml 3735928559(%ebx,%ecx,8)
// CHECK: ficoms 32493
// CHECK: encoding: [0xde,0x15,0xed,0x7e,0x00,0x00]
ficoms 32493
// CHECK: ficompl 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xda,0x9c,0xcb,0xef,0xbe,0xad,0xde]
ficompl 3735928559(%ebx,%ecx,8)
// CHECK: ficomps 32493
// CHECK: encoding: [0xde,0x1d,0xed,0x7e,0x00,0x00]
ficomps 32493
// CHECK: movl 57005(,%eiz), %ebx
// CHECK: encoding: [0x8b,0x1c,0x25,0xad,0xde,0x00,0x00]
movl 57005(,%eiz), %ebx
// CHECK: movl 48879(,%eiz), %eax
// CHECK: encoding: [0x8b,0x04,0x25,0xef,0xbe,0x00,0x00]
movl 48879(,%eiz), %eax
// CHECK: movl -4(,%eiz,8), %eax
// CHECK: encoding: [0x8b,0x04,0xe5,0xfc,0xff,0xff,0xff]
movl -4(,%eiz,8), %eax
// CHECK: movl (%ecx,%eiz), %eax
// CHECK: encoding: [0x8b,0x04,0x21]
movl (%ecx,%eiz), %eax
// CHECK: movl (%ecx,%eiz,8), %eax
// CHECK: encoding: [0x8b,0x04,0xe1]
movl (%ecx,%eiz,8), %eax
// CHECK: addl $4294967295, %eax # encoding: [0x83,0xc0,0xff]
addl $0xFFFFFFFF, %eax
// CHECK: addw $65535, %ax # encoding: [0x66,0x83,0xc0,0xff]
addw $0xFFFF, %ax
// CHECK: pushf
pushfl
// CHECK: pushfl
pushfl
// CHECK: popf
popfl
// CHECK: popfl
popfl
// CHECK: rcll $0, 3735928559(%ebx,%ecx,8)
rcll $0,0xdeadbeef(%ebx,%ecx,8)
// CHECK: rcll $0, 69
rcll $0,0x45
// CHECK: rcll $0, 32493
rcll $0,0x7eed
// CHECK: rcll $0, 3133065982
rcll $0,0xbabecafe
// CHECK: rcll $0, 305419896
rcll $0,0x12345678
// CHECK: rclb $127, 3735928559(%ebx,%ecx,8)
rclb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: rclb $127, 69
rclb $0x7f,0x45
// CHECK: rclb $127, 32493
rclb $0x7f,0x7eed
// CHECK: rclb $127, 3133065982
rclb $0x7f,0xbabecafe
// CHECK: rclb $127, 305419896
rclb $0x7f,0x12345678
// CHECK: rcrl $0, 3735928559(%ebx,%ecx,8)
rcrl $0,0xdeadbeef(%ebx,%ecx,8)
// CHECK: rcrl $0, 69
rcrl $0,0x45
// CHECK: rcrl $0, 32493
rcrl $0,0x7eed
// CHECK: rcrl $0, 3133065982
rcrl $0,0xbabecafe
// CHECK: rcrl $0, 305419896
rcrl $0,0x12345678
// CHECK: rcrb $127, 3735928559(%ebx,%ecx,8)
rcrb $0x7f,0xdeadbeef(%ebx,%ecx,8)
// CHECK: rcrb $127, 69
rcrb $0x7f,0x45
// CHECK: rcrb $127, 32493
rcrb $0x7f,0x7eed
// CHECK: rcrb $127, 3133065982
rcrb $0x7f,0xbabecafe
// CHECK: rcrb $127, 305419896
rcrb $0x7f,0x12345678
// CHECK: calll 3133065982
calll 0xbabecafe
// CHECK: calll *3735928559(%ebx,%ecx,8)
calll *0xdeadbeef(%ebx,%ecx,8)
// CHECK: calll 305419896
calll 0x12345678
// CHECK: calll *3135175374
call *0xbadeface
// CHECK: calll *3735928559(%ebx,%ecx,8)
call *0xdeadbeef(%ebx,%ecx,8)
// CHECK: calll 32493
call 0x7eed
// CHECK: calll 3133065982
call 0xbabecafe
// CHECK: calll 305419896
call 0x12345678
// CHECK: calll *3135175374
call *0xbadeface
// CHECK: lcallw *32493
lcallw *0x7eed
// CHECK: jmp 32493
jmp 0x7eed
// CHECK: jmp 3133065982
jmp 0xbabecafe
// CHECK: jmp 305419896
jmp 0x12345678
// CHECK: jmpl *3735928559(%ebx,%ecx,8)
jmp *0xdeadbeef(%ebx,%ecx,8)
// CHECK: jmp 32493
jmp 0x7eed
// CHECK: jmp 3133065982
jmp 0xbabecafe
// CHECK: jmp 305419896
jmp 0x12345678
// CHECK: jmpl *3135175374
jmp *0xbadeface
// CHECK: jmpl *3735928559(%ebx,%ecx,8)
jmp *0xdeadbeef(%ebx,%ecx,8)
// CHECK: jmp 32493
jmp 0x7eed
// CHECK: jmp 3133065982
jmp 0xbabecafe
// CHECK: jmp 305419896
jmp 0x12345678
// CHECK: jmpl *3135175374
jmp *0xbadeface
// CHECK: ljmpl *3735928559(%ebx,%ecx,8)
ljmpl *0xdeadbeef(%ebx,%ecx,8)
// CHECK: ljmpw *32493
ljmpw *0x7eed
// CHECK: ljmpl *3133065982
ljmpl *0xbabecafe
// CHECK: ljmpl *305419896
ljmpl *0x12345678
// CHECK: enter $31438, $127
enter $0x7ace,$0x7f
// CHECK: jo 32493
jo 0x7eed
// CHECK: jo 3133065982
jo 0xbabecafe
// CHECK: jo 305419896
jo 0x12345678
// CHECK: jno 32493
jno 0x7eed
// CHECK: jno 3133065982
jno 0xbabecafe
// CHECK: jno 305419896
jno 0x12345678
// CHECK: jb 32493
jb 0x7eed
// CHECK: jb 3133065982
jb 0xbabecafe
// CHECK: jb 305419896
jb 0x12345678
// CHECK: jae 32493
jae 0x7eed
// CHECK: jae 3133065982
jae 0xbabecafe
// CHECK: jae 305419896
jae 0x12345678
// CHECK: je 32493
je 0x7eed
// CHECK: je 3133065982
je 0xbabecafe
// CHECK: je 305419896
je 0x12345678
// CHECK: jne 32493
jne 0x7eed
// CHECK: jne 3133065982
jne 0xbabecafe
// CHECK: jne 305419896
jne 0x12345678
// CHECK: jbe 32493
jbe 0x7eed
// CHECK: jbe 3133065982
jbe 0xbabecafe
// CHECK: jbe 305419896
jbe 0x12345678
// CHECK: ja 32493
ja 0x7eed
// CHECK: ja 3133065982
ja 0xbabecafe
// CHECK: ja 305419896
ja 0x12345678
// CHECK: js 32493
js 0x7eed
// CHECK: js 3133065982
js 0xbabecafe
// CHECK: js 305419896
js 0x12345678
// CHECK: jns 32493
jns 0x7eed
// CHECK: jns 3133065982
jns 0xbabecafe
// CHECK: jns 305419896
jns 0x12345678
// CHECK: jp 32493
jp 0x7eed
// CHECK: jp 3133065982
jp 0xbabecafe
// CHECK: jp 305419896
jp 0x12345678
// CHECK: jnp 32493
jnp 0x7eed
// CHECK: jnp 3133065982
jnp 0xbabecafe
// CHECK: jnp 305419896
jnp 0x12345678
// CHECK: jl 32493
jl 0x7eed
// CHECK: jl 3133065982
jl 0xbabecafe
// CHECK: jl 305419896
jl 0x12345678
// CHECK: jge 32493
jge 0x7eed
// CHECK: jge 3133065982
jge 0xbabecafe
// CHECK: jge 305419896
jge 0x12345678
// CHECK: jle 32493
jle 0x7eed
// CHECK: jle 3133065982
jle 0xbabecafe
// CHECK: jle 305419896
jle 0x12345678
// CHECK: jg 32493
jg 0x7eed
// CHECK: jg 3133065982
jg 0xbabecafe
// CHECK: jg 305419896
jg 0x12345678
// CHECK: int $127
int $0x7f
// CHECK: pause
pause
// CHECK: sfence
sfence
// CHECK: lfence
lfence
// CHECK: mfence
mfence
// CHECK: monitor
monitor
// CHECK: mwait
mwait
// CHECK: vmcall
vmcall
// CHECK: vmfunc
vmfunc
// CHECK: vmlaunch
vmlaunch
// CHECK: vmresume
vmresume
// CHECK: vmxoff
vmxoff
// CHECK: vmxon 3735928559(%ebx,%ecx,8)
vmxon 0xdeadbeef(%ebx,%ecx,8)
// CHECK: vmxon 32493
vmxon 0x7eed
// CHECK: vmxon 3133065982
vmxon 0xbabecafe
// CHECK: vmxon 305419896
vmxon 0x12345678
// CHECK: vmrun %eax
vmrun %eax
// CHECK: vmmcall
vmmcall
// CHECK: vmload %eax
vmload %eax
// CHECK: vmsave %eax
vmsave %eax
// CHECK: stgi
stgi
// CHECK: clgi
clgi
// CHECK: skinit %eax
skinit %eax
// CHECK: invlpga %eax, %ecx
invlpga %eax, %ecx
// CHECK: blendvps %xmm0, (%eax), %xmm1 # encoding: [0x66,0x0f,0x38,0x14,0x08]
blendvps (%eax), %xmm1
// CHECK: blendvps %xmm0, %xmm2, %xmm1 # encoding: [0x66,0x0f,0x38,0x14,0xca]
blendvps %xmm2, %xmm1
// These instructions can take an unsigned 8-bit mask as well as a signed 8-bit
// immediate. Check both forms here.
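// Note: the two spellings are the same immediate byte, since the value is
// reduced modulo 256. For instance -64 is 0xc0 in two's complement, i.e. the
// unsigned value 192, which is what the assembler prints back in the checks
// that follow; 0x81 likewise prints as 129.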
// CHECK: blendps $129, %xmm2, %xmm1
blendps $0x81, %xmm2, %xmm1
// CHECK: blendps $192, %xmm2, %xmm1
blendps $-64, %xmm2, %xmm1
// CHECK: blendpd $129, %xmm2, %xmm1
blendpd $0x81, %xmm2, %xmm1
// CHECK: blendpd $192, %xmm2, %xmm1
blendpd $-64, %xmm2, %xmm1
// CHECK: pblendw $129, %xmm2, %xmm1
pblendw $0x81, %xmm2, %xmm1
// CHECK: pblendw $192, %xmm2, %xmm1
pblendw $-64, %xmm2, %xmm1
// CHECK: mpsadbw $129, %xmm2, %xmm1
mpsadbw $0x81, %xmm2, %xmm1
// CHECK: mpsadbw $192, %xmm2, %xmm1
mpsadbw $-64, %xmm2, %xmm1
// CHECK: dpps $129, %xmm2, %xmm1
dpps $0x81, %xmm2, %xmm1
// CHECK: dpps $192, %xmm2, %xmm1
dpps $-64, %xmm2, %xmm1
// CHECK: dppd $129, %xmm2, %xmm1
dppd $0x81, %xmm2, %xmm1
// CHECK: dppd $192, %xmm2, %xmm1
dppd $-64, %xmm2, %xmm1
// CHECK: insertps $129, %xmm2, %xmm1
insertps $0x81, %xmm2, %xmm1
// CHECK: insertps $192, %xmm2, %xmm1
insertps $-64, %xmm2, %xmm1
// PR13253: handle the implicit optional third argument, which must always be xmm0
// CHECK: pblendvb %xmm0, %xmm2, %xmm1
pblendvb %xmm2, %xmm1
// CHECK: pblendvb %xmm0, %xmm2, %xmm1
pblendvb %xmm0, %xmm2, %xmm1
// CHECK: pblendvb %xmm0, (%eax), %xmm1
pblendvb (%eax), %xmm1
// CHECK: pblendvb %xmm0, (%eax), %xmm1
pblendvb %xmm0, (%eax), %xmm1
// CHECK: blendvpd %xmm0, %xmm2, %xmm1
blendvpd %xmm2, %xmm1
// CHECK: blendvpd %xmm0, %xmm2, %xmm1
blendvpd %xmm0, %xmm2, %xmm1
// CHECK: blendvpd %xmm0, (%eax), %xmm1
blendvpd (%eax), %xmm1
// CHECK: blendvpd %xmm0, (%eax), %xmm1
blendvpd %xmm0, (%eax), %xmm1
// CHECK: blendvps %xmm0, %xmm2, %xmm1
blendvps %xmm2, %xmm1
// CHECK: blendvps %xmm0, %xmm2, %xmm1
blendvps %xmm0, %xmm2, %xmm1
// CHECK: blendvps %xmm0, (%eax), %xmm1
blendvps (%eax), %xmm1
// CHECK: blendvps %xmm0, (%eax), %xmm1
blendvps %xmm0, (%eax), %xmm1
// CHECK: btl $4, (%eax)
// CHECK: btw $4, (%eax)
// CHECK: btl $4, (%eax)
// CHECK: btsl $4, (%eax)
// CHECK: btsw $4, (%eax)
// CHECK: btsl $4, (%eax)
// CHECK: btrl $4, (%eax)
// CHECK: btrw $4, (%eax)
// CHECK: btrl $4, (%eax)
// CHECK: btcl $4, (%eax)
// CHECK: btcw $4, (%eax)
// CHECK: btcl $4, (%eax)
bt $4, (%eax)
btw $4, (%eax)
btl $4, (%eax)
bts $4, (%eax)
btsw $4, (%eax)
btsl $4, (%eax)
btr $4, (%eax)
btrw $4, (%eax)
btrl $4, (%eax)
btc $4, (%eax)
btcw $4, (%eax)
btcl $4, (%eax)
// CHECK: clflushopt 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0xae,0xbc,0xcb,0xef,0xbe,0xad,0xde]
clflushopt 0xdeadbeef(%ebx,%ecx,8)
// CHECK: clflushopt 32493
// CHECK: encoding: [0x66,0x0f,0xae,0x3d,0xed,0x7e,0x00,0x00]
clflushopt 0x7eed
// CHECK: clflushopt 3133065982
// CHECK: encoding: [0x66,0x0f,0xae,0x3d,0xfe,0xca,0xbe,0xba]
clflushopt 0xbabecafe
// CHECK: clflushopt 305419896
// CHECK: encoding: [0x66,0x0f,0xae,0x3d,0x78,0x56,0x34,0x12]
clflushopt 0x12345678
// CHECK: clwb 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x66,0x0f,0xae,0xb4,0xcb,0xef,0xbe,0xad,0xde]
clwb 0xdeadbeef(%ebx,%ecx,8)
// CHECK: clwb 32493
// CHECK: encoding: [0x66,0x0f,0xae,0x35,0xed,0x7e,0x00,0x00]
clwb 0x7eed
// CHECK: clwb 3133065982
// CHECK: encoding: [0x66,0x0f,0xae,0x35,0xfe,0xca,0xbe,0xba]
clwb 0xbabecafe
// CHECK: clwb 305419896
// CHECK: encoding: [0x66,0x0f,0xae,0x35,0x78,0x56,0x34,0x12]
clwb 0x12345678
// CHECK: xsave 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0xa4,0xcb,0xef,0xbe,0xad,0xde]
xsave 0xdeadbeef(%ebx,%ecx,8)
// CHECK: xsave 32493
// CHECK: encoding: [0x0f,0xae,0x25,0xed,0x7e,0x00,0x00]
xsave 0x7eed
// CHECK: xsave 3133065982
// CHECK: encoding: [0x0f,0xae,0x25,0xfe,0xca,0xbe,0xba]
xsave 0xbabecafe
// CHECK: xsave 305419896
// CHECK: encoding: [0x0f,0xae,0x25,0x78,0x56,0x34,0x12]
xsave 0x12345678
// CHECK: xrstor 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0xac,0xcb,0xef,0xbe,0xad,0xde]
xrstor 0xdeadbeef(%ebx,%ecx,8)
// CHECK: xrstor 32493
// CHECK: encoding: [0x0f,0xae,0x2d,0xed,0x7e,0x00,0x00]
xrstor 0x7eed
// CHECK: xrstor 3133065982
// CHECK: encoding: [0x0f,0xae,0x2d,0xfe,0xca,0xbe,0xba]
xrstor 0xbabecafe
// CHECK: xrstor 305419896
// CHECK: encoding: [0x0f,0xae,0x2d,0x78,0x56,0x34,0x12]
xrstor 0x12345678
// CHECK: xsaveopt 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0xb4,0xcb,0xef,0xbe,0xad,0xde]
xsaveopt 0xdeadbeef(%ebx,%ecx,8)
// CHECK: xsaveopt 32493
// CHECK: encoding: [0x0f,0xae,0x35,0xed,0x7e,0x00,0x00]
xsaveopt 0x7eed
// CHECK: xsaveopt 3133065982
// CHECK: encoding: [0x0f,0xae,0x35,0xfe,0xca,0xbe,0xba]
xsaveopt 0xbabecafe
// CHECK: xsaveopt 305419896
// CHECK: encoding: [0x0f,0xae,0x35,0x78,0x56,0x34,0x12]
xsaveopt 0x12345678
// CHECK: xsaves 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xc7,0xac,0xcb,0xef,0xbe,0xad,0xde]
xsaves 0xdeadbeef(%ebx,%ecx,8)
// CHECK: xsaves 32493
// CHECK: encoding: [0x0f,0xc7,0x2d,0xed,0x7e,0x00,0x00]
xsaves 0x7eed
// CHECK: xsaves 3133065982
// CHECK: encoding: [0x0f,0xc7,0x2d,0xfe,0xca,0xbe,0xba]
xsaves 0xbabecafe
// CHECK: xsaves 305419896
// CHECK: encoding: [0x0f,0xc7,0x2d,0x78,0x56,0x34,0x12]
xsaves 0x12345678
// CHECK: xsavec 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xc7,0xa4,0xcb,0xef,0xbe,0xad,0xde]
xsavec 0xdeadbeef(%ebx,%ecx,8)
// CHECK: xsavec 32493
// CHECK: encoding: [0x0f,0xc7,0x25,0xed,0x7e,0x00,0x00]
xsavec 0x7eed
// CHECK: xsavec 3133065982
// CHECK: encoding: [0x0f,0xc7,0x25,0xfe,0xca,0xbe,0xba]
xsavec 0xbabecafe
// CHECK: xsavec 305419896
// CHECK: encoding: [0x0f,0xc7,0x25,0x78,0x56,0x34,0x12]
xsavec 0x12345678
// CHECK: xrstors 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xc7,0x9c,0xcb,0xef,0xbe,0xad,0xde]
xrstors 0xdeadbeef(%ebx,%ecx,8)
// CHECK: xrstors 32493
// CHECK: encoding: [0x0f,0xc7,0x1d,0xed,0x7e,0x00,0x00]
xrstors 0x7eed
// CHECK: xrstors 3133065982
// CHECK: encoding: [0x0f,0xc7,0x1d,0xfe,0xca,0xbe,0xba]
xrstors 0xbabecafe
// CHECK: xrstors 305419896
// CHECK: encoding: [0x0f,0xc7,0x1d,0x78,0x56,0x34,0x12]
xrstors 0x12345678
// CHECK: getsec
// CHECK: encoding: [0x0f,0x37]
getsec
// CHECK: monitorx
// CHECK: encoding: [0x0f,0x01,0xfa]
monitorx
// CHECK: monitorx
// CHECK: encoding: [0x0f,0x01,0xfa]
monitorx %eax, %ecx, %edx
// CHECK: mwaitx
// CHECK: encoding: [0x0f,0x01,0xfb]
mwaitx
// CHECK: mwaitx
// CHECK: encoding: [0x0f,0x01,0xfb]
mwaitx %eax, %ecx, %ebx
// CHECK: clzero
// CHECK: encoding: [0x0f,0x01,0xfc]
clzero
// CHECK: lock addl %esi, (%edi)
// INTEL: lock add dword ptr [edi], esi
// CHECK: encoding: [0xf0,0x01,0x37]
lock add %esi, (%edi)
// CHECK: cldemote 4(%eax)
// CHECK: encoding: [0x0f,0x1c,0x40,0x04]
cldemote 4(%eax)
// CHECK: cldemote 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0x1c,0x84,0xcb,0xef,0xbe,0xad,0xde]
cldemote 0xdeadbeef(%ebx,%ecx,8)
// CHECK: umonitor %eax
// CHECK: encoding: [0xf3,0x0f,0xae,0xf0]
umonitor %eax
// CHECK: umonitor %ax
// CHECK: encoding: [0x67,0xf3,0x0f,0xae,0xf0]
umonitor %ax
// CHECK: umwait %eax
// CHECK: encoding: [0xf2,0x0f,0xae,0xf0]
umwait %eax
// CHECK: tpause %eax
// CHECK: encoding: [0x66,0x0f,0xae,0xf0]
tpause %eax
// CHECK: movdiri %eax, 64(%edx,%edi)
// CHECK: # encoding: [0x0f,0x38,0xf9,0x44,0x3a,0x40]
movdiri %eax, 64(%edx,%edi)
// CHECK: movdir64b 485498096, %ecx
// CHECK: # encoding: [0x66,0x0f,0x38,0xf8,0x0d,0xf0,0x1c,0xf0,0x1c]
movdir64b 485498096, %ecx
// CHECK: movdir64b 485498096, %cx
// CHECK: # encoding: [0x67,0x66,0x0f,0x38,0xf8,0x0d,0xf0,0x1c,0xf0,0x1c]
movdir64b 485498096, %cx
// CHECK: movdir64b (%edx), %eax
// CHECK: # encoding: [0x66,0x0f,0x38,0xf8,0x02]
movdir64b (%edx), %eax
// CHECK: movdir64b (%esi), %eax
// CHECK: # encoding: [0x66,0x0f,0x38,0xf8,0x06]
movdir64b (%esi), %eax
// CHECK: movdir64b (%si), %ax
// CHECK: # encoding: [0x67,0x66,0x0f,0x38,0xf8,0x04]
movdir64b (%si), %ax
// CHECK: pconfig
// CHECK: # encoding: [0x0f,0x01,0xc5]
pconfig
// CHECK: encls
// CHECK: encoding: [0x0f,0x01,0xcf]
encls
// CHECK: enclu
// CHECK: encoding: [0x0f,0x01,0xd7]
enclu
// CHECK: enclv
// CHECK: encoding: [0x0f,0x01,0xc0]
enclv
Index: vendor/llvm/dist-release_80/test/MC/X86/x86-32.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/x86-32.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/x86-32.s (revision 344166)
@@ -1,1107 +1,1107 @@
// RUN: llvm-mc -triple i386-unknown-unknown --show-encoding %s | FileCheck %s
pause
// CHECK: pause
// CHECK: encoding: [0xf3,0x90]
sfence
// CHECK: sfence
// CHECK: encoding: [0x0f,0xae,0xf8]
lfence
// CHECK: lfence
// CHECK: encoding: [0x0f,0xae,0xe8]
mfence
// CHECK: mfence
// CHECK: encoding: [0x0f,0xae,0xf0]
monitor
// CHECK: monitor
// CHECK: encoding: [0x0f,0x01,0xc8]
monitor %eax, %ecx, %edx
// CHECK: monitor
// CHECK: encoding: [0x0f,0x01,0xc8]
mwait
// CHECK: mwait
// CHECK: encoding: [0x0f,0x01,0xc9]
mwait %eax, %ecx
// CHECK: mwait
// CHECK: encoding: [0x0f,0x01,0xc9]
vmcall
// CHECK: vmcall
// CHECK: encoding: [0x0f,0x01,0xc1]
vmfunc
// CHECK: vmfunc
// CHECK: encoding: [0x0f,0x01,0xd4]
vmlaunch
// CHECK: vmlaunch
// CHECK: encoding: [0x0f,0x01,0xc2]
vmresume
// CHECK: vmresume
// CHECK: encoding: [0x0f,0x01,0xc3]
vmxoff
// CHECK: vmxoff
// CHECK: encoding: [0x0f,0x01,0xc4]
swapgs
// CHECK: swapgs
// CHECK: encoding: [0x0f,0x01,0xf8]
vmrun %eax
// CHECK: vmrun %eax
// CHECK: encoding: [0x0f,0x01,0xd8]
vmmcall
// CHECK: vmmcall
// CHECK: encoding: [0x0f,0x01,0xd9]
vmload %eax
// CHECK: vmload %eax
// CHECK: encoding: [0x0f,0x01,0xda]
vmsave %eax
// CHECK: vmsave %eax
// CHECK: encoding: [0x0f,0x01,0xdb]
stgi
// CHECK: stgi
// CHECK: encoding: [0x0f,0x01,0xdc]
clgi
// CHECK: clgi
// CHECK: encoding: [0x0f,0x01,0xdd]
skinit %eax
// CHECK: skinit %eax
// CHECK: encoding: [0x0f,0x01,0xde]
invlpga %eax, %ecx
// CHECK: invlpga %eax, %ecx
// CHECK: encoding: [0x0f,0x01,0xdf]
rdtscp
// CHECK: rdtscp
// CHECK: encoding: [0x0f,0x01,0xf9]
// CHECK: movl %eax, 16(%ebp) # encoding: [0x89,0x45,0x10]
movl %eax, 16(%ebp)
// CHECK: movl %eax, -16(%ebp) # encoding: [0x89,0x45,0xf0]
movl %eax, -16(%ebp)
// CHECK: testb %bl, %cl # encoding: [0x84,0xd9]
testb %bl, %cl
// CHECK: cmpl %eax, %ebx # encoding: [0x39,0xc3]
cmpl %eax, %ebx
// CHECK: addw %ax, %ax # encoding: [0x66,0x01,0xc0]
addw %ax, %ax
// CHECK: shrl %eax # encoding: [0xd1,0xe8]
shrl $1, %eax
// CHECK: shll %eax # encoding: [0xd1,0xe0]
sall $1, %eax
// CHECK: shll %eax # encoding: [0xd1,0xe0]
sal $1, %eax
// moffset forms of moves, rdar://7947184
movb 0, %al // CHECK: movb 0, %al # encoding: [0xa0,0x00,0x00,0x00,0x00]
movw 0, %ax // CHECK: movw 0, %ax # encoding: [0x66,0xa1,0x00,0x00,0x00,0x00]
movl 0, %eax // CHECK: movl 0, %eax # encoding: [0xa1,0x00,0x00,0x00,0x00]
// rdar://7973775
into
// CHECK: into
// CHECK: encoding: [0xce]
int3
// CHECK: int3
// CHECK: encoding: [0xcc]
int $4
// CHECK: int $4
// CHECK: encoding: [0xcd,0x04]
int $255
// CHECK: int $255
// CHECK: encoding: [0xcd,0xff]
// CHECK: pushfl # encoding: [0x9c]
pushf
// CHECK: pushfl # encoding: [0x9c]
pushfl
// CHECK: popfl # encoding: [0x9d]
popf
// CHECK: popfl # encoding: [0x9d]
popfl
// rdar://8014869
retl
// CHECK: ret
// CHECK: encoding: [0xc3]
// rdar://7973854
// CHECK: cmoval %eax, %edx
// CHECK: encoding: [0x0f,0x47,0xd0]
cmoval %eax,%edx
// CHECK: cmovael %eax, %edx
// CHECK: encoding: [0x0f,0x43,0xd0]
cmovael %eax,%edx
// CHECK: cmovbel %eax, %edx
// CHECK: encoding: [0x0f,0x46,0xd0]
cmovbel %eax,%edx
// CHECK: cmovbl %eax, %edx
// CHECK: encoding: [0x0f,0x42,0xd0]
cmovbl %eax,%edx
// CHECK: cmovbw %bx, %bx
cmovnae %bx,%bx
// CHECK: cmovbel %eax, %edx
// CHECK: encoding: [0x0f,0x46,0xd0]
cmovbel %eax,%edx
// CHECK: cmovbl %eax, %edx
// CHECK: encoding: [0x0f,0x42,0xd0]
cmovcl %eax,%edx
// CHECK: cmovel %eax, %edx
// CHECK: encoding: [0x0f,0x44,0xd0]
cmovel %eax,%edx
// CHECK: cmovgl %eax, %edx
// CHECK: encoding: [0x0f,0x4f,0xd0]
cmovgl %eax,%edx
// CHECK: cmovgel %eax, %edx
// CHECK: encoding: [0x0f,0x4d,0xd0]
cmovgel %eax,%edx
// CHECK: cmovll %eax, %edx
// CHECK: encoding: [0x0f,0x4c,0xd0]
cmovll %eax,%edx
// CHECK: cmovlel %eax, %edx
// CHECK: encoding: [0x0f,0x4e,0xd0]
cmovlel %eax,%edx
// CHECK: cmovbel %eax, %edx
// CHECK: encoding: [0x0f,0x46,0xd0]
cmovnal %eax,%edx
// CHECK: cmovnel %eax, %edx
// CHECK: encoding: [0x0f,0x45,0xd0]
cmovnel %eax,%edx
// CHECK: cmovael %eax, %edx
// CHECK: encoding: [0x0f,0x43,0xd0]
cmovnbl %eax,%edx
// CHECK: cmoval %eax, %edx
// CHECK: encoding: [0x0f,0x47,0xd0]
cmovnbel %eax,%edx
// CHECK: cmovael %eax, %edx
// CHECK: encoding: [0x0f,0x43,0xd0]
cmovncl %eax,%edx
// CHECK: cmovnel %eax, %edx
// CHECK: encoding: [0x0f,0x45,0xd0]
cmovnel %eax,%edx
// CHECK: cmovlel %eax, %edx
// CHECK: encoding: [0x0f,0x4e,0xd0]
cmovngl %eax,%edx
// CHECK: cmovgel %eax, %edx
// CHECK: encoding: [0x0f,0x4d,0xd0]
cmovnl %eax,%edx
// CHECK: cmovnel %eax, %edx
// CHECK: encoding: [0x0f,0x45,0xd0]
cmovnel %eax,%edx
// CHECK: cmovlel %eax, %edx
// CHECK: encoding: [0x0f,0x4e,0xd0]
cmovngl %eax,%edx
// CHECK: cmovll %eax, %edx
// CHECK: encoding: [0x0f,0x4c,0xd0]
cmovngel %eax,%edx
// CHECK: cmovgel %eax, %edx
// CHECK: encoding: [0x0f,0x4d,0xd0]
cmovnll %eax,%edx
// CHECK: cmovgl %eax, %edx
// CHECK: encoding: [0x0f,0x4f,0xd0]
cmovnlel %eax,%edx
// CHECK: cmovnol %eax, %edx
// CHECK: encoding: [0x0f,0x41,0xd0]
cmovnol %eax,%edx
// CHECK: cmovnpl %eax, %edx
// CHECK: encoding: [0x0f,0x4b,0xd0]
cmovnpl %eax,%edx
// CHECK: cmovnsl %eax, %edx
// CHECK: encoding: [0x0f,0x49,0xd0]
cmovnsl %eax,%edx
// CHECK: cmovnel %eax, %edx
// CHECK: encoding: [0x0f,0x45,0xd0]
cmovnzl %eax,%edx
// CHECK: cmovol %eax, %edx
// CHECK: encoding: [0x0f,0x40,0xd0]
cmovol %eax,%edx
// CHECK: cmovpl %eax, %edx
// CHECK: encoding: [0x0f,0x4a,0xd0]
cmovpl %eax,%edx
// CHECK: cmovsl %eax, %edx
// CHECK: encoding: [0x0f,0x48,0xd0]
cmovsl %eax,%edx
// CHECK: cmovel %eax, %edx
// CHECK: encoding: [0x0f,0x44,0xd0]
cmovzl %eax,%edx
// CHECK: cmpps $0, %xmm0, %xmm1
// CHECK: encoding: [0x0f,0xc2,0xc8,0x00]
cmpps $0, %xmm0, %xmm1
// CHECK: cmpps $0, (%eax), %xmm1
// CHECK: encoding: [0x0f,0xc2,0x08,0x00]
cmpps $0, 0(%eax), %xmm1
// CHECK: cmppd $0, %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0xc2,0xc8,0x00]
cmppd $0, %xmm0, %xmm1
// CHECK: cmppd $0, (%eax), %xmm1
// CHECK: encoding: [0x66,0x0f,0xc2,0x08,0x00]
cmppd $0, 0(%eax), %xmm1
// CHECK: cmpss $0, %xmm0, %xmm1
// CHECK: encoding: [0xf3,0x0f,0xc2,0xc8,0x00]
cmpss $0, %xmm0, %xmm1
// CHECK: cmpss $0, (%eax), %xmm1
// CHECK: encoding: [0xf3,0x0f,0xc2,0x08,0x00]
cmpss $0, 0(%eax), %xmm1
// CHECK: cmpsd $0, %xmm0, %xmm1
// CHECK: encoding: [0xf2,0x0f,0xc2,0xc8,0x00]
cmpsd $0, %xmm0, %xmm1
// CHECK: cmpsd $0, (%eax), %xmm1
// CHECK: encoding: [0xf2,0x0f,0xc2,0x08,0x00]
cmpsd $0, 0(%eax), %xmm1
// Check matching of instructions which embed the SSE comparison code.
// CHECK: cmpeqps %xmm0, %xmm1
// CHECK: encoding: [0x0f,0xc2,0xc8,0x00]
cmpeqps %xmm0, %xmm1
// CHECK: cmpltpd %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0xc2,0xc8,0x01]
cmpltpd %xmm0, %xmm1
// CHECK: cmpless %xmm0, %xmm1
// CHECK: encoding: [0xf3,0x0f,0xc2,0xc8,0x02]
cmpless %xmm0, %xmm1
// CHECK: cmpunordpd %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0xc2,0xc8,0x03]
cmpunordpd %xmm0, %xmm1
// CHECK: cmpneqps %xmm0, %xmm1
// CHECK: encoding: [0x0f,0xc2,0xc8,0x04]
cmpneqps %xmm0, %xmm1
// CHECK: cmpnltpd %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0xc2,0xc8,0x05]
cmpnltpd %xmm0, %xmm1
// CHECK: cmpnless %xmm0, %xmm1
// CHECK: encoding: [0xf3,0x0f,0xc2,0xc8,0x06]
cmpnless %xmm0, %xmm1
// CHECK: cmpordsd %xmm0, %xmm1
// CHECK: encoding: [0xf2,0x0f,0xc2,0xc8,0x07]
cmpordsd %xmm0, %xmm1
// rdar://7995856
// CHECK: fmul %st(0)
// CHECK: encoding: [0xd8,0xc8]
fmul %st(0), %st
// CHECK: fadd %st(0)
// CHECK: encoding: [0xd8,0xc0]
fadd %st(0), %st
// CHECK: fsub %st(0)
// CHECK: encoding: [0xd8,0xe0]
fsub %st(0), %st
// CHECK: fsubr %st(0)
// CHECK: encoding: [0xd8,0xe8]
fsubr %st(0), %st
// CHECK: fdivr %st(0)
// CHECK: encoding: [0xd8,0xf8]
fdivr %st(0), %st
// CHECK: fdiv %st(0)
// CHECK: encoding: [0xd8,0xf0]
fdiv %st(0), %st
// rdar://8017519
// CHECK: movl %cs, %eax
// CHECK: encoding: [0x8c,0xc8]
movl %cs, %eax
// CHECK: movw %cs, %ax
// CHECK: encoding: [0x66,0x8c,0xc8]
movw %cs, %ax
// CHECK: movw %cs, (%eax)
// CHECK: encoding: [0x8c,0x08]
mov %cs, (%eax)
// CHECK: movw %cs, (%eax)
// CHECK: encoding: [0x8c,0x08]
movw %cs, (%eax)
// CHECK: movl %eax, %cs
// CHECK: encoding: [0x8e,0xc8]
movl %eax, %cs
// CHECK: movl %eax, %cs
// CHECK: encoding: [0x8e,0xc8]
movw %ax, %cs
// CHECK: movl %eax, %cs
// CHECK: encoding: [0x8e,0xc8]
mov %eax, %cs
// CHECK: movl %eax, %cs
// CHECK: encoding: [0x8e,0xc8]
mov %ax, %cs
// CHECK: movw (%eax), %cs
// CHECK: encoding: [0x8e,0x08]
mov (%eax), %cs
// CHECK: movw (%eax), %cs
// CHECK: encoding: [0x8e,0x08]
movw (%eax), %cs
// rdar://8033374
// CHECK: movl %cr0, %eax
// CHECK: encoding: [0x0f,0x20,0xc0]
movl %cr0,%eax
// CHECK: movl %cr1, %eax
// CHECK: encoding: [0x0f,0x20,0xc8]
movl %cr1,%eax
// CHECK: movl %cr2, %eax
// CHECK: encoding: [0x0f,0x20,0xd0]
movl %cr2,%eax
// CHECK: movl %cr3, %eax
// CHECK: encoding: [0x0f,0x20,0xd8]
movl %cr3,%eax
// CHECK: movl %cr4, %eax
// CHECK: encoding: [0x0f,0x20,0xe0]
movl %cr4,%eax
// CHECK: movl %dr0, %eax
// CHECK: encoding: [0x0f,0x21,0xc0]
movl %dr0,%eax
// CHECK: movl %dr1, %eax
// CHECK: encoding: [0x0f,0x21,0xc8]
movl %dr1,%eax
// CHECK: movl %dr1, %eax
// CHECK: encoding: [0x0f,0x21,0xc8]
movl %dr1,%eax
// CHECK: movl %dr2, %eax
// CHECK: encoding: [0x0f,0x21,0xd0]
movl %dr2,%eax
// CHECK: movl %dr3, %eax
// CHECK: encoding: [0x0f,0x21,0xd8]
movl %dr3,%eax
// CHECK: movl %dr4, %eax
// CHECK: encoding: [0x0f,0x21,0xe0]
movl %dr4,%eax
// CHECK: movl %dr5, %eax
// CHECK: encoding: [0x0f,0x21,0xe8]
movl %dr5,%eax
// CHECK: movl %dr6, %eax
// CHECK: encoding: [0x0f,0x21,0xf0]
movl %dr6,%eax
// CHECK: movl %dr7, %eax
// CHECK: encoding: [0x0f,0x21,0xf8]
movl %dr7,%eax
// CHECK: clzero
// CHECK: encoding: [0x0f,0x01,0xfc]
clzero
// CHECK: clzero
// CHECK: encoding: [0x0f,0x01,0xfc]
clzero %eax
// rdar://8017522
// CHECK: wait
// CHECK: encoding: [0x9b]
fwait
// rdar://7873482
// CHECK: [0x65,0xa1,0x7c,0x00,0x00,0x00]
movl %gs:124, %eax
// CHECK: [0x65,0xa3,0x7c,0x00,0x00,0x00]
movl %eax, %gs:124
// CHECK: pushal
// CHECK: encoding: [0x60]
pusha
// CHECK: popal
// CHECK: encoding: [0x61]
popa
// CHECK: pushaw
// CHECK: encoding: [0x66,0x60]
pushaw
// CHECK: popaw
// CHECK: encoding: [0x66,0x61]
popaw
// CHECK: pushal
// CHECK: encoding: [0x60]
pushal
// CHECK: popal
// CHECK: encoding: [0x61]
popal
// CHECK: jmpl *8(%eax)
// CHECK: encoding: [0xff,0x60,0x08]
jmp *8(%eax)
// PR7465
// CHECK: lcalll $2, $4660
// CHECK: encoding: [0x9a,0x34,0x12,0x00,0x00,0x02,0x00]
lcalll $0x2, $0x1234
// rdar://8061602
L1:
jcxz L1
// CHECK: jcxz L1
// CHECK: encoding: [0x67,0xe3,A]
jecxz L1
// CHECK: jecxz L1
// CHECK: encoding: [0xe3,A]
// rdar://8403974
iret
// CHECK: iretl
// CHECK: encoding: [0xcf]
iretw
// CHECK: iretw
// CHECK: encoding: [0x66,0xcf]
iretl
// CHECK: iretl
// CHECK: encoding: [0xcf]
// rdar://8403907
sysret
// CHECK: sysretl
// CHECK: encoding: [0x0f,0x07]
sysretl
// CHECK: sysretl
// CHECK: encoding: [0x0f,0x07]
// rdar://8018260
testl %ecx, -24(%ebp)
// CHECK: testl %ecx, -24(%ebp)
testl -24(%ebp), %ecx
// CHECK: testl %ecx, -24(%ebp)
// rdar://8407242
push %cs
// CHECK: pushl %cs
// CHECK: encoding: [0x0e]
push %ds
// CHECK: pushl %ds
// CHECK: encoding: [0x1e]
push %ss
// CHECK: pushl %ss
// CHECK: encoding: [0x16]
push %es
// CHECK: pushl %es
// CHECK: encoding: [0x06]
push %fs
// CHECK: pushl %fs
// CHECK: encoding: [0x0f,0xa0]
push %gs
// CHECK: pushl %gs
// CHECK: encoding: [0x0f,0xa8]
pushw %cs
// CHECK: pushw %cs
// CHECK: encoding: [0x66,0x0e]
pushw %ds
// CHECK: pushw %ds
// CHECK: encoding: [0x66,0x1e]
pushw %ss
// CHECK: pushw %ss
// CHECK: encoding: [0x66,0x16]
pushw %es
// CHECK: pushw %es
// CHECK: encoding: [0x66,0x06]
pushw %fs
// CHECK: pushw %fs
// CHECK: encoding: [0x66,0x0f,0xa0]
pushw %gs
// CHECK: pushw %gs
// CHECK: encoding: [0x66,0x0f,0xa8]
pop %ss
// CHECK: popl %ss
// CHECK: encoding: [0x17]
pop %ds
// CHECK: popl %ds
// CHECK: encoding: [0x1f]
pop %es
// CHECK: popl %es
// CHECK: encoding: [0x07]
// rdar://8408129
pushfd
// CHECK: pushfl
popfd
// CHECK: popfl
pushfl
// CHECK: pushfl
popfl
// CHECK: popfl
// rdar://8416805
setc %bl
setnae %bl
setnb %bl
setnc %bl
setna %bl
setnbe %bl
setpe %bl
setpo %bl
setnge %bl
setnl %bl
setng %bl
setnle %bl
// PR8686
setneb %cl // CHECK: setne %cl
setcb %bl // CHECK: setb %bl
setnaeb %bl // CHECK: setb %bl
// PR8114
out %al, (%dx)
// CHECK: outb %al, %dx
outb %al, (%dx)
// CHECK: outb %al, %dx
out %ax, (%dx)
// CHECK: outw %ax, %dx
outw %ax, (%dx)
// CHECK: outw %ax, %dx
out %eax, (%dx)
// CHECK: outl %eax, %dx
outl %eax, (%dx)
// CHECK: outl %eax, %dx
in (%dx), %al
// CHECK: inb %dx, %al
inb (%dx), %al
// CHECK: inb %dx, %al
in (%dx), %ax
// CHECK: inw %dx, %ax
inw (%dx), %ax
// CHECK: inw %dx, %ax
in (%dx), %eax
// CHECK: inl %dx, %eax
inl (%dx), %eax
// CHECK: inl %dx, %eax
//PR15455
outsb (%esi), (%dx)
// CHECK: outsb (%esi), %dx
outsw (%esi), (%dx)
// CHECK: outsw (%esi), %dx
outsl (%esi), (%dx)
// CHECK: outsl (%esi), %dx
insb (%dx), %es:(%edi)
// CHECK: insb %dx, %es:(%edi)
insw (%dx), %es:(%edi)
// CHECK: insw %dx, %es:(%edi)
insl (%dx), %es:(%edi)
// CHECK: insl %dx, %es:(%edi)
// CHECK: lcalll $31438, $31438
// CHECK: lcalll $31438, $31438
// CHECK: ljmpl $31438, $31438
// CHECK: ljmpl $31438, $31438
calll $0x7ace,$0x7ace
lcalll $0x7ace,$0x7ace
jmpl $0x7ace,$0x7ace
ljmpl $0x7ace,$0x7ace
// CHECK: lcallw $31438, $31438
// CHECK: lcallw $31438, $31438
// CHECK: ljmpw $31438, $31438
// CHECK: ljmpw $31438, $31438
callw $0x7ace,$0x7ace
lcallw $0x7ace,$0x7ace
jmpw $0x7ace,$0x7ace
ljmpw $0x7ace,$0x7ace
// CHECK: lcalll $31438, $31438
// CHECK: lcalll $31438, $31438
// CHECK: ljmpl $31438, $31438
// CHECK: ljmpl $31438, $31438
call $0x7ace,$0x7ace
lcall $0x7ace,$0x7ace
jmp $0x7ace,$0x7ace
ljmp $0x7ace,$0x7ace
// rdar://8456370
// CHECK: calll a
calll a
// CHECK: incb %al # encoding: [0xfe,0xc0]
incb %al
// CHECK: incw %ax # encoding: [0x66,0x40]
incw %ax
// CHECK: incl %eax # encoding: [0x40]
incl %eax
// CHECK: decb %al # encoding: [0xfe,0xc8]
decb %al
// CHECK: decw %ax # encoding: [0x66,0x48]
decw %ax
// CHECK: decl %eax # encoding: [0x48]
decl %eax
// CHECK: pshufw $14, %mm4, %mm0 # encoding: [0x0f,0x70,0xc4,0x0e]
pshufw $14, %mm4, %mm0
// CHECK: pshufw $90, %mm4, %mm0 # encoding: [0x0f,0x70,0xc4,0x5a]
// PR8288
pshufw $90, %mm4, %mm0
// rdar://8416805
// CHECK: aaa
// CHECK: encoding: [0x37]
aaa
// CHECK: aad $1
// CHECK: encoding: [0xd5,0x01]
aad $1
// CHECK: aad
// CHECK: encoding: [0xd5,0x0a]
aad $0xA
// CHECK: aad
// CHECK: encoding: [0xd5,0x0a]
aad
// CHECK: aam $2
// CHECK: encoding: [0xd4,0x02]
aam $2
// CHECK: aam
// CHECK: encoding: [0xd4,0x0a]
aam $0xA
// CHECK: aam
// CHECK: encoding: [0xd4,0x0a]
aam
// CHECK: aas
// CHECK: encoding: [0x3f]
aas
// CHECK: daa
// CHECK: encoding: [0x27]
daa
// CHECK: das
// CHECK: encoding: [0x2f]
das
// CHECK: retw $31438
// CHECK: encoding: [0x66,0xc2,0xce,0x7a]
retw $0x7ace
// CHECK: lretw $31438
// CHECK: encoding: [0x66,0xca,0xce,0x7a]
lretw $0x7ace
// CHECK: bound %bx, 2(%eax)
// CHECK: encoding: [0x66,0x62,0x58,0x02]
bound %bx,2(%eax)
// CHECK: bound %ecx, 4(%ebx)
// CHECK: encoding: [0x62,0x4b,0x04]
bound %ecx,4(%ebx)
// CHECK: arpl %bx, %bx
// CHECK: encoding: [0x63,0xdb]
arpl %bx,%bx
// CHECK: arpl %bx, 6(%ecx)
// CHECK: encoding: [0x63,0x59,0x06]
arpl %bx,6(%ecx)
// CHECK: lgdtw 4(%eax)
// CHECK: encoding: [0x66,0x0f,0x01,0x50,0x04]
lgdtw 4(%eax)
// CHECK: lgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
lgdt 4(%eax)
// CHECK: lgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
lgdtl 4(%eax)
// CHECK: lidtw 4(%eax)
// CHECK: encoding: [0x66,0x0f,0x01,0x58,0x04]
lidtw 4(%eax)
// CHECK: lidtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x58,0x04]
lidt 4(%eax)
// CHECK: lidtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x58,0x04]
lidtl 4(%eax)
// CHECK: sgdtw 4(%eax)
// CHECK: encoding: [0x66,0x0f,0x01,0x40,0x04]
sgdtw 4(%eax)
// CHECK: sgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x40,0x04]
sgdt 4(%eax)
// CHECK: sgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x40,0x04]
sgdtl 4(%eax)
// CHECK: sidtw 4(%eax)
// CHECK: encoding: [0x66,0x0f,0x01,0x48,0x04]
sidtw 4(%eax)
// CHECK: sidtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x48,0x04]
sidt 4(%eax)
// CHECK: sidtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x48,0x04]
sidtl 4(%eax)
// CHECK: fcompi %st(2)
// CHECK: encoding: [0xdf,0xf2]
fcompi %st(2), %st
// CHECK: fcompi %st(2)
// CHECK: encoding: [0xdf,0xf2]
fcompi %st(2)
// CHECK: fcompi
// CHECK: encoding: [0xdf,0xf1]
fcompi
// CHECK: fucompi %st(2)
// CHECK: encoding: [0xdf,0xea]
fucompi %st(2),%st
// CHECK: fucompi %st(2)
// CHECK: encoding: [0xdf,0xea]
fucompi %st(2)
// CHECK: fucompi
// CHECK: encoding: [0xdf,0xe9]
fucompi
// CHECK: fldcw 32493
// CHECK: encoding: [0xd9,0x2d,0xed,0x7e,0x00,0x00]
fldcww 0x7eed
// CHECK: fldcw 32493
// CHECK: encoding: [0xd9,0x2d,0xed,0x7e,0x00,0x00]
fldcw 0x7eed
// CHECK: fnstcw 32493
// CHECK: encoding: [0xd9,0x3d,0xed,0x7e,0x00,0x00]
fnstcww 0x7eed
// CHECK: fnstcw 32493
// CHECK: encoding: [0xd9,0x3d,0xed,0x7e,0x00,0x00]
fnstcw 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fstcww 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fstcw 0x7eed
// CHECK: fnstsw 32493
// CHECK: encoding: [0xdd,0x3d,0xed,0x7e,0x00,0x00]
fnstsww 0x7eed
// CHECK: fnstsw 32493
// CHECK: encoding: [0xdd,0x3d,0xed,0x7e,0x00,0x00]
fnstsw 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fstsww 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fstsw 0x7eed
// CHECK: verr 32493
// CHECK: encoding: [0x0f,0x00,0x25,0xed,0x7e,0x00,0x00]
verrw 0x7eed
// CHECK: verr 32493
// CHECK: encoding: [0x0f,0x00,0x25,0xed,0x7e,0x00,0x00]
verr 0x7eed
// CHECK: wait
// CHECK: encoding: [0x9b]
fclex
// CHECK: fnclex
// CHECK: encoding: [0xdb,0xe2]
fnclex
// CHECK: ud2
// CHECK: encoding: [0x0f,0x0b]
ud2
// CHECK: ud2
// CHECK: encoding: [0x0f,0x0b]
ud2a
// CHECK: ud2b
// CHECK: encoding: [0x0f,0xb9]
ud2b
// CHECK: loope 0
// CHECK: encoding: [0xe1,A]
loopz 0
// CHECK: loopne 0
// CHECK: encoding: [0xe0,A]
loopnz 0
// CHECK: outsb (%esi), %dx # encoding: [0x6e]
// CHECK: outsb
// CHECK: outsb
outsb
outsb %ds:(%esi), %dx
outsb (%esi), %dx
// CHECK: outsw (%esi), %dx # encoding: [0x66,0x6f]
// CHECK: outsw
// CHECK: outsw
outsw
outsw %ds:(%esi), %dx
outsw (%esi), %dx
// CHECK: outsl (%esi), %dx # encoding: [0x6f]
// CHECK: outsl
outsl
outsl %ds:(%esi), %dx
outsl (%esi), %dx
// CHECK: insb %dx, %es:(%edi) # encoding: [0x6c]
// CHECK: insb
insb
insb %dx, %es:(%edi)
// CHECK: insw %dx, %es:(%edi) # encoding: [0x66,0x6d]
// CHECK: insw
insw
insw %dx, %es:(%edi)
// CHECK: insl %dx, %es:(%edi) # encoding: [0x6d]
// CHECK: insl
insl
insl %dx, %es:(%edi)
// CHECK: movsb (%esi), %es:(%edi) # encoding: [0xa4]
// CHECK: movsb
// CHECK: movsb
movsb
movsb %ds:(%esi), %es:(%edi)
movsb (%esi), %es:(%edi)
// CHECK: movsw (%esi), %es:(%edi) # encoding: [0x66,0xa5]
// CHECK: movsw
// CHECK: movsw
movsw
movsw %ds:(%esi), %es:(%edi)
movsw (%esi), %es:(%edi)
// CHECK: movsl (%esi), %es:(%edi) # encoding: [0xa5]
// CHECK: movsl
// CHECK: movsl
movsl
movsl %ds:(%esi), %es:(%edi)
movsl (%esi), %es:(%edi)
// CHECK: lodsb (%esi), %al # encoding: [0xac]
// CHECK: lodsb
// CHECK: lodsb
// CHECK: lodsb
// CHECK: lodsb
lodsb
lodsb %ds:(%esi), %al
lodsb (%esi), %al
lods %ds:(%esi), %al
lods (%esi), %al
// CHECK: lodsw (%esi), %ax # encoding: [0x66,0xad]
// CHECK: lodsw
// CHECK: lodsw
// CHECK: lodsw
// CHECK: lodsw
lodsw
lodsw %ds:(%esi), %ax
lodsw (%esi), %ax
lods %ds:(%esi), %ax
lods (%esi), %ax
// CHECK: lodsl (%esi), %eax # encoding: [0xad]
// CHECK: lodsl
// CHECK: lodsl
// CHECK: lodsl
// CHECK: lodsl
lodsl
lodsl %ds:(%esi), %eax
lodsl (%esi), %eax
lods %ds:(%esi), %eax
lods (%esi), %eax
// CHECK: stosb %al, %es:(%edi) # encoding: [0xaa]
// CHECK: stosb
// CHECK: stosb
stosb
stosb %al, %es:(%edi)
stos %al, %es:(%edi)
// CHECK: stosw %ax, %es:(%edi) # encoding: [0x66,0xab]
// CHECK: stosw
// CHECK: stosw
stosw
stosw %ax, %es:(%edi)
stos %ax, %es:(%edi)
// CHECK: stosl %eax, %es:(%edi) # encoding: [0xab]
// CHECK: stosl
// CHECK: stosl
stosl
stosl %eax, %es:(%edi)
stos %eax, %es:(%edi)
// CHECK: strw
// CHECK: encoding: [0x66,0x0f,0x00,0xc8]
str %ax
// CHECK: strl
// CHECK: encoding: [0x0f,0x00,0xc8]
str %eax
// PR9378
// CHECK: fsubp
// CHECK: encoding: [0xde,0xe1]
fsubp %st,%st(1)
// PR9164
-// CHECK: fsubp %st(2)
+// CHECK: fsubp %st, %st(2)
// CHECK: encoding: [0xde,0xe2]
fsubp %st, %st(2)
// PR10345
// CHECK: xchgl %eax, %eax
// CHECK: encoding: [0x90]
xchgl %eax, %eax
// CHECK: xchgw %ax, %ax
// CHECK: encoding: [0x66,0x90]
xchgw %ax, %ax
// CHECK: xchgl %ecx, %eax
// CHECK: encoding: [0x91]
xchgl %ecx, %eax
// CHECK: xchgl %ecx, %eax
// CHECK: encoding: [0x91]
xchgl %eax, %ecx
// CHECK: retw
// CHECK: encoding: [0x66,0xc3]
retw
// CHECK: lretw
// CHECK: encoding: [0x66,0xcb]
lretw
// CHECK: data16
// CHECK: encoding: [0x66]
data16
// CHECK: data16
// CHECK: encoding: [0x66]
// CHECK: lgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
data16 lgdt 4(%eax)
// CHECK: rdpid %eax
// CHECK: encoding: [0xf3,0x0f,0xc7,0xf8]
rdpid %eax
// CHECK: ptwritel 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0xf3,0x0f,0xae,0xa4,0xcb,0xef,0xbe,0xad,0xde]
ptwritel 0xdeadbeef(%ebx,%ecx,8)
// CHECK: ptwritel %eax
// CHECK: encoding: [0xf3,0x0f,0xae,0xe0]
ptwritel %eax
Index: vendor/llvm/dist-release_80/test/MC/X86/x86-64.s
===================================================================
--- vendor/llvm/dist-release_80/test/MC/X86/x86-64.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/MC/X86/x86-64.s (revision 344166)
@@ -1,1791 +1,1791 @@
// RUN: llvm-mc -triple x86_64-unknown-unknown -show-encoding %s > %t 2> %t.err
// RUN: FileCheck < %t %s
// RUN: FileCheck --check-prefix=CHECK-STDERR < %t.err %s
monitor
// CHECK: monitor
// CHECK: encoding: [0x0f,0x01,0xc8]
monitor %rax, %rcx, %rdx
// CHECK: monitor
// CHECK: encoding: [0x0f,0x01,0xc8]
mwait
// CHECK: mwait
// CHECK: encoding: [0x0f,0x01,0xc9]
mwait %rax, %rcx
// CHECK: mwait
// CHECK: encoding: [0x0f,0x01,0xc9]
// Suffix inference:
// CHECK: addl $0, %eax
add $0, %eax
// CHECK: addb $255, %al
add $0xFF, %al
// CHECK: orq %rax, %rdx
or %rax, %rdx
// CHECK: shlq $3, %rax
shl $3, %rax
// CHECK: subb %al, %al
subb %al, %al
// CHECK: addl $24, %eax
addl $24, %eax
// CHECK: movl %eax, 10(%ebp)
movl %eax, 10(%ebp)
// CHECK: movl %eax, 10(%ebp,%ebx)
movl %eax, 10(%ebp, %ebx)
// CHECK: movl %eax, 10(%ebp,%ebx,4)
movl %eax, 10(%ebp, %ebx, 4)
// CHECK: movl %eax, 10(,%ebx,4)
movl %eax, 10(, %ebx, 4)
// CHECK: movl 0, %eax
movl 0, %eax
// CHECK: movl $0, %eax
movl $0, %eax
// CHECK: ret
ret
// CHECK: retw
retw
// FIXME: Check that this matches SUB32ri8
// CHECK: subl $1, %eax
subl $1, %eax
// FIXME: Check that this matches SUB32ri8
// CHECK: subl $-1, %eax
subl $-1, %eax
// FIXME: Check that this matches SUB32ri
// CHECK: subl $256, %eax
subl $256, %eax
// FIXME: Check that this matches XOR64ri8
// CHECK: xorq $1, %rax
xorq $1, %rax
// FIXME: Check that this matches XOR64ri32
// CHECK: xorq $256, %rax
xorq $256, %rax
// FIXME: Check that this matches SUB8rr
// CHECK: subb %al, %bl
subb %al, %bl
// FIXME: Check that this matches SUB16rr
// CHECK: subw %ax, %bx
subw %ax, %bx
// FIXME: Check that this matches SUB32rr
// CHECK: subl %eax, %ebx
subl %eax, %ebx
// FIXME: Check that this matches the correct instruction.
// CHECK: callq *%rax
call *%rax
// FIXME: Check that this matches the correct instruction.
// CHECK: shldl %cl, %eax, %ebx
shldl %cl, %eax, %ebx
// CHECK: shll $2, %eax
shll $2, %eax
// CHECK: shll $2, %eax
sall $2, %eax
// CHECK: rep
// CHECK-NEXT: movsb
rep # comment
movsb
// CHECK: rep
// CHECK: insb
rep;insb
// CHECK: rep
// CHECK: outsb
rep;outsb
// CHECK: rep
// CHECK: movsb
rep;movsb
// rdar://8470918
smovb // CHECK: movsb
smovw // CHECK: movsw
smovl // CHECK: movsl
smovq // CHECK: movsq
// rdar://8456361
// CHECK: rep
// CHECK: movsl
rep movsd
// CHECK: rep
// CHECK: lodsb
rep;lodsb
// CHECK: rep
// CHECK: stosb
rep;stosb
// NOTE: repz and repe have the same opcode as rep
// CHECK: rep
// CHECK: cmpsb
repz;cmpsb
// NOTE: repnz has the same opcode as repne
// CHECK: repne
// CHECK: cmpsb
repnz;cmpsb
// NOTE: repe and repz have the same opcode as rep
// CHECK: rep
// CHECK: scasb
repe;scasb
// CHECK: repne
// CHECK: scasb
repne;scasb
// CHECK: lock
// CHECK: cmpxchgb %al, (%ebx)
lock;cmpxchgb %al, 0(%ebx)
// CHECK: cs
// CHECK: movb (%eax), %al
cs;movb 0(%eax), %al
// CHECK: ss
// CHECK: movb (%eax), %al
ss;movb 0(%eax), %al
// CHECK: ds
// CHECK: movb (%eax), %al
ds;movb 0(%eax), %al
// CHECK: es
// CHECK: movb (%eax), %al
es;movb 0(%eax), %al
// CHECK: fs
// CHECK: movb (%eax), %al
fs;movb 0(%eax), %al
// CHECK: gs
// CHECK: movb (%eax), %al
gs;movb 0(%eax), %al
// CHECK: fadd %st(0)
// CHECK: fadd %st(1)
// CHECK: fadd %st(7)
fadd %st(0)
fadd %st(1)
fadd %st(7)
// CHECK: leal 0, %eax
leal 0, %eax
// rdar://7986634 - Insensitivity on opcodes.
// CHECK: int3
INT3
// rdar://8735979 - int $3 -> int3
// CHECK: int3
int $3
// Allow scale factor without index register.
// CHECK: movaps %xmm3, (%esi)
// CHECK-STDERR: warning: scale factor without index register is ignored
movaps %xmm3, (%esi, 2)
// CHECK: imull $12, %eax
imul $12, %eax
// CHECK: imull %ecx, %eax
imull %ecx, %eax
// rdar://8208481
// CHECK: outb %al, $161
outb %al, $161
// CHECK: outw %ax, $128
outw %ax, $128
// CHECK: inb $161, %al
inb $161, %al
// rdar://8017621
// CHECK: pushq $1
push $1
// rdar://9716860
pushq $1
// CHECK: encoding: [0x6a,0x01]
pushq $1111111
// CHECK: encoding: [0x68,0x47,0xf4,0x10,0x00]
// rdar://8017530
// CHECK: sldtw 4
sldt 4
// rdar://8208499
// CHECK: cmovnew %bx, %ax
cmovnz %bx, %ax
// CHECK: cmovneq %rbx, %rax
cmovnzq %rbx, %rax
// rdar://8407928
// CHECK: inb $127, %al
// CHECK: inw %dx, %ax
// CHECK: outb %al, $127
// CHECK: outw %ax, %dx
// CHECK: inl %dx, %eax
inb $0x7f
inw %dx
outb $0x7f
outw %dx
inl %dx
// PR8114
// CHECK: outb %al, %dx
// CHECK: outb %al, %dx
// CHECK: outw %ax, %dx
// CHECK: outw %ax, %dx
// CHECK: outl %eax, %dx
// CHECK: outl %eax, %dx
out %al, (%dx)
outb %al, (%dx)
out %ax, (%dx)
outw %ax, (%dx)
out %eax, (%dx)
outl %eax, (%dx)
// CHECK: inb %dx, %al
// CHECK: inb %dx, %al
// CHECK: inw %dx, %ax
// CHECK: inw %dx, %ax
// CHECK: inl %dx, %eax
// CHECK: inl %dx, %eax
in (%dx), %al
inb (%dx), %al
in (%dx), %ax
inw (%dx), %ax
in (%dx), %eax
inl (%dx), %eax
//PR15455
outsb (%rsi), (%dx)
// CHECK: outsb (%rsi), %dx
outsw (%rsi), (%dx)
// CHECK: outsw (%rsi), %dx
outsl (%rsi), (%dx)
// CHECK: outsl (%rsi), %dx
insb (%dx), %es:(%rdi)
// CHECK: insb %dx, %es:(%rdi)
insw (%dx), %es:(%rdi)
// CHECK: insw %dx, %es:(%rdi)
insl (%dx), %es:(%rdi)
// CHECK: insl %dx, %es:(%rdi)
// rdar://8431422
// CHECK: fxch %st(1)
// CHECK: fucom %st(1)
// CHECK: fucomp %st(1)
-// CHECK: faddp %st(1)
-// CHECK: faddp %st(0)
-// CHECK: fsubp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fdivp %st(1)
-// CHECK: fdivrp %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: faddp %st, %st(0)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
fxch
fucom
fucomp
faddp
faddp %st
fsubp
fsubrp
fmulp
fdivp
fdivrp
// CHECK: fcomi %st(1)
// CHECK: fcomi %st(2)
// CHECK: fucomi %st(1)
// CHECK: fucomi %st(2)
// CHECK: fucomi %st(2)
fcomi
fcomi %st(2)
fucomi
fucomi %st(2)
fucomi %st(2), %st
// CHECK: fnstsw %ax
// CHECK: fnstsw %ax
fnstsw
fnstsw %ax
// rdar://8431880
// CHECK: rclb %bl
// CHECK: rcll 3735928559(%ebx,%ecx,8)
// CHECK: rcrl %ecx
// CHECK: rcrl 305419896
rcl %bl
rcll 0xdeadbeef(%ebx,%ecx,8)
rcr %ecx
rcrl 0x12345678
rclb %bl // CHECK: rclb %bl # encoding: [0xd0,0xd3]
rclb $1, %bl // CHECK: rclb %bl # encoding: [0xd0,0xd3]
rclb $2, %bl // CHECK: rclb $2, %bl # encoding: [0xc0,0xd3,0x02]
// rdar://8418316
// PR12173
// CHECK: shldw %cl, %bx, %dx
// CHECK: shldw %cl, %bx, %dx
// CHECK: shldw $1, %bx, %dx
// CHECK: shldw %cl, %bx, (%rax)
// CHECK: shldw %cl, %bx, (%rax)
// CHECK: shrdw %cl, %bx, %dx
// CHECK: shrdw %cl, %bx, %dx
// CHECK: shrdw $1, %bx, %dx
// CHECK: shrdw %cl, %bx, (%rax)
// CHECK: shrdw %cl, %bx, (%rax)
shld %bx, %dx
shld %cl, %bx, %dx
shld $1, %bx, %dx
shld %bx, (%rax)
shld %cl, %bx, (%rax)
shrd %bx, %dx
shrd %cl, %bx, %dx
shrd $1, %bx, %dx
shrd %bx, (%rax)
shrd %cl, %bx, (%rax)
// CHECK: sldtl %ecx
// CHECK: encoding: [0x0f,0x00,0xc1]
// CHECK: sldtw %cx
// CHECK: encoding: [0x66,0x0f,0x00,0xc1]
sldt %ecx
sldt %cx
// CHECK: lcalll *3135175374
// CHECK: ljmpl *3135175374
// CHECK: lcalll *(%rax)
// CHECK: ljmpl *(%rax)
lcall *0xbadeface
ljmp *0xbadeface
lcall *(%rax)
ljmpl *(%rax)
// rdar://8444631
// CHECK: enter $31438, $0
// CHECK: encoding: [0xc8,0xce,0x7a,0x00]
// CHECK: enter $31438, $1
// CHECK: encoding: [0xc8,0xce,0x7a,0x01]
// CHECK: enter $31438, $127
// CHECK: encoding: [0xc8,0xce,0x7a,0x7f]
enter $0x7ace,$0
enter $0x7ace,$1
enter $0x7ace,$0x7f
// rdar://8456364
// CHECK: movw %cs, %ax
mov %cs, %ax
// rdar://8456391
-fcmovb %st(1), %st(0) // CHECK: fcmovb %st(1), %st(0)
-fcmove %st(1), %st(0) // CHECK: fcmove %st(1), %st(0)
-fcmovbe %st(1), %st(0) // CHECK: fcmovbe %st(1), %st(0)
-fcmovu %st(1), %st(0) // CHECK: fcmovu %st(1), %st(0)
+fcmovb %st(1), %st // CHECK: fcmovb %st(1), %st
+fcmove %st(1), %st // CHECK: fcmove %st(1), %st
+fcmovbe %st(1), %st // CHECK: fcmovbe %st(1), %st
+fcmovu %st(1), %st // CHECK: fcmovu %st(1), %st
-fcmovnb %st(1), %st(0) // CHECK: fcmovnb %st(1), %st(0)
-fcmovne %st(1), %st(0) // CHECK: fcmovne %st(1), %st(0)
-fcmovnbe %st(1), %st(0) // CHECK: fcmovnbe %st(1), %st(0)
-fcmovnu %st(1), %st(0) // CHECK: fcmovnu %st(1), %st(0)
+fcmovnb %st(1), %st // CHECK: fcmovnb %st(1), %st
+fcmovne %st(1), %st // CHECK: fcmovne %st(1), %st
+fcmovnbe %st(1), %st // CHECK: fcmovnbe %st(1), %st
+fcmovnu %st(1), %st // CHECK: fcmovnu %st(1), %st
-fcmovnae %st(1), %st(0) // CHECK: fcmovb %st(1), %st(0)
-fcmovna %st(1), %st(0) // CHECK: fcmovbe %st(1), %st(0)
+fcmovnae %st(1), %st // CHECK: fcmovb %st(1), %st
+fcmovna %st(1), %st // CHECK: fcmovbe %st(1), %st
-fcmovae %st(1), %st(0) // CHECK: fcmovnb %st(1), %st(0)
-fcmova %st(1), %st(0) // CHECK: fcmovnbe %st(1), %st(0)
+fcmovae %st(1), %st // CHECK: fcmovnb %st(1), %st
+fcmova %st(1), %st // CHECK: fcmovnbe %st(1), %st
// rdar://8456417
.byte (88 + 1) & 15 // CHECK: .byte 9
// rdar://8456412
mov %rdx, %cr0
// CHECK: movq %rdx, %cr0
// CHECK: encoding: [0x0f,0x22,0xc2]
mov %rdx, %cr4
// CHECK: movq %rdx, %cr4
// CHECK: encoding: [0x0f,0x22,0xe2]
mov %rdx, %cr8
// CHECK: movq %rdx, %cr8
// CHECK: encoding: [0x44,0x0f,0x22,0xc2]
mov %rdx, %cr15
// CHECK: movq %rdx, %cr15
// CHECK: encoding: [0x44,0x0f,0x22,0xfa]
mov %rdx, %dr15
// CHECK: movq %rdx, %dr15
// CHECK: encoding: [0x44,0x0f,0x23,0xfa]
mov %rdx, %db15
// CHECK: movq %rdx, %dr15
// CHECK: encoding: [0x44,0x0f,0x23,0xfa]
// rdar://8456371 - Handle commutable instructions written backward.
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(2)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(2)
faddp %st, %st(1)
fmulp %st, %st(2)
// rdar://8468087 - Encode these accurately, they are not synonyms.
-// CHECK: fmul %st(0), %st(1)
+// CHECK: fmul %st, %st(1)
// CHECK: encoding: [0xdc,0xc9]
// CHECK: fmul %st(1)
// CHECK: encoding: [0xd8,0xc9]
fmul %st, %st(1)
fmul %st(1), %st
-// CHECK: fadd %st(0), %st(1)
+// CHECK: fadd %st, %st(1)
// CHECK: encoding: [0xdc,0xc1]
// CHECK: fadd %st(1)
// CHECK: encoding: [0xd8,0xc1]
fadd %st, %st(1)
fadd %st(1), %st
// rdar://8416805
// CHECK: xorb %al, %al
// CHECK: encoding: [0x30,0xc0]
// CHECK: xorw %di, %di
// CHECK: encoding: [0x66,0x31,0xff]
// CHECK: xorl %esi, %esi
// CHECK: encoding: [0x31,0xf6]
// CHECK: xorq %rsi, %rsi
// CHECK: encoding: [0x48,0x31,0xf6]
clrb %al
clr %di
clr %esi
clr %rsi
// rdar://8456378
cltq // CHECK: cltq
cdqe // CHECK: cltq
cwde // CHECK: cwtl
cwtl // CHECK: cwtl
// rdar://8416805
cbw // CHECK: cbtw
cwd // CHECK: cwtd
cdq // CHECK: cltd
cqo // CHECK: cqto
// rdar://8456378 and PR7557 - fstsw
fstsw %ax
// CHECK: wait
// CHECK: fnstsw
fstsw (%rax)
// CHECK: wait
// CHECK: fnstsw (%rax)
// PR8259
fstcw (%rsp)
// CHECK: wait
// CHECK: fnstcw (%rsp)
// PR8259
fstcw (%rsp)
// CHECK: wait
// CHECK: fnstcw (%rsp)
// PR8258
finit
// CHECK: wait
// CHECK: fninit
fsave 32493
// CHECK: wait
// CHECK: fnsave 32493
// rdar://8456382 - cvtsd2si support.
cvtsd2si %xmm1, %rax
// CHECK: cvtsd2si %xmm1, %rax
// CHECK: encoding: [0xf2,0x48,0x0f,0x2d,0xc1]
cvtsd2si %xmm1, %eax
// CHECK: cvtsd2si %xmm1, %eax
// CHECK: encoding: [0xf2,0x0f,0x2d,0xc1]
cvtsd2siq %xmm0, %rax // CHECK: cvtsd2si %xmm0, %rax
cvtsd2sil %xmm0, %eax // CHECK: cvtsd2si %xmm0, %eax
cvtsd2si %xmm0, %rax // CHECK: cvtsd2si %xmm0, %rax
cvttpd2dq %xmm1, %xmm0 // CHECK: cvttpd2dq %xmm1, %xmm0
cvttpd2dq (%rax), %xmm0 // CHECK: cvttpd2dq (%rax), %xmm0
cvttps2dq %xmm1, %xmm0 // CHECK: cvttps2dq %xmm1, %xmm0
cvttps2dq (%rax), %xmm0 // CHECK: cvttps2dq (%rax), %xmm0
// rdar://8456376 - llvm-mc rejects 'roundss'
roundss $0xE, %xmm0, %xmm0 // CHECK: encoding: [0x66,0x0f,0x3a,0x0a,0xc0,0x0e]
roundps $0xE, %xmm0, %xmm0 // CHECK: encoding: [0x66,0x0f,0x3a,0x08,0xc0,0x0e]
roundsd $0xE, %xmm0, %xmm0 // CHECK: encoding: [0x66,0x0f,0x3a,0x0b,0xc0,0x0e]
roundpd $0xE, %xmm0, %xmm0 // CHECK: encoding: [0x66,0x0f,0x3a,0x09,0xc0,0x0e]
// rdar://8482675 - 32-bit mem operand support in 64-bit mode (0x67 prefix)
leal 8(%eax), %esi
// CHECK: leal 8(%eax), %esi
// CHECK: encoding: [0x67,0x8d,0x70,0x08]
leaq 8(%eax), %rsi
// CHECK: leaq 8(%eax), %rsi
// CHECK: encoding: [0x67,0x48,0x8d,0x70,0x08]
leaq 8(%rax), %rsi
// CHECK: leaq 8(%rax), %rsi
// CHECK: encoding: [0x48,0x8d,0x70,0x08]
cvttpd2dq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvttpd2dq 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x67,0x66,0x0f,0xe6,0xac,0xcb,0xef,0xbe,0xad,0xde]
// rdar://8490728 - llvm-mc rejects 'movmskpd'
movmskpd %xmm6, %rax
// CHECK: movmskpd %xmm6, %eax
// CHECK: encoding: [0x66,0x0f,0x50,0xc6]
movmskpd %xmm6, %eax
// CHECK: movmskpd %xmm6, %eax
// CHECK: encoding: [0x66,0x0f,0x50,0xc6]
// rdar://8491845 - Gas supports commuted forms of non-commutable instructions.
-fdivrp %st(0), %st(1) // CHECK: encoding: [0xde,0xf9]
-fdivrp %st(1), %st(0) // CHECK: encoding: [0xde,0xf9]
+fdivrp %st, %st(1) // CHECK: encoding: [0xde,0xf9]
+fdivrp %st(1), %st // CHECK: encoding: [0xde,0xf9]
-fsubrp %st(0), %st(1) // CHECK: encoding: [0xde,0xe9]
-fsubrp %st(1), %st(0) // CHECK: encoding: [0xde,0xe9]
+fsubrp %st, %st(1) // CHECK: encoding: [0xde,0xe9]
+fsubrp %st(1), %st // CHECK: encoding: [0xde,0xe9]
// also PR8861
-fdivp %st(0), %st(1) // CHECK: encoding: [0xde,0xf1]
-fdivp %st(1), %st(0) // CHECK: encoding: [0xde,0xf1]
+fdivp %st, %st(1) // CHECK: encoding: [0xde,0xf1]
+fdivp %st(1), %st // CHECK: encoding: [0xde,0xf1]
movl foo(%rip), %eax
// CHECK: movl foo(%rip), %eax
// CHECK: encoding: [0x8b,0x05,A,A,A,A]
// CHECK: fixup A - offset: 2, value: foo-4, kind: reloc_riprel_4byte
movb $12, foo(%rip)
// CHECK: movb $12, foo(%rip)
// CHECK: encoding: [0xc6,0x05,A,A,A,A,0x0c]
// CHECK: fixup A - offset: 2, value: foo-5, kind: reloc_riprel_4byte
movw $12, foo(%rip)
// CHECK: movw $12, foo(%rip)
// CHECK: encoding: [0x66,0xc7,0x05,A,A,A,A,0x0c,0x00]
// CHECK: fixup A - offset: 3, value: foo-6, kind: reloc_riprel_4byte
movl $12, foo(%rip)
// CHECK: movl $12, foo(%rip)
// CHECK: encoding: [0xc7,0x05,A,A,A,A,0x0c,0x00,0x00,0x00]
// CHECK: fixup A - offset: 2, value: foo-8, kind: reloc_riprel_4byte
// rdar://37247000
movl $12, 1024(%rip)
// CHECK: movl $12, 1024(%rip)
// CHECK: encoding: [0xc7,0x05,0x00,0x04,0x00,0x00,0x0c,0x00,0x00,0x00]
movq $12, foo(%rip)
// CHECK: movq $12, foo(%rip)
// CHECK: encoding: [0x48,0xc7,0x05,A,A,A,A,0x0c,0x00,0x00,0x00]
// CHECK: fixup A - offset: 3, value: foo-8, kind: reloc_riprel_4byte
movl foo(%eip), %eax
// CHECK: movl foo(%eip), %eax
// CHECK: encoding: [0x67,0x8b,0x05,A,A,A,A]
// CHECK: fixup A - offset: 3, value: foo-4, kind: reloc_riprel_4byte
movb $12, foo(%eip)
// CHECK: movb $12, foo(%eip)
// CHECK: encoding: [0x67,0xc6,0x05,A,A,A,A,0x0c]
// CHECK: fixup A - offset: 3, value: foo-5, kind: reloc_riprel_4byte
movw $12, foo(%eip)
// CHECK: movw $12, foo(%eip)
// CHECK: encoding: [0x67,0x66,0xc7,0x05,A,A,A,A,0x0c,0x00]
// CHECK: fixup A - offset: 4, value: foo-6, kind: reloc_riprel_4byte
movl $12, foo(%eip)
// CHECK: movl $12, foo(%eip)
// CHECK: encoding: [0x67,0xc7,0x05,A,A,A,A,0x0c,0x00,0x00,0x00]
// CHECK: fixup A - offset: 3, value: foo-8, kind: reloc_riprel_4byte
movq $12, foo(%eip)
// CHECK: movq $12, foo(%eip)
// CHECK: encoding: [0x67,0x48,0xc7,0x05,A,A,A,A,0x0c,0x00,0x00,0x00]
// CHECK: fixup A - offset: 4, value: foo-8, kind: reloc_riprel_4byte
// CHECK: addq $-424, %rax
// CHECK: encoding: [0x48,0x05,0x58,0xfe,0xff,0xff]
addq $-424, %rax
// CHECK: movq _foo@GOTPCREL(%rip), %rax
// CHECK: encoding: [0x48,0x8b,0x05,A,A,A,A]
// CHECK: fixup A - offset: 3, value: _foo@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
movq _foo@GOTPCREL(%rip), %rax
// CHECK: movq _foo@GOTPCREL(%rip), %r14
// CHECK: encoding: [0x4c,0x8b,0x35,A,A,A,A]
// CHECK: fixup A - offset: 3, value: _foo@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
movq _foo@GOTPCREL(%rip), %r14
// CHECK: movq _foo@GOTPCREL(%eip), %rax
// CHECK: encoding: [0x67,0x48,0x8b,0x05,A,A,A,A]
// CHECK: fixup A - offset: 4, value: _foo@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
movq _foo@GOTPCREL(%eip), %rax
// CHECK: movq _foo@GOTPCREL(%eip), %r14
// CHECK: encoding: [0x67,0x4c,0x8b,0x35,A,A,A,A]
// CHECK: fixup A - offset: 4, value: _foo@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
movq _foo@GOTPCREL(%eip), %r14
// CHECK: movq (%r13,%rax,8), %r13
// CHECK: encoding: [0x4d,0x8b,0x6c,0xc5,0x00]
movq 0x00(%r13,%rax,8),%r13
// CHECK: testq %rax, %rbx
// CHECK: encoding: [0x48,0x85,0xc3]
testq %rax, %rbx
// CHECK: cmpq %rbx, %r14
// CHECK: encoding: [0x49,0x39,0xde]
cmpq %rbx, %r14
// rdar://7947167
movsq
// CHECK: movsq
// CHECK: encoding: [0x48,0xa5]
movsl
// CHECK: movsl
// CHECK: encoding: [0xa5]
stosq
// CHECK: stosq
// CHECK: encoding: [0x48,0xab]
stosl
// CHECK: stosl
// CHECK: encoding: [0xab]
// Not moffset forms of moves, they are x86-32 only! rdar://7947184
movb 0, %al // CHECK: movb 0, %al # encoding: [0x8a,0x04,0x25,0x00,0x00,0x00,0x00]
movw 0, %ax // CHECK: movw 0, %ax # encoding: [0x66,0x8b,0x04,0x25,0x00,0x00,0x00,0x00]
movl 0, %eax // CHECK: movl 0, %eax # encoding: [0x8b,0x04,0x25,0x00,0x00,0x00,0x00]
// CHECK: pushfq # encoding: [0x9c]
pushf
// CHECK: pushfq # encoding: [0x9c]
pushfq
// CHECK: popfq # encoding: [0x9d]
popf
// CHECK: popfq # encoding: [0x9d]
popfq
// CHECK: movabsq $-281474976710654, %rax
// CHECK: encoding: [0x48,0xb8,0x02,0x00,0x00,0x00,0x00,0x00,0xff,0xff]
movabsq $0xFFFF000000000002, %rax
// CHECK: movabsq $-281474976710654, %rax
// CHECK: encoding: [0x48,0xb8,0x02,0x00,0x00,0x00,0x00,0x00,0xff,0xff]
movq $0xFFFF000000000002, %rax
// CHECK: movq $-65536, %rax
// CHECK: encoding: [0x48,0xc7,0xc0,0x00,0x00,0xff,0xff]
movq $0xFFFFFFFFFFFF0000, %rax
// CHECK: movq $-256, %rax
// CHECK: encoding: [0x48,0xc7,0xc0,0x00,0xff,0xff,0xff]
movq $0xFFFFFFFFFFFFFF00, %rax
// CHECK: movq $10, %rax
// CHECK: encoding: [0x48,0xc7,0xc0,0x0a,0x00,0x00,0x00]
movq $10, %rax
// CHECK: movabsb -6066930261531658096, %al
// CHECK: encoding: [0xa0,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab]
movabsb 0xabcdef1234567890,%al
// CHECK: movabsw -6066930261531658096, %ax
// CHECK: encoding: [0x66,0xa1,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab]
movabsw 0xabcdef1234567890,%ax
// CHECK: movabsl -6066930261531658096, %eax
// CHECK: encoding: [0xa1,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab]
movabsl 0xabcdef1234567890,%eax
// CHECK: movabsq -6066930261531658096, %rax
// CHECK: encoding: [0x48,0xa1,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab]
movabsq 0xabcdef1234567890, %rax
// CHECK: movabsb %al, -6066930261531658096
// CHECK: encoding: [0xa2,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab]
movabsb %al,0xabcdef1234567890
// CHECK: movabsw %ax, -6066930261531658096
// CHECK: encoding: [0x66,0xa3,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab]
movabsw %ax,0xabcdef1234567890
// CHECK: movabsl %eax, -6066930261531658096
// CHECK: encoding: [0xa3,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab]
movabsl %eax,0xabcdef1234567890
// CHECK: movabsq %rax, -6066930261531658096
// CHECK: encoding: [0x48,0xa3,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab]
movabsq %rax,0xabcdef1234567890
// rdar://8014869
//
// CHECK: ret
// CHECK: encoding: [0xc3]
retq
// CHECK: sete %al
// CHECK: encoding: [0x0f,0x94,0xc0]
setz %al
// CHECK: setne %al
// CHECK: encoding: [0x0f,0x95,0xc0]
setnz %al
// CHECK: je 0
// CHECK: encoding: [0x74,A]
jz 0
// CHECK: jne
// CHECK: encoding: [0x75,A]
jnz 0
// PR9264
btl $1, 0 // CHECK: btl $1, 0 # encoding: [0x0f,0xba,0x24,0x25,0x00,0x00,0x00,0x00,0x01]
bt $1, 0 // CHECK: btl $1, 0 # encoding: [0x0f,0xba,0x24,0x25,0x00,0x00,0x00,0x00,0x01]
// rdar://8017515
btq $0x01,%rdx
// CHECK: btq $1, %rdx
// CHECK: encoding: [0x48,0x0f,0xba,0xe2,0x01]
//rdar://8017633
// CHECK: movzbl %al, %esi
// CHECK: encoding: [0x0f,0xb6,0xf0]
movzx %al, %esi
// CHECK: movzbq %al, %rsi
// CHECK: encoding: [0x48,0x0f,0xb6,0xf0]
movzx %al, %rsi
// CHECK: movsbw %al, %ax
// CHECK: encoding: [0x66,0x0f,0xbe,0xc0]
movsx %al, %ax
// CHECK: movsbl %al, %eax
// CHECK: encoding: [0x0f,0xbe,0xc0]
movsx %al, %eax
// CHECK: movswl %ax, %eax
// CHECK: encoding: [0x0f,0xbf,0xc0]
movsx %ax, %eax
// CHECK: movsbq %bl, %rax
// CHECK: encoding: [0x48,0x0f,0xbe,0xc3]
movsx %bl, %rax
// CHECK: movswq %cx, %rax
// CHECK: encoding: [0x48,0x0f,0xbf,0xc1]
movsx %cx, %rax
// CHECK: movslq %edi, %rax
// CHECK: encoding: [0x48,0x63,0xc7]
movsx %edi, %rax
// CHECK: movzbw %al, %ax
// CHECK: encoding: [0x66,0x0f,0xb6,0xc0]
movzx %al, %ax
// CHECK: movzbl %al, %eax
// CHECK: encoding: [0x0f,0xb6,0xc0]
movzx %al, %eax
// CHECK: movzwl %ax, %eax
// CHECK: encoding: [0x0f,0xb7,0xc0]
movzx %ax, %eax
// CHECK: movzbq %bl, %rax
// CHECK: encoding: [0x48,0x0f,0xb6,0xc3]
movzx %bl, %rax
// CHECK: movzwq %cx, %rax
// CHECK: encoding: [0x48,0x0f,0xb7,0xc1]
movzx %cx, %rax
// CHECK: movsbw (%rax), %ax
// CHECK: encoding: [0x66,0x0f,0xbe,0x00]
movsx (%rax), %ax
// CHECK: movzbw (%rax), %ax
// CHECK: encoding: [0x66,0x0f,0xb6,0x00]
movzx (%rax), %ax
// rdar://7873482
// CHECK: [0x65,0x8b,0x04,0x25,0x7c,0x00,0x00,0x00]
movl %gs:124, %eax
// CHECK: jmpq *8(%rax)
// CHECK: encoding: [0xff,0x60,0x08]
jmp *8(%rax)
// CHECK: btq $61, -216(%rbp)
// CHECK: encoding: [0x48,0x0f,0xba,0xa5,0x28,0xff,0xff,0xff,0x3d]
btq $61, -216(%rbp)
// rdar://8061602
L1:
jecxz L1
// CHECK: jecxz L1
// CHECK: encoding: [0x67,0xe3,A]
jrcxz L1
// CHECK: jrcxz L1
// CHECK: encoding: [0xe3,A]
// PR8061
xchgl 368(%rax),%ecx
// CHECK: xchgl %ecx, 368(%rax)
xchgl %ecx, 368(%rax)
// CHECK: xchgl %ecx, 368(%rax)
// rdar://8407548
xchg 0xdeadbeef(%rbx,%rcx,8),%bl
// CHECK: xchgb %bl, 3735928559(%rbx,%rcx,8)
// PR7254
lock incl 1(%rsp)
// CHECK: lock
// CHECK: incl 1(%rsp)
// rdar://8741045
lock/incl 1(%rsp)
// CHECK: lock
// CHECK: incl 1(%rsp)
lock addq %rsi, (%rdi)
// CHECK: lock
// CHECK: addq %rsi, (%rdi)
// CHECK: encoding: [0xf0,0x48,0x01,0x37]
lock subq %rsi, (%rdi)
// CHECK: lock
// CHECK: subq %rsi, (%rdi)
// CHECK: encoding: [0xf0,0x48,0x29,0x37]
lock andq %rsi, (%rdi)
// CHECK: lock
// CHECK: andq %rsi, (%rdi)
// CHECK: encoding: [0xf0,0x48,0x21,0x37]
lock orq %rsi, (%rdi)
// CHECK: lock
// CHECK: orq %rsi, (%rdi)
// CHECK: encoding: [0xf0,0x48,0x09,0x37]
lock xorq %rsi, (%rdi)
// CHECK: lock
// CHECK: xorq %rsi, (%rdi)
// CHECK: encoding: [0xf0,0x48,0x31,0x37]
xacquire lock addq %rax, (%rax)
// CHECK: xacquire
// CHECK: encoding: [0xf2]
// CHECK: lock
// CHECK: addq %rax, (%rax)
// CHECK: encoding: [0xf0,0x48,0x01,0x00]
xrelease lock addq %rax, (%rax)
// CHECK: xrelease
// CHECK: encoding: [0xf3]
// CHECK: lock
// CHECK: addq %rax, (%rax)
// CHECK: encoding: [0xf0,0x48,0x01,0x00]
// rdar://8033482
rep movsl
// CHECK: rep
// CHECK: movsl
// CHECK: encoding: [0xf3,0xa5]
// rdar://8403974
iret
// CHECK: iretl
// CHECK: encoding: [0xcf]
iretw
// CHECK: iretw
// CHECK: encoding: [0x66,0xcf]
iretl
// CHECK: iretl
// CHECK: encoding: [0xcf]
iretq
// CHECK: iretq
// CHECK: encoding: [0x48,0xcf]
// rdar://8416805
// CHECK: retw $31438
// CHECK: encoding: [0x66,0xc2,0xce,0x7a]
retw $0x7ace
// CHECK: lretw $31438
// CHECK: encoding: [0x66,0xca,0xce,0x7a]
lretw $0x7ace
// PR8592
lretq // CHECK: lretq # encoding: [0x48,0xcb]
lretl // CHECK: lretl # encoding: [0xcb]
lret // CHECK: lretl # encoding: [0xcb]
lretw // CHECK: lretw # encoding: [0x66,0xcb]
// rdar://8403907
sysret
// CHECK: sysretl
// CHECK: encoding: [0x0f,0x07]
sysretl
// CHECK: sysretl
// CHECK: encoding: [0x0f,0x07]
sysretq
// CHECK: sysretq
// CHECK: encoding: [0x48,0x0f,0x07]
// rdar://8407242
push %fs
// CHECK: pushq %fs
// CHECK: encoding: [0x0f,0xa0]
push %gs
// CHECK: pushq %gs
// CHECK: encoding: [0x0f,0xa8]
pushw %fs
// CHECK: pushw %fs
// CHECK: encoding: [0x66,0x0f,0xa0]
pushw %gs
// CHECK: pushw %gs
// CHECK: encoding: [0x66,0x0f,0xa8]
pop %fs
// CHECK: popq %fs
// CHECK: encoding: [0x0f,0xa1]
pop %gs
// CHECK: popq %gs
// CHECK: encoding: [0x0f,0xa9]
popw %fs
// CHECK: popw %fs
// CHECK: encoding: [0x66,0x0f,0xa1]
popw %gs
// CHECK: popw %gs
// CHECK: encoding: [0x66,0x0f,0xa9]
// rdar://8438816
fildq -8(%rsp)
fildll -8(%rsp)
// CHECK: fildll -8(%rsp)
// CHECK: encoding: [0xdf,0x6c,0x24,0xf8]
// CHECK: fildll -8(%rsp)
// CHECK: encoding: [0xdf,0x6c,0x24,0xf8]
// CHECK: callq a
callq a
// CHECK: leaq -40(%rbp), %r15
leaq -40(%rbp), %r15
// rdar://8013734 - Alias dr6=db6
mov %dr6, %rax
mov %db6, %rax
// CHECK: movq %dr6, %rax
// CHECK: movq %dr6, %rax
// INC/DEC encodings.
incb %al // CHECK: incb %al # encoding: [0xfe,0xc0]
incw %ax // CHECK: incw %ax # encoding: [0x66,0xff,0xc0]
incl %eax // CHECK: incl %eax # encoding: [0xff,0xc0]
decb %al // CHECK: decb %al # encoding: [0xfe,0xc8]
decw %ax // CHECK: decw %ax # encoding: [0x66,0xff,0xc8]
decl %eax // CHECK: decl %eax # encoding: [0xff,0xc8]
// rdar://8416805
// CHECK: lgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
lgdt 4(%rax)
// CHECK: lgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
lgdtq 4(%rax)
// CHECK: lidtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x58,0x04]
lidt 4(%rax)
// CHECK: lidtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x58,0x04]
lidtq 4(%rax)
// CHECK: sgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x40,0x04]
sgdt 4(%rax)
// CHECK: sgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x40,0x04]
sgdtq 4(%rax)
// CHECK: sidtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x48,0x04]
sidt 4(%rax)
// CHECK: sidtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x48,0x04]
sidtq 4(%rax)
// rdar://8208615
mov (%rsi), %gs // CHECK: movw (%rsi), %gs # encoding: [0x8e,0x2e]
mov %gs, (%rsi) // CHECK: movw %gs, (%rsi) # encoding: [0x8c,0x2e]
// rdar://8431864
//CHECK: divb %bl
//CHECK: divw %bx
//CHECK: divl %ecx
//CHECK: divl 3735928559(%ebx,%ecx,8)
//CHECK: divl 69
//CHECK: divl 32493
//CHECK: divl 3133065982
//CHECK: divl 305419896
//CHECK: idivb %bl
//CHECK: idivw %bx
//CHECK: idivl %ecx
//CHECK: idivl 3735928559(%ebx,%ecx,8)
//CHECK: idivl 69
//CHECK: idivl 32493
//CHECK: idivl 3133065982
//CHECK: idivl 305419896
div %bl,%al
div %bx,%ax
div %ecx,%eax
div 0xdeadbeef(%ebx,%ecx,8),%eax
div 0x45,%eax
div 0x7eed,%eax
div 0xbabecafe,%eax
div 0x12345678,%eax
idiv %bl,%al
idiv %bx,%ax
idiv %ecx,%eax
idiv 0xdeadbeef(%ebx,%ecx,8),%eax
idiv 0x45,%eax
idiv 0x7eed,%eax
idiv 0xbabecafe,%eax
idiv 0x12345678,%eax
// PR8524
movd %rax, %mm5 // CHECK: movq %rax, %mm5 # encoding: [0x48,0x0f,0x6e,0xe8]
movd %mm5, %rbx // CHECK: movq %mm5, %rbx # encoding: [0x48,0x0f,0x7e,0xeb]
movq %rax, %mm5 // CHECK: movq %rax, %mm5 # encoding: [0x48,0x0f,0x6e,0xe8]
movq %mm5, %rbx // CHECK: movq %mm5, %rbx # encoding: [0x48,0x0f,0x7e,0xeb]
rex64 // CHECK: rex64 # encoding: [0x48]
data16 // CHECK: data16 # encoding: [0x66]
// CHECK: data16
// CHECK: encoding: [0x66]
// CHECK: lgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
data16 lgdt 4(%rax)
// PR8855
movq 18446744073709551615,%rbx // CHECK: movq -1, %rbx
// PR8946
movdqu %xmm0, %xmm1 // CHECK: movdqu %xmm0, %xmm1 # encoding: [0xf3,0x0f,0x6f,0xc8]
// PR8935
xgetbv // CHECK: xgetbv # encoding: [0x0f,0x01,0xd0]
xsetbv // CHECK: xsetbv # encoding: [0x0f,0x01,0xd1]
// CHECK: loope 0
// CHECK: encoding: [0xe1,A]
loopz 0
// CHECK: loopne 0
// CHECK: encoding: [0xe0,A]
loopnz 0
// CHECK: outsb (%rsi), %dx # encoding: [0x6e]
// CHECK: outsb
// CHECK: outsb
outsb
outsb %ds:(%rsi), %dx
outsb (%rsi), %dx
// CHECK: outsw (%rsi), %dx # encoding: [0x66,0x6f]
// CHECK: outsw
// CHECK: outsw
outsw
outsw %ds:(%rsi), %dx
outsw (%rsi), %dx
// CHECK: outsl (%rsi), %dx # encoding: [0x6f]
// CHECK: outsl
outsl
outsl %ds:(%rsi), %dx
outsl (%rsi), %dx
// CHECK: insb %dx, %es:(%rdi) # encoding: [0x6c]
// CHECK: insb
insb
insb %dx, %es:(%rdi)
// CHECK: insw %dx, %es:(%rdi) # encoding: [0x66,0x6d]
// CHECK: insw
insw
insw %dx, %es:(%rdi)
// CHECK: insl %dx, %es:(%rdi) # encoding: [0x6d]
// CHECK: insl
insl
insl %dx, %es:(%rdi)
// CHECK: movsb (%rsi), %es:(%rdi) # encoding: [0xa4]
// CHECK: movsb
// CHECK: movsb
movsb
movsb %ds:(%rsi), %es:(%rdi)
movsb (%rsi), %es:(%rdi)
// CHECK: movsw (%rsi), %es:(%rdi) # encoding: [0x66,0xa5]
// CHECK: movsw
// CHECK: movsw
movsw
movsw %ds:(%rsi), %es:(%rdi)
movsw (%rsi), %es:(%rdi)
// CHECK: movsl (%rsi), %es:(%rdi) # encoding: [0xa5]
// CHECK: movsl
// CHECK: movsl
movsl
movsl %ds:(%rsi), %es:(%rdi)
movsl (%rsi), %es:(%rdi)
// rdar://10883092
// CHECK: movsl
movsl (%rsi), (%rdi)
// CHECK: movsq (%rsi), %es:(%rdi) # encoding: [0x48,0xa5]
// CHECK: movsq
// CHECK: movsq
movsq
movsq %ds:(%rsi), %es:(%rdi)
movsq (%rsi), %es:(%rdi)
// CHECK: lodsb (%rsi), %al # encoding: [0xac]
// CHECK: lodsb
// CHECK: lodsb
// CHECK: lodsb
// CHECK: lodsb
lodsb
lodsb %ds:(%rsi), %al
lodsb (%rsi), %al
lods %ds:(%rsi), %al
lods (%rsi), %al
// CHECK: lodsw (%rsi), %ax # encoding: [0x66,0xad]
// CHECK: lodsw
// CHECK: lodsw
// CHECK: lodsw
// CHECK: lodsw
lodsw
lodsw %ds:(%rsi), %ax
lodsw (%rsi), %ax
lods %ds:(%rsi), %ax
lods (%rsi), %ax
// CHECK: lodsl (%rsi), %eax # encoding: [0xad]
// CHECK: lodsl
// CHECK: lodsl
// CHECK: lodsl
// CHECK: lodsl
lodsl
lodsl %ds:(%rsi), %eax
lodsl (%rsi), %eax
lods %ds:(%rsi), %eax
lods (%rsi), %eax
// CHECK: lodsq (%rsi), %rax # encoding: [0x48,0xad]
// CHECK: lodsq
// CHECK: lodsq
// CHECK: lodsq
// CHECK: lodsq
lodsq
lodsq %ds:(%rsi), %rax
lodsq (%rsi), %rax
lods %ds:(%rsi), %rax
lods (%rsi), %rax
// CHECK: stosb %al, %es:(%rdi) # encoding: [0xaa]
// CHECK: stosb
// CHECK: stosb
stosb
stosb %al, %es:(%rdi)
stos %al, %es:(%rdi)
// CHECK: stosw %ax, %es:(%rdi) # encoding: [0x66,0xab]
// CHECK: stosw
// CHECK: stosw
stosw
stosw %ax, %es:(%rdi)
stos %ax, %es:(%rdi)
// CHECK: stosl %eax, %es:(%rdi) # encoding: [0xab]
// CHECK: stosl
// CHECK: stosl
stosl
stosl %eax, %es:(%rdi)
stos %eax, %es:(%rdi)
// CHECK: stosq %rax, %es:(%rdi) # encoding: [0x48,0xab]
// CHECK: stosq
// CHECK: stosq
stosq
stosq %rax, %es:(%rdi)
stos %rax, %es:(%rdi)
// CHECK: strw
// CHECK: encoding: [0x66,0x0f,0x00,0xc8]
str %ax
// CHECK: strl
// CHECK: encoding: [0x0f,0x00,0xc8]
str %eax
// CHECK: strw
// CHECK: encoding: [0x66,0x0f,0x00,0xc8]
str %ax
// CHECK: strq
// CHECK: encoding: [0x48,0x0f,0x00,0xc8]
str %rax
// CHECK: movq %rdi, %xmm0
// CHECK: encoding: [0x66,0x48,0x0f,0x6e,0xc7]
movq %rdi,%xmm0
// CHECK: movq %xmm0, %rax
// CHECK: encoding: [0x66,0x48,0x0f,0x7e,0xc0]
movq %xmm0, %rax
// CHECK: movntil %eax, (%rdi)
// CHECK: encoding: [0x0f,0xc3,0x07]
// CHECK: movntil
movntil %eax, (%rdi)
movnti %eax, (%rdi)
// CHECK: movntiq %rax, (%rdi)
// CHECK: encoding: [0x48,0x0f,0xc3,0x07]
// CHECK: movntiq
movntiq %rax, (%rdi)
movnti %rax, (%rdi)
// CHECK: pclmulqdq $17, %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0x3a,0x44,0xc8,0x11]
pclmulhqhqdq %xmm0, %xmm1
// CHECK: pclmulqdq $1, %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0x3a,0x44,0xc8,0x01]
pclmulqdq $1, %xmm0, %xmm1
// CHECK: pclmulqdq $16, (%rdi), %xmm1
// CHECK: encoding: [0x66,0x0f,0x3a,0x44,0x0f,0x10]
pclmullqhqdq (%rdi), %xmm1
// CHECK: pclmulqdq $0, (%rdi), %xmm1
// CHECK: encoding: [0x66,0x0f,0x3a,0x44,0x0f,0x00]
pclmulqdq $0, (%rdi), %xmm1
// PR10345
// CHECK: nop
// CHECK: encoding: [0x90]
xchgq %rax, %rax
// CHECK: xchgl %eax, %eax
// CHECK: encoding: [0x87,0xc0]
xchgl %eax, %eax
// CHECK: xchgw %ax, %ax
// CHECK: encoding: [0x66,0x90]
xchgw %ax, %ax
// CHECK: xchgl %ecx, %eax
// CHECK: encoding: [0x91]
xchgl %ecx, %eax
// CHECK: xchgl %ecx, %eax
// CHECK: encoding: [0x91]
xchgl %eax, %ecx
// CHECK: sysexit
// CHECK: encoding: [0x0f,0x35]
sysexit
// CHECK: sysexitl
// CHECK: encoding: [0x0f,0x35]
sysexitl
// CHECK: sysexitq
// CHECK: encoding: [0x48,0x0f,0x35]
sysexitq
// CHECK: clac
// CHECK: encoding: [0x0f,0x01,0xca]
clac
// CHECK: stac
// CHECK: encoding: [0x0f,0x01,0xcb]
stac
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fdivp %st(1)
-// CHECK: fdivrp %st(1)
-faddp %st(0), %st(1)
-fmulp %st(0), %st(1)
-fsubp %st(0), %st(1)
-fsubrp %st(0), %st(1)
-fdivp %st(0), %st(1)
-fdivrp %st(0), %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
+faddp %st, %st(1)
+fmulp %st, %st(1)
+fsubp %st, %st(1)
+fsubrp %st, %st(1)
+fdivp %st, %st(1)
+fdivrp %st, %st(1)
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fdivp %st(1)
-// CHECK: fdivrp %st(1)
-faddp %st(1), %st(0)
-fmulp %st(1), %st(0)
-fsubp %st(1), %st(0)
-fsubrp %st(1), %st(0)
-fdivp %st(1), %st(0)
-fdivrp %st(1), %st(0)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
+faddp %st(1), %st
+fmulp %st(1), %st
+fsubp %st(1), %st
+fsubrp %st(1), %st
+fdivp %st(1), %st
+fdivrp %st(1), %st
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fdivp %st(1)
-// CHECK: fdivrp %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
faddp %st(1)
fmulp %st(1)
fsubp %st(1)
fsubrp %st(1)
fdivp %st(1)
fdivrp %st(1)
-// CHECK: faddp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fsubp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fdivp %st(1)
-// CHECK: fdivrp %st(1)
+// CHECK: faddp %st, %st(1)
+// CHECK: fmulp %st, %st(1)
+// CHECK: fsubp %st, %st(1)
+// CHECK: fsubrp %st, %st(1)
+// CHECK: fdivp %st, %st(1)
+// CHECK: fdivrp %st, %st(1)
faddp
fmulp
fsubp
fsubrp
fdivp
fdivrp
// CHECK: fadd %st(1)
// CHECK: fmul %st(1)
// CHECK: fsub %st(1)
// CHECK: fsubr %st(1)
// CHECK: fdiv %st(1)
// CHECK: fdivr %st(1)
-fadd %st(1), %st(0)
-fmul %st(1), %st(0)
-fsub %st(1), %st(0)
-fsubr %st(1), %st(0)
-fdiv %st(1), %st(0)
-fdivr %st(1), %st(0)
+fadd %st(1), %st
+fmul %st(1), %st
+fsub %st(1), %st
+fsubr %st(1), %st
+fdiv %st(1), %st
+fdivr %st(1), %st
-// CHECK: fadd %st(0), %st(1)
-// CHECK: fmul %st(0), %st(1)
-// CHECK: fsub %st(0), %st(1)
-// CHECK: fsubr %st(0), %st(1)
-// CHECK: fdiv %st(0), %st(1)
-// CHECK: fdivr %st(0), %st(1)
-fadd %st(0), %st(1)
-fmul %st(0), %st(1)
-fsub %st(0), %st(1)
-fsubr %st(0), %st(1)
-fdiv %st(0), %st(1)
-fdivr %st(0), %st(1)
+// CHECK: fadd %st, %st(1)
+// CHECK: fmul %st, %st(1)
+// CHECK: fsub %st, %st(1)
+// CHECK: fsubr %st, %st(1)
+// CHECK: fdiv %st, %st(1)
+// CHECK: fdivr %st, %st(1)
+fadd %st, %st(1)
+fmul %st, %st(1)
+fsub %st, %st(1)
+fsubr %st, %st(1)
+fdiv %st, %st(1)
+fdivr %st, %st(1)
// CHECK: fadd %st(1)
// CHECK: fmul %st(1)
// CHECK: fsub %st(1)
// CHECK: fsubr %st(1)
// CHECK: fdiv %st(1)
// CHECK: fdivr %st(1)
fadd %st(1)
fmul %st(1)
fsub %st(1)
fsubr %st(1)
fdiv %st(1)
fdivr %st(1)
// CHECK: movd %xmm0, %eax
// CHECK: movq %xmm0, %rax
// CHECK: movq %xmm0, %rax
// CHECK: vmovd %xmm0, %eax
// CHECK: vmovq %xmm0, %rax
// CHECK: vmovq %xmm0, %rax
movd %xmm0, %eax
movq %xmm0, %rax
movq %xmm0, %rax
vmovd %xmm0, %eax
vmovd %xmm0, %rax
vmovq %xmm0, %rax
// CHECK: seto 3735928559(%r10,%r9,8)
// CHECK: encoding: [0x43,0x0f,0x90,0x84,0xca,0xef,0xbe,0xad,0xde]
seto 0xdeadbeef(%r10,%r9,8)
// CHECK: monitorx
// CHECK: encoding: [0x0f,0x01,0xfa]
monitorx
// CHECK: monitorx
// CHECK: encoding: [0x0f,0x01,0xfa]
monitorx %rax, %rcx, %rdx
// CHECK: mwaitx
// CHECK: encoding: [0x0f,0x01,0xfb]
mwaitx
// CHECK: mwaitx
// CHECK: encoding: [0x0f,0x01,0xfb]
mwaitx %rax, %rcx, %rbx
// CHECK: clzero
// CHECK: encoding: [0x0f,0x01,0xfc]
clzero
// CHECK: clzero
// CHECK: encoding: [0x0f,0x01,0xfc]
clzero %rax
// CHECK: movl %r15d, (%r15,%r15)
// CHECK: encoding: [0x47,0x89,0x3c,0x3f]
movl %r15d, (%r15,%r15)
// CHECK: nopq 3735928559(%rbx,%rcx,8)
// CHECK: encoding: [0x48,0x0f,0x1f,0x84,0xcb,0xef,0xbe,0xad,0xde]
nopq 0xdeadbeef(%rbx,%rcx,8)
// CHECK: nopq %rax
// CHECK: encoding: [0x48,0x0f,0x1f,0xc0]
nopq %rax
// CHECK: rdpid %rax
// CHECK: encoding: [0xf3,0x0f,0xc7,0xf8]
rdpid %rax
// CHECK: ptwritel 3735928559(%rbx,%rcx,8)
// CHECK: encoding: [0xf3,0x0f,0xae,0xa4,0xcb,0xef,0xbe,0xad,0xde]
ptwritel 0xdeadbeef(%rbx,%rcx,8)
// CHECK: ptwritel %eax
// CHECK: encoding: [0xf3,0x0f,0xae,0xe0]
ptwritel %eax
// CHECK: ptwriteq 3735928559(%rbx,%rcx,8)
// CHECK: encoding: [0xf3,0x48,0x0f,0xae,0xa4,0xcb,0xef,0xbe,0xad,0xde]
ptwriteq 0xdeadbeef(%rbx,%rcx,8)
// CHECK: ptwriteq %rax
// CHECK: encoding: [0xf3,0x48,0x0f,0xae,0xe0]
ptwriteq %rax
// CHECK: wbnoinvd
// CHECK: encoding: [0xf3,0x0f,0x09]
wbnoinvd
// CHECK: cldemote 4(%rax)
// CHECK: encoding: [0x0f,0x1c,0x40,0x04]
cldemote 4(%rax)
// CHECK: cldemote 3735928559(%rbx,%rcx,8)
// CHECK: encoding: [0x0f,0x1c,0x84,0xcb,0xef,0xbe,0xad,0xde]
cldemote 0xdeadbeef(%rbx,%rcx,8)
// CHECK: umonitor %r13
// CHECK: encoding: [0xf3,0x41,0x0f,0xae,0xf5]
umonitor %r13
// CHECK: umonitor %rax
// CHECK: encoding: [0xf3,0x0f,0xae,0xf0]
umonitor %rax
// CHECK: umonitor %eax
// CHECK: encoding: [0x67,0xf3,0x0f,0xae,0xf0]
umonitor %eax
// CHECK: umwait %r15
// CHECK: encoding: [0xf2,0x41,0x0f,0xae,0xf7]
umwait %r15
// CHECK: umwait %ebx
// CHECK: encoding: [0xf2,0x0f,0xae,0xf3]
umwait %ebx
// CHECK: tpause %r15
// CHECK: encoding: [0x66,0x41,0x0f,0xae,0xf7]
tpause %r15
// CHECK: tpause %ebx
// CHECK: encoding: [0x66,0x0f,0xae,0xf3]
tpause %ebx
// CHECK: movdiri %r15, 485498096
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf9,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
movdiri %r15, 485498096
// CHECK: movdiri %r15, (%rdx)
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf9,0x3a]
movdiri %r15, (%rdx)
// CHECK: movdiri %r15, 64(%rdx)
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf9,0x7a,0x40]
movdiri %r15, 64(%rdx)
// CHECK: movdir64b 485498096, %rax
// CHECK: # encoding: [0x66,0x0f,0x38,0xf8,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
movdir64b 485498096, %rax
// CHECK: movdir64b 485498096, %eax
// CHECK: # encoding: [0x67,0x66,0x0f,0x38,0xf8,0x04,0x25,0xf0,0x1c,0xf0,0x1c]
movdir64b 485498096, %eax
// CHECK: movdir64b (%rdx), %r15
// CHECK: # encoding: [0x66,0x44,0x0f,0x38,0xf8,0x3a]
movdir64b (%rdx), %r15
// CHECK: pconfig
// CHECK: # encoding: [0x0f,0x01,0xc5]
pconfig
// CHECK: encls
// CHECK: encoding: [0x0f,0x01,0xcf]
encls
// CHECK: enclu
// CHECK: encoding: [0x0f,0x01,0xd7]
enclu
// CHECK: enclv
// CHECK: encoding: [0x0f,0x01,0xc0]
enclv
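// Editor's note (not part of the original test): the ".s" suffix on the mov
// family below selects the alternate of the two possible register-to-register
// encodings; the CHECK lines confirm the printed form is the plain mnemonic
// while the encoding bytes differ from the default (e.g. 0x8b vs. 0x89 for movq).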
// CHECK: movq %rax, %rbx
// CHECK: encoding: [0x48,0x8b,0xd8]
movq.s %rax, %rbx
// CHECK: movq %rax, %rbx
// CHECK: encoding: [0x48,0x8b,0xd8]
mov.s %rax, %rbx
// CHECK: movl %eax, %ebx
// CHECK: encoding: [0x8b,0xd8]
movl.s %eax, %ebx
// CHECK: movl %eax, %ebx
// CHECK: encoding: [0x8b,0xd8]
mov.s %eax, %ebx
// CHECK: movw %ax, %bx
// CHECK: encoding: [0x66,0x8b,0xd8]
movw.s %ax, %bx
// CHECK: movw %ax, %bx
// CHECK: encoding: [0x66,0x8b,0xd8]
mov.s %ax, %bx
// CHECK: movb %al, %bl
// CHECK: encoding: [0x8a,0xd8]
movb.s %al, %bl
// CHECK: movb %al, %bl
// CHECK: encoding: [0x8a,0xd8]
mov.s %al, %bl
// CHECK: movq %mm0, %mm1
// CHECK: encoding: [0x0f,0x7f,0xc1]
movq.s %mm0, %mm1
// CHECK: movq %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0xd6,0xc1]
movq.s %xmm0, %xmm1
// CHECK: movdqa %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0x7f,0xc1]
movdqa.s %xmm0, %xmm1
// CHECK: movdqu %xmm0, %xmm1
// CHECK: encoding: [0xf3,0x0f,0x7f,0xc1]
movdqu.s %xmm0, %xmm1
// CHECK: movaps %xmm0, %xmm1
// CHECK: encoding: [0x0f,0x29,0xc1]
movaps.s %xmm0, %xmm1
// CHECK: movups %xmm0, %xmm1
// CHECK: encoding: [0x0f,0x11,0xc1]
movups.s %xmm0, %xmm1
// CHECK: movapd %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0x29,0xc1]
movapd.s %xmm0, %xmm1
// CHECK: movupd %xmm0, %xmm1
// CHECK: encoding: [0x66,0x0f,0x11,0xc1]
movupd.s %xmm0, %xmm1
// CHECK: vmovq %xmm0, %xmm8
// CHECK: encoding: [0xc4,0xc1,0x79,0xd6,0xc0]
vmovq.s %xmm0, %xmm8
// CHECK: vmovq %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x79,0xd6,0xc0]
vmovq.s %xmm8, %xmm0
// CHECK: vmovdqa %xmm0, %xmm8
// CHECK: encoding: [0xc4,0xc1,0x79,0x7f,0xc0]
vmovdqa.s %xmm0, %xmm8
// CHECK: vmovdqa %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x79,0x7f,0xc0]
vmovdqa.s %xmm8, %xmm0
// CHECK: vmovdqu %xmm0, %xmm8
// CHECK: encoding: [0xc4,0xc1,0x7a,0x7f,0xc0]
vmovdqu.s %xmm0, %xmm8
// CHECK: vmovdqu %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x7a,0x7f,0xc0]
vmovdqu.s %xmm8, %xmm0
// CHECK: vmovaps %xmm0, %xmm8
// CHECK: encoding: [0xc4,0xc1,0x78,0x29,0xc0]
vmovaps.s %xmm0, %xmm8
// CHECK: vmovaps %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x78,0x29,0xc0]
vmovaps.s %xmm8, %xmm0
// CHECK: vmovups %xmm0, %xmm8
// CHECK: encoding: [0xc4,0xc1,0x78,0x11,0xc0]
vmovups.s %xmm0, %xmm8
// CHECK: vmovups %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x78,0x11,0xc0]
vmovups.s %xmm8, %xmm0
// CHECK: vmovapd %xmm0, %xmm8
// CHECK: encoding: [0xc4,0xc1,0x79,0x29,0xc0]
vmovapd.s %xmm0, %xmm8
// CHECK: vmovapd %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x79,0x29,0xc0]
vmovapd.s %xmm8, %xmm0
// CHECK: vmovupd %xmm0, %xmm8
// CHECK: encoding: [0xc4,0xc1,0x79,0x11,0xc0]
vmovupd.s %xmm0, %xmm8
// CHECK: vmovupd %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x79,0x11,0xc0]
vmovupd.s %xmm8, %xmm0
// __asm __volatile(
// "pushf \n\t"
// "popf \n\t"
// "rep \n\t"
// ".byte 0x0f, 0xa7, 0xd0"
// );
// CHECK: pushfq
// CHECK-NEXT: popfq
// CHECK-NEXT: rep
// CHECK-NEXT: .byte 15
// CHECK-NEXT: .byte 167
// CHECK-NEXT: .byte 208
pushfq
popfq
rep
.byte 15
.byte 167
.byte 208
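// Editor's note: the next test appears to check that a lock prefix separated
// from cmpxchgl by a branch and a local label is still accepted and printed.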
// CHECK: lock
// CHECK: cmpxchgl
cmp $0, %edx
je 1f
lock
1: cmpxchgl %ecx,(%rdi)
// CHECK: rep
// CHECK-NEXT: byte
rep
.byte 0xa4 # movsb
// CHECK: lock
// This line has to be the last one in the file
lock
Index: vendor/llvm/dist-release_80/test/Transforms/InstCombine/double-float-shrink-1.ll
===================================================================
--- vendor/llvm/dist-release_80/test/Transforms/InstCombine/double-float-shrink-1.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/Transforms/InstCombine/double-float-shrink-1.ll (revision 344166)
@@ -1,540 +1,574 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S | FileCheck %s
+; RUN: opt < %s -instcombine -S -mtriple x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK,LINUX,ISC99
+; RUN: opt < %s -instcombine -S -mtriple x86_64-pc-win32 | FileCheck %s --check-prefixes=CHECK,ISC99
+; RUN: opt < %s -instcombine -S -mtriple x86_64-pc-windows-msvc16 | FileCheck %s --check-prefixes=CHECK,MS64,ISC89
+; RUN: opt < %s -instcombine -S -mtriple i386-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,ISC99
+; RUN: opt < %s -instcombine -S -mtriple i686-pc-windows-msvc17 | FileCheck %s --check-prefixes=CHECK,MS32,ISC89
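; Editor's note on the prefixes above (an interpretation, not from the test):
; LINUX/MS32/MS64 name the individual triples, while ISC99 groups the targets
; whose C runtime provides the C99 single-precision math functions (acoshf,
; cbrtf, expm1f, ...) and ISC89 the MSVC triples pinned to versions that only
; guarantee the C89 set, so those calls are expected to stay in double.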
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
; Check for and against shrinkage when using the
; unsafe-fp-math function attribute on a math lib
; function. This optimization may be overridden by
; the -enable-double-float-shrink option.
; PR17850: http://llvm.org/bugs/show_bug.cgi?id=17850
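; Editorial sketch of the transform exercised below (not part of the test):
; with fast-math, the extend/call/truncate pattern
;   %conv  = fpext float %f to double
;   %call  = call fast double @acos(double %conv)
;   %conv1 = fptrunc double %call to float
; is expected to shrink to a single-precision libcall where one is available:
;   %call  = call fast float @acosf(float %f)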
define float @acos_test1(float %f) {
; CHECK-LABEL: @acos_test1(
-; CHECK-NEXT: [[ACOSF:%.*]] = call fast float @acosf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[ACOSF]]
+; LINUX-NEXT: [[ACOSF:%.*]] = call fast float @acosf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[ACOSF]]
+; MS32: [[ACOSF:%.*]] = call fast double @acos(double [[F:%.*]])
+; MS64-NEXT: [[ACOSF:%.*]] = call fast float @acosf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @acos(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @acos_test2(float %f) {
; CHECK-LABEL: @acos_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @acos(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @acos(double %conv)
ret double %call
}
define float @acosh_test1(float %f) {
; CHECK-LABEL: @acosh_test1(
-; CHECK-NEXT: [[ACOSHF:%.*]] = call fast float @acoshf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[ACOSHF]]
+; ISC99-NEXT: [[ACOSHF:%.*]] = call fast float @acoshf(float [[F:%.*]])
+; ISC99-NEXT: ret float [[ACOSHF]]
+; ISC89: [[ACOSHF:%.*]] = call fast double @acosh(double [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @acosh(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @acosh_test2(float %f) {
; CHECK-LABEL: @acosh_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @acosh(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @acosh(double %conv)
ret double %call
}
define float @asin_test1(float %f) {
; CHECK-LABEL: @asin_test1(
-; CHECK-NEXT: [[ASINF:%.*]] = call fast float @asinf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[ASINF]]
+; LINUX-NEXT: [[ASINF:%.*]] = call fast float @asinf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[ASINF]]
+; MS32: [[ASINF:%.*]] = call fast double @asin(double [[F:%.*]])
+; MS64-NEXT: [[ASINF:%.*]] = call fast float @asinf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @asin(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @asin_test2(float %f) {
; CHECK-LABEL: @asin_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @asin(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @asin(double %conv)
ret double %call
}
define float @asinh_test1(float %f) {
; CHECK-LABEL: @asinh_test1(
-; CHECK-NEXT: [[ASINHF:%.*]] = call fast float @asinhf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[ASINHF]]
+; ISC99-NEXT: [[ASINHF:%.*]] = call fast float @asinhf(float [[F:%.*]])
+; ISC99-NEXT: ret float [[ASINHF]]
+; ISC89: [[ASINHF:%.*]] = call fast double @asinh(double [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @asinh(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @asinh_test2(float %f) {
; CHECK-LABEL: @asinh_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @asinh(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @asinh(double %conv)
ret double %call
}
define float @atan_test1(float %f) {
; CHECK-LABEL: @atan_test1(
-; CHECK-NEXT: [[ATANF:%.*]] = call fast float @atanf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[ATANF]]
+; LINUX-NEXT: [[ATANF:%.*]] = call fast float @atanf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[ATANF]]
+; MS32: [[ATANF:%.*]] = call fast double @atan(double [[F:%.*]])
+; MS64-NEXT: [[ATANF:%.*]] = call fast float @atanf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @atan(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @atan_test2(float %f) {
; CHECK-LABEL: @atan_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @atan(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @atan(double %conv)
ret double %call
}
define float @atanh_test1(float %f) {
; CHECK-LABEL: @atanh_test1(
-; CHECK-NEXT: [[ATANHF:%.*]] = call fast float @atanhf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[ATANHF]]
+; ISC99-NEXT: [[ATANHF:%.*]] = call fast float @atanhf(float [[F:%.*]])
+; ISC99-NEXT: ret float [[ATANHF]]
+; ISC89: [[ATANHF:%.*]] = call fast double @atanh(double [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @atanh(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @atanh_test2(float %f) {
; CHECK-LABEL: @atanh_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @atanh(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @atanh(double %conv)
ret double %call
}
define float @cbrt_test1(float %f) {
; CHECK-LABEL: @cbrt_test1(
-; CHECK-NEXT: [[CBRTF:%.*]] = call fast float @cbrtf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[CBRTF]]
+; ISC99-NEXT: [[CBRTF:%.*]] = call fast float @cbrtf(float [[F:%.*]])
+; ISC99-NEXT: ret float [[CBRTF]]
+; ISC89: [[CBRTF:%.*]] = call fast double @cbrt(double [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @cbrt(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @cbrt_test2(float %f) {
; CHECK-LABEL: @cbrt_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @cbrt(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @cbrt(double %conv)
ret double %call
}
define float @exp_test1(float %f) {
; CHECK-LABEL: @exp_test1(
-; CHECK-NEXT: [[EXPF:%.*]] = call fast float @expf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[EXPF]]
+; LINUX-NEXT: [[EXPF:%.*]] = call fast float @expf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[EXPF]]
+; MS32: [[EXPF:%.*]] = call fast double @exp(double [[F:%.*]])
+; MS64-NEXT: [[EXPF:%.*]] = call fast float @expf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @exp(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @exp_test2(float %f) {
; CHECK-LABEL: @exp_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @exp(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @exp(double %conv)
ret double %call
}
define float @expm1_test1(float %f) {
; CHECK-LABEL: @expm1_test1(
-; CHECK-NEXT: [[EXPM1F:%.*]] = call fast float @expm1f(float [[F:%.*]])
-; CHECK-NEXT: ret float [[EXPM1F]]
+; ISC99-NEXT: [[EXPM1F:%.*]] = call fast float @expm1f(float [[F:%.*]])
+; ISC99-NEXT: ret float [[EXPM1F]]
+; ISC89: [[EXPM1F:%.*]] = call fast double @expm1(double [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @expm1(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @expm1_test2(float %f) {
; CHECK-LABEL: @expm1_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @expm1(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @expm1(double %conv)
ret double %call
}
; exp10f() doesn't exist for this triple, so it doesn't shrink.
define float @exp10_test1(float %f) {
; CHECK-LABEL: @exp10_test1(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @exp10(double [[CONV]])
; CHECK-NEXT: [[CONV1:%.*]] = fptrunc double [[CALL]] to float
; CHECK-NEXT: ret float [[CONV1]]
;
%conv = fpext float %f to double
%call = call fast double @exp10(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @exp10_test2(float %f) {
; CHECK-LABEL: @exp10_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @exp10(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @exp10(double %conv)
ret double %call
}
define float @log_test1(float %f) {
; CHECK-LABEL: @log_test1(
-; CHECK-NEXT: [[LOGF:%.*]] = call fast float @logf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[LOGF]]
+; LINUX-NEXT: [[LOGF:%.*]] = call fast float @logf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[LOGF]]
+; MS32: [[LOGF:%.*]] = call fast double @log(double [[F:%.*]])
+; MS64-NEXT: [[LOGF:%.*]] = call fast float @logf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @log(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @log_test2(float %f) {
; CHECK-LABEL: @log_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @log(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @log(double %conv)
ret double %call
}
define float @log10_test1(float %f) {
; CHECK-LABEL: @log10_test1(
-; CHECK-NEXT: [[LOG10F:%.*]] = call fast float @log10f(float [[F:%.*]])
-; CHECK-NEXT: ret float [[LOG10F]]
+; LINUX-NEXT: [[LOG10F:%.*]] = call fast float @log10f(float [[F:%.*]])
+; LINUX-NEXT: ret float [[LOG10F]]
+; MS32: [[LOG10F:%.*]] = call fast double @log10(double [[F:%.*]])
+; MS64-NEXT: [[LOG10F:%.*]] = call fast float @log10f(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @log10(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @log10_test2(float %f) {
; CHECK-LABEL: @log10_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @log10(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @log10(double %conv)
ret double %call
}
define float @log1p_test1(float %f) {
; CHECK-LABEL: @log1p_test1(
-; CHECK-NEXT: [[LOG1PF:%.*]] = call fast float @log1pf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[LOG1PF]]
+; ISC99-NEXT: [[LOG1PF:%.*]] = call fast float @log1pf(float [[F:%.*]])
+; ISC99-NEXT: ret float [[LOG1PF]]
+; ISC89: [[LOG1PF:%.*]] = call fast double @log1p(double [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @log1p(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @log1p_test2(float %f) {
; CHECK-LABEL: @log1p_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @log1p(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @log1p(double %conv)
ret double %call
}
define float @log2_test1(float %f) {
; CHECK-LABEL: @log2_test1(
-; CHECK-NEXT: [[LOG2F:%.*]] = call fast float @log2f(float [[F:%.*]])
-; CHECK-NEXT: ret float [[LOG2F]]
+; ISC99-NEXT: [[LOG2F:%.*]] = call fast float @log2f(float [[F:%.*]])
+; ISC99-NEXT: ret float [[LOG2F]]
+; ISC89: [[LOG2F:%.*]] = call fast double @log2(double [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @log2(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @log2_test2(float %f) {
; CHECK-LABEL: @log2_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @log2(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @log2(double %conv)
ret double %call
}
define float @logb_test1(float %f) {
; CHECK-LABEL: @logb_test1(
-; CHECK-NEXT: [[LOGBF:%.*]] = call fast float @logbf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[LOGBF]]
+; LINUX-NEXT: [[LOGBF:%.*]] = call fast float @logbf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[LOGBF]]
+; MS32: [[LOGBF:%.*]] = call fast double @logb(double [[F:%.*]])
+; MS64-NEXT: [[LOGBF:%.*]] = call fast float @logbf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @logb(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @logb_test2(float %f) {
; CHECK-LABEL: @logb_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @logb(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @logb(double %conv)
ret double %call
}
define float @pow_test1(float %f, float %g) {
; CHECK-LABEL: @pow_test1(
-; CHECK-NEXT: [[POWF:%.*]] = call fast float @powf(float %f, float %g)
-; CHECK-NEXT: ret float [[POWF]]
+; LINUX-NEXT: [[POWF:%.*]] = call fast float @powf(float %f, float %g)
+; LINUX-NEXT: ret float [[POWF]]
+; MS32: [[POWF:%.*]] = call fast double @pow(double %df, double %dg)
+; MS64-NEXT: [[POWF:%.*]] = call fast float @powf(float %f, float %g)
;
%df = fpext float %f to double
%dg = fpext float %g to double
%call = call fast double @pow(double %df, double %dg)
%fr = fptrunc double %call to float
ret float %fr
}
define double @pow_test2(float %f, float %g) {
; CHECK-LABEL: @pow_test2(
; CHECK: [[POW:%.*]] = call fast double @pow(double %df, double %dg)
; CHECK-NEXT: ret double [[POW]]
;
%df = fpext float %f to double
%dg = fpext float %g to double
%call = call fast double @pow(double %df, double %dg)
ret double %call
}
define float @sin_test1(float %f) {
; CHECK-LABEL: @sin_test1(
-; CHECK-NEXT: [[SINF:%.*]] = call fast float @sinf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[SINF]]
+; LINUX-NEXT: [[SINF:%.*]] = call fast float @sinf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[SINF]]
+; MS32: [[SINF:%.*]] = call fast double @sin(double [[F:%.*]])
+; MS64-NEXT: [[SINF:%.*]] = call fast float @sinf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @sin(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @sin_test2(float %f) {
; CHECK-LABEL: @sin_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @sin(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @sin(double %conv)
ret double %call
}
define float @sqrt_test1(float %f) {
; CHECK-LABEL: @sqrt_test1(
-; CHECK-NEXT: [[SQRTF:%.*]] = call float @sqrtf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[SQRTF]]
+; LINUX-NEXT: [[SQRTF:%.*]] = call float @sqrtf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[SQRTF]]
+; MS32: [[SQRTF:%.*]] = call double @sqrt(double [[F:%.*]])
+; MS64-NEXT: [[SQRTF:%.*]] = call float @sqrtf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call double @sqrt(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @sqrt_test2(float %f) {
; CHECK-LABEL: @sqrt_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call double @sqrt(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call double @sqrt(double %conv)
ret double %call
}
define float @sqrt_int_test1(float %f) {
; CHECK-LABEL: @sqrt_int_test1(
-; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.sqrt.f32(float [[F:%.*]])
-; CHECK-NEXT: ret float [[TMP1]]
+; LINUX-NEXT: [[TMP1:%.*]] = call float @llvm.sqrt.f32(float [[F:%.*]])
+; LINUX-NEXT: ret float [[TMP1]]
+; MS32: [[TMP1:%.*]] = call double @llvm.sqrt.f64(double [[F:%.*]])
+; MS64-NEXT: [[TMP1:%.*]] = call float @llvm.sqrt.f32(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call double @llvm.sqrt.f64(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @sqrt_int_test2(float %f) {
; CHECK-LABEL: @sqrt_int_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call double @llvm.sqrt.f64(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call double @llvm.sqrt.f64(double %conv)
ret double %call
}
define float @tan_test1(float %f) {
; CHECK-LABEL: @tan_test1(
-; CHECK-NEXT: [[TANF:%.*]] = call fast float @tanf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[TANF]]
+; LINUX-NEXT: [[TANF:%.*]] = call fast float @tanf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[TANF]]
+; MS32: [[TANF:%.*]] = call fast double @tan(double [[F:%.*]])
+; MS64-NEXT: [[TANF:%.*]] = call fast float @tanf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @tan(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @tan_test2(float %f) {
; CHECK-LABEL: @tan_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @tan(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @tan(double %conv)
ret double %call
}
define float @tanh_test1(float %f) {
; CHECK-LABEL: @tanh_test1(
-; CHECK-NEXT: [[TANHF:%.*]] = call fast float @tanhf(float [[F:%.*]])
-; CHECK-NEXT: ret float [[TANHF]]
+; LINUX-NEXT: [[TANHF:%.*]] = call fast float @tanhf(float [[F:%.*]])
+; LINUX-NEXT: ret float [[TANHF]]
+; MS32: [[TANHF:%.*]] = call fast double @tanh(double [[F:%.*]])
+; MS64-NEXT: [[TANHF:%.*]] = call fast float @tanhf(float [[F:%.*]])
;
%conv = fpext float %f to double
%call = call fast double @tanh(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
define double @tanh_test2(float %f) {
; CHECK-LABEL: @tanh_test2(
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[F:%.*]] to double
; CHECK-NEXT: [[CALL:%.*]] = call fast double @tanh(double [[CONV]])
; CHECK-NEXT: ret double [[CALL]]
;
%conv = fpext float %f to double
%call = call fast double @tanh(double %conv)
ret double %call
}
; 'arcp' on an fmax() is meaningless. This test just proves that
; flags are propagated for shrunken *binary* double FP calls.
define float @max1(float %a, float %b) {
; CHECK-LABEL: @max1(
-; CHECK-NEXT: [[FMAXF:%.*]] = call arcp float @fmaxf(float [[A:%.*]], float [[B:%.*]])
-; CHECK-NEXT: ret float [[FMAXF]]
+; ISC99-NEXT: [[FMAXF:%.*]] = call arcp float @fmaxf(float [[A:%.*]], float [[B:%.*]])
+; ISC99-NEXT: ret float [[FMAXF]]
+; ISC89: [[FMAXF:%.*]] = call arcp double @fmax(double [[A:%.*]], double [[B:%.*]])
;
%c = fpext float %a to double
%d = fpext float %b to double
%e = call arcp double @fmax(double %c, double %d)
%f = fptrunc double %e to float
ret float %f
}
; A function can have a name that matches a common libcall,
; but with the wrong type(s). Let it be.
define float @fake_fmin(float %a, float %b) {
; CHECK-LABEL: @fake_fmin(
; CHECK-NEXT: [[C:%.*]] = fpext float [[A:%.*]] to fp128
; CHECK-NEXT: [[D:%.*]] = fpext float [[B:%.*]] to fp128
; CHECK-NEXT: [[E:%.*]] = call fp128 @fmin(fp128 [[C]], fp128 [[D]])
; CHECK-NEXT: [[F:%.*]] = fptrunc fp128 [[E]] to float
; CHECK-NEXT: ret float [[F]]
;
%c = fpext float %a to fp128
%d = fpext float %b to fp128
%e = call fp128 @fmin(fp128 %c, fp128 %d)
%f = fptrunc fp128 %e to float
ret float %f
}
declare fp128 @fmin(fp128, fp128) ; This is not the 'fmin' you're looking for.
declare double @fmax(double, double)
declare double @tanh(double)
declare double @tan(double)
; sqrt is a special case: the shrinking optimization
; is valid even without unsafe-fp-math.
declare double @sqrt(double)
declare double @llvm.sqrt.f64(double)
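; (Editor's note: @sqrt_test1 and @sqrt_int_test1 above are shrunk on the
; LINUX/MS64 configurations even though their calls carry no fast-math flags,
; illustrating this special case.)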
declare double @sin(double)
declare double @pow(double, double)
declare double @log2(double)
declare double @log1p(double)
declare double @log10(double)
declare double @log(double)
declare double @logb(double)
declare double @exp10(double)
declare double @expm1(double)
declare double @exp(double)
declare double @cbrt(double)
declare double @atanh(double)
declare double @atan(double)
declare double @acos(double)
declare double @acosh(double)
declare double @asin(double)
declare double @asinh(double)
Index: vendor/llvm/dist-release_80/test/Transforms/InstCombine/double-float-shrink-2.ll
===================================================================
--- vendor/llvm/dist-release_80/test/Transforms/InstCombine/double-float-shrink-2.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/Transforms/InstCombine/double-float-shrink-2.ll (revision 344166)
@@ -1,690 +1,654 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S -mtriple "i386-pc-linux" | FileCheck -check-prefix=ALL -check-prefix=DO-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "i386-pc-win32" | FileCheck -check-prefix=ALL -check-prefix=DONT-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32" | FileCheck -check-prefix=ALL -check-prefix=C89-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "i386-pc-mingw32" | FileCheck -check-prefix=ALL -check-prefix=DO-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-mingw32" | FileCheck -check-prefix=ALL -check-prefix=DO-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "sparc-sun-solaris" | FileCheck -check-prefix=ALL -check-prefix=DO-SIMPLIFY %s
-; RUN: opt < %s -enable-debugify -instcombine -S -mtriple "x86_64-pc-win32" 2>&1 | FileCheck -check-prefix=DBG-VALID %s
+; RUN: opt < %s -instcombine -S -mtriple "i386-pc-linux" | FileCheck %s
+; RUN: opt < %s -instcombine -S -mtriple "i386-pc-win32" | FileCheck %s
+; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32" | FileCheck %s
+; RUN: opt < %s -instcombine -S -mtriple "i386-pc-mingw32" | FileCheck %s
+; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-mingw32" | FileCheck %s
+; RUN: opt < %s -instcombine -S -mtriple "sparc-sun-solaris" | FileCheck %s
+; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32" -enable-debugify 2>&1 | FileCheck --check-prefix=DBG-VALID %s
declare double @floor(double)
declare double @ceil(double)
declare double @round(double)
declare double @nearbyint(double)
declare double @trunc(double)
declare double @fabs(double)
declare double @llvm.ceil.f64(double)
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
declare double @llvm.fabs.f64(double)
declare <2 x double> @llvm.fabs.v2f64(<2 x double>)
declare double @llvm.floor.f64(double)
declare <2 x double> @llvm.floor.v2f64(<2 x double>)
declare double @llvm.nearbyint.f64(double)
declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
declare float @llvm.rint.f32(float)
declare <2 x float> @llvm.rint.v2f32(<2 x float>)
declare double @llvm.round.f64(double)
declare <2 x double> @llvm.round.v2f64(<2 x double>)
declare double @llvm.trunc.f64(double)
declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
define float @test_shrink_libcall_floor(float %C) {
-; ALL-LABEL: @test_shrink_libcall_floor(
-; ALL-NEXT: [[F:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_libcall_floor(
+; CHECK-NEXT: [[F:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext float %C to double
; --> floorf
%E = call double @floor(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_libcall_ceil(float %C) {
-; ALL-LABEL: @test_shrink_libcall_ceil(
-; ALL-NEXT: [[F:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_libcall_ceil(
+; CHECK-NEXT: [[F:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext float %C to double
; --> ceilf
%E = call double @ceil(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_libcall_round(float %C) {
-; DO-SIMPLIFY-LABEL: @test_shrink_libcall_round(
-; DO-SIMPLIFY-NEXT: [[F:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
-; DO-SIMPLIFY-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_libcall_round(
+; CHECK-NEXT: [[F:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[F]]
;
-; DONT-SIMPLIFY-LABEL: @test_shrink_libcall_round(
-; DONT-SIMPLIFY-NEXT: [[D:%.*]] = fpext float [[C:%.*]] to double
-; DONT-SIMPLIFY-NEXT: [[E:%.*]] = call double @round(double [[D]])
-; DONT-SIMPLIFY-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; DONT-SIMPLIFY-NEXT: ret float [[F]]
-;
-; C89-SIMPLIFY-LABEL: @test_shrink_libcall_round(
-; C89-SIMPLIFY-NEXT: [[D:%.*]] = fpext float [[C:%.*]] to double
-; C89-SIMPLIFY-NEXT: [[E:%.*]] = call double @round(double [[D]])
-; C89-SIMPLIFY-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; C89-SIMPLIFY-NEXT: ret float [[F]]
-;
%D = fpext float %C to double
; --> roundf
%E = call double @round(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_libcall_nearbyint(float %C) {
-; DO-SIMPLIFY-LABEL: @test_shrink_libcall_nearbyint(
-; DO-SIMPLIFY-NEXT: [[F:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
-; DO-SIMPLIFY-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_libcall_nearbyint(
+; CHECK-NEXT: [[F:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[F]]
;
-; DONT-SIMPLIFY-LABEL: @test_shrink_libcall_nearbyint(
-; DONT-SIMPLIFY-NEXT: [[D:%.*]] = fpext float [[C:%.*]] to double
-; DONT-SIMPLIFY-NEXT: [[E:%.*]] = call double @nearbyint(double [[D]])
-; DONT-SIMPLIFY-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; DONT-SIMPLIFY-NEXT: ret float [[F]]
-;
-; C89-SIMPLIFY-LABEL: @test_shrink_libcall_nearbyint(
-; C89-SIMPLIFY-NEXT: [[D:%.*]] = fpext float [[C:%.*]] to double
-; C89-SIMPLIFY-NEXT: [[E:%.*]] = call double @nearbyint(double [[D]])
-; C89-SIMPLIFY-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; C89-SIMPLIFY-NEXT: ret float [[F]]
-;
%D = fpext float %C to double
; --> nearbyintf
%E = call double @nearbyint(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_libcall_trunc(float %C) {
-; DO-SIMPLIFY-LABEL: @test_shrink_libcall_trunc(
-; DO-SIMPLIFY-NEXT: [[F:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
-; DO-SIMPLIFY-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_libcall_trunc(
+; CHECK-NEXT: [[F:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[F]]
;
-; DONT-SIMPLIFY-LABEL: @test_shrink_libcall_trunc(
-; DONT-SIMPLIFY-NEXT: [[D:%.*]] = fpext float [[C:%.*]] to double
-; DONT-SIMPLIFY-NEXT: [[E:%.*]] = call double @trunc(double [[D]])
-; DONT-SIMPLIFY-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; DONT-SIMPLIFY-NEXT: ret float [[F]]
-;
-; C89-SIMPLIFY-LABEL: @test_shrink_libcall_trunc(
-; C89-SIMPLIFY-NEXT: [[D:%.*]] = fpext float [[C:%.*]] to double
-; C89-SIMPLIFY-NEXT: [[E:%.*]] = call double @trunc(double [[D]])
-; C89-SIMPLIFY-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; C89-SIMPLIFY-NEXT: ret float [[F]]
-;
%D = fpext float %C to double
; --> truncf
%E = call double @trunc(double %D)
%F = fptrunc double %E to float
ret float %F
}
; This is replaced with the intrinsic, which does the right thing on
; all platforms.
define float @test_shrink_libcall_fabs(float %C) {
-; ALL-LABEL: @test_shrink_libcall_fabs(
-; ALL-NEXT: [[F:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_libcall_fabs(
+; CHECK-NEXT: [[F:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext float %C to double
%E = call double @fabs(double %D)
%F = fptrunc double %E to float
ret float %F
}
; Make sure fast math flags are preserved
define float @test_shrink_libcall_fabs_fast(float %C) {
-; ALL-LABEL: @test_shrink_libcall_fabs_fast(
-; ALL-NEXT: [[F:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_libcall_fabs_fast(
+; CHECK-NEXT: [[F:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext float %C to double
%E = call fast double @fabs(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_intrin_ceil(float %C) {
-; ALL-LABEL: @test_shrink_intrin_ceil(
-; ALL-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_ceil(
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[TMP1]]
;
%D = fpext float %C to double
%E = call double @llvm.ceil.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_intrin_fabs(float %C) {
-; ALL-LABEL: @test_shrink_intrin_fabs(
-; ALL-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_fabs(
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[TMP1]]
;
%D = fpext float %C to double
%E = call double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_intrin_floor(float %C) {
-; ALL-LABEL: @test_shrink_intrin_floor(
-; ALL-NEXT: [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_floor(
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[TMP1]]
;
%D = fpext float %C to double
%E = call double @llvm.floor.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_intrin_nearbyint(float %C) {
-; ALL-LABEL: @test_shrink_intrin_nearbyint(
-; ALL-NEXT: [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_nearbyint(
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[TMP1]]
;
%D = fpext float %C to double
%E = call double @llvm.nearbyint.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define half @test_shrink_intrin_rint(half %C) {
-; ALL-LABEL: @test_shrink_intrin_rint(
-; ALL-NEXT: [[TMP1:%.*]] = call half @llvm.rint.f16(half [[C:%.*]])
-; ALL-NEXT: ret half [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_rint(
+; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.rint.f16(half [[C:%.*]])
+; CHECK-NEXT: ret half [[TMP1]]
;
%D = fpext half %C to float
%E = call float @llvm.rint.f32(float %D)
%F = fptrunc float %E to half
ret half %F
}
define float @test_shrink_intrin_round(float %C) {
-; ALL-LABEL: @test_shrink_intrin_round(
-; ALL-NEXT: [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_round(
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[TMP1]]
;
%D = fpext float %C to double
%E = call double @llvm.round.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_intrin_trunc(float %C) {
-; ALL-LABEL: @test_shrink_intrin_trunc(
-; ALL-NEXT: [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_trunc(
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[TMP1]]
;
%D = fpext float %C to double
%E = call double @llvm.trunc.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
declare void @use_v2f64(<2 x double>)
declare void @use_v2f32(<2 x float>)
define <2 x float> @test_shrink_intrin_ceil_multi_use(<2 x float> %C) {
-; ALL-LABEL: @test_shrink_intrin_ceil_multi_use(
-; ALL-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
-; ALL-NEXT: [[E:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[D]])
-; ALL-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
-; ALL-NEXT: call void @use_v2f64(<2 x double> [[D]])
-; ALL-NEXT: ret <2 x float> [[F]]
+; CHECK-LABEL: @test_shrink_intrin_ceil_multi_use(
+; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
+; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[D]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
+; CHECK-NEXT: ret <2 x float> [[F]]
;
%D = fpext <2 x float> %C to <2 x double>
%E = call <2 x double> @llvm.ceil.v2f64(<2 x double> %D)
%F = fptrunc <2 x double> %E to <2 x float>
call void @use_v2f64(<2 x double> %D)
ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_fabs_multi_use(<2 x float> %C) {
-; ALL-LABEL: @test_shrink_intrin_fabs_multi_use(
-; ALL-NEXT: [[TMP1:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[C:%.*]])
-; ALL-NEXT: [[E:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
-; ALL-NEXT: call void @use_v2f64(<2 x double> [[E]])
-; ALL-NEXT: ret <2 x float> [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_fabs_multi_use(
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[C:%.*]])
+; CHECK-NEXT: [[E:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[E]])
+; CHECK-NEXT: ret <2 x float> [[TMP1]]
;
%D = fpext <2 x float> %C to <2 x double>
%E = call <2 x double> @llvm.fabs.v2f64(<2 x double> %D)
%F = fptrunc <2 x double> %E to <2 x float>
call void @use_v2f64(<2 x double> %E)
ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_floor_multi_use(<2 x float> %C) {
-; ALL-LABEL: @test_shrink_intrin_floor_multi_use(
-; ALL-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
-; ALL-NEXT: [[E:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[D]])
-; ALL-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
-; ALL-NEXT: call void @use_v2f64(<2 x double> [[D]])
-; ALL-NEXT: call void @use_v2f64(<2 x double> [[E]])
-; ALL-NEXT: ret <2 x float> [[F]]
+; CHECK-LABEL: @test_shrink_intrin_floor_multi_use(
+; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
+; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[D]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[E]])
+; CHECK-NEXT: ret <2 x float> [[F]]
;
%D = fpext <2 x float> %C to <2 x double>
%E = call <2 x double> @llvm.floor.v2f64(<2 x double> %D)
%F = fptrunc <2 x double> %E to <2 x float>
call void @use_v2f64(<2 x double> %D)
call void @use_v2f64(<2 x double> %E)
ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_nearbyint_multi_use(<2 x float> %C) {
-; ALL-LABEL: @test_shrink_intrin_nearbyint_multi_use(
-; ALL-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
-; ALL-NEXT: [[E:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[D]])
-; ALL-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
-; ALL-NEXT: call void @use_v2f64(<2 x double> [[D]])
-; ALL-NEXT: ret <2 x float> [[F]]
+; CHECK-LABEL: @test_shrink_intrin_nearbyint_multi_use(
+; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
+; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[D]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
+; CHECK-NEXT: ret <2 x float> [[F]]
;
%D = fpext <2 x float> %C to <2 x double>
%E = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %D)
%F = fptrunc <2 x double> %E to <2 x float>
call void @use_v2f64(<2 x double> %D)
ret <2 x float> %F
}
define <2 x half> @test_shrink_intrin_rint_multi_use(<2 x half> %C) {
-; ALL-LABEL: @test_shrink_intrin_rint_multi_use(
-; ALL-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.rint.v2f16(<2 x half> [[C:%.*]])
-; ALL-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x float>
-; ALL-NEXT: call void @use_v2f32(<2 x float> [[E]])
-; ALL-NEXT: ret <2 x half> [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_rint_multi_use(
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.rint.v2f16(<2 x half> [[C:%.*]])
+; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x float>
+; CHECK-NEXT: call void @use_v2f32(<2 x float> [[E]])
+; CHECK-NEXT: ret <2 x half> [[TMP1]]
;
%D = fpext <2 x half> %C to <2 x float>
%E = call <2 x float> @llvm.rint.v2f32(<2 x float> %D)
%F = fptrunc <2 x float> %E to <2 x half>
call void @use_v2f32(<2 x float> %E)
ret <2 x half> %F
}
define <2 x float> @test_shrink_intrin_round_multi_use(<2 x float> %C) {
-; ALL-LABEL: @test_shrink_intrin_round_multi_use(
-; ALL-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
-; ALL-NEXT: [[E:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[D]])
-; ALL-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
-; ALL-NEXT: call void @use_v2f64(<2 x double> [[D]])
-; ALL-NEXT: call void @use_v2f64(<2 x double> [[E]])
-; ALL-NEXT: ret <2 x float> [[F]]
+; CHECK-LABEL: @test_shrink_intrin_round_multi_use(
+; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
+; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[D]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[E]])
+; CHECK-NEXT: ret <2 x float> [[F]]
;
%D = fpext <2 x float> %C to <2 x double>
%E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
%F = fptrunc <2 x double> %E to <2 x float>
call void @use_v2f64(<2 x double> %D)
call void @use_v2f64(<2 x double> %E)
ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_trunc_multi_use(<2 x float> %C) {
-; ALL-LABEL: @test_shrink_intrin_trunc_multi_use(
-; ALL-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
-; ALL-NEXT: [[E:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[D]])
-; ALL-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
-; ALL-NEXT: call void @use_v2f64(<2 x double> [[D]])
-; ALL-NEXT: ret <2 x float> [[F]]
+; CHECK-LABEL: @test_shrink_intrin_trunc_multi_use(
+; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
+; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[D]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
+; CHECK-NEXT: ret <2 x float> [[F]]
;
%D = fpext <2 x float> %C to <2 x double>
%E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
%F = fptrunc <2 x double> %E to <2 x float>
call void @use_v2f64(<2 x double> %D)
ret <2 x float> %F
}
; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast(float %C) {
-; ALL-LABEL: @test_shrink_intrin_fabs_fast(
-; ALL-NEXT: [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
-; ALL-NEXT: ret float [[TMP1]]
+; CHECK-LABEL: @test_shrink_intrin_fabs_fast(
+; CHECK-NEXT: [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[TMP1]]
;
%D = fpext float %C to double
%E = call fast double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_no_shrink_intrin_floor(double %D) {
-; ALL-LABEL: @test_no_shrink_intrin_floor(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_no_shrink_intrin_floor(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
+; CHECK-NEXT: ret float [[F]]
;
%E = call double @llvm.floor.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_no_shrink_intrin_ceil(double %D) {
-; ALL-LABEL: @test_no_shrink_intrin_ceil(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_no_shrink_intrin_ceil(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
+; CHECK-NEXT: ret float [[F]]
;
%E = call double @llvm.ceil.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_no_shrink_intrin_round(double %D) {
-; ALL-LABEL: @test_no_shrink_intrin_round(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_no_shrink_intrin_round(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
+; CHECK-NEXT: ret float [[F]]
;
%E = call double @llvm.round.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_no_shrink_intrin_nearbyint(double %D) {
-; ALL-LABEL: @test_no_shrink_intrin_nearbyint(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_no_shrink_intrin_nearbyint(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
+; CHECK-NEXT: ret float [[F]]
;
%E = call double @llvm.nearbyint.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_no_shrink_intrin_trunc(double %D) {
-; ALL-LABEL: @test_no_shrink_intrin_trunc(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_no_shrink_intrin_trunc(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
+; CHECK-NEXT: ret float [[F]]
;
%E = call double @llvm.trunc.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_intrin_fabs_double_src(double %D) {
-; ALL-LABEL: @test_shrink_intrin_fabs_double_src(
-; ALL-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
-; ALL-NEXT: [[F:%.*]] = call float @llvm.fabs.f32(float [[TMP1]])
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_intrin_fabs_double_src(
+; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
+; CHECK-NEXT: [[F:%.*]] = call float @llvm.fabs.f32(float [[TMP1]])
+; CHECK-NEXT: ret float [[F]]
;
%E = call double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast_double_src(double %D) {
-; ALL-LABEL: @test_shrink_intrin_fabs_fast_double_src(
-; ALL-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
-; ALL-NEXT: [[F:%.*]] = call fast float @llvm.fabs.f32(float [[TMP1]])
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_intrin_fabs_fast_double_src(
+; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
+; CHECK-NEXT: [[F:%.*]] = call fast float @llvm.fabs.f32(float [[TMP1]])
+; CHECK-NEXT: ret float [[F]]
;
%E = call fast double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_floor() {
-; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_floor(
-; ALL-NEXT: ret float 2.000000e+00
+; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_floor(
+; CHECK-NEXT: ret float 2.000000e+00
;
%E = call double @llvm.floor.f64(double 2.1)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_ceil() {
-; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_ceil(
-; ALL-NEXT: ret float 3.000000e+00
+; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_ceil(
+; CHECK-NEXT: ret float 3.000000e+00
;
%E = call double @llvm.ceil.f64(double 2.1)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_round() {
-; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_round(
-; ALL-NEXT: ret float 2.000000e+00
+; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_round(
+; CHECK-NEXT: ret float 2.000000e+00
;
%E = call double @llvm.round.f64(double 2.1)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_nearbyint() {
-; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_nearbyint(
-; ALL-NEXT: ret float 2.000000e+00
+; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_nearbyint(
+; CHECK-NEXT: ret float 2.000000e+00
;
%E = call double @llvm.nearbyint.f64(double 2.1)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_trunc() {
-; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_trunc(
-; ALL-NEXT: ret float 2.000000e+00
+; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_trunc(
+; CHECK-NEXT: ret float 2.000000e+00
;
%E = call double @llvm.trunc.f64(double 2.1)
%F = fptrunc double %E to float
ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_fabs() {
-; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_fabs(
-; ALL-NEXT: ret float 0x4000CCCCC0000000
+; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs(
+; CHECK-NEXT: ret float 0x4000CCCCC0000000
;
%E = call double @llvm.fabs.f64(double 2.1)
%F = fptrunc double %E to float
ret float %F
}
; Make sure fast math flags are preserved
define float @test_shrink_float_convertible_constant_intrin_fabs_fast() {
-; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_fabs_fast(
-; ALL-NEXT: ret float 0x4000CCCCC0000000
+; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs_fast(
+; CHECK-NEXT: ret float 0x4000CCCCC0000000
;
%E = call fast double @llvm.fabs.f64(double 2.1)
%F = fptrunc double %E to float
ret float %F
}
define half @test_no_shrink_mismatched_type_intrin_floor(double %D) {
-; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_floor(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
-; ALL-NEXT: ret half [[F]]
+; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_floor(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
+; CHECK-NEXT: ret half [[F]]
;
%E = call double @llvm.floor.f64(double %D)
%F = fptrunc double %E to half
ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_ceil(double %D) {
-; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_ceil(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
-; ALL-NEXT: ret half [[F]]
+; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_ceil(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
+; CHECK-NEXT: ret half [[F]]
;
%E = call double @llvm.ceil.f64(double %D)
%F = fptrunc double %E to half
ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_round(double %D) {
-; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_round(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
-; ALL-NEXT: ret half [[F]]
+; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_round(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
+; CHECK-NEXT: ret half [[F]]
;
%E = call double @llvm.round.f64(double %D)
%F = fptrunc double %E to half
ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_nearbyint(double %D) {
-; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_nearbyint(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
-; ALL-NEXT: ret half [[F]]
+; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_nearbyint(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
+; CHECK-NEXT: ret half [[F]]
;
%E = call double @llvm.nearbyint.f64(double %D)
%F = fptrunc double %E to half
ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_trunc(double %D) {
-; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_trunc(
-; ALL-NEXT: [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
-; ALL-NEXT: ret half [[F]]
+; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_trunc(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
+; CHECK-NEXT: ret half [[F]]
;
%E = call double @llvm.trunc.f64(double %D)
%F = fptrunc double %E to half
ret half %F
}
define half @test_shrink_mismatched_type_intrin_fabs_double_src(double %D) {
-; ALL-LABEL: @test_shrink_mismatched_type_intrin_fabs_double_src(
-; ALL-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
-; ALL-NEXT: [[F:%.*]] = call half @llvm.fabs.f16(half [[TMP1]])
-; ALL-NEXT: ret half [[F]]
+; CHECK-LABEL: @test_shrink_mismatched_type_intrin_fabs_double_src(
+; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
+; CHECK-NEXT: [[F:%.*]] = call half @llvm.fabs.f16(half [[TMP1]])
+; CHECK-NEXT: ret half [[F]]
;
%E = call double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to half
ret half %F
}
; Make sure fast math flags are preserved
define half @test_mismatched_type_intrin_fabs_fast_double_src(double %D) {
-; ALL-LABEL: @test_mismatched_type_intrin_fabs_fast_double_src(
-; ALL-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
-; ALL-NEXT: [[F:%.*]] = call fast half @llvm.fabs.f16(half [[TMP1]])
-; ALL-NEXT: ret half [[F]]
+; CHECK-LABEL: @test_mismatched_type_intrin_fabs_fast_double_src(
+; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
+; CHECK-NEXT: [[F:%.*]] = call fast half @llvm.fabs.f16(half [[TMP1]])
+; CHECK-NEXT: ret half [[F]]
;
%E = call fast double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to half
ret half %F
}
define <2 x double> @test_shrink_intrin_floor_fp16_vec(<2 x half> %C) {
-; ALL-LABEL: @test_shrink_intrin_floor_fp16_vec(
-; ALL-NEXT: [[TMP1:%.*]] = call arcp <2 x half> @llvm.floor.v2f16(<2 x half> [[C:%.*]])
-; ALL-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
-; ALL-NEXT: ret <2 x double> [[E]]
+; CHECK-LABEL: @test_shrink_intrin_floor_fp16_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = call arcp <2 x half> @llvm.floor.v2f16(<2 x half> [[C:%.*]])
+; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
+; CHECK-NEXT: ret <2 x double> [[E]]
;
%D = fpext <2 x half> %C to <2 x double>
%E = call arcp <2 x double> @llvm.floor.v2f64(<2 x double> %D)
ret <2 x double> %E
}
define float @test_shrink_intrin_ceil_fp16_src(half %C) {
-; ALL-LABEL: @test_shrink_intrin_ceil_fp16_src(
-; ALL-NEXT: [[TMP1:%.*]] = call half @llvm.ceil.f16(half [[C:%.*]])
-; ALL-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_intrin_ceil_fp16_src(
+; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.ceil.f16(half [[C:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext half %C to double
%E = call double @llvm.ceil.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define <2 x double> @test_shrink_intrin_round_fp16_vec(<2 x half> %C) {
-; ALL-LABEL: @test_shrink_intrin_round_fp16_vec(
-; ALL-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.round.v2f16(<2 x half> [[C:%.*]])
-; ALL-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
-; ALL-NEXT: ret <2 x double> [[E]]
+; CHECK-LABEL: @test_shrink_intrin_round_fp16_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.round.v2f16(<2 x half> [[C:%.*]])
+; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
+; CHECK-NEXT: ret <2 x double> [[E]]
;
%D = fpext <2 x half> %C to <2 x double>
%E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
ret <2 x double> %E
}
define float @test_shrink_intrin_nearbyint_fp16_src(half %C) {
-; ALL-LABEL: @test_shrink_intrin_nearbyint_fp16_src(
-; ALL-NEXT: [[TMP1:%.*]] = call half @llvm.nearbyint.f16(half [[C:%.*]])
-; ALL-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_intrin_nearbyint_fp16_src(
+; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.nearbyint.f16(half [[C:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext half %C to double
%E = call double @llvm.nearbyint.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define <2 x double> @test_shrink_intrin_trunc_fp16_src(<2 x half> %C) {
-; ALL-LABEL: @test_shrink_intrin_trunc_fp16_src(
-; ALL-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.trunc.v2f16(<2 x half> [[C:%.*]])
-; ALL-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
-; ALL-NEXT: ret <2 x double> [[E]]
+; CHECK-LABEL: @test_shrink_intrin_trunc_fp16_src(
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.trunc.v2f16(<2 x half> [[C:%.*]])
+; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
+; CHECK-NEXT: ret <2 x double> [[E]]
;
%D = fpext <2 x half> %C to <2 x double>
%E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
ret <2 x double> %E
}
define float @test_shrink_intrin_fabs_fp16_src(half %C) {
-; ALL-LABEL: @test_shrink_intrin_fabs_fp16_src(
-; ALL-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[C:%.*]])
-; ALL-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_intrin_fabs_fp16_src(
+; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[C:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext half %C to double
%E = call double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast_fp16_src(half %C) {
-; ALL-LABEL: @test_shrink_intrin_fabs_fast_fp16_src(
-; ALL-NEXT: [[TMP1:%.*]] = call fast half @llvm.fabs.f16(half [[C:%.*]])
-; ALL-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_shrink_intrin_fabs_fast_fp16_src(
+; CHECK-NEXT: [[TMP1:%.*]] = call fast half @llvm.fabs.f16(half [[C:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext half %C to double
%E = call fast double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_no_shrink_intrin_floor_multi_use_fpext(half %C) {
-; ALL-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
-; ALL-NEXT: [[D:%.*]] = fpext half [[C:%.*]] to double
-; ALL-NEXT: store volatile double [[D]], double* undef, align 8
-; ALL-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
+; CHECK-NEXT: [[D:%.*]] = fpext half [[C:%.*]] to double
+; CHECK-NEXT: store volatile double [[D]], double* undef, align 8
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext half %C to double
store volatile double %D, double* undef
%E = call double @llvm.floor.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
define float @test_no_shrink_intrin_fabs_multi_use_fpext(half %C) {
-; ALL-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
-; ALL-NEXT: [[D:%.*]] = fpext half [[C:%.*]] to double
-; ALL-NEXT: store volatile double [[D]], double* undef, align 8
-; ALL-NEXT: [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
-; ALL-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
-; ALL-NEXT: ret float [[F]]
+; CHECK-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
+; CHECK-NEXT: [[D:%.*]] = fpext half [[C:%.*]] to double
+; CHECK-NEXT: store volatile double [[D]], double* undef, align 8
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
+; CHECK-NEXT: ret float [[F]]
;
%D = fpext half %C to double
store volatile double %D, double* undef
%E = call double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to float
ret float %F
}
; DBG-VALID: CheckModuleDebugify: PASS
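The checks above all exercise the same pattern: a half or float value is widened with fpext, run through a rounding or fabs intrinsic at double precision, and truncated back, and InstCombine is expected to shrink the whole chain to the intrinsic at the narrower type unless the widened value has another use. As an illustrative aside (plain Python with numpy, not part of the test file), the following sketch shows why the rewrite is value-preserving for floor on half inputs: the double-precision floor of a half value always truncates back exactly.

import numpy as np

# Random half-precision inputs; |x| < 1000, so floor(x) is an integer that
# float16 can represent exactly.
xs = np.random.default_rng(0).uniform(-1000.0, 1000.0, 10000).astype(np.float16)

via_double = np.floor(xs.astype(np.float64)).astype(np.float16)  # fpext -> llvm.floor.f64 -> fptrunc
direct = np.floor(xs)                                            # llvm.floor.f16 on the original values
assert np.array_equal(via_double, direct)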
Index: vendor/llvm/dist-release_80/test/Transforms/InstCombine/pow-1.ll
===================================================================
--- vendor/llvm/dist-release_80/test/Transforms/InstCombine/pow-1.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/Transforms/InstCombine/pow-1.ll (revision 344166)
@@ -1,402 +1,488 @@
; Test that the pow library call simplifier works correctly.
;
-; RUN: opt -instcombine -S < %s | FileCheck %s --check-prefixes=ANY
-; RUN: opt -instcombine -S < %s -mtriple=x86_64-apple-macosx10.9 | FileCheck %s --check-prefixes=ANY,CHECK-EXP10
-; RUN: opt -instcombine -S < %s -mtriple=arm-apple-ios7.0 | FileCheck %s --check-prefixes=ANY,CHECK-EXP10
-; RUN: opt -instcombine -S < %s -mtriple=x86_64-apple-macosx10.8 | FileCheck %s --check-prefixes=ANY,CHECK-NO-EXP10
-; RUN: opt -instcombine -S < %s -mtriple=arm-apple-ios6.0 | FileCheck %s --check-prefixes=ANY,CHECK-NO-EXP10
-; RUN: opt -instcombine -S < %s -mtriple=x86_64-netbsd | FileCheck %s --check-prefixes=ANY,CHECK-NO-EXP10
-; RUN: opt -instcombine -S < %s -mtriple=arm-apple-tvos9.0 | FileCheck %s --check-prefixes=ANY,CHECK-EXP10
-; RUN: opt -instcombine -S < %s -mtriple=arm-apple-watchos2.0 | FileCheck %s --check-prefixes=ANY,CHECK-EXP10
+; RUN: opt -instcombine -S < %s | FileCheck %s --check-prefixes=CHECK,ANY
+; RUN: opt -instcombine -S < %s -mtriple=x86_64-apple-macosx10.9 | FileCheck %s --check-prefixes=CHECK,ANY,CHECK-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=arm-apple-ios7.0 | FileCheck %s --check-prefixes=CHECK,ANY,CHECK-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=x86_64-apple-macosx10.8 | FileCheck %s --check-prefixes=CHECK,ANY,CHECK-NO-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=arm-apple-ios6.0 | FileCheck %s --check-prefixes=CHECK,ANY,CHECK-NO-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=x86_64-netbsd | FileCheck %s --check-prefixes=CHECK,ANY,CHECK-NO-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=arm-apple-tvos9.0 | FileCheck %s --check-prefixes=CHECK,ANY,CHECK-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=arm-apple-watchos2.0 | FileCheck %s --check-prefixes=CHECK,ANY,CHECK-EXP10
; rdar://7251832
-; RUN: opt -instcombine -S < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK-WIN
+; RUN: opt -instcombine -S < %s -mtriple=i386-pc-windows-msvc18 | FileCheck %s --check-prefixes=CHECK,MSVC,VC32,CHECK-NO-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=i386-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,MSVC,VC51,VC19,CHECK-NO-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=x86_64-pc-windows-msvc18 | FileCheck %s --check-prefixes=CHECK,MSVC,VC64,CHECK-NO-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,MSVC,VC83,VC19,CHECK-NO-EXP10
; NOTE: The readonly attribute on the pow call should be preserved
; in the cases below where pow is transformed into another function call.
declare float @powf(float, float) nounwind readonly
declare double @pow(double, double) nounwind readonly
declare double @llvm.pow.f64(double, double)
declare <2 x float> @llvm.pow.v2f32(<2 x float>, <2 x float>) nounwind readonly
declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>) nounwind readonly
; Check pow(1.0, x) -> 1.0.
define float @test_simplify1(float %x) {
-; ANY-LABEL: @test_simplify1(
+; CHECK-LABEL: @test_simplify1(
; ANY-NEXT: ret float 1.000000e+00
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float 1.000000e+00, float [[X:%.*]])
+; VC32-NEXT: ret float [[POW]]
+; VC64-NEXT: ret float 1.000000e+00
;
%retval = call float @powf(float 1.0, float %x)
ret float %retval
}
define <2 x float> @test_simplify1v(<2 x float> %x) {
-; ANY-LABEL: @test_simplify1v(
+; CHECK-LABEL: @test_simplify1v(
; ANY-NEXT: ret <2 x float> <float 1.000000e+00, float 1.000000e+00>
+; MSVC-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float> [[X:%.*]])
+; MSVC-NEXT: ret <2 x float> [[POW]]
;
%retval = call <2 x float> @llvm.pow.v2f32(<2 x float> <float 1.0, float 1.0>, <2 x float> %x)
ret <2 x float> %retval
}
define double @test_simplify2(double %x) {
-; ANY-LABEL: @test_simplify2(
-; ANY-NEXT: ret double 1.000000e+00
+; CHECK-LABEL: @test_simplify2(
+; CHECK-NEXT: ret double 1.000000e+00
;
%retval = call double @pow(double 1.0, double %x)
ret double %retval
}
define <2 x double> @test_simplify2v(<2 x double> %x) {
-; ANY-LABEL: @test_simplify2v(
+; CHECK-LABEL: @test_simplify2v(
; ANY-NEXT: ret <2 x double> <double 1.000000e+00, double 1.000000e+00>
+; MSVC-NEXT: [[POW:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> <double 1.000000e+00, double 1.000000e+00>, <2 x double> [[X:%.*]])
+; MSVC-NEXT: ret <2 x double> [[POW]]
;
%retval = call <2 x double> @llvm.pow.v2f64(<2 x double> <double 1.0, double 1.0>, <2 x double> %x)
ret <2 x double> %retval
}
; Check pow(2.0 ** n, x) -> exp2(n * x).
define float @test_simplify3(float %x) {
-; ANY-LABEL: @test_simplify3(
-; ANY-NEXT: [[EXP2F:%.*]] = call float @exp2f(float [[X:%.*]]) [[NUW_RO:#[0-9]+]]
+; CHECK-LABEL: @test_simplify3(
+; ANY-NEXT: [[EXP2F:%.*]] = call float @exp2f(float [[X:%.*]])
; ANY-NEXT: ret float [[EXP2F]]
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float 2.000000e+00, float [[X:%.*]])
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call float @powf(float 2.000000e+00, float [[X:%.*]])
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: [[POW:%.*]] = call float @powf(float 2.000000e+00, float [[X:%.*]])
+; VC64-NEXT: ret float [[POW]]
+; VC83-NEXT: [[EXP2F:%.*]] = call float @exp2f(float [[X:%.*]])
+; VC83-NEXT: ret float [[EXP2F]]
;
-; CHECK-WIN-LABEL: @test_simplify3(
-; CHECK-WIN-NEXT: [[POW:%.*]] = call float @powf(float 2.000000e+00, float [[X:%.*]])
-; CHECK-WIN-NEXT: ret float [[POW]]
-;
%retval = call float @powf(float 2.0, float %x)
ret float %retval
}
define double @test_simplify3n(double %x) {
-; ANY-LABEL: @test_simplify3n(
+; CHECK-LABEL: @test_simplify3n(
; ANY-NEXT: [[MUL:%.*]] = fmul double [[X:%.*]], -2.000000e+00
-; ANY-NEXT: [[EXP2:%.*]] = call double @exp2(double [[MUL]]) [[NUW_RO]]
+; ANY-NEXT: [[EXP2:%.*]] = call double @exp2(double [[MUL]])
; ANY-NEXT: ret double [[EXP2]]
+; VC19-NEXT: [[MUL:%.*]] = fmul double [[X:%.*]], -2.000000e+00
+; VC19-NEXT: [[EXP2:%.*]] = call double @exp2(double [[MUL]])
+; VC19-NEXT: ret double [[EXP2]]
+; VC32-NEXT: [[POW:%.*]] = call double @pow(double 2.500000e-01, double [[X:%.*]])
+; VC32-NEXT: ret double [[POW]]
+; VC64-NEXT: [[POW:%.*]] = call double @pow(double 2.500000e-01, double [[X:%.*]])
+; VC64-NEXT: ret double [[POW]]
;
-; CHECK-WIN-LABEL: @test_simplify3n(
-; CHECK-WIN-NEXT: [[POW:%.*]] = call double @pow(double 2.500000e-01, double [[X:%.*]])
-; CHECK-WIN-NEXT: ret double [[POW]]
-;
%retval = call double @pow(double 0.25, double %x)
ret double %retval
}
define <2 x float> @test_simplify3v(<2 x float> %x) {
-; ANY-LABEL: @test_simplify3v(
+; CHECK-LABEL: @test_simplify3v(
; ANY-NEXT: [[EXP2:%.*]] = call <2 x float> @llvm.exp2.v2f32(<2 x float> [[X:%.*]])
; ANY-NEXT: ret <2 x float> [[EXP2]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> <float 2.000000e+00, float 2.000000e+00>, <2 x float> [[X:%.*]])
+; MSVC-NEXT: ret <2 x float> [[POW]]
;
-; CHECK-WIN-LABEL: @test_simplify3v(
-; CHECK-WIN-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> <float 2.000000e+00, float 2.000000e+00>, <2 x float> [[X:%.*]])
-; CHECK-WIN-NEXT: ret <2 x float> [[POW]]
-;
%retval = call <2 x float> @llvm.pow.v2f32(<2 x float> <float 2.0, float 2.0>, <2 x float> %x)
ret <2 x float> %retval
}
define <2 x double> @test_simplify3vn(<2 x double> %x) {
-; ANY-LABEL: @test_simplify3vn(
+; CHECK-LABEL: @test_simplify3vn(
; ANY-NEXT: [[MUL:%.*]] = fmul <2 x double> [[X:%.*]], <double 2.000000e+00, double 2.000000e+00>
; ANY-NEXT: [[EXP2:%.*]] = call <2 x double> @llvm.exp2.v2f64(<2 x double> [[MUL]])
; ANY-NEXT: ret <2 x double> [[EXP2]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> <double 4.000000e+00, double 4.000000e+00>, <2 x double> [[X:%.*]])
+; MSVC-NEXT: ret <2 x double> [[POW]]
;
%retval = call <2 x double> @llvm.pow.v2f64(<2 x double> <double 4.0, double 4.0>, <2 x double> %x)
ret <2 x double> %retval
}
define double @test_simplify4(double %x) {
-; ANY-LABEL: @test_simplify4(
-; ANY-NEXT: [[EXP2:%.*]] = call double @exp2(double [[X:%.*]]) [[NUW_RO]]
+; CHECK-LABEL: @test_simplify4(
+; ANY-NEXT: [[EXP2:%.*]] = call double @exp2(double [[X:%.*]])
; ANY-NEXT: ret double [[EXP2]]
+; VC19-NEXT: [[EXP2:%.*]] = call double @exp2(double [[X:%.*]])
+; VC19-NEXT: ret double [[EXP2]]
+; VC32-NEXT: [[POW:%.*]] = call double @pow(double 2.000000e+00, double [[X:%.*]])
+; VC32-NEXT: ret double [[POW]]
+; VC64-NEXT: [[POW:%.*]] = call double @pow(double 2.000000e+00, double [[X:%.*]])
+; VC64-NEXT: ret double [[POW]]
;
-; CHECK-WIN-LABEL: @test_simplify4(
-; CHECK-WIN-NEXT: [[POW:%.*]] = call double @pow(double 2.000000e+00, double [[X:%.*]])
-; CHECK-WIN-NEXT: ret double [[POW]]
-;
%retval = call double @pow(double 2.0, double %x)
ret double %retval
}
define float @test_simplify4n(float %x) {
-; ANY-LABEL: @test_simplify4n(
+; CHECK-LABEL: @test_simplify4n(
; ANY-NEXT: [[MUL:%.*]] = fmul float [[X:%.*]], 3.000000e+00
-; ANY-NEXT: [[EXP2F:%.*]] = call float @exp2f(float [[MUL]]) [[NUW_RO]]
+; ANY-NEXT: [[EXP2F:%.*]] = call float @exp2f(float [[MUL]])
; ANY-NEXT: ret float [[EXP2F]]
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float 8.000000e+00, float [[X:%.*]])
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call float @powf(float 8.000000e+00, float [[X:%.*]])
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: [[POW:%.*]] = call float @powf(float 8.000000e+00, float [[X:%.*]])
+; VC64-NEXT: ret float [[POW]]
+; VC83-NEXT: [[MUL:%.*]] = fmul float [[X:%.*]], 3.000000e+00
+; VC83-NEXT: [[EXP2F:%.*]] = call float @exp2f(float [[MUL]])
+; VC83-NEXT: ret float [[EXP2F]]
;
-; CHECK-WIN-LABEL: @test_simplify4n(
-; CHECK-WIN-NEXT: [[POW:%.*]] = call float @powf(float 8.000000e+00, float [[X:%.*]])
-; CHECK-WIN-NEXT: ret float [[POW]]
-;
%retval = call float @powf(float 8.0, float %x)
ret float %retval
}
define <2 x double> @test_simplify4v(<2 x double> %x) {
-; ANY-LABEL: @test_simplify4v(
+; CHECK-LABEL: @test_simplify4v(
; ANY-NEXT: [[EXP2:%.*]] = call <2 x double> @llvm.exp2.v2f64(<2 x double> [[X:%.*]])
; ANY-NEXT: ret <2 x double> [[EXP2]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double> [[X:%.*]])
+; MSVC-NEXT: ret <2 x double> [[POW]]
;
-; CHECK-WIN-LABEL: @test_simplify4v(
-; CHECK-WIN-NEXT: [[POW:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double> [[X:%.*]])
-; CHECK-WIN-NEXT: ret <2 x double> [[POW]]
-;
%retval = call <2 x double> @llvm.pow.v2f64(<2 x double> <double 2.0, double 2.0>, <2 x double> %x)
ret <2 x double> %retval
}
define <2 x float> @test_simplify4vn(<2 x float> %x) {
-; ANY-LABEL: @test_simplify4vn(
+; CHECK-LABEL: @test_simplify4vn(
; ANY-NEXT: [[MUL:%.*]] = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, [[X:%.*]]
; ANY-NEXT: [[EXP2:%.*]] = call <2 x float> @llvm.exp2.v2f32(<2 x float> [[MUL]])
; ANY-NEXT: ret <2 x float> [[EXP2]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> <float 5.000000e-01, float 5.000000e-01>, <2 x float> [[X:%.*]])
+; MSVC-NEXT: ret <2 x float> [[POW]]
;
-; CHECK-WIN-LABEL: @test_simplify4vn(
-; CHECK-WIN-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> <float 5.000000e-01, float 5.000000e-01>, <2 x float> [[X:%.*]])
-; CHECK-WIN-NEXT: ret <2 x float> [[POW]]
-;
%retval = call <2 x float> @llvm.pow.v2f32(<2 x float> <float 0.5, float 0.5>, <2 x float> %x)
ret <2 x float> %retval
}
; Check pow(x, 0.0) -> 1.0.
define float @test_simplify5(float %x) {
-; ANY-LABEL: @test_simplify5(
+; CHECK-LABEL: @test_simplify5(
; ANY-NEXT: ret float 1.000000e+00
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float 0.000000e+00)
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float 0.000000e+00)
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: ret float 1.000000e+00
+; VC83-NEXT: ret float 1.000000e+00
;
%retval = call float @powf(float %x, float 0.0)
ret float %retval
}
define <2 x float> @test_simplify5v(<2 x float> %x) {
-; ANY-LABEL: @test_simplify5v(
+; CHECK-LABEL: @test_simplify5v(
; ANY-NEXT: ret <2 x float> <float 1.000000e+00, float 1.000000e+00>
+; MSVC-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> [[X:%.*]], <2 x float> zeroinitializer)
+; MSVC-NEXT: ret <2 x float> [[POW]]
;
%retval = call <2 x float> @llvm.pow.v2f32(<2 x float> %x, <2 x float> <float 0.0, float 0.0>)
ret <2 x float> %retval
}
define double @test_simplify6(double %x) {
-; ANY-LABEL: @test_simplify6(
-; ANY-NEXT: ret double 1.000000e+00
+; CHECK-LABEL: @test_simplify6(
+; CHECK-NEXT: ret double 1.000000e+00
;
%retval = call double @pow(double %x, double 0.0)
ret double %retval
}
define <2 x double> @test_simplify6v(<2 x double> %x) {
-; ANY-LABEL: @test_simplify6v(
+; CHECK-LABEL: @test_simplify6v(
; ANY-NEXT: ret <2 x double> <double 1.000000e+00, double 1.000000e+00>
+; MSVC-NEXT: [[POW:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> [[X:%.*]], <2 x double> zeroinitializer)
+; MSVC-NEXT: ret <2 x double> [[POW]]
;
%retval = call <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 0.0, double 0.0>)
ret <2 x double> %retval
}
; Check pow(x, 0.5) -> fabs(sqrt(x)), where x != -infinity.
define float @test_simplify7(float %x) {
-; ANY-LABEL: @test_simplify7(
-; ANY-NEXT: [[SQRTF:%.*]] = call float @sqrtf(float [[X:%.*]]) [[NUW_RO]]
+; CHECK-LABEL: @test_simplify7(
+; ANY-NEXT: [[SQRTF:%.*]] = call float @sqrtf(float [[X:%.*]])
; ANY-NEXT: [[ABS:%.*]] = call float @llvm.fabs.f32(float [[SQRTF]])
; ANY-NEXT: [[ISINF:%.*]] = fcmp oeq float [[X]], 0xFFF0000000000000
; ANY-NEXT: [[TMP1:%.*]] = select i1 [[ISINF]], float 0x7FF0000000000000, float [[ABS]]
; ANY-NEXT: ret float [[TMP1]]
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float 5.000000e-01)
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float 5.000000e-01)
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: [[SQRTF:%.*]] = call float @sqrtf(float [[X:%.*]])
+; VC64-NEXT: [[ABS:%.*]] = call float @llvm.fabs.f32(float [[SQRTF]])
+; VC64-NEXT: [[ISINF:%.*]] = fcmp oeq float [[X]], 0xFFF0000000000000
+; VC64-NEXT: [[TMP1:%.*]] = select i1 [[ISINF]], float 0x7FF0000000000000, float [[ABS]]
+; VC64-NEXT: ret float [[TMP1]]
+; VC83-NEXT: [[SQRTF:%.*]] = call float @sqrtf(float [[X:%.*]])
+; VC83-NEXT: [[ABS:%.*]] = call float @llvm.fabs.f32(float [[SQRTF]])
+; VC83-NEXT: [[ISINF:%.*]] = fcmp oeq float [[X]], 0xFFF0000000000000
+; VC83-NEXT: [[TMP1:%.*]] = select i1 [[ISINF]], float 0x7FF0000000000000, float [[ABS]]
+; VC83-NEXT: ret float [[TMP1]]
;
%retval = call float @powf(float %x, float 0.5)
ret float %retval
}
define double @test_simplify8(double %x) {
-; ANY-LABEL: @test_simplify8(
-; ANY-NEXT: [[SQRT:%.*]] = call double @sqrt(double [[X:%.*]]) [[NUW_RO]]
-; ANY-NEXT: [[ABS:%.*]] = call double @llvm.fabs.f64(double [[SQRT]])
-; ANY-NEXT: [[ISINF:%.*]] = fcmp oeq double [[X]], 0xFFF0000000000000
-; ANY-NEXT: [[TMP1:%.*]] = select i1 [[ISINF]], double 0x7FF0000000000000, double [[ABS]]
-; ANY-NEXT: ret double [[TMP1]]
+; CHECK-LABEL: @test_simplify8(
+; CHECK-NEXT: [[SQRT:%.*]] = call double @sqrt(double [[X:%.*]])
+; CHECK-NEXT: [[ABS:%.*]] = call double @llvm.fabs.f64(double [[SQRT]])
+; CHECK-NEXT: [[ISINF:%.*]] = fcmp oeq double [[X]], 0xFFF0000000000000
+; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[ISINF]], double 0x7FF0000000000000, double [[ABS]]
+; CHECK-NEXT: ret double [[TMP1]]
;
%retval = call double @pow(double %x, double 0.5)
ret double %retval
}
; Check pow(-infinity, 0.5) -> +infinity.
define float @test_simplify9(float %x) {
-; ANY-LABEL: @test_simplify9(
+; CHECK-LABEL: @test_simplify9(
; ANY-NEXT: ret float 0x7FF0000000000000
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float 0xFFF0000000000000, float 5.000000e-01)
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call float @powf(float 0xFFF0000000000000, float 5.000000e-01)
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: ret float 0x7FF0000000000000
+; VC83-NEXT: ret float 0x7FF0000000000000
;
%retval = call float @powf(float 0xFFF0000000000000, float 0.5)
ret float %retval
}
define double @test_simplify10(double %x) {
-; ANY-LABEL: @test_simplify10(
-; ANY-NEXT: ret double 0x7FF0000000000000
+; CHECK-LABEL: @test_simplify10(
+; CHECK-NEXT: ret double 0x7FF0000000000000
;
%retval = call double @pow(double 0xFFF0000000000000, double 0.5)
ret double %retval
}
; Check pow(x, 1.0) -> x.
define float @test_simplify11(float %x) {
-; ANY-LABEL: @test_simplify11(
+; CHECK-LABEL: @test_simplify11(
; ANY-NEXT: ret float [[X:%.*]]
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float 1.000000e+00)
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float 1.000000e+00)
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: ret float [[X:%.*]]
+; VC83-NEXT: ret float [[X:%.*]]
;
%retval = call float @powf(float %x, float 1.0)
ret float %retval
}
define <2 x float> @test_simplify11v(<2 x float> %x) {
-; ANY-LABEL: @test_simplify11v(
+; CHECK-LABEL: @test_simplify11v(
; ANY-NEXT: ret <2 x float> [[X:%.*]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> [[X:%.*]], <2 x float> <float 1.000000e+00, float 1.000000e+00>)
+; MSVC-NEXT: ret <2 x float> [[POW]]
;
%retval = call <2 x float> @llvm.pow.v2f32(<2 x float> %x, <2 x float> <float 1.0, float 1.0>)
ret <2 x float> %retval
}
define double @test_simplify12(double %x) {
-; ANY-LABEL: @test_simplify12(
-; ANY-NEXT: ret double [[X:%.*]]
+; CHECK-LABEL: @test_simplify12(
+; CHECK-NEXT: ret double [[X:%.*]]
;
%retval = call double @pow(double %x, double 1.0)
ret double %retval
}
define <2 x double> @test_simplify12v(<2 x double> %x) {
-; ANY-LABEL: @test_simplify12v(
+; CHECK-LABEL: @test_simplify12v(
; ANY-NEXT: ret <2 x double> [[X:%.*]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> [[X:%.*]], <2 x double> <double 1.000000e+00, double 1.000000e+00>)
+; MSVC-NEXT: ret <2 x double> [[POW]]
;
%retval = call <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 1.0, double 1.0>)
ret <2 x double> %retval
}
; Check pow(x, 2.0) -> x*x.
define float @pow2_strict(float %x) {
-; ANY-LABEL: @pow2_strict(
+; CHECK-LABEL: @pow2_strict(
; ANY-NEXT: [[SQUARE:%.*]] = fmul float [[X:%.*]], [[X]]
; ANY-NEXT: ret float [[SQUARE]]
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float 2.000000e+00)
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float 2.000000e+00)
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: [[SQUARE:%.*]] = fmul float [[X:%.*]], [[X]]
+; VC64-NEXT: ret float [[SQUARE]]
+; VC83-NEXT: [[SQUARE:%.*]] = fmul float [[X:%.*]], [[X]]
+; VC83-NEXT: ret float [[SQUARE]]
;
%r = call float @powf(float %x, float 2.0)
ret float %r
}
define <2 x float> @pow2_strictv(<2 x float> %x) {
-; ANY-LABEL: @pow2_strictv(
+; CHECK-LABEL: @pow2_strictv(
; ANY-NEXT: [[SQUARE:%.*]] = fmul <2 x float> [[X:%.*]], [[X]]
; ANY-NEXT: ret <2 x float> [[SQUARE]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> [[X:%.*]], <2 x float> <float 2.000000e+00, float 2.000000e+00>)
+; MSVC-NEXT: ret <2 x float> [[POW]]
;
%r = call <2 x float> @llvm.pow.v2f32(<2 x float> %x, <2 x float> <float 2.0, float 2.0>)
ret <2 x float> %r
}
define double @pow2_double_strict(double %x) {
-; ANY-LABEL: @pow2_double_strict(
-; ANY-NEXT: [[SQUARE:%.*]] = fmul double [[X:%.*]], [[X]]
-; ANY-NEXT: ret double [[SQUARE]]
+; CHECK-LABEL: @pow2_double_strict(
+; CHECK-NEXT: [[SQUARE:%.*]] = fmul double [[X:%.*]], [[X]]
+; CHECK-NEXT: ret double [[SQUARE]]
;
%r = call double @pow(double %x, double 2.0)
ret double %r
}
define <2 x double> @pow2_double_strictv(<2 x double> %x) {
-; ANY-LABEL: @pow2_double_strictv(
+; CHECK-LABEL: @pow2_double_strictv(
; ANY-NEXT: [[SQUARE:%.*]] = fmul <2 x double> [[X:%.*]], [[X]]
; ANY-NEXT: ret <2 x double> [[SQUARE]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> [[X:%.*]], <2 x double> <double 2.000000e+00, double 2.000000e+00>)
+; MSVC-NEXT: ret <2 x double> [[POW]]
;
%r = call <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.0, double 2.0>)
ret <2 x double> %r
}
; Don't drop the FMF - PR35601 ( https://bugs.llvm.org/show_bug.cgi?id=35601 )
define float @pow2_fast(float %x) {
-; ANY-LABEL: @pow2_fast(
+; CHECK-LABEL: @pow2_fast(
; ANY-NEXT: [[SQUARE:%.*]] = fmul fast float [[X:%.*]], [[X]]
; ANY-NEXT: ret float [[SQUARE]]
+; VC32-NEXT: [[POW:%.*]] = call fast float @powf(float [[X:%.*]], float 2.000000e+00)
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call fast float @powf(float [[X:%.*]], float 2.000000e+00)
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: [[SQUARE:%.*]] = fmul fast float [[X:%.*]], [[X]]
+; VC64-NEXT: ret float [[SQUARE]]
+; VC83-NEXT: [[SQUARE:%.*]] = fmul fast float [[X:%.*]], [[X]]
+; VC83-NEXT: ret float [[SQUARE]]
;
%r = call fast float @powf(float %x, float 2.0)
ret float %r
}
; Check pow(x, -1.0) -> 1.0/x.
define float @pow_neg1_strict(float %x) {
-; ANY-LABEL: @pow_neg1_strict(
+; CHECK-LABEL: @pow_neg1_strict(
; ANY-NEXT: [[RECIPROCAL:%.*]] = fdiv float 1.000000e+00, [[X:%.*]]
; ANY-NEXT: ret float [[RECIPROCAL]]
+; VC32-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float -1.000000e+00)
+; VC32-NEXT: ret float [[POW]]
+; VC51-NEXT: [[POW:%.*]] = call float @powf(float [[X:%.*]], float -1.000000e+00)
+; VC51-NEXT: ret float [[POW]]
+; VC64-NEXT: [[RECIPROCAL:%.*]] = fdiv float 1.000000e+00, [[X:%.*]]
+; VC64-NEXT: ret float [[RECIPROCAL]]
+; VC83-NEXT: [[RECIPROCAL:%.*]] = fdiv float 1.000000e+00, [[X:%.*]]
+; VC83-NEXT: ret float [[RECIPROCAL]]
;
%r = call float @powf(float %x, float -1.0)
ret float %r
}
define <2 x float> @pow_neg1_strictv(<2 x float> %x) {
-; ANY-LABEL: @pow_neg1_strictv(
+; CHECK-LABEL: @pow_neg1_strictv(
; ANY-NEXT: [[RECIPROCAL:%.*]] = fdiv <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[X:%.*]]
; ANY-NEXT: ret <2 x float> [[RECIPROCAL]]
+; MSVC-NEXT: [[POW:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> [[X:%.*]], <2 x float> <float -1.000000e+00, float -1.000000e+00>)
+; MSVC-NEXT: ret <2 x float> [[POW]]
;
%r = call <2 x float> @llvm.pow.v2f32(<2 x float> %x, <2 x float> <float -1.0, float -1.0>)
ret <2 x float> %r
}
define double @pow_neg1_double_fast(double %x) {
-; ANY-LABEL: @pow_neg1_double_fast(
-; ANY-NEXT: [[RECIPROCAL:%.*]] = fdiv fast double 1.000000e+00, [[X:%.*]]
-; ANY-NEXT: ret double [[RECIPROCAL]]
+; CHECK-LABEL: @pow_neg1_double_fast(
+; CHECK-NEXT: [[RECIPROCAL:%.*]] = fdiv fast double 1.000000e+00, [[X:%.*]]
+; CHECK-NEXT: ret double [[RECIPROCAL]]
;
%r = call fast double @pow(double %x, double -1.0)
ret double %r
}
define <2 x double> @pow_neg1_double_fastv(<2 x double> %x) {
-; ANY-LABEL: @pow_neg1_double_fastv(
+; CHECK-LABEL: @pow_neg1_double_fastv(
; ANY-NEXT: [[RECIPROCAL:%.*]] = fdiv fast <2 x double> <double 1.000000e+00, double 1.000000e+00>, [[X:%.*]]
; ANY-NEXT: ret <2 x double> [[RECIPROCAL]]
+; MSVC-NEXT: [[POW:%.*]] = call fast <2 x double> @llvm.pow.v2f64(<2 x double> [[X:%.*]], <2 x double> <double -1.000000e+00, double -1.000000e+00>)
+; MSVC-NEXT: ret <2 x double> [[POW]]
;
%r = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double -1.0, double -1.0>)
ret <2 x double> %r
}
define double @test_simplify17(double %x) {
-; ANY-LABEL: @test_simplify17(
-; ANY-NEXT: [[SQRT:%.*]] = call double @llvm.sqrt.f64(double [[X:%.*]])
-; ANY-NEXT: [[ABS:%.*]] = call double @llvm.fabs.f64(double [[SQRT]])
-; ANY-NEXT: [[ISINF:%.*]] = fcmp oeq double [[X]], 0xFFF0000000000000
-; ANY-NEXT: [[TMP1:%.*]] = select i1 [[ISINF]], double 0x7FF0000000000000, double [[ABS]]
-; ANY-NEXT: ret double [[TMP1]]
+; CHECK-LABEL: @test_simplify17(
+; CHECK-NEXT: [[SQRT:%.*]] = call double @llvm.sqrt.f64(double [[X:%.*]])
+; CHECK-NEXT: [[ABS:%.*]] = call double @llvm.fabs.f64(double [[SQRT]])
+; CHECK-NEXT: [[ISINF:%.*]] = fcmp oeq double [[X]], 0xFFF0000000000000
+; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[ISINF]], double 0x7FF0000000000000, double [[ABS]]
+; CHECK-NEXT: ret double [[TMP1]]
;
%retval = call double @llvm.pow.f64(double %x, double 0.5)
ret double %retval
}
; Check pow(10.0, x) -> __exp10(x) on OS X 10.9+ and iOS 7.0+.
define float @test_simplify18(float %x) {
-; CHECK-EXP10-LABEL: @test_simplify18(
-; CHECK-EXP10-NEXT: [[__EXP10F:%.*]] = call float @__exp10f(float [[X:%.*]]) [[NUW_RO]]
-; CHECK-EXP10-NEXT: ret float [[__EXP10F]]
+; CHECK-LABEL: @test_simplify18(
+; CHECK-EXP10-NEXT: [[__EXP10F:%.*]] = call float @__exp10f(float [[X:%.*]])
+; CHECK-EXP10-NEXT: ret float [[__EXP10F]]
+; CHECK-NO-EXP10-NEXT: [[RETVAL:%.*]] = call float @powf(float 1.000000e+01, float [[X:%.*]])
+; CHECK-NO-EXP10-NEXT: ret float [[RETVAL]]
;
-; CHECK-NO-EXP10-LABEL: @test_simplify18(
-; CHECK-NO-EXP10-NEXT: [[RETVAL:%.*]] = call float @powf(float 1.000000e+01, float [[X:%.*]])
-; CHECK-NO-EXP10-NEXT: ret float [[RETVAL]]
-;
%retval = call float @powf(float 10.0, float %x)
ret float %retval
}
define double @test_simplify19(double %x) {
-; CHECK-EXP10-LABEL: @test_simplify19(
-; CHECK-EXP10-NEXT: [[__EXP10:%.*]] = call double @__exp10(double [[X:%.*]]) [[NUW_RO]]
-; CHECK-EXP10-NEXT: ret double [[__EXP10]]
+; CHECK-LABEL: @test_simplify19(
+; CHECK-EXP10-NEXT: [[__EXP10:%.*]] = call double @__exp10(double [[X:%.*]])
+; CHECK-EXP10-NEXT: ret double [[__EXP10]]
+; CHECK-NO-EXP10-NEXT: [[RETVAL:%.*]] = call double @pow(double 1.000000e+01, double [[X:%.*]])
+; CHECK-NO-EXP10-NEXT: ret double [[RETVAL]]
;
-; CHECK-NO-EXP10-LABEL: @test_simplify19(
-; CHECK-NO-EXP10-NEXT: [[RETVAL:%.*]] = call double @pow(double 1.000000e+01, double [[X:%.*]])
-; CHECK-NO-EXP10-NEXT: ret double [[RETVAL]]
-;
%retval = call double @pow(double 10.0, double %x)
ret double %retval
}
-
-; CHECK-EXP10: attributes [[NUW_RO]] = { nounwind readonly }
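Taken together, the expanded prefixes above pin down which pow simplifications fire per target: pow(1.0, x) and pow(x, 0.0) fold to 1.0, pow(x, 1.0) to x, pow(x, 2.0) to x*x, pow(x, -1.0) to a reciprocal, pow(2**n, x) to exp2(n*x), pow(10.0, x) to __exp10 where that routine exists, and pow(x, 0.5) to fabs(sqrt(x)) guarded against -infinity. A small plain-Python aside (illustrative only, not part of the test) spells out the special cases behind the unconditional constant folds and the -infinity guard:

import math

inf, nan = float("inf"), float("nan")

# pow(1.0, x) and pow(x, 0.0) are 1.0 for every x, including infinities and NaN,
# which is why those calls fold to a constant with no guard at all.
for x in (0.0, 0.5, 2.0, 42.0, inf, -inf, nan):
    assert math.pow(1.0, x) == 1.0
    assert math.pow(x, 0.0) == 1.0

# pow(x, 0.5) matches fabs(sqrt(x)) except at x == -infinity, where pow returns
# +infinity while sqrt would give NaN; hence the fcmp oeq/select in the expected IR.
assert math.pow(-inf, 0.5) == inf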
Index: vendor/llvm/dist-release_80/test/Transforms/InstCombine/win-math.ll
===================================================================
--- vendor/llvm/dist-release_80/test/Transforms/InstCombine/win-math.ll (revision 344165)
+++ vendor/llvm/dist-release_80/test/Transforms/InstCombine/win-math.ll (revision 344166)
@@ -1,300 +1,335 @@
-; RUN: opt -O2 -S -mtriple=i386-pc-win32 < %s | FileCheck %s -check-prefix=WIN32
-; RUN: opt -O2 -S -mtriple=x86_64-pc-win32 < %s | FileCheck %s -check-prefix=WIN64
-; RUN: opt -O2 -S -mtriple=i386-pc-mingw32 < %s | FileCheck %s -check-prefix=MINGW32
-; RUN: opt -O2 -S -mtriple=x86_64-pc-mingw32 < %s | FileCheck %s -check-prefix=MINGW64
+; RUN: opt < %s -O2 -S -mtriple=i386-pc-windows-msvc18 | FileCheck %s --check-prefixes=CHECK,MSVCXX,MSVC32
+; RUN: opt < %s -O2 -S -mtriple=i386-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,MSVC19,MSVC51
+; RUN: opt < %s -O2 -S -mtriple=x86_64-pc-windows-msvc17 | FileCheck %s --check-prefixes=CHECK,MSVCXX,MSVC64
+; RUN: opt < %s -O2 -S -mtriple=x86_64-pc-win32 | FileCheck %s --check-prefixes=CHECK,MSVC19,MSVC83
+; RUN: opt < %s -O2 -S -mtriple=i386-pc-mingw32 | FileCheck %s --check-prefixes=CHECK,MINGW32
+; RUN: opt < %s -O2 -S -mtriple=x86_64-pc-mingw32 | FileCheck %s --check-prefixes=CHECK,MINGW64
; x86 win32 msvcrt does not provide entry points for single-precision libm.
-; x86-64 win32 msvcrt does (except for fabsf)
-; msvcrt does not provide C99 math, but mingw32 does.
+; x86-64 win32 msvcrt does, but with exceptions
+; msvcrt does not provide all of C99 math, but mingw32 does.
declare double @acos(double %x)
define float @float_acos(float %x) nounwind readnone {
-; WIN32-LABEL: @float_acos(
-; WIN32-NOT: float @acosf
-; WIN32: double @acos
+; CHECK-LABEL: @float_acos(
+; MSVCXX-NOT: float @acosf
+; MSVCXX: double @acos
+; MSVC19-NOT: float @acosf
+; MSVC19: double @acos
%1 = fpext float %x to double
%2 = call double @acos(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @asin(double %x)
define float @float_asin(float %x) nounwind readnone {
-; WIN32-LABEL: @float_asin(
-; WIN32-NOT: float @asinf
-; WIN32: double @asin
+; CHECK-LABEL: @float_asin(
+; MSVCXX-NOT: float @asinf
+; MSVCXX: double @asin
+; MSVC19-NOT: float @asinf
+; MSVC19: double @asin
%1 = fpext float %x to double
%2 = call double @asin(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @atan(double %x)
define float @float_atan(float %x) nounwind readnone {
-; WIN32-LABEL: @float_atan(
-; WIN32-NOT: float @atanf
-; WIN32: double @atan
+; CHECK-LABEL: @float_atan(
+; MSVCXX-NOT: float @atanf
+; MSVCXX: double @atan
+; MSVC19-NOT: float @atanf
+; MSVC19: double @atan
%1 = fpext float %x to double
%2 = call double @atan(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @atan2(double %x, double %y)
define float @float_atan2(float %x, float %y) nounwind readnone {
-; WIN32-LABEL: @float_atan2(
-; WIN32-NOT: float @atan2f
-; WIN32: double @atan2
+; CHECK-LABEL: @float_atan2(
+; MSVCXX-NOT: float @atan2f
+; MSVCXX: double @atan2
+; MSVC19-NOT: float @atan2f
+; MSVC19: double @atan2
%1 = fpext float %x to double
%2 = fpext float %y to double
%3 = call double @atan2(double %1, double %2)
%4 = fptrunc double %3 to float
ret float %4
}
declare double @ceil(double %x)
define float @float_ceil(float %x) nounwind readnone {
-; WIN32-LABEL: @float_ceil(
-; WIN32-NOT: float @ceilf
-; WIN32: float @llvm.ceil.f32
-; WIN64-LABEL: @float_ceil(
-; WIN64: float @llvm.ceil.f32
-; WIN64-NOT: double @ceil
-; MINGW32-LABEL: @float_ceil(
-; MINGW32: float @llvm.ceil.f32
+; CHECK-LABEL: @float_ceil(
+; MSVCXX-NOT: float @ceilf
+; MSVCXX: float @llvm.ceil.f32
+; MSVC19-NOT: double @ceil
+; MSVC19: float @llvm.ceil.f32
; MINGW32-NOT: double @ceil
-; MINGW64-LABEL: @float_ceil(
-; MINGW64: float @llvm.ceil.f32
+; MINGW32: float @llvm.ceil.f32
; MINGW64-NOT: double @ceil
+; MINGW64: float @llvm.ceil.f32
%1 = fpext float %x to double
%2 = call double @ceil(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @_copysign(double %x)
define float @float_copysign(float %x) nounwind readnone {
-; WIN32-LABEL: @float_copysign(
-; WIN32-NOT: float @copysignf
-; WIN32-NOT: float @_copysignf
-; WIN32: double @_copysign
+; CHECK-LABEL: @float_copysign(
+; MSVCXX-NOT: float @_copysignf
+; MSVCXX: double @_copysign
+; MSVC19-NOT: float @_copysignf
+; MSVC19: double @_copysign
%1 = fpext float %x to double
%2 = call double @_copysign(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @cos(double %x)
define float @float_cos(float %x) nounwind readnone {
-; WIN32-LABEL: @float_cos(
-; WIN32-NOT: float @cosf
-; WIN32: double @cos
+; CHECK-LABEL: @float_cos(
+; MSVCXX-NOT: float @cosf
+; MSVCXX: double @cos
+; MSVC19-NOT: float @cosf
+; MSVC19: double @cos
%1 = fpext float %x to double
%2 = call double @cos(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @cosh(double %x)
define float @float_cosh(float %x) nounwind readnone {
-; WIN32-LABEL: @float_cosh(
-; WIN32-NOT: float @coshf
-; WIN32: double @cosh
+; CHECK-LABEL: @float_cosh(
+; MSVCXX-NOT: float @coshf
+; MSVCXX: double @cosh
+; MSVC19-NOT: float @coshf
+; MSVC19: double @cosh
%1 = fpext float %x to double
%2 = call double @cosh(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @exp(double %x, double %y)
define float @float_exp(float %x, float %y) nounwind readnone {
-; WIN32-LABEL: @float_exp(
-; WIN32-NOT: float @expf
-; WIN32: double @exp
+; CHECK-LABEL: @float_exp(
+; MSVCXX-NOT: float @expf
+; MSVCXX: double @exp
+; MSVC19-NOT: float @expf
+; MSVC19: double @exp
%1 = fpext float %x to double
%2 = fpext float %y to double
%3 = call double @exp(double %1, double %2)
%4 = fptrunc double %3 to float
ret float %4
}
declare double @fabs(double %x, double %y)
define float @float_fabs(float %x, float %y) nounwind readnone {
-; WIN32-LABEL: @float_fabs(
-; WIN32-NOT: float @fabsf
-; WIN32: double @fabs
-; WIN64-LABEL: @float_fabs(
-; WIN64-NOT: float @fabsf
-; WIN64: double @fabs
+; CHECK-LABEL: @float_fabs(
+; MSVCXX-NOT: float @fabsf
+; MSVCXX: double @fabs
+; MSVC19-NOT: float @fabsf
+; MSVC19: double @fabs
%1 = fpext float %x to double
%2 = fpext float %y to double
%3 = call double @fabs(double %1, double %2)
%4 = fptrunc double %3 to float
ret float %4
}
declare double @floor(double %x)
define float @float_floor(float %x) nounwind readnone {
-; WIN32-LABEL: @float_floor(
-; WIN32-NOT: float @floorf
-; WIN32: float @llvm.floor.f32
-; WIN64-LABEL: @float_floor(
-; WIN64: float @llvm.floor.f32
-; WIN64-NOT: double @floor
-; MINGW32-LABEL: @float_floor(
-; MINGW32: float @llvm.floor.f32
+; CHECK-LABEL: @float_floor(
+; MSVCXX-NOT: float @floorf
+; MSVCXX: float @llvm.floor.f32
+; MSVC19-NOT: double @floor
+; MSVC19: float @llvm.floor.f32
; MINGW32-NOT: double @floor
-; MINGW64-LABEL: @float_floor(
-; MINGW64: float @llvm.floor.f32
+; MINGW32: float @llvm.floor.f32
; MINGW64-NOT: double @floor
+; MINGW64: float @llvm.floor.f32
%1 = fpext float %x to double
%2 = call double @floor(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @fmod(double %x, double %y)
define float @float_fmod(float %x, float %y) nounwind readnone {
-; WIN32-LABEL: @float_fmod(
-; WIN32-NOT: float @fmodf
-; WIN32: double @fmod
+; MSVCXX-LABEL: @float_fmod(
+; MSVCXX-NOT: float @fmodf
+; MSVCXX: double @fmod
+; MSVC19-NOT: float @fmodf
+; MSVC19: double @fmod
%1 = fpext float %x to double
%2 = fpext float %y to double
%3 = call double @fmod(double %1, double %2)
%4 = fptrunc double %3 to float
ret float %4
}
declare double @log(double %x)
define float @float_log(float %x) nounwind readnone {
-; WIN32-LABEL: @float_log(
-; WIN32-NOT: float @logf
-; WIN32: double @log
+; CHECK-LABEL: @float_log(
+; MSVCXX-NOT: float @logf
+; MSVCXX: double @log
+; MSVC19-NOT: float @logf
+; MSVC19: double @log
%1 = fpext float %x to double
%2 = call double @log(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
+declare double @logb(double %x)
+define float @float_logb(float %x) nounwind readnone {
+; CHECK-LABEL: @float_logb(
+; MSVCXX-NOT: float @logbf
+; MSVCXX: double @logb
+; MSVC19-NOT: float @logbf
+; MSVC19: double @logb
+ %1 = fpext float %x to double
+ %2 = call double @logb(double %1)
+ %3 = fptrunc double %2 to float
+ ret float %3
+}
+
declare double @pow(double %x, double %y)
define float @float_pow(float %x, float %y) nounwind readnone {
-; WIN32-LABEL: @float_pow(
-; WIN32-NOT: float @powf
-; WIN32: double @pow
+; CHECK-LABEL: @float_pow(
+; MSVCXX-NOT: float @powf
+; MSVCXX: double @pow
+; MSVC19-NOT: float @powf
+; MSVC19: double @pow
%1 = fpext float %x to double
%2 = fpext float %y to double
%3 = call double @pow(double %1, double %2)
%4 = fptrunc double %3 to float
ret float %4
}
declare double @sin(double %x)
define float @float_sin(float %x) nounwind readnone {
-; WIN32-LABEL: @float_sin(
-; WIN32-NOT: float @sinf
-; WIN32: double @sin
+; CHECK-LABEL: @float_sin(
+; MSVCXX-NOT: float @sinf
+; MSVCXX: double @sin
+; MSVC19-NOT: float @sinf
+; MSVC19: double @sin
%1 = fpext float %x to double
%2 = call double @sin(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @sinh(double %x)
define float @float_sinh(float %x) nounwind readnone {
-; WIN32-LABEL: @float_sinh(
-; WIN32-NOT: float @sinhf
-; WIN32: double @sinh
+; CHECK-LABEL: @float_sinh(
+; MSVCXX-NOT: float @sinhf
+; MSVCXX: double @sinh
+; MSVC19-NOT: float @sinhf
+; MSVC19: double @sinh
%1 = fpext float %x to double
%2 = call double @sinh(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @sqrt(double %x)
define float @float_sqrt(float %x) nounwind readnone {
-; WIN32-LABEL: @float_sqrt(
-; WIN32-NOT: float @sqrtf
-; WIN32: double @sqrt
-; WIN64-LABEL: @float_sqrt(
-; WIN64: float @sqrtf
-; WIN64-NOT: double @sqrt
-; MINGW32-LABEL: @float_sqrt(
-; MINGW32: float @sqrtf
+; CHECK-LABEL: @float_sqrt(
+; MSVC32-NOT: float @sqrtf
+; MSVC32: double @sqrt
+; MSVC51-NOT: float @sqrtf
+; MSVC51: double @sqrt
+; MSVC64-NOT: double @sqrt
+; MSVC64: float @sqrtf
+; MSVC83-NOT: double @sqrt
+; MSVC83: float @sqrtf
; MINGW32-NOT: double @sqrt
-; MINGW64-LABEL: @float_sqrt(
-; MINGW64: float @sqrtf
+; MINGW32: float @sqrtf
; MINGW64-NOT: double @sqrt
+; MINGW64: float @sqrtf
%1 = fpext float %x to double
%2 = call double @sqrt(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @tan(double %x)
define float @float_tan(float %x) nounwind readnone {
-; WIN32-LABEL: @float_tan(
-; WIN32-NOT: float @tanf
-; WIN32: double @tan
+; CHECK-LABEL: @float_tan(
+; MSVCXX-NOT: float @tanf
+; MSVCXX: double @tan
+; MSVC19-NOT: float @tanf
+; MSVC19: double @tan
%1 = fpext float %x to double
%2 = call double @tan(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare double @tanh(double %x)
define float @float_tanh(float %x) nounwind readnone {
-; WIN32-LABEL: @float_tanh(
-; WIN32-NOT: float @tanhf
-; WIN32: double @tanh
+; CHECK-LABEL: @float_tanh(
+; MSVCXX-NOT: float @tanhf
+; MSVCXX: double @tanh
+; MSVC19-NOT: float @tanhf
+; MSVC19: double @tanh
%1 = fpext float %x to double
%2 = call double @tanh(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
-; win32 does not have round; mingw32 does
+; win32 does not have roundf; mingw32 does
declare double @round(double %x)
define float @float_round(float %x) nounwind readnone {
-; WIN32-LABEL: @float_round(
-; WIN32-NOT: float @roundf
-; WIN32: double @round
-; WIN64-LABEL: @float_round(
-; WIN64-NOT: float @roundf
-; WIN64: double @round
-; MINGW32-LABEL: @float_round(
-; MINGW32: float @llvm.round.f32
+; CHECK-LABEL: @float_round(
+; MSVCXX-NOT: double @roundf
+; MSVCXX: double @round
+; MSVC19-NOT: double @round
+; MSVC19: float @llvm.round.f32
; MINGW32-NOT: double @round
-; MINGW64-LABEL: @float_round(
-; MINGW64: float @llvm.round.f32
+; MINGW32: float @llvm.round.f32
; MINGW64-NOT: double @round
+; MINGW64: float @llvm.round.f32
%1 = fpext float %x to double
%2 = call double @round(double %1)
%3 = fptrunc double %2 to float
ret float %3
}
declare float @powf(float, float)
-; win32 lacks sqrtf&fabsf, win64 lacks fabsf, but
+; win32 lacks sqrtf & fabsf, win64 lacks fabsf, but
; calls to the intrinsics can be emitted instead.
define float @float_powsqrt(float %x) nounwind readnone {
-; WIN32-LABEL: @float_powsqrt(
-; WIN32-NOT: float @sqrtf
-; WIN32: float @powf
-
-; WIN64-LABEL: @float_powsqrt(
-; WIN64: float @sqrtf
-; WIN64: float @llvm.fabs.f32(
-; WIN64-NOT: float @powf
-
-; MINGW32-LABEL: @float_powsqrt(
+; CHECK-LABEL: @float_powsqrt(
+; MSVC32-NOT: float @sqrtf
+; MSVC32: float @powf
+; MSVC51-NOT: float @sqrtf
+; MSVC51: float @powf
+; MSVC64-NOT: float @powf
+; MSVC64: float @sqrtf
+; MSVC64: float @llvm.fabs.f32(
+; MSVC83-NOT: float @powf
+; MSVC83: float @sqrtf
+; MSVC83: float @llvm.fabs.f32(
+; MINGW32-NOT: float @powf
; MINGW32: float @sqrtf
; MINGW32: float @llvm.fabs.f32
-; MINGW32-NOT: float @powf
-; MINGW64-LABEL: @float_powsqrt(
+; MINGW64-NOT: float @powf
; MINGW64: float @sqrtf
; MINGW64: float @llvm.fabs.f32(
-; MINGW64-NOT: float @powf
%1 = call float @powf(float %x, float 0.5)
ret float %1
}
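The pattern in this file is that a float computation routed through a double libm call is shrunk back to the float routine only when the target runtime actually provides that routine: 32-bit msvcrt exports none of the f-suffixed entry points, 64-bit msvcrt adds sqrtf but still lacks fabsf, newer MSVC and MinGW provide the C99 float math, and ceil/floor/fabs can always fall back to target-independent intrinsics. The toy model below is an editorial sketch with made-up table entries and function names (the real availability data lives in LLVM's TargetLibraryInfo), included only to make the decision these prefixes encode explicit:

# Hypothetical availability table; entries are illustrative, not exhaustive.
FLOAT_ROUTINES = {
    "msvc-x86-old": set(),                 # 32-bit msvcrt: no sqrtf, powf, ...
    "msvc-x64-old": {"sqrtf"},             # 64-bit msvcrt: sqrtf, still no fabsf
    "mingw32":      {"sqrtf", "roundf"},   # mingw ships C99 float math
}

def shrink_libcall(target: str, double_name: str) -> str:
    """Return the float routine if the target has it, else keep the double call."""
    float_name = double_name + "f"
    return float_name if float_name in FLOAT_ROUTINES[target] else double_name

assert shrink_libcall("msvc-x86-old", "sqrt") == "sqrt"     # stays double
assert shrink_libcall("msvc-x64-old", "sqrt") == "sqrtf"    # shrunk
assert shrink_libcall("mingw32", "round") == "roundf"       # shrunk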
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Atom/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Atom/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Atom/resources-x87.s (revision 344166)
@@ -1,515 +1,515 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=atom -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 99 49.50 U f2xm1
# CHECK-NEXT: 1 1 1.00 U fabs
-# CHECK-NEXT: 1 5 5.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 5 5.00 U fadd %st(2)
+# CHECK-NEXT: 1 5 5.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 5 5.00 U fadd %st(2), %st
# CHECK-NEXT: 1 5 5.00 * U fadds (%ecx)
# CHECK-NEXT: 1 5 5.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 5 5.00 U faddp %st(1)
-# CHECK-NEXT: 1 5 5.00 U faddp %st(2)
+# CHECK-NEXT: 1 5 5.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 5 5.00 U faddp %st, %st(2)
# CHECK-NEXT: 1 5 5.00 * U fiadds (%ecx)
# CHECK-NEXT: 1 5 5.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.50 U fbld (%ecx)
# CHECK-NEXT: 1 100 0.50 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 1 25 12.50 U fnclex
-# CHECK-NEXT: 1 9 4.50 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 9 4.50 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 9 4.50 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 9 4.50 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 9 4.50 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 9 4.50 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 9 4.50 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 9 4.50 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 9 4.50 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 9 4.50 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 9 4.50 U fcmove %st(1), %st
+# CHECK-NEXT: 1 9 4.50 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 9 4.50 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 9 4.50 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 9 4.50 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 9 4.50 U fcmovu %st(1), %st
# CHECK-NEXT: 1 5 5.00 U fcom %st(1)
# CHECK-NEXT: 1 5 5.00 U fcom %st(3)
# CHECK-NEXT: 1 5 5.00 U fcoms (%ecx)
# CHECK-NEXT: 1 5 5.00 U fcoml (%eax)
# CHECK-NEXT: 1 5 5.00 U fcomp %st(1)
# CHECK-NEXT: 1 5 5.00 U fcomp %st(3)
# CHECK-NEXT: 1 5 5.00 U fcomps (%ecx)
# CHECK-NEXT: 1 5 5.00 U fcompl (%eax)
# CHECK-NEXT: 1 1 1.00 U fcompp
-# CHECK-NEXT: 1 9 4.50 U fcomi %st(3)
-# CHECK-NEXT: 1 9 4.50 U fcompi %st(3)
+# CHECK-NEXT: 1 9 4.50 U fcomi %st(3), %st
+# CHECK-NEXT: 1 9 4.50 U fcompi %st(3), %st
# CHECK-NEXT: 1 174 87.00 U fcos
# CHECK-NEXT: 1 1 0.50 U fdecstp
-# CHECK-NEXT: 1 34 17.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 34 17.00 U fdiv %st(2)
+# CHECK-NEXT: 1 34 17.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 34 17.00 U fdiv %st(2), %st
# CHECK-NEXT: 1 34 17.00 * U fdivs (%ecx)
# CHECK-NEXT: 1 34 17.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 34 17.00 U fdivp %st(1)
-# CHECK-NEXT: 1 34 17.00 U fdivp %st(2)
+# CHECK-NEXT: 1 34 17.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 34 17.00 U fdivp %st, %st(2)
# CHECK-NEXT: 1 34 17.00 * U fidivs (%ecx)
# CHECK-NEXT: 1 34 17.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 34 17.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 34 17.00 U fdivr %st(2)
+# CHECK-NEXT: 1 34 17.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 34 17.00 U fdivr %st(2), %st
# CHECK-NEXT: 1 34 17.00 * U fdivrs (%ecx)
# CHECK-NEXT: 1 34 17.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 34 17.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 34 17.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 34 17.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 34 17.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 1 34 17.00 * U fidivrs (%ecx)
# CHECK-NEXT: 1 34 17.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 1 0.50 U ffree %st(0)
# CHECK-NEXT: 1 5 5.00 U ficoms (%ecx)
# CHECK-NEXT: 1 5 5.00 U ficoml (%eax)
# CHECK-NEXT: 1 5 5.00 U ficomps (%ecx)
# CHECK-NEXT: 1 5 5.00 U ficompl (%eax)
# CHECK-NEXT: 1 5 5.00 * U filds (%edx)
# CHECK-NEXT: 1 5 5.00 * U fildl (%ecx)
# CHECK-NEXT: 1 5 5.00 * U fildll (%eax)
# CHECK-NEXT: 1 1 0.50 U fincstp
# CHECK-NEXT: 1 63 31.50 U fninit
# CHECK-NEXT: 1 6 3.00 * U fists (%edx)
# CHECK-NEXT: 1 6 3.00 * U fistl (%ecx)
# CHECK-NEXT: 1 6 3.00 * U fistps (%edx)
# CHECK-NEXT: 1 6 3.00 * U fistpl (%ecx)
# CHECK-NEXT: 1 6 3.00 * U fistpll (%eax)
# CHECK-NEXT: 1 2 1.00 * U fisttps (%edx)
# CHECK-NEXT: 1 2 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 1 2 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 1.00 U fld %st(0)
# CHECK-NEXT: 1 1 1.00 * U flds (%edx)
# CHECK-NEXT: 1 1 1.00 * U fldl (%ecx)
# CHECK-NEXT: 1 4 2.00 * U fldt (%eax)
# CHECK-NEXT: 1 5 2.50 * U fldcw (%eax)
# CHECK-NEXT: 1 100 0.50 U fldenv (%eax)
# CHECK-NEXT: 1 6 3.00 U fld1
# CHECK-NEXT: 1 10 5.00 U fldl2e
# CHECK-NEXT: 1 10 5.00 U fldl2t
# CHECK-NEXT: 1 10 5.00 U fldlg2
# CHECK-NEXT: 1 10 5.00 U fldln2
# CHECK-NEXT: 1 10 5.00 U fldpi
# CHECK-NEXT: 1 1 0.50 U fldz
-# CHECK-NEXT: 1 4 4.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 4 4.00 U fmul %st(2)
+# CHECK-NEXT: 1 4 4.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 4 4.00 U fmul %st(2), %st
# CHECK-NEXT: 1 4 4.00 * U fmuls (%ecx)
# CHECK-NEXT: 1 4 4.00 * U fmull (%eax)
-# CHECK-NEXT: 1 4 4.00 U fmulp %st(1)
-# CHECK-NEXT: 1 4 4.00 U fmulp %st(2)
+# CHECK-NEXT: 1 4 4.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 4 4.00 U fmulp %st, %st(2)
# CHECK-NEXT: 1 4 4.00 * U fimuls (%ecx)
# CHECK-NEXT: 1 4 4.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 0.50 U fnop
# CHECK-NEXT: 1 183 91.50 U fpatan
# CHECK-NEXT: 1 55 27.50 U fprem
# CHECK-NEXT: 1 71 35.50 U fprem1
# CHECK-NEXT: 1 168 84.00 U fptan
# CHECK-NEXT: 1 46 23.00 U frndint
# CHECK-NEXT: 1 100 0.50 U frstor (%eax)
# CHECK-NEXT: 1 100 0.50 U fnsave (%eax)
# CHECK-NEXT: 1 77 38.50 U fscale
# CHECK-NEXT: 1 174 87.00 U fsin
# CHECK-NEXT: 1 174 87.00 U fsincos
# CHECK-NEXT: 1 71 35.50 U fsqrt
# CHECK-NEXT: 1 2 1.00 U fst %st(0)
# CHECK-NEXT: 1 2 1.00 * U fsts (%edx)
# CHECK-NEXT: 1 2 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 2 1.00 U fstp %st(0)
# CHECK-NEXT: 1 2 1.00 * U fstpl (%edx)
# CHECK-NEXT: 1 2 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 1 5 2.50 * U fstpt (%eax)
# CHECK-NEXT: 1 8 4.00 * U fnstcw (%eax)
# CHECK-NEXT: 1 100 0.50 U fnstenv (%eax)
# CHECK-NEXT: 1 100 0.50 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.50 U frstor (%eax)
# CHECK-NEXT: 1 1 0.50 U wait
# CHECK-NEXT: 1 100 0.50 U fnsave (%eax)
-# CHECK-NEXT: 1 5 5.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 5 5.00 U fsub %st(2)
+# CHECK-NEXT: 1 5 5.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 5 5.00 U fsub %st(2), %st
# CHECK-NEXT: 1 5 5.00 * U fsubs (%ecx)
# CHECK-NEXT: 1 5 5.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 5 5.00 U fsubp %st(1)
-# CHECK-NEXT: 1 5 5.00 U fsubp %st(2)
+# CHECK-NEXT: 1 5 5.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 5 5.00 U fsubp %st, %st(2)
# CHECK-NEXT: 1 5 5.00 * U fisubs (%ecx)
# CHECK-NEXT: 1 5 5.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 5 5.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 5 5.00 U fsubr %st(2)
+# CHECK-NEXT: 1 5 5.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 5 5.00 U fsubr %st(2), %st
# CHECK-NEXT: 1 5 5.00 * U fsubrs (%ecx)
# CHECK-NEXT: 1 5 5.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 5 5.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 5 5.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 5 5.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 5 5.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 1 5 5.00 * U fisubrs (%ecx)
# CHECK-NEXT: 1 5 5.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 9 4.50 U ftst
# CHECK-NEXT: 1 1 1.00 U fucom %st(1)
# CHECK-NEXT: 1 1 1.00 U fucom %st(3)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 1 1.00 U fucompp
-# CHECK-NEXT: 1 9 4.50 U fucomi %st(3)
-# CHECK-NEXT: 1 9 4.50 U fucompi %st(3)
+# CHECK-NEXT: 1 9 4.50 U fucomi %st(3), %st
+# CHECK-NEXT: 1 9 4.50 U fucompi %st(3), %st
# CHECK-NEXT: 1 1 0.50 U wait
# CHECK-NEXT: 1 1 1.00 U fxam
# CHECK-NEXT: 1 1 1.00 U fxch %st(1)
# CHECK-NEXT: 1 1 1.00 U fxch %st(3)
# CHECK-NEXT: 1 141 70.50 * * U fxrstor (%eax)
# CHECK-NEXT: 1 140 70.00 * * U fxsave (%eax)
# CHECK-NEXT: 1 25 12.50 U fxtract
# CHECK-NEXT: 1 146 73.00 U fyl2x
# CHECK-NEXT: 1 147 73.50 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - AtomPort0
# CHECK-NEXT: [1] - AtomPort1
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1]
# CHECK-NEXT: 1624.00 1416.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] Instructions:
# CHECK-NEXT: 49.50 49.50 f2xm1
# CHECK-NEXT: - 1.00 fabs
-# CHECK-NEXT: 5.00 - fadd %st(0), %st(1)
-# CHECK-NEXT: 5.00 - fadd %st(2)
+# CHECK-NEXT: 5.00 - fadd %st, %st(1)
+# CHECK-NEXT: 5.00 - fadd %st(2), %st
# CHECK-NEXT: 5.00 - fadds (%ecx)
# CHECK-NEXT: 5.00 - faddl (%ecx)
-# CHECK-NEXT: 5.00 - faddp %st(1)
-# CHECK-NEXT: 5.00 - faddp %st(2)
+# CHECK-NEXT: 5.00 - faddp %st, %st(1)
+# CHECK-NEXT: 5.00 - faddp %st, %st(2)
# CHECK-NEXT: 5.00 - fiadds (%ecx)
# CHECK-NEXT: 5.00 - fiaddl (%ecx)
# CHECK-NEXT: 0.50 0.50 fbld (%ecx)
# CHECK-NEXT: 0.50 0.50 fbstp (%eax)
# CHECK-NEXT: - 1.00 fchs
# CHECK-NEXT: 12.50 12.50 fnclex
-# CHECK-NEXT: 4.50 4.50 fcmovb %st(1), %st(0)
-# CHECK-NEXT: 4.50 4.50 fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 4.50 4.50 fcmove %st(1), %st(0)
-# CHECK-NEXT: 4.50 4.50 fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 4.50 4.50 fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 4.50 4.50 fcmovne %st(1), %st(0)
-# CHECK-NEXT: 4.50 4.50 fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 4.50 4.50 fcmovu %st(1), %st(0)
+# CHECK-NEXT: 4.50 4.50 fcmovb %st(1), %st
+# CHECK-NEXT: 4.50 4.50 fcmovbe %st(1), %st
+# CHECK-NEXT: 4.50 4.50 fcmove %st(1), %st
+# CHECK-NEXT: 4.50 4.50 fcmovnb %st(1), %st
+# CHECK-NEXT: 4.50 4.50 fcmovnbe %st(1), %st
+# CHECK-NEXT: 4.50 4.50 fcmovne %st(1), %st
+# CHECK-NEXT: 4.50 4.50 fcmovnu %st(1), %st
+# CHECK-NEXT: 4.50 4.50 fcmovu %st(1), %st
# CHECK-NEXT: 5.00 - fcom %st(1)
# CHECK-NEXT: 5.00 - fcom %st(3)
# CHECK-NEXT: 5.00 - fcoms (%ecx)
# CHECK-NEXT: 5.00 - fcoml (%eax)
# CHECK-NEXT: 5.00 - fcomp %st(1)
# CHECK-NEXT: 5.00 - fcomp %st(3)
# CHECK-NEXT: 5.00 - fcomps (%ecx)
# CHECK-NEXT: 5.00 - fcompl (%eax)
# CHECK-NEXT: - 1.00 fcompp
-# CHECK-NEXT: 4.50 4.50 fcomi %st(3)
-# CHECK-NEXT: 4.50 4.50 fcompi %st(3)
+# CHECK-NEXT: 4.50 4.50 fcomi %st(3), %st
+# CHECK-NEXT: 4.50 4.50 fcompi %st(3), %st
# CHECK-NEXT: 87.00 87.00 fcos
# CHECK-NEXT: 0.50 0.50 fdecstp
-# CHECK-NEXT: 17.00 17.00 fdiv %st(0), %st(1)
-# CHECK-NEXT: 17.00 17.00 fdiv %st(2)
+# CHECK-NEXT: 17.00 17.00 fdiv %st, %st(1)
+# CHECK-NEXT: 17.00 17.00 fdiv %st(2), %st
# CHECK-NEXT: 17.00 17.00 fdivs (%ecx)
# CHECK-NEXT: 17.00 17.00 fdivl (%eax)
-# CHECK-NEXT: 17.00 17.00 fdivp %st(1)
-# CHECK-NEXT: 17.00 17.00 fdivp %st(2)
+# CHECK-NEXT: 17.00 17.00 fdivp %st, %st(1)
+# CHECK-NEXT: 17.00 17.00 fdivp %st, %st(2)
# CHECK-NEXT: 17.00 17.00 fidivs (%ecx)
# CHECK-NEXT: 17.00 17.00 fidivl (%eax)
-# CHECK-NEXT: 17.00 17.00 fdivr %st(0), %st(1)
-# CHECK-NEXT: 17.00 17.00 fdivr %st(2)
+# CHECK-NEXT: 17.00 17.00 fdivr %st, %st(1)
+# CHECK-NEXT: 17.00 17.00 fdivr %st(2), %st
# CHECK-NEXT: 17.00 17.00 fdivrs (%ecx)
# CHECK-NEXT: 17.00 17.00 fdivrl (%eax)
-# CHECK-NEXT: 17.00 17.00 fdivrp %st(1)
-# CHECK-NEXT: 17.00 17.00 fdivrp %st(2)
+# CHECK-NEXT: 17.00 17.00 fdivrp %st, %st(1)
+# CHECK-NEXT: 17.00 17.00 fdivrp %st, %st(2)
# CHECK-NEXT: 17.00 17.00 fidivrs (%ecx)
# CHECK-NEXT: 17.00 17.00 fidivrl (%eax)
# CHECK-NEXT: 0.50 0.50 ffree %st(0)
# CHECK-NEXT: 5.00 - ficoms (%ecx)
# CHECK-NEXT: 5.00 - ficoml (%eax)
# CHECK-NEXT: 5.00 - ficomps (%ecx)
# CHECK-NEXT: 5.00 - ficompl (%eax)
# CHECK-NEXT: 5.00 5.00 filds (%edx)
# CHECK-NEXT: 5.00 5.00 fildl (%ecx)
# CHECK-NEXT: 5.00 5.00 fildll (%eax)
# CHECK-NEXT: 0.50 0.50 fincstp
# CHECK-NEXT: 31.50 31.50 fninit
# CHECK-NEXT: 3.00 3.00 fists (%edx)
# CHECK-NEXT: 3.00 3.00 fistl (%ecx)
# CHECK-NEXT: 3.00 3.00 fistps (%edx)
# CHECK-NEXT: 3.00 3.00 fistpl (%ecx)
# CHECK-NEXT: 3.00 3.00 fistpll (%eax)
# CHECK-NEXT: 1.00 1.00 fisttps (%edx)
# CHECK-NEXT: 1.00 1.00 fisttpl (%ecx)
# CHECK-NEXT: 1.00 1.00 fisttpll (%eax)
# CHECK-NEXT: 1.00 - fld %st(0)
# CHECK-NEXT: 1.00 - flds (%edx)
# CHECK-NEXT: 1.00 - fldl (%ecx)
# CHECK-NEXT: 2.00 2.00 fldt (%eax)
# CHECK-NEXT: 2.50 2.50 fldcw (%eax)
# CHECK-NEXT: 0.50 0.50 fldenv (%eax)
# CHECK-NEXT: 3.00 3.00 fld1
# CHECK-NEXT: 5.00 5.00 fldl2e
# CHECK-NEXT: 5.00 5.00 fldl2t
# CHECK-NEXT: 5.00 5.00 fldlg2
# CHECK-NEXT: 5.00 5.00 fldln2
# CHECK-NEXT: 5.00 5.00 fldpi
# CHECK-NEXT: 0.50 0.50 fldz
-# CHECK-NEXT: 4.00 - fmul %st(0), %st(1)
-# CHECK-NEXT: 4.00 - fmul %st(2)
+# CHECK-NEXT: 4.00 - fmul %st, %st(1)
+# CHECK-NEXT: 4.00 - fmul %st(2), %st
# CHECK-NEXT: 4.00 - fmuls (%ecx)
# CHECK-NEXT: 4.00 - fmull (%eax)
-# CHECK-NEXT: 4.00 - fmulp %st(1)
-# CHECK-NEXT: 4.00 - fmulp %st(2)
+# CHECK-NEXT: 4.00 - fmulp %st, %st(1)
+# CHECK-NEXT: 4.00 - fmulp %st, %st(2)
# CHECK-NEXT: 4.00 - fimuls (%ecx)
# CHECK-NEXT: 4.00 - fimull (%eax)
# CHECK-NEXT: 0.50 0.50 fnop
# CHECK-NEXT: 91.50 91.50 fpatan
# CHECK-NEXT: 27.50 27.50 fprem
# CHECK-NEXT: 35.50 35.50 fprem1
# CHECK-NEXT: 84.00 84.00 fptan
# CHECK-NEXT: 23.00 23.00 frndint
# CHECK-NEXT: 0.50 0.50 frstor (%eax)
# CHECK-NEXT: 0.50 0.50 fnsave (%eax)
# CHECK-NEXT: 38.50 38.50 fscale
# CHECK-NEXT: 87.00 87.00 fsin
# CHECK-NEXT: 87.00 87.00 fsincos
# CHECK-NEXT: 35.50 35.50 fsqrt
# CHECK-NEXT: 1.00 1.00 fst %st(0)
# CHECK-NEXT: 1.00 1.00 fsts (%edx)
# CHECK-NEXT: 1.00 1.00 fstl (%ecx)
# CHECK-NEXT: 1.00 1.00 fstp %st(0)
# CHECK-NEXT: 1.00 1.00 fstpl (%edx)
# CHECK-NEXT: 1.00 1.00 fstpl (%ecx)
# CHECK-NEXT: 2.50 2.50 fstpt (%eax)
# CHECK-NEXT: 4.00 4.00 fnstcw (%eax)
# CHECK-NEXT: 0.50 0.50 fnstenv (%eax)
# CHECK-NEXT: 0.50 0.50 fnstsw (%eax)
# CHECK-NEXT: 0.50 0.50 frstor (%eax)
# CHECK-NEXT: 0.50 0.50 wait
# CHECK-NEXT: 0.50 0.50 fnsave (%eax)
-# CHECK-NEXT: 5.00 - fsub %st(0), %st(1)
-# CHECK-NEXT: 5.00 - fsub %st(2)
+# CHECK-NEXT: 5.00 - fsub %st, %st(1)
+# CHECK-NEXT: 5.00 - fsub %st(2), %st
# CHECK-NEXT: 5.00 - fsubs (%ecx)
# CHECK-NEXT: 5.00 - fsubl (%eax)
-# CHECK-NEXT: 5.00 - fsubp %st(1)
-# CHECK-NEXT: 5.00 - fsubp %st(2)
+# CHECK-NEXT: 5.00 - fsubp %st, %st(1)
+# CHECK-NEXT: 5.00 - fsubp %st, %st(2)
# CHECK-NEXT: 5.00 - fisubs (%ecx)
# CHECK-NEXT: 5.00 - fisubl (%eax)
-# CHECK-NEXT: 5.00 - fsubr %st(0), %st(1)
-# CHECK-NEXT: 5.00 - fsubr %st(2)
+# CHECK-NEXT: 5.00 - fsubr %st, %st(1)
+# CHECK-NEXT: 5.00 - fsubr %st(2), %st
# CHECK-NEXT: 5.00 - fsubrs (%ecx)
# CHECK-NEXT: 5.00 - fsubrl (%eax)
-# CHECK-NEXT: 5.00 - fsubrp %st(1)
-# CHECK-NEXT: 5.00 - fsubrp %st(2)
+# CHECK-NEXT: 5.00 - fsubrp %st, %st(1)
+# CHECK-NEXT: 5.00 - fsubrp %st, %st(2)
# CHECK-NEXT: 5.00 - fisubrs (%ecx)
# CHECK-NEXT: 5.00 - fisubrl (%eax)
# CHECK-NEXT: 4.50 4.50 ftst
# CHECK-NEXT: - 1.00 fucom %st(1)
# CHECK-NEXT: - 1.00 fucom %st(3)
# CHECK-NEXT: - 1.00 fucomp %st(1)
# CHECK-NEXT: - 1.00 fucomp %st(3)
# CHECK-NEXT: - 1.00 fucompp
-# CHECK-NEXT: 4.50 4.50 fucomi %st(3)
-# CHECK-NEXT: 4.50 4.50 fucompi %st(3)
+# CHECK-NEXT: 4.50 4.50 fucomi %st(3), %st
+# CHECK-NEXT: 4.50 4.50 fucompi %st(3), %st
# CHECK-NEXT: 0.50 0.50 wait
# CHECK-NEXT: 1.00 - fxam
# CHECK-NEXT: 1.00 1.00 fxch %st(1)
# CHECK-NEXT: 1.00 1.00 fxch %st(3)
# CHECK-NEXT: 70.50 70.50 fxrstor (%eax)
# CHECK-NEXT: 70.00 70.00 fxsave (%eax)
# CHECK-NEXT: 12.50 12.50 fxtract
# CHECK-NEXT: 73.00 73.00 fyl2x
# CHECK-NEXT: 73.50 73.50 fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/BdVer2/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/BdVer2/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/BdVer2/resources-x87.s (revision 344166)
@@ -1,536 +1,536 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=bdver2 -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.50 U f2xm1
# CHECK-NEXT: 1 1 1.00 U fabs
-# CHECK-NEXT: 1 5 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 5 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 5 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fadd %st(2), %st
# CHECK-NEXT: 1 10 1.00 * U fadds (%ecx)
# CHECK-NEXT: 1 10 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 5 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 5 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 5 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 1 10 1.00 * U fiadds (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.50 U fbld (%ecx)
# CHECK-NEXT: 1 100 0.50 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 1 100 0.50 U fnclex
-# CHECK-NEXT: 1 1 1.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 1 1.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 1 1.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 1 1.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 1 1.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 1 1.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 1 1.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 1 1.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 1 1.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 1 1.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 1 1.00 U fcmove %st(1), %st
+# CHECK-NEXT: 1 1 1.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 1 1.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 1 1.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 1 1.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 1 1.00 U fcmovu %st(1), %st
# CHECK-NEXT: 2 1 1.00 U fcom %st(1)
# CHECK-NEXT: 2 1 1.00 U fcom %st(3)
# CHECK-NEXT: 1 6 1.00 U fcoms (%ecx)
# CHECK-NEXT: 1 6 1.00 U fcoml (%eax)
# CHECK-NEXT: 2 1 1.00 U fcomp %st(1)
# CHECK-NEXT: 2 1 1.00 U fcomp %st(3)
# CHECK-NEXT: 1 6 1.00 U fcomps (%ecx)
# CHECK-NEXT: 1 6 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 100 0.50 U fcompp
-# CHECK-NEXT: 2 1 1.00 U fcomi %st(3)
-# CHECK-NEXT: 2 1 1.00 U fcompi %st(3)
+# CHECK-NEXT: 2 1 1.00 U fcomi %st(3), %st
+# CHECK-NEXT: 2 1 1.00 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.50 U fcos
# CHECK-NEXT: 1 100 0.50 U fdecstp
-# CHECK-NEXT: 1 9 9.50 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 9 9.50 U fdiv %st(2)
+# CHECK-NEXT: 1 9 9.50 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 9 9.50 U fdiv %st(2), %st
# CHECK-NEXT: 1 14 9.50 * U fdivs (%ecx)
# CHECK-NEXT: 1 14 9.50 * U fdivl (%eax)
-# CHECK-NEXT: 1 9 9.50 U fdivp %st(1)
-# CHECK-NEXT: 1 9 9.50 U fdivp %st(2)
+# CHECK-NEXT: 1 9 9.50 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 9 9.50 U fdivp %st, %st(2)
# CHECK-NEXT: 1 14 9.50 * U fidivs (%ecx)
# CHECK-NEXT: 1 14 9.50 * U fidivl (%eax)
-# CHECK-NEXT: 1 9 9.50 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 9 9.50 U fdivr %st(2)
+# CHECK-NEXT: 1 9 9.50 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 9 9.50 U fdivr %st(2), %st
# CHECK-NEXT: 1 14 9.50 * U fdivrs (%ecx)
# CHECK-NEXT: 1 14 9.50 * U fdivrl (%eax)
-# CHECK-NEXT: 1 9 9.50 U fdivrp %st(1)
-# CHECK-NEXT: 1 9 9.50 U fdivrp %st(2)
+# CHECK-NEXT: 1 9 9.50 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 9 9.50 U fdivrp %st, %st(2)
# CHECK-NEXT: 1 14 9.50 * U fidivrs (%ecx)
# CHECK-NEXT: 1 14 9.50 * U fidivrl (%eax)
# CHECK-NEXT: 1 100 0.50 U ffree %st(0)
# CHECK-NEXT: 2 6 1.00 U ficoms (%ecx)
# CHECK-NEXT: 2 6 1.00 U ficoml (%eax)
# CHECK-NEXT: 2 6 1.00 U ficomps (%ecx)
# CHECK-NEXT: 2 6 1.00 U ficompl (%eax)
# CHECK-NEXT: 1 5 0.50 * U filds (%edx)
# CHECK-NEXT: 1 5 0.50 * U fildl (%ecx)
# CHECK-NEXT: 1 5 0.50 * U fildll (%eax)
# CHECK-NEXT: 1 100 0.50 U fincstp
# CHECK-NEXT: 1 100 0.50 U fninit
# CHECK-NEXT: 1 1 1.00 * U fists (%edx)
# CHECK-NEXT: 1 1 1.00 * U fistl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fistps (%edx)
# CHECK-NEXT: 1 1 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fistpll (%eax)
# CHECK-NEXT: 1 1 1.00 * U fisttps (%edx)
# CHECK-NEXT: 1 1 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 0.50 U fld %st(0)
# CHECK-NEXT: 1 5 0.50 * U flds (%edx)
# CHECK-NEXT: 1 5 0.50 * U fldl (%ecx)
# CHECK-NEXT: 1 5 0.50 * U fldt (%eax)
# CHECK-NEXT: 1 5 0.50 * U fldcw (%eax)
# CHECK-NEXT: 1 100 0.50 U fldenv (%eax)
# CHECK-NEXT: 1 3 1.00 U fld1
# CHECK-NEXT: 1 3 1.00 U fldl2e
# CHECK-NEXT: 1 3 1.00 U fldl2t
# CHECK-NEXT: 1 3 1.00 U fldlg2
# CHECK-NEXT: 1 3 1.00 U fldln2
# CHECK-NEXT: 1 3 1.00 U fldpi
# CHECK-NEXT: 1 3 1.00 U fldz
-# CHECK-NEXT: 1 5 1.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmul %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmul %st(2), %st
# CHECK-NEXT: 1 10 1.00 * U fmuls (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fmull (%eax)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(2)
# CHECK-NEXT: 1 10 1.00 * U fimuls (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 0.50 U fnop
# CHECK-NEXT: 1 100 0.50 U fpatan
# CHECK-NEXT: 1 100 0.50 U fprem
# CHECK-NEXT: 1 100 0.50 U fprem1
# CHECK-NEXT: 1 100 0.50 U fptan
# CHECK-NEXT: 1 100 0.50 U frndint
# CHECK-NEXT: 1 100 0.50 U frstor (%eax)
# CHECK-NEXT: 1 100 0.50 U fnsave (%eax)
# CHECK-NEXT: 1 100 0.50 U fscale
# CHECK-NEXT: 1 100 0.50 U fsin
# CHECK-NEXT: 1 100 0.50 U fsincos
# CHECK-NEXT: 1 1 17.50 U fsqrt
# CHECK-NEXT: 1 1 0.50 U fst %st(0)
# CHECK-NEXT: 1 1 1.00 * U fsts (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 0.50 U fstp %st(0)
# CHECK-NEXT: 1 1 1.00 * U fstpl (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fstpt (%eax)
# CHECK-NEXT: 1 1 0.50 * U fnstcw (%eax)
# CHECK-NEXT: 1 100 0.50 U fnstenv (%eax)
# CHECK-NEXT: 1 100 0.50 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.50 U frstor (%eax)
# CHECK-NEXT: 1 100 0.50 U wait
# CHECK-NEXT: 1 100 0.50 U fnsave (%eax)
-# CHECK-NEXT: 1 5 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 5 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 5 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fsub %st(2), %st
# CHECK-NEXT: 1 10 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 5 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 5 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 5 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 1 10 1.00 * U fisubs (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 5 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 5 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 5 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 1 10 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 5 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 5 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 5 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 1 10 1.00 * U fisubrs (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 1 1.00 U ftst
# CHECK-NEXT: 2 1 1.00 U fucom %st(1)
# CHECK-NEXT: 2 1 1.00 U fucom %st(3)
# CHECK-NEXT: 2 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 2 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 1 1.00 U fucompp
-# CHECK-NEXT: 2 1 1.00 U fucomi %st(3)
-# CHECK-NEXT: 2 1 1.00 U fucompi %st(3)
+# CHECK-NEXT: 2 1 1.00 U fucomi %st(3), %st
+# CHECK-NEXT: 2 1 1.00 U fucompi %st(3), %st
# CHECK-NEXT: 1 100 0.50 U wait
# CHECK-NEXT: 1 100 0.50 U fxam
# CHECK-NEXT: 1 1 0.50 U fxch %st(1)
# CHECK-NEXT: 1 1 0.50 U fxch %st(3)
# CHECK-NEXT: 1 100 0.50 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.50 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 0.50 U fxtract
# CHECK-NEXT: 1 100 0.50 U fyl2x
# CHECK-NEXT: 1 100 0.50 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0.0] - PdAGLU01
# CHECK-NEXT: [0.1] - PdAGLU01
# CHECK-NEXT: [1] - PdBranch
# CHECK-NEXT: [2] - PdCount
# CHECK-NEXT: [3] - PdDiv
# CHECK-NEXT: [4] - PdEX0
# CHECK-NEXT: [5] - PdEX1
# CHECK-NEXT: [6] - PdFPCVT
# CHECK-NEXT: [7.0] - PdFPFMA
# CHECK-NEXT: [7.1] - PdFPFMA
# CHECK-NEXT: [8.0] - PdFPMAL
# CHECK-NEXT: [8.1] - PdFPMAL
# CHECK-NEXT: [9] - PdFPMMA
# CHECK-NEXT: [10] - PdFPSTO
# CHECK-NEXT: [11] - PdFPU0
# CHECK-NEXT: [12] - PdFPU1
# CHECK-NEXT: [13] - PdFPU2
# CHECK-NEXT: [14] - PdFPU3
# CHECK-NEXT: [15] - PdFPXBR
# CHECK-NEXT: [16.0] - PdLoad
# CHECK-NEXT: [16.1] - PdLoad
# CHECK-NEXT: [17] - PdMul
# CHECK-NEXT: [18] - PdStore
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0.0] [0.1] [1] [2] [3] [4] [5] [6] [7.0] [7.1] [8.0] [8.1] [9] [10] [11] [12] [13] [14] [15] [16.0] [16.1] [17] [18]
# CHECK-NEXT: 24.00 24.00 - - - 36.00 20.00 - 201.50 201.50 - - - 7.00 48.00 40.00 - - - 17.50 17.50 - 13.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0.0] [0.1] [1] [2] [3] [4] [5] [6] [7.0] [7.1] [8.0] [8.1] [9] [10] [11] [12] [13] [14] [15] [16.0] [16.1] [17] [18] Instructions:
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - f2xm1
# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fabs
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fadd %st(2)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fadd %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fadd %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fadds (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - faddl (%ecx)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - faddp %st(1)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - faddp %st(2)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - faddp %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - faddp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fiadds (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fiaddl (%ecx)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fbld (%ecx)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fbstp (%eax)
# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fchs
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fnclex
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovb %st(1), %st
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmove %st(1), %st
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovne %st(1), %st
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcmovu %st(1), %st
# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcom %st(1)
# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcom %st(3)
# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fcoms (%ecx)
# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fcoml (%eax)
# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcomp %st(1)
# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcomp %st(3)
# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fcomps (%ecx)
# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fcompl (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fcompp
-# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcomi %st(3)
-# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcompi %st(3)
+# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcomi %st(3), %st
+# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fcompi %st(3), %st
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fcos
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fdecstp
-# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdiv %st(2)
+# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdiv %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - 9.50 9.50 - - - - - 1.00 - - - 0.50 0.50 - - fdivs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 9.50 9.50 - - - - - 1.00 - - - 0.50 0.50 - - fdivl (%eax)
-# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivp %st(1)
-# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivp %st(2)
+# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - - 9.50 9.50 - - - - - 1.00 - - - 0.50 0.50 - - fidivs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 9.50 9.50 - - - - - 1.00 - - - 0.50 0.50 - - fidivl (%eax)
-# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivr %st(2)
+# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivr %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - 9.50 9.50 - - - - - 1.00 - - - 0.50 0.50 - - fdivrs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 9.50 9.50 - - - - - 1.00 - - - 0.50 0.50 - - fdivrl (%eax)
-# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivrp %st(1)
-# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivrp %st(2)
+# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 9.50 9.50 - - - - - 1.00 - - - - - - - fdivrp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - - 9.50 9.50 - - - - - 1.00 - - - 0.50 0.50 - - fidivrs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 9.50 9.50 - - - - - 1.00 - - - 0.50 0.50 - - fidivrl (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - ffree %st(0)
# CHECK-NEXT: 0.50 0.50 - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - ficoms (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - ficoml (%eax)
# CHECK-NEXT: 0.50 0.50 - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - ficomps (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - ficompl (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - 0.50 0.50 - - filds (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - 0.50 0.50 - - fildl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - 0.50 0.50 - - fildll (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fincstp
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fninit
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fists (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fistl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fistps (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fistpl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fistpll (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fisttps (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fisttpl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fisttpll (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fld %st(0)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - 0.50 0.50 - - flds (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - 0.50 0.50 - - fldl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - 0.50 0.50 - - fldt (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - 0.50 0.50 - - fldcw (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fldenv (%eax)
# CHECK-NEXT: - - - - - - - - - - - - - 1.00 - 1.00 - - - - - - - fld1
# CHECK-NEXT: - - - - - - - - - - - - - 1.00 - 1.00 - - - - - - - fldl2e
# CHECK-NEXT: - - - - - - - - - - - - - 1.00 - 1.00 - - - - - - - fldl2t
# CHECK-NEXT: - - - - - - - - - - - - - 1.00 - 1.00 - - - - - - - fldlg2
# CHECK-NEXT: - - - - - - - - - - - - - 1.00 - 1.00 - - - - - - - fldln2
# CHECK-NEXT: - - - - - - - - - - - - - 1.00 - 1.00 - - - - - - - fldpi
# CHECK-NEXT: - - - - - - - - - - - - - 1.00 - 1.00 - - - - - - - fldz
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fmul %st(2)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fmul %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - - 1.00 - - - 0.50 0.50 - - fmuls (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - - 1.00 - - - 0.50 0.50 - - fmull (%eax)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fmulp %st(1)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fmulp %st(2)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fmulp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - - 1.00 - - - 0.50 0.50 - - fimuls (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - - 1.00 - - - 0.50 0.50 - - fimull (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fnop
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fpatan
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fprem
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fprem1
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fptan
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - frndint
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - frstor (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fnsave (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fscale
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fsin
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fsincos
# CHECK-NEXT: - - - - - - - - 17.50 17.50 - - - - - 1.00 - - - - - - - fsqrt
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fst %st(0)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fsts (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fstl (%ecx)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fstp %st(0)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fstpl (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fstpl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - - - - - - - - - 1.00 fstpt (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fnstcw (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fnstenv (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fnstsw (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - frstor (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - wait
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fnsave (%eax)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsub %st(2)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsub %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsub %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fsubs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fsubl (%eax)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubp %st(1)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubp %st(2)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fisubs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fisubl (%eax)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubr %st(2)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubr %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fsubrs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fsubrl (%eax)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubrp %st(1)
-# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubrp %st(2)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - 1.00 - - - - - - - - fsubrp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fisubrs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 0.50 - - - - 1.00 - - - - 0.50 0.50 - - fisubrl (%eax)
# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - ftst
# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fucom %st(1)
# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fucom %st(3)
# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fucomp %st(1)
# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fucomp %st(3)
# CHECK-NEXT: - - - - - - - - 0.50 0.50 - - - - - 1.00 - - - - - - - fucompp
-# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fucomi %st(3)
-# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fucompi %st(3)
+# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fucomi %st(3), %st
+# CHECK-NEXT: - - - - - 1.00 - - 0.50 0.50 - - - - 1.00 - - - - - - - - fucompi %st(3), %st
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - wait
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fxam
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fxch %st(1)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fxch %st(3)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fxrstor (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fxsave (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fxtract
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fyl2x
# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - - - - - - - - - - - - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Broadwell/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Broadwell/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Broadwell/resources-x87.s (revision 344166)
@@ -1,523 +1,523 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=broadwell -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.25 U f2xm1
# CHECK-NEXT: 1 1 1.00 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 2 9 1.00 * U fadds (%ecx)
# CHECK-NEXT: 2 9 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 3 12 2.00 * U fiadds (%ecx)
# CHECK-NEXT: 3 12 2.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.25 U fbld (%ecx)
# CHECK-NEXT: 2 1 1.00 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 4 4 1.00 U fnclex
-# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st
# CHECK-NEXT: 1 1 1.00 U fcom %st(1)
# CHECK-NEXT: 1 1 1.00 U fcom %st(3)
# CHECK-NEXT: 2 7 1.00 U fcoms (%ecx)
# CHECK-NEXT: 2 7 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(3)
# CHECK-NEXT: 2 7 1.00 U fcomps (%ecx)
# CHECK-NEXT: 2 7 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 100 0.25 U fcompp
-# CHECK-NEXT: 1 3 1.00 U fcomi %st(3)
-# CHECK-NEXT: 1 3 1.00 U fcompi %st(3)
+# CHECK-NEXT: 1 3 1.00 U fcomi %st(3), %st
+# CHECK-NEXT: 1 3 1.00 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.25 U fcos
# CHECK-NEXT: 2 2 1.00 U fdecstp
-# CHECK-NEXT: 1 15 1.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 20 1.00 U fdiv %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 20 1.00 U fdiv %st(2), %st
# CHECK-NEXT: 2 21 1.00 * U fdivs (%ecx)
# CHECK-NEXT: 2 21 1.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 15 1.00 U fdivp %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivp %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivp %st, %st(2)
# CHECK-NEXT: 3 24 1.00 * U fidivs (%ecx)
# CHECK-NEXT: 3 24 1.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 20 1.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivr %st(2)
+# CHECK-NEXT: 1 20 1.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivr %st(2), %st
# CHECK-NEXT: 2 26 1.00 * U fdivrs (%ecx)
# CHECK-NEXT: 2 26 1.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 20 1.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 20 1.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 20 1.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 20 1.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 3 29 1.00 * U fidivrs (%ecx)
# CHECK-NEXT: 3 29 1.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 100 0.25 U ffree %st(0)
# CHECK-NEXT: 3 10 2.00 U ficoms (%ecx)
# CHECK-NEXT: 3 10 2.00 U ficoml (%eax)
# CHECK-NEXT: 3 10 2.00 U ficomps (%ecx)
# CHECK-NEXT: 3 10 2.00 U ficompl (%eax)
# CHECK-NEXT: 2 9 1.00 * U filds (%edx)
# CHECK-NEXT: 2 9 1.00 * U fildl (%ecx)
# CHECK-NEXT: 2 9 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 1 0.50 U fincstp
# CHECK-NEXT: 15 75 6.00 U fninit
# CHECK-NEXT: 3 4 1.00 * U fists (%edx)
# CHECK-NEXT: 3 4 1.00 * U fistl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fistps (%edx)
# CHECK-NEXT: 3 4 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fistpll (%eax)
# CHECK-NEXT: 3 4 1.00 * U fisttps (%edx)
# CHECK-NEXT: 3 4 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 0.25 U fld %st(0)
# CHECK-NEXT: 1 6 0.50 * U flds (%edx)
# CHECK-NEXT: 1 6 0.50 * U fldl (%ecx)
# CHECK-NEXT: 1 6 0.50 * U fldt (%eax)
# CHECK-NEXT: 3 7 1.00 * U fldcw (%eax)
# CHECK-NEXT: 64 60 14.00 U fldenv (%eax)
# CHECK-NEXT: 2 1 1.00 U fld1
# CHECK-NEXT: 2 1 1.00 U fldl2e
# CHECK-NEXT: 2 1 1.00 U fldl2t
# CHECK-NEXT: 2 1 1.00 U fldlg2
# CHECK-NEXT: 2 1 1.00 U fldln2
# CHECK-NEXT: 2 1 1.00 U fldpi
# CHECK-NEXT: 1 1 0.50 U fldz
-# CHECK-NEXT: 1 5 1.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmul %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmul %st(2), %st
# CHECK-NEXT: 2 11 1.00 * U fmuls (%ecx)
# CHECK-NEXT: 2 11 1.00 * U fmull (%eax)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(2)
# CHECK-NEXT: 3 14 1.00 * U fimuls (%ecx)
# CHECK-NEXT: 3 14 1.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 0.50 U fnop
# CHECK-NEXT: 1 100 0.25 U fpatan
# CHECK-NEXT: 1 100 0.25 U fprem
# CHECK-NEXT: 1 100 0.25 U fprem1
# CHECK-NEXT: 1 100 0.25 U fptan
# CHECK-NEXT: 1 100 0.25 U frndint
# CHECK-NEXT: 1 100 0.25 U frstor (%eax)
# CHECK-NEXT: 1 100 0.25 U fnsave (%eax)
# CHECK-NEXT: 1 100 0.25 U fscale
# CHECK-NEXT: 1 100 0.25 U fsin
# CHECK-NEXT: 1 100 0.25 U fsincos
# CHECK-NEXT: 1 23 9.00 U fsqrt
# CHECK-NEXT: 1 1 0.25 U fst %st(0)
# CHECK-NEXT: 1 1 1.00 * U fsts (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 0.25 U fstp %st(0)
# CHECK-NEXT: 2 1 1.00 * U fstpl (%edx)
# CHECK-NEXT: 2 1 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 2 1 1.00 * U fstpt (%eax)
# CHECK-NEXT: 3 2 1.00 * U fnstcw (%eax)
# CHECK-NEXT: 100 115 19.50 U fnstenv (%eax)
# CHECK-NEXT: 3 4 1.00 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.25 U frstor (%eax)
# CHECK-NEXT: 2 2 0.50 U wait
# CHECK-NEXT: 1 100 0.25 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 2 9 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 2 9 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 3 12 2.00 * U fisubs (%ecx)
# CHECK-NEXT: 3 12 2.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 2 9 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 2 9 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 3 12 2.00 * U fisubrs (%ecx)
# CHECK-NEXT: 3 12 2.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 3 1.00 U ftst
# CHECK-NEXT: 1 1 1.00 U fucom %st(1)
# CHECK-NEXT: 1 1 1.00 U fucom %st(3)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 3 1.00 U fucompp
-# CHECK-NEXT: 1 3 1.00 U fucomi %st(3)
-# CHECK-NEXT: 1 3 1.00 U fucompi %st(3)
+# CHECK-NEXT: 1 3 1.00 U fucomi %st(3), %st
+# CHECK-NEXT: 1 3 1.00 U fucompi %st(3), %st
# CHECK-NEXT: 2 2 0.50 U wait
# CHECK-NEXT: 1 100 0.25 U fxam
# CHECK-NEXT: 12 14 4.00 U fxch %st(1)
# CHECK-NEXT: 12 14 4.00 U fxch %st(3)
# CHECK-NEXT: 90 63 16.50 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.25 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 0.25 U fxtract
# CHECK-NEXT: 1 100 0.25 U fyl2x
# CHECK-NEXT: 1 100 0.25 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - BWDivider
# CHECK-NEXT: [1] - BWFPDivider
# CHECK-NEXT: [2] - BWPort0
# CHECK-NEXT: [3] - BWPort1
# CHECK-NEXT: [4] - BWPort2
# CHECK-NEXT: [5] - BWPort3
# CHECK-NEXT: [6] - BWPort4
# CHECK-NEXT: [7] - BWPort5
# CHECK-NEXT: [8] - BWPort6
# CHECK-NEXT: [9] - BWPort7
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
# CHECK-NEXT: - 9.00 116.92 145.92 49.00 49.00 27.00 59.42 69.75 9.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - f2xm1
# CHECK-NEXT: - - - - - - - 1.00 - - fabs
-# CHECK-NEXT: - - - 1.00 - - - - - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fadd %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fadd %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fadd %st(2), %st
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fadds (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - faddl (%ecx)
-# CHECK-NEXT: - - - 1.00 - - - - - - faddp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - faddp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - faddp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - faddp %st, %st(2)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fiadds (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fiaddl (%ecx)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fbld (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fbstp (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - fchs
# CHECK-NEXT: - - 1.00 1.00 - - - 1.00 1.00 - fnclex
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmove %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovne %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovu %st(1), %st
# CHECK-NEXT: - - - 1.00 - - - - - - fcom %st(1)
# CHECK-NEXT: - - - 1.00 - - - - - - fcom %st(3)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fcoms (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fcoml (%eax)
# CHECK-NEXT: - - - 1.00 - - - - - - fcomp %st(1)
# CHECK-NEXT: - - - 1.00 - - - - - - fcomp %st(3)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fcomps (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fcompl (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fcompp
-# CHECK-NEXT: - - - 1.00 - - - - - - fcomi %st(3)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcompi %st(3)
+# CHECK-NEXT: - - - 1.00 - - - - - - fcomi %st(3), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcompi %st(3), %st
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fcos
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fdecstp
-# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fidivs (%ecx)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fidivl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivrs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivrl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fidivrs (%ecx)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fidivrl (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - ffree %st(0)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - ficoms (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - ficoml (%eax)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - ficomps (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - ficompl (%eax)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - filds (%edx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fildl (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fildll (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fincstp
# CHECK-NEXT: - - 3.00 3.00 - - - 7.50 1.50 - fninit
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fists (%edx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fistl (%ecx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fistps (%edx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fistpl (%ecx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fistpll (%eax)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fisttps (%edx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fisttpl (%ecx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fisttpll (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fld %st(0)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - flds (%edx)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - fldl (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - fldt (%eax)
# CHECK-NEXT: - - 1.50 0.50 0.50 0.50 - - - - fldcw (%eax)
# CHECK-NEXT: - - 18.92 11.42 4.00 4.00 - 10.92 14.75 - fldenv (%eax)
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fld1
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldl2e
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldl2t
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldlg2
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldln2
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldpi
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fldz
-# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fmuls (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fmull (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fimuls (%ecx)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fimull (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fnop
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fpatan
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fprem
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fprem1
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fptan
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frndint
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frstor (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fnsave (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fscale
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fsin
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fsincos
# CHECK-NEXT: - 9.00 1.00 - - - - - - - fsqrt
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fst %st(0)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fsts (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstl (%ecx)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fstp %st(0)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpl (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpt (%eax)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - 1.00 0.33 fnstcw (%eax)
# CHECK-NEXT: - - 27.00 16.50 3.67 3.67 11.00 15.50 19.00 3.67 fnstenv (%eax)
# CHECK-NEXT: - - 1.00 - 0.33 0.33 1.00 - - 0.33 fnstsw (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frstor (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - wait
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fnsave (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsub %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsub %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsub %st(2), %st
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fsubs (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fsubl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubp %st, %st(2)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fisubs (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fisubl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubr %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubr %st(2), %st
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fsubrs (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fsubrl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubrp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubrp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubrp %st, %st(2)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fisubrs (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fisubrl (%eax)
# CHECK-NEXT: - - - 1.00 - - - - - - ftst
# CHECK-NEXT: - - - 1.00 - - - - - - fucom %st(1)
# CHECK-NEXT: - - - 1.00 - - - - - - fucom %st(3)
# CHECK-NEXT: - - - 1.00 - - - - - - fucomp %st(1)
# CHECK-NEXT: - - - 1.00 - - - - - - fucomp %st(3)
# CHECK-NEXT: - - - 1.00 - - - - - - fucompp
-# CHECK-NEXT: - - - 1.00 - - - - - - fucomi %st(3)
-# CHECK-NEXT: - - - 1.00 - - - - - - fucompi %st(3)
+# CHECK-NEXT: - - - 1.00 - - - - - - fucomi %st(3), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fucompi %st(3), %st
# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - wait
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxam
# CHECK-NEXT: - - 3.25 2.25 - - - 1.25 5.25 - fxch %st(1)
# CHECK-NEXT: - - 3.25 2.25 - - - 1.25 5.25 - fxch %st(3)
# CHECK-NEXT: - - 17.25 12.25 16.50 16.50 - 12.75 14.75 - fxrstor (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxsave (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxtract
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fyl2x
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/BtVer2/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/BtVer2/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/BtVer2/resources-x87.s (revision 344166)
@@ -1,527 +1,527 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=btver2 -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.50 U f2xm1
# CHECK-NEXT: 1 2 1.00 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 1 8 1.00 * U fadds (%ecx)
# CHECK-NEXT: 1 8 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 1 8 1.00 * U fiadds (%ecx)
# CHECK-NEXT: 1 8 1.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.50 U fbld (%ecx)
# CHECK-NEXT: 1 100 0.50 U fbstp (%eax)
# CHECK-NEXT: 1 2 1.00 U fchs
# CHECK-NEXT: 1 100 0.50 U fnclex
-# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st
# CHECK-NEXT: 1 3 1.00 U fcom %st(1)
# CHECK-NEXT: 1 3 1.00 U fcom %st(3)
# CHECK-NEXT: 1 8 1.00 U fcoms (%ecx)
# CHECK-NEXT: 1 8 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 3 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 3 1.00 U fcomp %st(3)
# CHECK-NEXT: 1 8 1.00 U fcomps (%ecx)
# CHECK-NEXT: 1 8 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 100 0.50 U fcompp
-# CHECK-NEXT: 1 3 1.00 U fcomi %st(3)
-# CHECK-NEXT: 1 3 1.00 U fcompi %st(3)
+# CHECK-NEXT: 1 3 1.00 U fcomi %st(3), %st
+# CHECK-NEXT: 1 3 1.00 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.50 U fcos
# CHECK-NEXT: 1 100 0.50 U fdecstp
-# CHECK-NEXT: 1 19 19.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 19 19.00 U fdiv %st(2)
+# CHECK-NEXT: 1 19 19.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 19 19.00 U fdiv %st(2), %st
# CHECK-NEXT: 1 24 19.00 * U fdivs (%ecx)
# CHECK-NEXT: 1 24 19.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 19 19.00 U fdivp %st(1)
-# CHECK-NEXT: 1 19 19.00 U fdivp %st(2)
+# CHECK-NEXT: 1 19 19.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 19 19.00 U fdivp %st, %st(2)
# CHECK-NEXT: 1 24 19.00 * U fidivs (%ecx)
# CHECK-NEXT: 1 24 19.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 19 19.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 19 19.00 U fdivr %st(2)
+# CHECK-NEXT: 1 19 19.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 19 19.00 U fdivr %st(2), %st
# CHECK-NEXT: 1 24 19.00 * U fdivrs (%ecx)
# CHECK-NEXT: 1 24 19.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 19 19.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 19 19.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 19 19.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 19 19.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 1 24 19.00 * U fidivrs (%ecx)
# CHECK-NEXT: 1 24 19.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 100 0.50 U ffree %st(0)
# CHECK-NEXT: 1 8 1.00 U ficoms (%ecx)
# CHECK-NEXT: 1 8 1.00 U ficoml (%eax)
# CHECK-NEXT: 1 8 1.00 U ficomps (%ecx)
# CHECK-NEXT: 1 8 1.00 U ficompl (%eax)
# CHECK-NEXT: 1 5 1.00 * U filds (%edx)
# CHECK-NEXT: 1 5 1.00 * U fildl (%ecx)
# CHECK-NEXT: 1 5 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 100 0.50 U fincstp
# CHECK-NEXT: 1 100 0.50 U fninit
# CHECK-NEXT: 1 1 1.00 * U fists (%edx)
# CHECK-NEXT: 1 1 1.00 * U fistl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fistps (%edx)
# CHECK-NEXT: 1 1 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fistpll (%eax)
# CHECK-NEXT: 1 1 1.00 * U fisttps (%edx)
# CHECK-NEXT: 1 1 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 0.50 U fld %st(0)
# CHECK-NEXT: 1 5 1.00 * U flds (%edx)
# CHECK-NEXT: 1 5 1.00 * U fldl (%ecx)
# CHECK-NEXT: 1 5 1.00 * U fldt (%eax)
# CHECK-NEXT: 1 5 1.00 * U fldcw (%eax)
# CHECK-NEXT: 1 100 0.50 U fldenv (%eax)
# CHECK-NEXT: 1 3 1.00 U fld1
# CHECK-NEXT: 1 3 1.00 U fldl2e
# CHECK-NEXT: 1 3 1.00 U fldl2t
# CHECK-NEXT: 1 3 1.00 U fldlg2
# CHECK-NEXT: 1 3 1.00 U fldln2
# CHECK-NEXT: 1 3 1.00 U fldpi
# CHECK-NEXT: 1 3 1.00 U fldz
-# CHECK-NEXT: 1 2 1.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 2 1.00 U fmul %st(2)
+# CHECK-NEXT: 1 2 1.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 2 1.00 U fmul %st(2), %st
# CHECK-NEXT: 1 7 1.00 * U fmuls (%ecx)
# CHECK-NEXT: 1 7 1.00 * U fmull (%eax)
-# CHECK-NEXT: 1 2 1.00 U fmulp %st(1)
-# CHECK-NEXT: 1 2 1.00 U fmulp %st(2)
+# CHECK-NEXT: 1 2 1.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 2 1.00 U fmulp %st, %st(2)
# CHECK-NEXT: 1 7 1.00 * U fimuls (%ecx)
# CHECK-NEXT: 1 7 1.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 0.50 U fnop
# CHECK-NEXT: 1 100 0.50 U fpatan
# CHECK-NEXT: 1 100 0.50 U fprem
# CHECK-NEXT: 1 100 0.50 U fprem1
# CHECK-NEXT: 1 100 0.50 U fptan
# CHECK-NEXT: 1 100 0.50 U frndint
# CHECK-NEXT: 1 100 0.50 U frstor (%eax)
# CHECK-NEXT: 1 100 0.50 U fnsave (%eax)
# CHECK-NEXT: 1 100 0.50 U fscale
# CHECK-NEXT: 1 100 0.50 U fsin
# CHECK-NEXT: 1 100 0.50 U fsincos
# CHECK-NEXT: 1 35 35.00 U fsqrt
# CHECK-NEXT: 1 1 0.50 U fst %st(0)
# CHECK-NEXT: 1 1 1.00 * U fsts (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 0.50 U fstp %st(0)
# CHECK-NEXT: 1 1 1.00 * U fstpl (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fstpt (%eax)
# CHECK-NEXT: 1 1 0.50 * U fnstcw (%eax)
# CHECK-NEXT: 1 100 0.50 U fnstenv (%eax)
# CHECK-NEXT: 1 100 0.50 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.50 U frstor (%eax)
# CHECK-NEXT: 1 100 0.50 U wait
# CHECK-NEXT: 1 100 0.50 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 1 8 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 1 8 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 1 8 1.00 * U fisubs (%ecx)
# CHECK-NEXT: 1 8 1.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 1 8 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 1 8 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 1 8 1.00 * U fisubrs (%ecx)
# CHECK-NEXT: 1 8 1.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 3 1.00 U ftst
# CHECK-NEXT: 1 3 1.00 U fucom %st(1)
# CHECK-NEXT: 1 3 1.00 U fucom %st(3)
# CHECK-NEXT: 1 3 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 3 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 3 1.00 U fucompp
-# CHECK-NEXT: 1 3 1.00 U fucomi %st(3)
-# CHECK-NEXT: 1 3 1.00 U fucompi %st(3)
+# CHECK-NEXT: 1 3 1.00 U fucomi %st(3), %st
+# CHECK-NEXT: 1 3 1.00 U fucompi %st(3), %st
# CHECK-NEXT: 1 100 0.50 U wait
# CHECK-NEXT: 1 100 0.50 U fxam
# CHECK-NEXT: 1 1 0.50 U fxch %st(1)
# CHECK-NEXT: 1 1 0.50 U fxch %st(3)
# CHECK-NEXT: 1 100 0.50 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.50 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 0.50 U fxtract
# CHECK-NEXT: 1 100 0.50 U fyl2x
# CHECK-NEXT: 1 100 0.50 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - JALU0
# CHECK-NEXT: [1] - JALU1
# CHECK-NEXT: [2] - JDiv
# CHECK-NEXT: [3] - JFPA
# CHECK-NEXT: [4] - JFPM
# CHECK-NEXT: [5] - JFPU0
# CHECK-NEXT: [6] - JFPU1
# CHECK-NEXT: [7] - JLAGU
# CHECK-NEXT: [8] - JMul
# CHECK-NEXT: [9] - JSAGU
# CHECK-NEXT: [10] - JSTC
# CHECK-NEXT: [11] - JVALU0
# CHECK-NEXT: [12] - JVALU1
# CHECK-NEXT: [13] - JVIMUL
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
# CHECK-NEXT: 42.00 20.00 - 54.00 349.00 54.00 34.00 39.00 - 13.00 7.00 - - -
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - f2xm1
# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fabs
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fadd %st(2)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fadd %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fadd %st(2), %st
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fadds (%ecx)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - faddl (%ecx)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - faddp %st(1)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - faddp %st(2)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - faddp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - faddp %st, %st(2)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fiadds (%ecx)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fiaddl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fbld (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fbstp (%eax)
# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fchs
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fnclex
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmove %st(1), %st
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovne %st(1), %st
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fcmovu %st(1), %st
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fcom %st(1)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fcom %st(3)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - 1.00 - - - - - - fcoms (%ecx)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - 1.00 - - - - - - fcoml (%eax)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fcomp %st(1)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fcomp %st(3)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - 1.00 - - - - - - fcomps (%ecx)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - 1.00 - - - - - - fcompl (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fcompp
-# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fcomi %st(3)
-# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fcompi %st(3)
+# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fcomi %st(3), %st
+# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fcompi %st(3), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fcos
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fdecstp
-# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdiv %st(2)
+# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdiv %st(2), %st
# CHECK-NEXT: - - - - 19.00 - 1.00 1.00 - - - - - - fdivs (%ecx)
# CHECK-NEXT: - - - - 19.00 - 1.00 1.00 - - - - - - fdivl (%eax)
-# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivp %st(1)
-# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivp %st(2)
+# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivp %st, %st(2)
# CHECK-NEXT: - - - - 19.00 - 1.00 1.00 - - - - - - fidivs (%ecx)
# CHECK-NEXT: - - - - 19.00 - 1.00 1.00 - - - - - - fidivl (%eax)
-# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivr %st(2)
+# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivr %st(2), %st
# CHECK-NEXT: - - - - 19.00 - 1.00 1.00 - - - - - - fdivrs (%ecx)
# CHECK-NEXT: - - - - 19.00 - 1.00 1.00 - - - - - - fdivrl (%eax)
-# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivrp %st(1)
-# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivrp %st(2)
+# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - - - - 19.00 - 1.00 - - - - - - - fdivrp %st, %st(2)
# CHECK-NEXT: - - - - 19.00 - 1.00 1.00 - - - - - - fidivrs (%ecx)
# CHECK-NEXT: - - - - 19.00 - 1.00 1.00 - - - - - - fidivrl (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - ffree %st(0)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - 1.00 - - - - - - ficoms (%ecx)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - 1.00 - - - - - - ficoml (%eax)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - 1.00 - - - - - - ficomps (%ecx)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - 1.00 - - - - - - ficompl (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - - - - - filds (%edx)
# CHECK-NEXT: - - - - - - - 1.00 - - - - - - fildl (%ecx)
# CHECK-NEXT: - - - - - - - 1.00 - - - - - - fildll (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fincstp
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fninit
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fists (%edx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fistl (%ecx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fistps (%edx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fistpl (%ecx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fistpll (%eax)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fisttps (%edx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fisttpl (%ecx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fisttpll (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fld %st(0)
# CHECK-NEXT: - - - - - - - 1.00 - - - - - - flds (%edx)
# CHECK-NEXT: - - - - - - - 1.00 - - - - - - fldl (%ecx)
# CHECK-NEXT: - - - - - - - 1.00 - - - - - - fldt (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - - - - - fldcw (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fldenv (%eax)
# CHECK-NEXT: - - - - - - 1.00 - - - 1.00 - - - fld1
# CHECK-NEXT: - - - - - - 1.00 - - - 1.00 - - - fldl2e
# CHECK-NEXT: - - - - - - 1.00 - - - 1.00 - - - fldl2t
# CHECK-NEXT: - - - - - - 1.00 - - - 1.00 - - - fldlg2
# CHECK-NEXT: - - - - - - 1.00 - - - 1.00 - - - fldln2
# CHECK-NEXT: - - - - - - 1.00 - - - 1.00 - - - fldpi
# CHECK-NEXT: - - - - - - 1.00 - - - 1.00 - - - fldz
-# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fmul %st(2)
+# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fmul %st(2), %st
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 - - - - - - fmuls (%ecx)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 - - - - - - fmull (%eax)
-# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fmulp %st(1)
-# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fmulp %st(2)
+# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - - - 1.00 - 1.00 - - - - - - - fmulp %st, %st(2)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 - - - - - - fimuls (%ecx)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 - - - - - - fimull (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fnop
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fpatan
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fprem
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fprem1
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fptan
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - frndint
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - frstor (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fnsave (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fscale
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fsin
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fsincos
# CHECK-NEXT: - - - - 35.00 - 1.00 - - - - - - - fsqrt
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fst %st(0)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fsts (%edx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fstl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fstp %st(0)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fstpl (%edx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fstpl (%ecx)
# CHECK-NEXT: - - - - - - - - - 1.00 - - - - fstpt (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fnstcw (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fnstenv (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fnstsw (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - frstor (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - wait
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fnsave (%eax)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsub %st(2)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsub %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsub %st(2), %st
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fsubs (%ecx)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fsubl (%eax)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubp %st(1)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubp %st(2)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubp %st, %st(2)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fisubs (%ecx)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fisubl (%eax)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubr %st(2)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubr %st(2), %st
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fsubrs (%ecx)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fsubrl (%eax)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubrp %st(1)
-# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubrp %st(2)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - 1.00 - - - - - - - - fsubrp %st, %st(2)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fisubrs (%ecx)
# CHECK-NEXT: - - - 1.00 - 1.00 - 1.00 - - - - - - fisubrl (%eax)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - ftst
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucom %st(1)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucom %st(3)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucomp %st(1)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucomp %st(3)
# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucompp
-# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucomi %st(3)
-# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucompi %st(3)
+# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucomi %st(3), %st
+# CHECK-NEXT: 1.00 - - 1.00 - 1.00 - - - - - - - - fucompi %st(3), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - wait
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fxam
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fxch %st(1)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fxch %st(3)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fxrstor (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fxsave (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fxtract
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fyl2x
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - - - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Generic/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Generic/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Generic/resources-x87.s (revision 344166)
@@ -1,521 +1,521 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.33 U f2xm1
# CHECK-NEXT: 1 1 1.00 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fadds (%ecx)
# CHECK-NEXT: 2 10 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fiadds (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.33 U fbld (%ecx)
# CHECK-NEXT: 1 100 0.33 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 1 100 0.33 U fnclex
-# CHECK-NEXT: 3 3 2.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 3 3 2.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmove %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovu %st(1), %st
# CHECK-NEXT: 1 1 1.00 U fcom %st(1)
# CHECK-NEXT: 1 1 1.00 U fcom %st(3)
# CHECK-NEXT: 2 8 1.00 U fcoms (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(3)
# CHECK-NEXT: 2 8 1.00 U fcomps (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 100 0.33 U fcompp
-# CHECK-NEXT: 3 3 1.00 U fcomi %st(3)
-# CHECK-NEXT: 3 3 1.00 U fcompi %st(3)
+# CHECK-NEXT: 3 3 1.00 U fcomi %st(3), %st
+# CHECK-NEXT: 3 3 1.00 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.33 U fcos
# CHECK-NEXT: 1 1 1.00 U fdecstp
-# CHECK-NEXT: 1 14 14.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 14 14.00 U fdiv %st(2)
+# CHECK-NEXT: 1 14 14.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 14 14.00 U fdiv %st(2), %st
# CHECK-NEXT: 2 31 1.00 * U fdivs (%ecx)
# CHECK-NEXT: 2 31 1.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 14 14.00 U fdivp %st(1)
-# CHECK-NEXT: 1 14 14.00 U fdivp %st(2)
+# CHECK-NEXT: 1 14 14.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 14 14.00 U fdivp %st, %st(2)
# CHECK-NEXT: 3 34 1.00 * U fidivs (%ecx)
# CHECK-NEXT: 3 34 1.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 14 14.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 14 14.00 U fdivr %st(2)
+# CHECK-NEXT: 1 14 14.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 14 14.00 U fdivr %st(2), %st
# CHECK-NEXT: 2 31 1.00 * U fdivrs (%ecx)
# CHECK-NEXT: 2 31 1.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 14 14.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 14 14.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 14 14.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 14 14.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 3 34 1.00 * U fidivrs (%ecx)
# CHECK-NEXT: 3 34 1.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 1 1.00 U ffree %st(0)
# CHECK-NEXT: 3 11 2.00 U ficoms (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficoml (%eax)
# CHECK-NEXT: 3 11 2.00 U ficomps (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficompl (%eax)
# CHECK-NEXT: 2 10 1.00 * U filds (%edx)
# CHECK-NEXT: 2 10 1.00 * U fildl (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 1 1.00 U fincstp
# CHECK-NEXT: 4 5 1.33 U fninit
# CHECK-NEXT: 4 9 1.00 * U fists (%edx)
# CHECK-NEXT: 4 9 1.00 * U fistl (%ecx)
# CHECK-NEXT: 4 9 1.00 * U fistps (%edx)
# CHECK-NEXT: 4 9 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 4 9 1.00 * U fistpll (%eax)
# CHECK-NEXT: 3 5 1.00 * U fisttps (%edx)
# CHECK-NEXT: 3 5 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 3 5 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 1.00 U fld %st(0)
# CHECK-NEXT: 3 9 1.00 * U flds (%edx)
# CHECK-NEXT: 3 9 1.00 * U fldl (%ecx)
# CHECK-NEXT: 3 9 1.00 * U fldt (%eax)
# CHECK-NEXT: 5 8 2.00 * U fldcw (%eax)
# CHECK-NEXT: 1 100 0.33 U fldenv (%eax)
# CHECK-NEXT: 2 1 1.00 U fld1
# CHECK-NEXT: 2 1 1.00 U fldl2e
# CHECK-NEXT: 2 1 1.00 U fldl2t
# CHECK-NEXT: 2 1 1.00 U fldlg2
# CHECK-NEXT: 2 1 1.00 U fldln2
# CHECK-NEXT: 2 1 1.00 U fldpi
# CHECK-NEXT: 1 1 1.00 U fldz
-# CHECK-NEXT: 1 5 1.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmul %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmul %st(2), %st
# CHECK-NEXT: 2 12 1.00 * U fmuls (%ecx)
# CHECK-NEXT: 2 12 1.00 * U fmull (%eax)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(2)
# CHECK-NEXT: 3 15 1.00 * U fimuls (%ecx)
# CHECK-NEXT: 3 15 1.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 1.00 U fnop
# CHECK-NEXT: 1 100 0.33 U fpatan
# CHECK-NEXT: 1 100 0.33 U fprem
# CHECK-NEXT: 1 100 0.33 U fprem1
# CHECK-NEXT: 1 100 0.33 U fptan
# CHECK-NEXT: 1 100 0.33 U frndint
# CHECK-NEXT: 1 100 0.33 U frstor (%eax)
# CHECK-NEXT: 1 100 0.33 U fnsave (%eax)
# CHECK-NEXT: 1 100 0.33 U fscale
# CHECK-NEXT: 1 100 0.33 U fsin
# CHECK-NEXT: 1 100 0.33 U fsincos
# CHECK-NEXT: 1 24 24.00 U fsqrt
# CHECK-NEXT: 1 1 1.00 U fst %st(0)
# CHECK-NEXT: 3 6 1.00 * U fsts (%edx)
# CHECK-NEXT: 3 6 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 1.00 U fstp %st(0)
# CHECK-NEXT: 3 6 1.00 * U fstpl (%edx)
# CHECK-NEXT: 3 6 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 3 6 1.00 * U fstpt (%eax)
# CHECK-NEXT: 4 7 1.00 * U fnstcw (%eax)
# CHECK-NEXT: 1 100 0.33 U fnstenv (%eax)
# CHECK-NEXT: 4 7 1.00 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.33 U frstor (%eax)
# CHECK-NEXT: 1 100 0.33 U wait
# CHECK-NEXT: 1 100 0.33 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubrs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 3 1.00 U ftst
# CHECK-NEXT: 1 1 1.00 U fucom %st(1)
# CHECK-NEXT: 1 1 1.00 U fucom %st(3)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 3 1.00 U fucompp
-# CHECK-NEXT: 3 3 1.00 U fucomi %st(3)
-# CHECK-NEXT: 3 3 1.00 U fucompi %st(3)
+# CHECK-NEXT: 3 3 1.00 U fucomi %st(3), %st
+# CHECK-NEXT: 3 3 1.00 U fucompi %st(3), %st
# CHECK-NEXT: 1 100 0.33 U wait
# CHECK-NEXT: 1 100 0.33 U fxam
# CHECK-NEXT: 1 1 0.33 U fxch %st(1)
# CHECK-NEXT: 1 1 0.33 U fxch %st(3)
# CHECK-NEXT: 5 5 2.00 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.33 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 0.33 U fxtract
# CHECK-NEXT: 1 100 0.33 U fyl2x
# CHECK-NEXT: 1 100 0.33 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - SBDivider
# CHECK-NEXT: [1] - SBFPDivider
# CHECK-NEXT: [2] - SBPort0
# CHECK-NEXT: [3] - SBPort1
# CHECK-NEXT: [4] - SBPort4
# CHECK-NEXT: [5] - SBPort5
# CHECK-NEXT: [6.0] - SBPort23
# CHECK-NEXT: [6.1] - SBPort23
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6.0] [6.1]
# CHECK-NEXT: - 136.00 52.67 90.67 17.00 54.67 34.00 34.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6.0] [6.1] Instructions:
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - f2xm1
# CHECK-NEXT: - - - - - 1.00 - - fabs
-# CHECK-NEXT: - - - 1.00 - - - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fadd %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fadd %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fadd %st(2), %st
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fadds (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 faddl (%ecx)
-# CHECK-NEXT: - - - 1.00 - - - - faddp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - faddp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - faddp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - faddp %st, %st(2)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fiadds (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fiaddl (%ecx)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fbld (%ecx)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fbstp (%eax)
# CHECK-NEXT: - - - - - 1.00 - - fchs
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fnclex
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovb %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmove %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovne %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovu %st(1), %st
# CHECK-NEXT: - - - 1.00 - - - - fcom %st(1)
# CHECK-NEXT: - - - 1.00 - - - - fcom %st(3)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fcoms (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fcoml (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fcomp %st(1)
# CHECK-NEXT: - - - 1.00 - - - - fcomp %st(3)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fcomps (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fcompl (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fcompp
-# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fcomi %st(3)
-# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fcompi %st(3)
+# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fcomi %st(3), %st
+# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fcompi %st(3), %st
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fcos
# CHECK-NEXT: - - - - - 1.00 - - fdecstp
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdiv %st(2)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdiv %st(2), %st
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fdivs (%ecx)
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fdivl (%eax)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivp %st(1)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivp %st(2)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fidivs (%ecx)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fidivl (%eax)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivr %st(2)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivr %st(2), %st
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fdivrs (%ecx)
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fdivrl (%eax)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivrp %st(1)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivrp %st(2)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivrp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fidivrs (%ecx)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fidivrl (%eax)
# CHECK-NEXT: - - - - - 1.00 - - ffree %st(0)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 ficoms (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 ficoml (%eax)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 ficomps (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 ficompl (%eax)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 filds (%edx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fildl (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fildll (%eax)
# CHECK-NEXT: - - - - - 1.00 - - fincstp
# CHECK-NEXT: - - 1.00 1.00 - 2.00 - - fninit
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fists (%edx)
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fistl (%ecx)
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fistps (%edx)
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fistpl (%ecx)
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fistpll (%eax)
# CHECK-NEXT: - - - 1.00 1.00 - 0.50 0.50 fisttps (%edx)
# CHECK-NEXT: - - - 1.00 1.00 - 0.50 0.50 fisttpl (%ecx)
# CHECK-NEXT: - - - 1.00 1.00 - 0.50 0.50 fisttpll (%eax)
# CHECK-NEXT: - - - - - 1.00 - - fld %st(0)
# CHECK-NEXT: - - 0.50 0.50 - 1.00 0.50 0.50 flds (%edx)
# CHECK-NEXT: - - 0.50 0.50 - 1.00 0.50 0.50 fldl (%ecx)
# CHECK-NEXT: - - 0.50 0.50 - 1.00 0.50 0.50 fldt (%eax)
# CHECK-NEXT: - - - - 1.00 2.00 1.00 1.00 fldcw (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fldenv (%eax)
# CHECK-NEXT: - - 1.00 - - 1.00 - - fld1
# CHECK-NEXT: - - 1.00 1.00 - - - - fldl2e
# CHECK-NEXT: - - 1.00 1.00 - - - - fldl2t
# CHECK-NEXT: - - 1.00 1.00 - - - - fldlg2
# CHECK-NEXT: - - 1.00 1.00 - - - - fldln2
# CHECK-NEXT: - - 1.00 1.00 - - - - fldpi
# CHECK-NEXT: - - - - - 1.00 - - fldz
-# CHECK-NEXT: - - 1.00 - - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - fmul %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - fmul %st(2), %st
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fmuls (%ecx)
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fmull (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - fmulp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - fmulp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - fmulp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fimuls (%ecx)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fimull (%eax)
# CHECK-NEXT: - - - - - 1.00 - - fnop
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fpatan
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fprem
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fprem1
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fptan
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - frndint
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - frstor (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fnsave (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fscale
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fsin
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fsincos
# CHECK-NEXT: - 24.00 1.00 - - - - - fsqrt
# CHECK-NEXT: - - - - - 1.00 - - fst %st(0)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fsts (%edx)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fstl (%ecx)
# CHECK-NEXT: - - - - - 1.00 - - fstp %st(0)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fstpl (%edx)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fstpl (%ecx)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fstpt (%eax)
# CHECK-NEXT: - - - - 1.00 1.00 1.00 1.00 fnstcw (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fnstenv (%eax)
# CHECK-NEXT: - - 1.00 - 1.00 - 1.00 1.00 fnstsw (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - frstor (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - wait
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fnsave (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fsub %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fsub %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fsub %st(2), %st
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fsubs (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fsubl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - fsubp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fsubp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fsubp %st, %st(2)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fisubs (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fisubl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fsubr %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fsubr %st(2), %st
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fsubrs (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fsubrl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - fsubrp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fsubrp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fsubrp %st, %st(2)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fisubrs (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fisubrl (%eax)
# CHECK-NEXT: - - - 1.00 - - - - ftst
# CHECK-NEXT: - - - 1.00 - - - - fucom %st(1)
# CHECK-NEXT: - - - 1.00 - - - - fucom %st(3)
# CHECK-NEXT: - - - 1.00 - - - - fucomp %st(1)
# CHECK-NEXT: - - - 1.00 - - - - fucomp %st(3)
# CHECK-NEXT: - - - 1.00 - - - - fucompp
-# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fucomi %st(3)
-# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fucompi %st(3)
+# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fucomi %st(3), %st
+# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fucompi %st(3), %st
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - wait
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxam
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxch %st(1)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxch %st(3)
# CHECK-NEXT: - - 0.50 0.50 1.00 2.00 0.50 0.50 fxrstor (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxsave (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxtract
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fyl2x
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Haswell/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Haswell/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Haswell/resources-x87.s (revision 344166)
@@ -1,523 +1,523 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=haswell -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.25 U f2xm1
# CHECK-NEXT: 1 1 1.00 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fadds (%ecx)
# CHECK-NEXT: 2 10 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fiadds (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fiaddl (%ecx)
# CHECK-NEXT: 43 47 10.75 U fbld (%ecx)
# CHECK-NEXT: 2 1 1.00 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 4 4 1.00 U fnclex
-# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st
# CHECK-NEXT: 1 1 1.00 U fcom %st(1)
# CHECK-NEXT: 1 1 1.00 U fcom %st(3)
# CHECK-NEXT: 2 8 1.00 U fcoms (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(3)
# CHECK-NEXT: 2 8 1.00 U fcomps (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcompl (%eax)
# CHECK-NEXT: 2 1 0.50 U fcompp
-# CHECK-NEXT: 3 1 0.50 U fcomi %st(3)
-# CHECK-NEXT: 3 1 0.50 U fcompi %st(3)
+# CHECK-NEXT: 3 1 0.50 U fcomi %st(3), %st
+# CHECK-NEXT: 3 1 0.50 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.25 U fcos
# CHECK-NEXT: 2 2 1.00 U fdecstp
-# CHECK-NEXT: 1 24 1.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 20 1.00 U fdiv %st(2)
+# CHECK-NEXT: 1 24 1.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 20 1.00 U fdiv %st(2), %st
# CHECK-NEXT: 2 31 1.00 * U fdivs (%ecx)
# CHECK-NEXT: 2 31 1.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 24 1.00 U fdivp %st(1)
-# CHECK-NEXT: 1 24 1.00 U fdivp %st(2)
+# CHECK-NEXT: 1 24 1.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 24 1.00 U fdivp %st, %st(2)
# CHECK-NEXT: 3 34 1.00 * U fidivs (%ecx)
# CHECK-NEXT: 3 34 1.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 20 1.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 24 1.00 U fdivr %st(2)
+# CHECK-NEXT: 1 20 1.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 24 1.00 U fdivr %st(2), %st
# CHECK-NEXT: 2 27 1.00 * U fdivrs (%ecx)
# CHECK-NEXT: 2 27 1.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 20 1.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 20 1.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 20 1.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 20 1.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 3 30 1.00 * U fidivrs (%ecx)
# CHECK-NEXT: 3 30 1.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 1 0.50 U ffree %st(0)
# CHECK-NEXT: 3 11 2.00 U ficoms (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficoml (%eax)
# CHECK-NEXT: 3 11 2.00 U ficomps (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficompl (%eax)
# CHECK-NEXT: 2 10 1.00 * U filds (%edx)
# CHECK-NEXT: 2 10 1.00 * U fildl (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 1 0.50 U fincstp
# CHECK-NEXT: 15 75 6.00 U fninit
# CHECK-NEXT: 3 4 1.00 * U fists (%edx)
# CHECK-NEXT: 3 4 1.00 * U fistl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fistps (%edx)
# CHECK-NEXT: 3 4 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fistpll (%eax)
# CHECK-NEXT: 3 4 1.00 * U fisttps (%edx)
# CHECK-NEXT: 3 4 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 0.50 U fld %st(0)
# CHECK-NEXT: 1 7 0.50 * U flds (%edx)
# CHECK-NEXT: 1 7 0.50 * U fldl (%ecx)
# CHECK-NEXT: 1 7 0.50 * U fldt (%eax)
# CHECK-NEXT: 3 7 1.00 * U fldcw (%eax)
# CHECK-NEXT: 64 61 14.00 U fldenv (%eax)
# CHECK-NEXT: 2 1 1.00 U fld1
# CHECK-NEXT: 2 1 1.00 U fldl2e
# CHECK-NEXT: 2 1 1.00 U fldl2t
# CHECK-NEXT: 2 1 1.00 U fldlg2
# CHECK-NEXT: 2 1 1.00 U fldln2
# CHECK-NEXT: 2 1 1.00 U fldpi
# CHECK-NEXT: 1 1 0.50 U fldz
-# CHECK-NEXT: 1 5 1.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmul %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmul %st(2), %st
# CHECK-NEXT: 2 12 1.00 * U fmuls (%ecx)
# CHECK-NEXT: 2 12 1.00 * U fmull (%eax)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(2)
# CHECK-NEXT: 3 15 1.00 * U fimuls (%ecx)
# CHECK-NEXT: 3 15 1.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 0.50 U fnop
# CHECK-NEXT: 1 100 0.25 U fpatan
# CHECK-NEXT: 28 19 7.00 U fprem
# CHECK-NEXT: 41 27 10.25 U fprem1
# CHECK-NEXT: 1 100 0.25 U fptan
# CHECK-NEXT: 17 11 4.25 U frndint
# CHECK-NEXT: 90 1 22.50 U frstor (%eax)
# CHECK-NEXT: 147 1 36.75 U fnsave (%eax)
# CHECK-NEXT: 50 75 12.50 U fscale
# CHECK-NEXT: 1 100 0.25 U fsin
# CHECK-NEXT: 1 100 0.25 U fsincos
# CHECK-NEXT: 1 23 17.00 U fsqrt
# CHECK-NEXT: 1 1 0.50 U fst %st(0)
# CHECK-NEXT: 1 1 1.00 * U fsts (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 0.50 U fstp %st(0)
# CHECK-NEXT: 2 1 1.00 * U fstpl (%edx)
# CHECK-NEXT: 2 1 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 2 1 1.00 * U fstpt (%eax)
# CHECK-NEXT: 3 2 1.00 * U fnstcw (%eax)
# CHECK-NEXT: 100 115 19.50 U fnstenv (%eax)
# CHECK-NEXT: 3 4 1.00 U fnstsw (%eax)
# CHECK-NEXT: 90 1 22.50 U frstor (%eax)
# CHECK-NEXT: 2 2 0.50 U wait
# CHECK-NEXT: 147 1 36.75 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubrs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 1 1.00 U ftst
# CHECK-NEXT: 1 1 1.00 U fucom %st(1)
# CHECK-NEXT: 1 1 1.00 U fucom %st(3)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 2 1 0.50 U fucompp
-# CHECK-NEXT: 3 1 0.50 U fucomi %st(3)
-# CHECK-NEXT: 3 1 0.50 U fucompi %st(3)
+# CHECK-NEXT: 3 1 0.50 U fucomi %st(3), %st
+# CHECK-NEXT: 3 1 0.50 U fucompi %st(3), %st
# CHECK-NEXT: 2 2 0.50 U wait
# CHECK-NEXT: 2 1 2.00 U fxam
# CHECK-NEXT: 15 17 4.00 U fxch %st(1)
# CHECK-NEXT: 15 17 4.00 U fxch %st(3)
# CHECK-NEXT: 90 64 16.50 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.25 * * U fxsave (%eax)
# CHECK-NEXT: 17 15 4.25 U fxtract
# CHECK-NEXT: 1 100 0.25 U fyl2x
# CHECK-NEXT: 1 100 0.25 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - HWDivider
# CHECK-NEXT: [1] - HWFPDivider
# CHECK-NEXT: [2] - HWPort0
# CHECK-NEXT: [3] - HWPort1
# CHECK-NEXT: [4] - HWPort2
# CHECK-NEXT: [5] - HWPort3
# CHECK-NEXT: [6] - HWPort4
# CHECK-NEXT: [7] - HWPort5
# CHECK-NEXT: [8] - HWPort6
# CHECK-NEXT: [9] - HWPort7
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
# CHECK-NEXT: - 17.00 121.42 145.42 49.00 49.00 27.00 56.92 65.25 9.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - f2xm1
# CHECK-NEXT: - - 1.00 - - - - - - - fabs
-# CHECK-NEXT: - - - 1.00 - - - - - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fadd %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fadd %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fadd %st(2), %st
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fadds (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - faddl (%ecx)
-# CHECK-NEXT: - - - 1.00 - - - - - - faddp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - faddp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - faddp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - faddp %st, %st(2)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fiadds (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fiaddl (%ecx)
# CHECK-NEXT: - - - - - - - - - - fbld (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fbstp (%eax)
# CHECK-NEXT: - - 1.00 - - - - - - - fchs
# CHECK-NEXT: - - 1.00 1.00 - - - 1.00 1.00 - fnclex
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmove %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovne %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovu %st(1), %st
# CHECK-NEXT: - - - 1.00 - - - - - - fcom %st(1)
# CHECK-NEXT: - - - 1.00 - - - - - - fcom %st(3)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fcoms (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fcoml (%eax)
# CHECK-NEXT: - - - 1.00 - - - - - - fcomp %st(1)
# CHECK-NEXT: - - - 1.00 - - - - - - fcomp %st(3)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fcomps (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fcompl (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fcompp
-# CHECK-NEXT: - - 0.50 0.50 - - - - - - fcomi %st(3)
-# CHECK-NEXT: - - 0.50 0.50 - - - - - - fcompi %st(3)
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - fcomi %st(3), %st
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - fcompi %st(3), %st
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fcos
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fdecstp
-# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fidivs (%ecx)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fidivl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivrs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivrl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fidivrs (%ecx)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fidivrl (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - ffree %st(0)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - ficoms (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - ficoml (%eax)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - ficomps (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - ficompl (%eax)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - filds (%edx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fildl (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fildll (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fincstp
# CHECK-NEXT: - - 3.00 3.00 - - - 7.50 1.50 - fninit
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fists (%edx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fistl (%ecx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fistps (%edx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fistpl (%ecx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fistpll (%eax)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fisttps (%edx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fisttpl (%ecx)
# CHECK-NEXT: - - - 1.00 0.33 0.33 1.00 - - 0.33 fisttpll (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fld %st(0)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - flds (%edx)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - fldl (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - fldt (%eax)
# CHECK-NEXT: - - 1.50 0.50 0.50 0.50 - - - - fldcw (%eax)
# CHECK-NEXT: - - 18.92 11.42 4.00 4.00 - 10.92 14.75 - fldenv (%eax)
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fld1
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldl2e
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldl2t
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldlg2
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldln2
# CHECK-NEXT: - - 1.00 1.00 - - - - - - fldpi
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fldz
-# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fmuls (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fmull (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fimuls (%ecx)
# CHECK-NEXT: - - 1.00 1.00 0.50 0.50 - - - - fimull (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fnop
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fpatan
# CHECK-NEXT: - - - - - - - - - - fprem
# CHECK-NEXT: - - - - - - - - - - fprem1
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fptan
# CHECK-NEXT: - - - - - - - - - - frndint
# CHECK-NEXT: - - - - - - - - - - frstor (%eax)
# CHECK-NEXT: - - - - - - - - - - fnsave (%eax)
# CHECK-NEXT: - - - - - - - - - - fscale
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fsin
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fsincos
# CHECK-NEXT: - 17.00 1.00 - - - - - - - fsqrt
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fst %st(0)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fsts (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstl (%ecx)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fstp %st(0)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpl (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpt (%eax)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - 1.00 0.33 fnstcw (%eax)
# CHECK-NEXT: - - 27.00 16.50 3.67 3.67 11.00 15.50 19.00 3.67 fnstenv (%eax)
# CHECK-NEXT: - - 1.00 - 0.33 0.33 1.00 - - 0.33 fnstsw (%eax)
# CHECK-NEXT: - - - - - - - - - - frstor (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - wait
# CHECK-NEXT: - - - - - - - - - - fnsave (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsub %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsub %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsub %st(2), %st
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fsubs (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fsubl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubp %st, %st(2)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fisubs (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fisubl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubr %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubr %st(2), %st
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fsubrs (%ecx)
# CHECK-NEXT: - - - 1.00 0.50 0.50 - - - - fsubrl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubrp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - - - fsubrp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - - - fsubrp %st, %st(2)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fisubrs (%ecx)
# CHECK-NEXT: - - - 2.00 0.50 0.50 - - - - fisubrl (%eax)
# CHECK-NEXT: - - - 1.00 - - - - - - ftst
# CHECK-NEXT: - - - 1.00 - - - - - - fucom %st(1)
# CHECK-NEXT: - - - 1.00 - - - - - - fucom %st(3)
# CHECK-NEXT: - - - 1.00 - - - - - - fucomp %st(1)
# CHECK-NEXT: - - - 1.00 - - - - - - fucomp %st(3)
# CHECK-NEXT: - - 0.50 0.50 - - - - - - fucompp
-# CHECK-NEXT: - - 0.50 0.50 - - - - - - fucomi %st(3)
-# CHECK-NEXT: - - 0.50 0.50 - - - - - - fucompi %st(3)
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - fucomi %st(3), %st
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - fucompi %st(3), %st
# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - wait
# CHECK-NEXT: - - - 2.00 - - - - - - fxam
# CHECK-NEXT: - - 4.00 3.00 - - - 3.00 5.00 - fxch %st(1)
# CHECK-NEXT: - - 4.00 3.00 - - - 3.00 5.00 - fxch %st(3)
# CHECK-NEXT: - - 17.25 12.25 16.50 16.50 - 12.75 14.75 - fxrstor (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxsave (%eax)
# CHECK-NEXT: - - - - - - - - - - fxtract
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fyl2x
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SLM/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SLM/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SLM/resources-x87.s (revision 344166)
@@ -1,521 +1,521 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=slm -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 1.00 U f2xm1
# CHECK-NEXT: 1 1 0.50 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 1 6 1.00 * U fadds (%ecx)
# CHECK-NEXT: 1 6 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 1 6 1.00 * U fiadds (%ecx)
# CHECK-NEXT: 1 6 1.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 1.00 U fbld (%ecx)
# CHECK-NEXT: 1 100 1.00 U fbstp (%eax)
# CHECK-NEXT: 1 1 0.50 U fchs
# CHECK-NEXT: 1 100 1.00 U fnclex
-# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st
# CHECK-NEXT: 1 3 1.00 U fcom %st(1)
# CHECK-NEXT: 1 3 1.00 U fcom %st(3)
# CHECK-NEXT: 1 6 1.00 U fcoms (%ecx)
# CHECK-NEXT: 1 6 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 3 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 3 1.00 U fcomp %st(3)
# CHECK-NEXT: 1 6 1.00 U fcomps (%ecx)
# CHECK-NEXT: 1 6 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 100 1.00 U fcompp
-# CHECK-NEXT: 1 3 1.00 U fcomi %st(3)
-# CHECK-NEXT: 1 3 1.00 U fcompi %st(3)
+# CHECK-NEXT: 1 3 1.00 U fcomi %st(3), %st
+# CHECK-NEXT: 1 3 1.00 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 1.00 U fcos
# CHECK-NEXT: 1 100 1.00 U fdecstp
-# CHECK-NEXT: 1 19 17.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 19 17.00 U fdiv %st(2)
+# CHECK-NEXT: 1 19 17.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 19 17.00 U fdiv %st(2), %st
# CHECK-NEXT: 1 22 17.00 * U fdivs (%ecx)
# CHECK-NEXT: 1 22 17.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 19 17.00 U fdivp %st(1)
-# CHECK-NEXT: 1 19 17.00 U fdivp %st(2)
+# CHECK-NEXT: 1 19 17.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 19 17.00 U fdivp %st, %st(2)
# CHECK-NEXT: 1 22 17.00 * U fidivs (%ecx)
# CHECK-NEXT: 1 22 17.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 19 17.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 19 17.00 U fdivr %st(2)
+# CHECK-NEXT: 1 19 17.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 19 17.00 U fdivr %st(2), %st
# CHECK-NEXT: 1 22 17.00 * U fdivrs (%ecx)
# CHECK-NEXT: 1 22 17.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 19 17.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 19 17.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 19 17.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 19 17.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 1 22 17.00 * U fidivrs (%ecx)
# CHECK-NEXT: 1 22 17.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 100 1.00 U ffree %st(0)
# CHECK-NEXT: 1 6 1.00 U ficoms (%ecx)
# CHECK-NEXT: 1 6 1.00 U ficoml (%eax)
# CHECK-NEXT: 1 6 1.00 U ficomps (%ecx)
# CHECK-NEXT: 1 6 1.00 U ficompl (%eax)
# CHECK-NEXT: 1 3 1.00 * U filds (%edx)
# CHECK-NEXT: 1 3 1.00 * U fildl (%ecx)
# CHECK-NEXT: 1 3 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 100 1.00 U fincstp
# CHECK-NEXT: 1 100 1.00 U fninit
# CHECK-NEXT: 1 1 1.00 * U fists (%edx)
# CHECK-NEXT: 1 1 1.00 * U fistl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fistps (%edx)
# CHECK-NEXT: 1 1 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fistpll (%eax)
# CHECK-NEXT: 1 1 1.00 * U fisttps (%edx)
# CHECK-NEXT: 1 1 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 0.50 U fld %st(0)
# CHECK-NEXT: 1 3 1.00 * U flds (%edx)
# CHECK-NEXT: 1 3 1.00 * U fldl (%ecx)
# CHECK-NEXT: 1 3 1.00 * U fldt (%eax)
# CHECK-NEXT: 1 3 1.00 * U fldcw (%eax)
# CHECK-NEXT: 1 100 1.00 U fldenv (%eax)
# CHECK-NEXT: 1 1 0.50 U fld1
# CHECK-NEXT: 2 1 1.00 U fldl2e
# CHECK-NEXT: 2 1 1.00 U fldl2t
# CHECK-NEXT: 2 1 1.00 U fldlg2
# CHECK-NEXT: 2 1 1.00 U fldln2
# CHECK-NEXT: 2 1 1.00 U fldpi
# CHECK-NEXT: 1 1 0.50 U fldz
-# CHECK-NEXT: 1 5 2.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 5 2.00 U fmul %st(2)
+# CHECK-NEXT: 1 5 2.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 5 2.00 U fmul %st(2), %st
# CHECK-NEXT: 1 8 2.00 * U fmuls (%ecx)
# CHECK-NEXT: 1 8 2.00 * U fmull (%eax)
-# CHECK-NEXT: 1 5 2.00 U fmulp %st(1)
-# CHECK-NEXT: 1 5 2.00 U fmulp %st(2)
+# CHECK-NEXT: 1 5 2.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 5 2.00 U fmulp %st, %st(2)
# CHECK-NEXT: 1 8 2.00 * U fimuls (%ecx)
# CHECK-NEXT: 1 8 2.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 0.50 U fnop
# CHECK-NEXT: 1 100 1.00 U fpatan
# CHECK-NEXT: 1 100 1.00 U fprem
# CHECK-NEXT: 1 100 1.00 U fprem1
# CHECK-NEXT: 1 100 1.00 U fptan
# CHECK-NEXT: 1 100 1.00 U frndint
# CHECK-NEXT: 1 100 1.00 U frstor (%eax)
# CHECK-NEXT: 1 100 1.00 U fnsave (%eax)
# CHECK-NEXT: 1 100 1.00 U fscale
# CHECK-NEXT: 1 100 1.00 U fsin
# CHECK-NEXT: 1 100 1.00 U fsincos
# CHECK-NEXT: 1 40 40.00 U fsqrt
# CHECK-NEXT: 1 1 0.50 U fst %st(0)
# CHECK-NEXT: 1 1 1.00 * U fsts (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 0.50 U fstp %st(0)
# CHECK-NEXT: 1 1 1.00 * U fstpl (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 1 1 1.00 * U fstpt (%eax)
# CHECK-NEXT: 1 1 0.50 * U fnstcw (%eax)
# CHECK-NEXT: 1 100 1.00 U fnstenv (%eax)
# CHECK-NEXT: 1 100 1.00 U fnstsw (%eax)
# CHECK-NEXT: 1 100 1.00 U frstor (%eax)
# CHECK-NEXT: 1 100 1.00 U wait
# CHECK-NEXT: 1 100 1.00 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 1 6 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 1 6 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 1 6 1.00 * U fisubs (%ecx)
# CHECK-NEXT: 1 6 1.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 1 6 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 1 6 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 1 6 1.00 * U fisubrs (%ecx)
# CHECK-NEXT: 1 6 1.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 3 1.00 U ftst
# CHECK-NEXT: 1 3 1.00 U fucom %st(1)
# CHECK-NEXT: 1 3 1.00 U fucom %st(3)
# CHECK-NEXT: 1 3 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 3 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 3 1.00 U fucompp
-# CHECK-NEXT: 1 3 1.00 U fucomi %st(3)
-# CHECK-NEXT: 1 3 1.00 U fucompi %st(3)
+# CHECK-NEXT: 1 3 1.00 U fucomi %st(3), %st
+# CHECK-NEXT: 1 3 1.00 U fucompi %st(3), %st
# CHECK-NEXT: 1 100 1.00 U wait
# CHECK-NEXT: 1 100 1.00 U fxam
# CHECK-NEXT: 1 1 0.50 U fxch %st(1)
# CHECK-NEXT: 1 1 0.50 U fxch %st(3)
# CHECK-NEXT: 1 100 1.00 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 1.00 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 1.00 U fxtract
# CHECK-NEXT: 1 100 1.00 U fyl2x
# CHECK-NEXT: 1 100 1.00 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - SLMDivider
# CHECK-NEXT: [1] - SLMFPDivider
# CHECK-NEXT: [2] - SLMFPMultiplier
# CHECK-NEXT: [3] - SLM_FPC_RSV0
# CHECK-NEXT: [4] - SLM_FPC_RSV1
# CHECK-NEXT: [5] - SLM_IEC_RSV0
# CHECK-NEXT: [6] - SLM_IEC_RSV1
# CHECK-NEXT: [7] - SLM_MEC_RSV
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7]
# CHECK-NEXT: - 312.00 16.00 65.00 61.00 9.50 9.50 52.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] Instructions:
# CHECK-NEXT: - - - 1.00 - - - - f2xm1
# CHECK-NEXT: - - - 0.50 0.50 - - - fabs
-# CHECK-NEXT: - - - - 1.00 - - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - - 1.00 - - - fadd %st(2)
+# CHECK-NEXT: - - - - 1.00 - - - fadd %st, %st(1)
+# CHECK-NEXT: - - - - 1.00 - - - fadd %st(2), %st
# CHECK-NEXT: - - - - 1.00 - - 1.00 fadds (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 faddl (%ecx)
-# CHECK-NEXT: - - - - 1.00 - - - faddp %st(1)
-# CHECK-NEXT: - - - - 1.00 - - - faddp %st(2)
+# CHECK-NEXT: - - - - 1.00 - - - faddp %st, %st(1)
+# CHECK-NEXT: - - - - 1.00 - - - faddp %st, %st(2)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fiadds (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fiaddl (%ecx)
# CHECK-NEXT: - - - 1.00 - - - - fbld (%ecx)
# CHECK-NEXT: - - - 1.00 - - - - fbstp (%eax)
# CHECK-NEXT: - - - 0.50 0.50 - - - fchs
# CHECK-NEXT: - - - 1.00 - - - - fnclex
-# CHECK-NEXT: - - - - 1.00 - - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - - - 1.00 - - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - - - 1.00 - - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - - - 1.00 - - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - - - 1.00 - - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - - - 1.00 - - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - - - 1.00 - - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - - - 1.00 - - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - - - 1.00 - - - fcmovb %st(1), %st
+# CHECK-NEXT: - - - - 1.00 - - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - - - 1.00 - - - fcmove %st(1), %st
+# CHECK-NEXT: - - - - 1.00 - - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - - - 1.00 - - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - - - 1.00 - - - fcmovne %st(1), %st
+# CHECK-NEXT: - - - - 1.00 - - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - - - 1.00 - - - fcmovu %st(1), %st
# CHECK-NEXT: - - - - 1.00 - - - fcom %st(1)
# CHECK-NEXT: - - - - 1.00 - - - fcom %st(3)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fcoms (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fcoml (%eax)
# CHECK-NEXT: - - - - 1.00 - - - fcomp %st(1)
# CHECK-NEXT: - - - - 1.00 - - - fcomp %st(3)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fcomps (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fcompl (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fcompp
-# CHECK-NEXT: - - - - 1.00 - - - fcomi %st(3)
-# CHECK-NEXT: - - - - 1.00 - - - fcompi %st(3)
+# CHECK-NEXT: - - - - 1.00 - - - fcomi %st(3), %st
+# CHECK-NEXT: - - - - 1.00 - - - fcompi %st(3), %st
# CHECK-NEXT: - - - 1.00 - - - - fcos
# CHECK-NEXT: - - - 1.00 - - - - fdecstp
-# CHECK-NEXT: - 17.00 - 1.00 - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - 17.00 - 1.00 - - - - fdiv %st(2)
+# CHECK-NEXT: - 17.00 - 1.00 - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - 17.00 - 1.00 - - - - fdiv %st(2), %st
# CHECK-NEXT: - 17.00 - 1.00 - - - 1.00 fdivs (%ecx)
# CHECK-NEXT: - 17.00 - 1.00 - - - 1.00 fdivl (%eax)
-# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivp %st(1)
-# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivp %st(2)
+# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivp %st, %st(2)
# CHECK-NEXT: - 17.00 - 1.00 - - - 1.00 fidivs (%ecx)
# CHECK-NEXT: - 17.00 - 1.00 - - - 1.00 fidivl (%eax)
-# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivr %st(2)
+# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivr %st(2), %st
# CHECK-NEXT: - 17.00 - 1.00 - - - 1.00 fdivrs (%ecx)
# CHECK-NEXT: - 17.00 - 1.00 - - - 1.00 fdivrl (%eax)
-# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivrp %st(1)
-# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivrp %st(2)
+# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - 17.00 - 1.00 - - - - fdivrp %st, %st(2)
# CHECK-NEXT: - 17.00 - 1.00 - - - 1.00 fidivrs (%ecx)
# CHECK-NEXT: - 17.00 - 1.00 - - - 1.00 fidivrl (%eax)
# CHECK-NEXT: - - - 1.00 - - - - ffree %st(0)
# CHECK-NEXT: - - - - 1.00 - - 1.00 ficoms (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 ficoml (%eax)
# CHECK-NEXT: - - - - 1.00 - - 1.00 ficomps (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 ficompl (%eax)
# CHECK-NEXT: - - - - - - - 1.00 filds (%edx)
# CHECK-NEXT: - - - - - - - 1.00 fildl (%ecx)
# CHECK-NEXT: - - - - - - - 1.00 fildll (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fincstp
# CHECK-NEXT: - - - 1.00 - - - - fninit
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fists (%edx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fistl (%ecx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fistps (%edx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fistpl (%ecx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fistpll (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fisttps (%edx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fisttpl (%ecx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fisttpll (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - fld %st(0)
# CHECK-NEXT: - - - - - - - 1.00 flds (%edx)
# CHECK-NEXT: - - - - - - - 1.00 fldl (%ecx)
# CHECK-NEXT: - - - - - - - 1.00 fldt (%eax)
# CHECK-NEXT: - - - - - - - 1.00 fldcw (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fldenv (%eax)
# CHECK-NEXT: - - - 0.50 0.50 - - - fld1
# CHECK-NEXT: - - - 1.00 1.00 - - - fldl2e
# CHECK-NEXT: - - - 1.00 1.00 - - - fldl2t
# CHECK-NEXT: - - - 1.00 1.00 - - - fldlg2
# CHECK-NEXT: - - - 1.00 1.00 - - - fldln2
# CHECK-NEXT: - - - 1.00 1.00 - - - fldpi
# CHECK-NEXT: - - - 0.50 0.50 - - - fldz
-# CHECK-NEXT: - - 2.00 1.00 - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - 2.00 1.00 - - - - fmul %st(2)
+# CHECK-NEXT: - - 2.00 1.00 - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - 2.00 1.00 - - - - fmul %st(2), %st
# CHECK-NEXT: - - 2.00 1.00 - - - 1.00 fmuls (%ecx)
# CHECK-NEXT: - - 2.00 1.00 - - - 1.00 fmull (%eax)
-# CHECK-NEXT: - - 2.00 1.00 - - - - fmulp %st(1)
-# CHECK-NEXT: - - 2.00 1.00 - - - - fmulp %st(2)
+# CHECK-NEXT: - - 2.00 1.00 - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - 2.00 1.00 - - - - fmulp %st, %st(2)
# CHECK-NEXT: - - 2.00 1.00 - - - 1.00 fimuls (%ecx)
# CHECK-NEXT: - - 2.00 1.00 - - - 1.00 fimull (%eax)
# CHECK-NEXT: - - - - - - - - fnop
# CHECK-NEXT: - - - 1.00 - - - - fpatan
# CHECK-NEXT: - - - 1.00 - - - - fprem
# CHECK-NEXT: - - - 1.00 - - - - fprem1
# CHECK-NEXT: - - - 1.00 - - - - fptan
# CHECK-NEXT: - - - 1.00 - - - - frndint
# CHECK-NEXT: - - - 1.00 - - - - frstor (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fnsave (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fscale
# CHECK-NEXT: - - - 1.00 - - - - fsin
# CHECK-NEXT: - - - 1.00 - - - - fsincos
# CHECK-NEXT: - 40.00 - 1.00 - - - - fsqrt
# CHECK-NEXT: - - - - - 0.50 0.50 - fst %st(0)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fsts (%edx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fstl (%ecx)
# CHECK-NEXT: - - - - - 0.50 0.50 - fstp %st(0)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fstpl (%edx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fstpl (%ecx)
# CHECK-NEXT: - - - - - 0.50 0.50 1.00 fstpt (%eax)
# CHECK-NEXT: - - - - - 0.50 0.50 - fnstcw (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fnstenv (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fnstsw (%eax)
# CHECK-NEXT: - - - 1.00 - - - - frstor (%eax)
# CHECK-NEXT: - - - 1.00 - - - - wait
# CHECK-NEXT: - - - 1.00 - - - - fnsave (%eax)
-# CHECK-NEXT: - - - - 1.00 - - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - - 1.00 - - - fsub %st(2)
+# CHECK-NEXT: - - - - 1.00 - - - fsub %st, %st(1)
+# CHECK-NEXT: - - - - 1.00 - - - fsub %st(2), %st
# CHECK-NEXT: - - - - 1.00 - - 1.00 fsubs (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fsubl (%eax)
-# CHECK-NEXT: - - - - 1.00 - - - fsubp %st(1)
-# CHECK-NEXT: - - - - 1.00 - - - fsubp %st(2)
+# CHECK-NEXT: - - - - 1.00 - - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - - 1.00 - - - fsubp %st, %st(2)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fisubs (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fisubl (%eax)
-# CHECK-NEXT: - - - - 1.00 - - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - - 1.00 - - - fsubr %st(2)
+# CHECK-NEXT: - - - - 1.00 - - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - - 1.00 - - - fsubr %st(2), %st
# CHECK-NEXT: - - - - 1.00 - - 1.00 fsubrs (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fsubrl (%eax)
-# CHECK-NEXT: - - - - 1.00 - - - fsubrp %st(1)
-# CHECK-NEXT: - - - - 1.00 - - - fsubrp %st(2)
+# CHECK-NEXT: - - - - 1.00 - - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - - 1.00 - - - fsubrp %st, %st(2)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fisubrs (%ecx)
# CHECK-NEXT: - - - - 1.00 - - 1.00 fisubrl (%eax)
# CHECK-NEXT: - - - - 1.00 - - - ftst
# CHECK-NEXT: - - - - 1.00 - - - fucom %st(1)
# CHECK-NEXT: - - - - 1.00 - - - fucom %st(3)
# CHECK-NEXT: - - - - 1.00 - - - fucomp %st(1)
# CHECK-NEXT: - - - - 1.00 - - - fucomp %st(3)
# CHECK-NEXT: - - - - 1.00 - - - fucompp
-# CHECK-NEXT: - - - - 1.00 - - - fucomi %st(3)
-# CHECK-NEXT: - - - - 1.00 - - - fucompi %st(3)
+# CHECK-NEXT: - - - - 1.00 - - - fucomi %st(3), %st
+# CHECK-NEXT: - - - - 1.00 - - - fucompi %st(3), %st
# CHECK-NEXT: - - - 1.00 - - - - wait
# CHECK-NEXT: - - - 1.00 - - - - fxam
# CHECK-NEXT: - - - - - 0.50 0.50 - fxch %st(1)
# CHECK-NEXT: - - - - - 0.50 0.50 - fxch %st(3)
# CHECK-NEXT: - - - 1.00 - - - - fxrstor (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fxsave (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fxtract
# CHECK-NEXT: - - - 1.00 - - - - fyl2x
# CHECK-NEXT: - - - 1.00 - - - - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SandyBridge/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SandyBridge/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SandyBridge/resources-x87.s (revision 344166)
@@ -1,521 +1,521 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=sandybridge -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.33 U f2xm1
# CHECK-NEXT: 1 1 1.00 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fadds (%ecx)
# CHECK-NEXT: 2 10 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fiadds (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.33 U fbld (%ecx)
# CHECK-NEXT: 1 100 0.33 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 1 100 0.33 U fnclex
-# CHECK-NEXT: 3 3 2.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 3 3 2.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 3 3 2.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmove %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 3 3 2.00 U fcmovu %st(1), %st
# CHECK-NEXT: 1 1 1.00 U fcom %st(1)
# CHECK-NEXT: 1 1 1.00 U fcom %st(3)
# CHECK-NEXT: 2 8 1.00 U fcoms (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(3)
# CHECK-NEXT: 2 8 1.00 U fcomps (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 100 0.33 U fcompp
-# CHECK-NEXT: 3 3 1.00 U fcomi %st(3)
-# CHECK-NEXT: 3 3 1.00 U fcompi %st(3)
+# CHECK-NEXT: 3 3 1.00 U fcomi %st(3), %st
+# CHECK-NEXT: 3 3 1.00 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.33 U fcos
# CHECK-NEXT: 1 1 1.00 U fdecstp
-# CHECK-NEXT: 1 14 14.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 14 14.00 U fdiv %st(2)
+# CHECK-NEXT: 1 14 14.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 14 14.00 U fdiv %st(2), %st
# CHECK-NEXT: 2 31 1.00 * U fdivs (%ecx)
# CHECK-NEXT: 2 31 1.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 14 14.00 U fdivp %st(1)
-# CHECK-NEXT: 1 14 14.00 U fdivp %st(2)
+# CHECK-NEXT: 1 14 14.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 14 14.00 U fdivp %st, %st(2)
# CHECK-NEXT: 3 34 1.00 * U fidivs (%ecx)
# CHECK-NEXT: 3 34 1.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 14 14.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 14 14.00 U fdivr %st(2)
+# CHECK-NEXT: 1 14 14.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 14 14.00 U fdivr %st(2), %st
# CHECK-NEXT: 2 31 1.00 * U fdivrs (%ecx)
# CHECK-NEXT: 2 31 1.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 14 14.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 14 14.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 14 14.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 14 14.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 3 34 1.00 * U fidivrs (%ecx)
# CHECK-NEXT: 3 34 1.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 1 1.00 U ffree %st(0)
# CHECK-NEXT: 3 11 2.00 U ficoms (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficoml (%eax)
# CHECK-NEXT: 3 11 2.00 U ficomps (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficompl (%eax)
# CHECK-NEXT: 2 10 1.00 * U filds (%edx)
# CHECK-NEXT: 2 10 1.00 * U fildl (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 1 1.00 U fincstp
# CHECK-NEXT: 4 5 1.33 U fninit
# CHECK-NEXT: 4 9 1.00 * U fists (%edx)
# CHECK-NEXT: 4 9 1.00 * U fistl (%ecx)
# CHECK-NEXT: 4 9 1.00 * U fistps (%edx)
# CHECK-NEXT: 4 9 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 4 9 1.00 * U fistpll (%eax)
# CHECK-NEXT: 3 5 1.00 * U fisttps (%edx)
# CHECK-NEXT: 3 5 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 3 5 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 1.00 U fld %st(0)
# CHECK-NEXT: 3 9 1.00 * U flds (%edx)
# CHECK-NEXT: 3 9 1.00 * U fldl (%ecx)
# CHECK-NEXT: 3 9 1.00 * U fldt (%eax)
# CHECK-NEXT: 5 8 2.00 * U fldcw (%eax)
# CHECK-NEXT: 1 100 0.33 U fldenv (%eax)
# CHECK-NEXT: 2 1 1.00 U fld1
# CHECK-NEXT: 2 1 1.00 U fldl2e
# CHECK-NEXT: 2 1 1.00 U fldl2t
# CHECK-NEXT: 2 1 1.00 U fldlg2
# CHECK-NEXT: 2 1 1.00 U fldln2
# CHECK-NEXT: 2 1 1.00 U fldpi
# CHECK-NEXT: 1 1 1.00 U fldz
-# CHECK-NEXT: 1 5 1.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmul %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmul %st(2), %st
# CHECK-NEXT: 2 12 1.00 * U fmuls (%ecx)
# CHECK-NEXT: 2 12 1.00 * U fmull (%eax)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(1)
-# CHECK-NEXT: 1 5 1.00 U fmulp %st(2)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 5 1.00 U fmulp %st, %st(2)
# CHECK-NEXT: 3 15 1.00 * U fimuls (%ecx)
# CHECK-NEXT: 3 15 1.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 1.00 U fnop
# CHECK-NEXT: 1 100 0.33 U fpatan
# CHECK-NEXT: 1 100 0.33 U fprem
# CHECK-NEXT: 1 100 0.33 U fprem1
# CHECK-NEXT: 1 100 0.33 U fptan
# CHECK-NEXT: 1 100 0.33 U frndint
# CHECK-NEXT: 1 100 0.33 U frstor (%eax)
# CHECK-NEXT: 1 100 0.33 U fnsave (%eax)
# CHECK-NEXT: 1 100 0.33 U fscale
# CHECK-NEXT: 1 100 0.33 U fsin
# CHECK-NEXT: 1 100 0.33 U fsincos
# CHECK-NEXT: 1 24 24.00 U fsqrt
# CHECK-NEXT: 1 1 1.00 U fst %st(0)
# CHECK-NEXT: 3 6 1.00 * U fsts (%edx)
# CHECK-NEXT: 3 6 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 1.00 U fstp %st(0)
# CHECK-NEXT: 3 6 1.00 * U fstpl (%edx)
# CHECK-NEXT: 3 6 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 3 6 1.00 * U fstpt (%eax)
# CHECK-NEXT: 4 7 1.00 * U fnstcw (%eax)
# CHECK-NEXT: 1 100 0.33 U fnstenv (%eax)
# CHECK-NEXT: 4 7 1.00 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.33 U frstor (%eax)
# CHECK-NEXT: 1 100 0.33 U wait
# CHECK-NEXT: 1 100 0.33 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubrs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 3 1.00 U ftst
# CHECK-NEXT: 1 1 1.00 U fucom %st(1)
# CHECK-NEXT: 1 1 1.00 U fucom %st(3)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 3 1.00 U fucompp
-# CHECK-NEXT: 3 3 1.00 U fucomi %st(3)
-# CHECK-NEXT: 3 3 1.00 U fucompi %st(3)
+# CHECK-NEXT: 3 3 1.00 U fucomi %st(3), %st
+# CHECK-NEXT: 3 3 1.00 U fucompi %st(3), %st
# CHECK-NEXT: 1 100 0.33 U wait
# CHECK-NEXT: 1 100 0.33 U fxam
# CHECK-NEXT: 1 1 0.33 U fxch %st(1)
# CHECK-NEXT: 1 1 0.33 U fxch %st(3)
# CHECK-NEXT: 5 5 2.00 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.33 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 0.33 U fxtract
# CHECK-NEXT: 1 100 0.33 U fyl2x
# CHECK-NEXT: 1 100 0.33 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - SBDivider
# CHECK-NEXT: [1] - SBFPDivider
# CHECK-NEXT: [2] - SBPort0
# CHECK-NEXT: [3] - SBPort1
# CHECK-NEXT: [4] - SBPort4
# CHECK-NEXT: [5] - SBPort5
# CHECK-NEXT: [6.0] - SBPort23
# CHECK-NEXT: [6.1] - SBPort23
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6.0] [6.1]
# CHECK-NEXT: - 136.00 52.67 90.67 17.00 54.67 34.00 34.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6.0] [6.1] Instructions:
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - f2xm1
# CHECK-NEXT: - - - - - 1.00 - - fabs
-# CHECK-NEXT: - - - 1.00 - - - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fadd %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fadd %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fadd %st(2), %st
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fadds (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 faddl (%ecx)
-# CHECK-NEXT: - - - 1.00 - - - - faddp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - faddp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - faddp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - faddp %st, %st(2)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fiadds (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fiaddl (%ecx)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fbld (%ecx)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fbstp (%eax)
# CHECK-NEXT: - - - - - 1.00 - - fchs
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fnclex
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovb %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmove %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovne %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - 0.50 - - 2.50 - - fcmovu %st(1), %st
# CHECK-NEXT: - - - 1.00 - - - - fcom %st(1)
# CHECK-NEXT: - - - 1.00 - - - - fcom %st(3)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fcoms (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fcoml (%eax)
# CHECK-NEXT: - - - 1.00 - - - - fcomp %st(1)
# CHECK-NEXT: - - - 1.00 - - - - fcomp %st(3)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fcomps (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fcompl (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fcompp
-# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fcomi %st(3)
-# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fcompi %st(3)
+# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fcomi %st(3), %st
+# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fcompi %st(3), %st
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fcos
# CHECK-NEXT: - - - - - 1.00 - - fdecstp
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdiv %st(2)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdiv %st(2), %st
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fdivs (%ecx)
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fdivl (%eax)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivp %st(1)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivp %st(2)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fidivs (%ecx)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fidivl (%eax)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivr %st(2)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivr %st(2), %st
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fdivrs (%ecx)
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fdivrl (%eax)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivrp %st(1)
-# CHECK-NEXT: - 14.00 1.00 - - - - - fdivrp %st(2)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - 14.00 1.00 - - - - - fdivrp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fidivrs (%ecx)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fidivrl (%eax)
# CHECK-NEXT: - - - - - 1.00 - - ffree %st(0)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 ficoms (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 ficoml (%eax)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 ficomps (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 ficompl (%eax)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 filds (%edx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fildl (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fildll (%eax)
# CHECK-NEXT: - - - - - 1.00 - - fincstp
# CHECK-NEXT: - - 1.00 1.00 - 2.00 - - fninit
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fists (%edx)
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fistl (%ecx)
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fistps (%edx)
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fistpl (%ecx)
# CHECK-NEXT: - - - 1.00 1.00 - 1.00 1.00 fistpll (%eax)
# CHECK-NEXT: - - - 1.00 1.00 - 0.50 0.50 fisttps (%edx)
# CHECK-NEXT: - - - 1.00 1.00 - 0.50 0.50 fisttpl (%ecx)
# CHECK-NEXT: - - - 1.00 1.00 - 0.50 0.50 fisttpll (%eax)
# CHECK-NEXT: - - - - - 1.00 - - fld %st(0)
# CHECK-NEXT: - - 0.50 0.50 - 1.00 0.50 0.50 flds (%edx)
# CHECK-NEXT: - - 0.50 0.50 - 1.00 0.50 0.50 fldl (%ecx)
# CHECK-NEXT: - - 0.50 0.50 - 1.00 0.50 0.50 fldt (%eax)
# CHECK-NEXT: - - - - 1.00 2.00 1.00 1.00 fldcw (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fldenv (%eax)
# CHECK-NEXT: - - 1.00 - - 1.00 - - fld1
# CHECK-NEXT: - - 1.00 1.00 - - - - fldl2e
# CHECK-NEXT: - - 1.00 1.00 - - - - fldl2t
# CHECK-NEXT: - - 1.00 1.00 - - - - fldlg2
# CHECK-NEXT: - - 1.00 1.00 - - - - fldln2
# CHECK-NEXT: - - 1.00 1.00 - - - - fldpi
# CHECK-NEXT: - - - - - 1.00 - - fldz
-# CHECK-NEXT: - - 1.00 - - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - fmul %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - fmul %st(2), %st
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fmuls (%ecx)
# CHECK-NEXT: - - 1.00 - - - 0.50 0.50 fmull (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - fmulp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - fmulp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - fmulp %st, %st(2)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fimuls (%ecx)
# CHECK-NEXT: - - 1.00 1.00 - - 0.50 0.50 fimull (%eax)
# CHECK-NEXT: - - - - - 1.00 - - fnop
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fpatan
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fprem
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fprem1
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fptan
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - frndint
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - frstor (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fnsave (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fscale
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fsin
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fsincos
# CHECK-NEXT: - 24.00 1.00 - - - - - fsqrt
# CHECK-NEXT: - - - - - 1.00 - - fst %st(0)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fsts (%edx)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fstl (%ecx)
# CHECK-NEXT: - - - - - 1.00 - - fstp %st(0)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fstpl (%edx)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fstpl (%ecx)
# CHECK-NEXT: - - - - 1.00 - 1.00 1.00 fstpt (%eax)
# CHECK-NEXT: - - - - 1.00 1.00 1.00 1.00 fnstcw (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fnstenv (%eax)
# CHECK-NEXT: - - 1.00 - 1.00 - 1.00 1.00 fnstsw (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - frstor (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - wait
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fnsave (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fsub %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fsub %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fsub %st(2), %st
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fsubs (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fsubl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - fsubp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fsubp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fsubp %st, %st(2)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fisubs (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fisubl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fsubr %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fsubr %st(2), %st
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fsubrs (%ecx)
# CHECK-NEXT: - - - 1.00 - - 0.50 0.50 fsubrl (%eax)
-# CHECK-NEXT: - - - 1.00 - - - - fsubrp %st(1)
-# CHECK-NEXT: - - - 1.00 - - - - fsubrp %st(2)
+# CHECK-NEXT: - - - 1.00 - - - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - 1.00 - - - - fsubrp %st, %st(2)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fisubrs (%ecx)
# CHECK-NEXT: - - - 2.00 - - 0.50 0.50 fisubrl (%eax)
# CHECK-NEXT: - - - 1.00 - - - - ftst
# CHECK-NEXT: - - - 1.00 - - - - fucom %st(1)
# CHECK-NEXT: - - - 1.00 - - - - fucom %st(3)
# CHECK-NEXT: - - - 1.00 - - - - fucomp %st(1)
# CHECK-NEXT: - - - 1.00 - - - - fucomp %st(3)
# CHECK-NEXT: - - - 1.00 - - - - fucompp
-# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fucomi %st(3)
-# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fucompi %st(3)
+# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fucomi %st(3), %st
+# CHECK-NEXT: - - 1.00 1.00 - 1.00 - - fucompi %st(3), %st
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - wait
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxam
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxch %st(1)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxch %st(3)
# CHECK-NEXT: - - 0.50 0.50 1.00 2.00 0.50 0.50 fxrstor (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxsave (%eax)
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fxtract
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fyl2x
# CHECK-NEXT: - - 0.33 0.33 - 0.33 - - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SkylakeClient/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SkylakeClient/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SkylakeClient/resources-x87.s (revision 344166)
@@ -1,523 +1,523 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=skylake -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.25 U f2xm1
# CHECK-NEXT: 1 1 1.00 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fadds (%ecx)
# CHECK-NEXT: 2 10 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fiadds (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.25 U fbld (%ecx)
# CHECK-NEXT: 2 1 1.00 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 4 4 1.00 U fnclex
-# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st
# CHECK-NEXT: 1 1 1.00 U fcom %st(1)
# CHECK-NEXT: 1 1 1.00 U fcom %st(3)
# CHECK-NEXT: 2 8 1.00 U fcoms (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(3)
# CHECK-NEXT: 2 8 1.00 U fcomps (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 100 0.25 U fcompp
-# CHECK-NEXT: 1 2 1.00 U fcomi %st(3)
-# CHECK-NEXT: 1 2 1.00 U fcompi %st(3)
+# CHECK-NEXT: 1 2 1.00 U fcomi %st(3), %st
+# CHECK-NEXT: 1 2 1.00 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.25 U fcos
# CHECK-NEXT: 2 2 1.00 U fdecstp
-# CHECK-NEXT: 1 15 1.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 20 1.00 U fdiv %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 20 1.00 U fdiv %st(2), %st
# CHECK-NEXT: 2 22 1.00 * U fdivs (%ecx)
# CHECK-NEXT: 2 22 1.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 15 1.00 U fdivp %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivp %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivp %st, %st(2)
# CHECK-NEXT: 3 25 1.00 * U fidivs (%ecx)
# CHECK-NEXT: 3 25 1.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 20 1.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivr %st(2)
+# CHECK-NEXT: 1 20 1.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivr %st(2), %st
# CHECK-NEXT: 2 27 1.00 * U fdivrs (%ecx)
# CHECK-NEXT: 2 27 1.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 20 1.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 20 1.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 20 1.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 20 1.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 3 30 1.00 * U fidivrs (%ecx)
# CHECK-NEXT: 3 30 1.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 100 0.25 U ffree %st(0)
# CHECK-NEXT: 3 11 2.00 U ficoms (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficoml (%eax)
# CHECK-NEXT: 3 11 2.00 U ficomps (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficompl (%eax)
# CHECK-NEXT: 2 10 1.00 * U filds (%edx)
# CHECK-NEXT: 2 10 1.00 * U fildl (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 1 0.50 U fincstp
# CHECK-NEXT: 15 75 6.00 U fninit
# CHECK-NEXT: 3 4 1.00 * U fists (%edx)
# CHECK-NEXT: 3 4 1.00 * U fistl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fistps (%edx)
# CHECK-NEXT: 3 4 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fistpll (%eax)
# CHECK-NEXT: 3 4 1.00 * U fisttps (%edx)
# CHECK-NEXT: 3 4 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 0.25 U fld %st(0)
# CHECK-NEXT: 1 7 0.50 * U flds (%edx)
# CHECK-NEXT: 1 7 0.50 * U fldl (%ecx)
# CHECK-NEXT: 1 7 0.50 * U fldt (%eax)
# CHECK-NEXT: 3 7 1.00 * U fldcw (%eax)
# CHECK-NEXT: 64 62 14.00 U fldenv (%eax)
# CHECK-NEXT: 2 1 1.00 U fld1
# CHECK-NEXT: 2 1 1.00 U fldl2e
# CHECK-NEXT: 2 1 1.00 U fldl2t
# CHECK-NEXT: 2 1 1.00 U fldlg2
# CHECK-NEXT: 2 1 1.00 U fldln2
# CHECK-NEXT: 2 1 1.00 U fldpi
# CHECK-NEXT: 1 1 0.50 U fldz
-# CHECK-NEXT: 1 4 1.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 4 1.00 U fmul %st(2)
+# CHECK-NEXT: 1 4 1.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 4 1.00 U fmul %st(2), %st
# CHECK-NEXT: 2 11 1.00 * U fmuls (%ecx)
# CHECK-NEXT: 2 11 1.00 * U fmull (%eax)
-# CHECK-NEXT: 1 4 1.00 U fmulp %st(1)
-# CHECK-NEXT: 1 4 1.00 U fmulp %st(2)
+# CHECK-NEXT: 1 4 1.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 4 1.00 U fmulp %st, %st(2)
# CHECK-NEXT: 3 14 1.00 * U fimuls (%ecx)
# CHECK-NEXT: 3 14 1.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 0.50 U fnop
# CHECK-NEXT: 1 100 0.25 U fpatan
# CHECK-NEXT: 1 100 0.25 U fprem
# CHECK-NEXT: 1 100 0.25 U fprem1
# CHECK-NEXT: 1 100 0.25 U fptan
# CHECK-NEXT: 1 100 0.25 U frndint
# CHECK-NEXT: 1 100 0.25 U frstor (%eax)
# CHECK-NEXT: 1 100 0.25 U fnsave (%eax)
# CHECK-NEXT: 1 100 0.25 U fscale
# CHECK-NEXT: 1 100 0.25 U fsin
# CHECK-NEXT: 1 100 0.25 U fsincos
# CHECK-NEXT: 1 21 7.00 U fsqrt
# CHECK-NEXT: 1 1 0.25 U fst %st(0)
# CHECK-NEXT: 1 1 1.00 * U fsts (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 0.25 U fstp %st(0)
# CHECK-NEXT: 2 1 1.00 * U fstpl (%edx)
# CHECK-NEXT: 2 1 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 2 1 1.00 * U fstpt (%eax)
# CHECK-NEXT: 3 2 1.00 * U fnstcw (%eax)
# CHECK-NEXT: 100 106 19.50 U fnstenv (%eax)
# CHECK-NEXT: 3 3 1.00 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.25 U frstor (%eax)
# CHECK-NEXT: 2 2 0.50 U wait
# CHECK-NEXT: 1 100 0.25 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubrs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 2 1.00 U ftst
# CHECK-NEXT: 1 1 1.00 U fucom %st(1)
# CHECK-NEXT: 1 1 1.00 U fucom %st(3)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 2 1.00 U fucompp
-# CHECK-NEXT: 1 2 1.00 U fucomi %st(3)
-# CHECK-NEXT: 1 2 1.00 U fucompi %st(3)
+# CHECK-NEXT: 1 2 1.00 U fucomi %st(3), %st
+# CHECK-NEXT: 1 2 1.00 U fucompi %st(3), %st
# CHECK-NEXT: 2 2 0.50 U wait
# CHECK-NEXT: 1 100 0.25 U fxam
# CHECK-NEXT: 15 17 4.00 U fxch %st(1)
# CHECK-NEXT: 15 17 4.00 U fxch %st(3)
# CHECK-NEXT: 90 63 16.50 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.25 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 0.25 U fxtract
# CHECK-NEXT: 1 100 0.25 U fyl2x
# CHECK-NEXT: 1 100 0.25 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - SKLDivider
# CHECK-NEXT: [1] - SKLFPDivider
# CHECK-NEXT: [2] - SKLPort0
# CHECK-NEXT: [3] - SKLPort1
# CHECK-NEXT: [4] - SKLPort2
# CHECK-NEXT: [5] - SKLPort3
# CHECK-NEXT: [6] - SKLPort4
# CHECK-NEXT: [7] - SKLPort5
# CHECK-NEXT: [8] - SKLPort6
# CHECK-NEXT: [9] - SKLPort7
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
# CHECK-NEXT: - 7.00 126.75 52.25 49.00 49.00 27.00 149.75 69.25 9.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - f2xm1
# CHECK-NEXT: - - 1.00 - - - - - - - fabs
-# CHECK-NEXT: - - - - - - - 1.00 - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fadd %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fadd %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fadd %st(2), %st
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fadds (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - faddl (%ecx)
-# CHECK-NEXT: - - - - - - - 1.00 - - faddp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - faddp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - faddp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - faddp %st, %st(2)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fiadds (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fiaddl (%ecx)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fbld (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fbstp (%eax)
# CHECK-NEXT: - - 1.00 - - - - - - - fchs
# CHECK-NEXT: - - 1.00 1.00 - - - 1.00 1.00 - fnclex
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmove %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovne %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovu %st(1), %st
# CHECK-NEXT: - - - - - - - 1.00 - - fcom %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - fcom %st(3)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fcoms (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fcoml (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - fcomp %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - fcomp %st(3)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fcomps (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fcompl (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fcompp
-# CHECK-NEXT: - - 1.00 - - - - - - - fcomi %st(3)
-# CHECK-NEXT: - - 1.00 - - - - - - - fcompi %st(3)
+# CHECK-NEXT: - - 1.00 - - - - - - - fcomi %st(3), %st
+# CHECK-NEXT: - - 1.00 - - - - - - - fcompi %st(3), %st
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fcos
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fdecstp
-# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st, %st(2)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fidivs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fidivl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivrs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivrl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st, %st(2)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fidivrs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fidivrl (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - ffree %st(0)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - ficoms (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - ficoml (%eax)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - ficomps (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - ficompl (%eax)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - filds (%edx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fildl (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fildll (%eax)
# CHECK-NEXT: - - 0.50 - - - - 0.50 - - fincstp
# CHECK-NEXT: - - 3.00 1.50 - - - 9.00 1.50 - fninit
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fists (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fistl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fistps (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fistpl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fistpll (%eax)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fisttps (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fisttpl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fisttpll (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fld %st(0)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - flds (%edx)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - fldl (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - fldt (%eax)
# CHECK-NEXT: - - 1.50 - 0.50 0.50 - 0.50 - - fldcw (%eax)
# CHECK-NEXT: - - 19.25 9.75 4.00 4.00 - 12.25 14.75 - fldenv (%eax)
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fld1
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldl2e
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldl2t
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldlg2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldln2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldpi
# CHECK-NEXT: - - 0.50 - - - - 0.50 - - fldz
-# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fmuls (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fmull (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st, %st(2)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fimuls (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fimull (%eax)
# CHECK-NEXT: - - 0.50 - - - - 0.50 - - fnop
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fpatan
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fprem
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fprem1
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fptan
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frndint
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frstor (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fnsave (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fscale
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fsin
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fsincos
# CHECK-NEXT: - 7.00 1.00 - - - - - - - fsqrt
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fst %st(0)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fsts (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstl (%ecx)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fstp %st(0)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpl (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpt (%eax)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - 1.00 0.33 fnstcw (%eax)
# CHECK-NEXT: - - 27.00 8.50 3.67 3.67 11.00 23.50 19.00 3.67 fnstenv (%eax)
# CHECK-NEXT: - - 1.00 - 0.33 0.33 1.00 - - 0.33 fnstsw (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frstor (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - wait
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fnsave (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsub %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsub %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsub %st(2), %st
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fsubs (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fsubl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubp %st, %st(2)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fisubs (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fisubl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubr %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubr %st(2), %st
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fsubrs (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fsubrl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubrp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubrp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubrp %st, %st(2)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fisubrs (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fisubrl (%eax)
# CHECK-NEXT: - - 1.00 - - - - - - - ftst
# CHECK-NEXT: - - - - - - - 1.00 - - fucom %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - fucom %st(3)
# CHECK-NEXT: - - - - - - - 1.00 - - fucomp %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - fucomp %st(3)
# CHECK-NEXT: - - 1.00 - - - - - - - fucompp
-# CHECK-NEXT: - - 1.00 - - - - - - - fucomi %st(3)
-# CHECK-NEXT: - - 1.00 - - - - - - - fucompi %st(3)
+# CHECK-NEXT: - - 1.00 - - - - - - - fucomi %st(3), %st
+# CHECK-NEXT: - - 1.00 - - - - - - - fucompi %st(3), %st
# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - wait
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxam
# CHECK-NEXT: - - 4.00 2.00 - - - 4.00 5.00 - fxch %st(1)
# CHECK-NEXT: - - 4.00 2.00 - - - 4.00 5.00 - fxch %st(3)
# CHECK-NEXT: - - 17.25 12.25 16.50 16.50 - 12.75 14.75 - fxrstor (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxsave (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxtract
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fyl2x
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SkylakeServer/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SkylakeServer/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/SkylakeServer/resources-x87.s (revision 344166)
@@ -1,523 +1,523 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.25 U f2xm1
# CHECK-NEXT: 1 1 1.00 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fadds (%ecx)
# CHECK-NEXT: 2 10 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fiadds (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.25 U fbld (%ecx)
# CHECK-NEXT: 2 1 1.00 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 4 4 1.00 U fnclex
-# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 3 1.00 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmove %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 3 1.00 U fcmovu %st(1), %st
# CHECK-NEXT: 1 1 1.00 U fcom %st(1)
# CHECK-NEXT: 1 1 1.00 U fcom %st(3)
# CHECK-NEXT: 2 8 1.00 U fcoms (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(3)
# CHECK-NEXT: 2 8 1.00 U fcomps (%ecx)
# CHECK-NEXT: 2 8 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 100 0.25 U fcompp
-# CHECK-NEXT: 1 2 1.00 U fcomi %st(3)
-# CHECK-NEXT: 1 2 1.00 U fcompi %st(3)
+# CHECK-NEXT: 1 2 1.00 U fcomi %st(3), %st
+# CHECK-NEXT: 1 2 1.00 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.25 U fcos
# CHECK-NEXT: 2 2 1.00 U fdecstp
-# CHECK-NEXT: 1 15 1.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 20 1.00 U fdiv %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 20 1.00 U fdiv %st(2), %st
# CHECK-NEXT: 2 22 1.00 * U fdivs (%ecx)
# CHECK-NEXT: 2 22 1.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 15 1.00 U fdivp %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivp %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivp %st, %st(2)
# CHECK-NEXT: 3 25 1.00 * U fidivs (%ecx)
# CHECK-NEXT: 3 25 1.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 20 1.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivr %st(2)
+# CHECK-NEXT: 1 20 1.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivr %st(2), %st
# CHECK-NEXT: 2 27 1.00 * U fdivrs (%ecx)
# CHECK-NEXT: 2 27 1.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 20 1.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 20 1.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 20 1.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 20 1.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 3 30 1.00 * U fidivrs (%ecx)
# CHECK-NEXT: 3 30 1.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 100 0.25 U ffree %st(0)
# CHECK-NEXT: 3 11 2.00 U ficoms (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficoml (%eax)
# CHECK-NEXT: 3 11 2.00 U ficomps (%ecx)
# CHECK-NEXT: 3 11 2.00 U ficompl (%eax)
# CHECK-NEXT: 2 10 1.00 * U filds (%edx)
# CHECK-NEXT: 2 10 1.00 * U fildl (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 1 0.50 U fincstp
# CHECK-NEXT: 15 75 6.00 U fninit
# CHECK-NEXT: 3 4 1.00 * U fists (%edx)
# CHECK-NEXT: 3 4 1.00 * U fistl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fistps (%edx)
# CHECK-NEXT: 3 4 1.00 * U fistpl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fistpll (%eax)
# CHECK-NEXT: 3 4 1.00 * U fisttps (%edx)
# CHECK-NEXT: 3 4 1.00 * U fisttpl (%ecx)
# CHECK-NEXT: 3 4 1.00 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 0.25 U fld %st(0)
# CHECK-NEXT: 1 7 0.50 * U flds (%edx)
# CHECK-NEXT: 1 7 0.50 * U fldl (%ecx)
# CHECK-NEXT: 1 7 0.50 * U fldt (%eax)
# CHECK-NEXT: 3 7 1.00 * U fldcw (%eax)
# CHECK-NEXT: 64 62 14.00 U fldenv (%eax)
# CHECK-NEXT: 2 1 1.00 U fld1
# CHECK-NEXT: 2 1 1.00 U fldl2e
# CHECK-NEXT: 2 1 1.00 U fldl2t
# CHECK-NEXT: 2 1 1.00 U fldlg2
# CHECK-NEXT: 2 1 1.00 U fldln2
# CHECK-NEXT: 2 1 1.00 U fldpi
# CHECK-NEXT: 1 1 0.50 U fldz
-# CHECK-NEXT: 1 4 1.00 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 4 1.00 U fmul %st(2)
+# CHECK-NEXT: 1 4 1.00 U fmul %st, %st(1)
+# CHECK-NEXT: 1 4 1.00 U fmul %st(2), %st
# CHECK-NEXT: 2 11 1.00 * U fmuls (%ecx)
# CHECK-NEXT: 2 11 1.00 * U fmull (%eax)
-# CHECK-NEXT: 1 4 1.00 U fmulp %st(1)
-# CHECK-NEXT: 1 4 1.00 U fmulp %st(2)
+# CHECK-NEXT: 1 4 1.00 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 4 1.00 U fmulp %st, %st(2)
# CHECK-NEXT: 3 14 1.00 * U fimuls (%ecx)
# CHECK-NEXT: 3 14 1.00 * U fimull (%eax)
# CHECK-NEXT: 1 1 0.50 U fnop
# CHECK-NEXT: 1 100 0.25 U fpatan
# CHECK-NEXT: 1 100 0.25 U fprem
# CHECK-NEXT: 1 100 0.25 U fprem1
# CHECK-NEXT: 1 100 0.25 U fptan
# CHECK-NEXT: 1 100 0.25 U frndint
# CHECK-NEXT: 1 100 0.25 U frstor (%eax)
# CHECK-NEXT: 1 100 0.25 U fnsave (%eax)
# CHECK-NEXT: 1 100 0.25 U fscale
# CHECK-NEXT: 1 100 0.25 U fsin
# CHECK-NEXT: 1 100 0.25 U fsincos
# CHECK-NEXT: 1 21 7.00 U fsqrt
# CHECK-NEXT: 1 1 0.25 U fst %st(0)
# CHECK-NEXT: 1 1 1.00 * U fsts (%edx)
# CHECK-NEXT: 1 1 1.00 * U fstl (%ecx)
# CHECK-NEXT: 1 1 0.25 U fstp %st(0)
# CHECK-NEXT: 2 1 1.00 * U fstpl (%edx)
# CHECK-NEXT: 2 1 1.00 * U fstpl (%ecx)
# CHECK-NEXT: 2 1 1.00 * U fstpt (%eax)
# CHECK-NEXT: 3 2 1.00 * U fnstcw (%eax)
# CHECK-NEXT: 100 106 19.50 U fnstenv (%eax)
# CHECK-NEXT: 3 3 1.00 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.25 U frstor (%eax)
# CHECK-NEXT: 2 2 0.50 U wait
# CHECK-NEXT: 1 100 0.25 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 2 10 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 2 10 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 3 13 2.00 * U fisubrs (%ecx)
# CHECK-NEXT: 3 13 2.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 2 1.00 U ftst
# CHECK-NEXT: 1 1 1.00 U fucom %st(1)
# CHECK-NEXT: 1 1 1.00 U fucom %st(3)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 2 1.00 U fucompp
-# CHECK-NEXT: 1 2 1.00 U fucomi %st(3)
-# CHECK-NEXT: 1 2 1.00 U fucompi %st(3)
+# CHECK-NEXT: 1 2 1.00 U fucomi %st(3), %st
+# CHECK-NEXT: 1 2 1.00 U fucompi %st(3), %st
# CHECK-NEXT: 2 2 0.50 U wait
# CHECK-NEXT: 1 100 0.25 U fxam
# CHECK-NEXT: 15 17 4.00 U fxch %st(1)
# CHECK-NEXT: 15 17 4.00 U fxch %st(3)
# CHECK-NEXT: 90 63 16.50 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.25 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 0.25 U fxtract
# CHECK-NEXT: 1 100 0.25 U fyl2x
# CHECK-NEXT: 1 100 0.25 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - SKXDivider
# CHECK-NEXT: [1] - SKXFPDivider
# CHECK-NEXT: [2] - SKXPort0
# CHECK-NEXT: [3] - SKXPort1
# CHECK-NEXT: [4] - SKXPort2
# CHECK-NEXT: [5] - SKXPort3
# CHECK-NEXT: [6] - SKXPort4
# CHECK-NEXT: [7] - SKXPort5
# CHECK-NEXT: [8] - SKXPort6
# CHECK-NEXT: [9] - SKXPort7
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
# CHECK-NEXT: - 7.00 126.75 52.25 49.00 49.00 27.00 149.75 69.25 9.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - f2xm1
# CHECK-NEXT: - - 1.00 - - - - - - - fabs
-# CHECK-NEXT: - - - - - - - 1.00 - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fadd %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fadd %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fadd %st(2), %st
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fadds (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - faddl (%ecx)
-# CHECK-NEXT: - - - - - - - 1.00 - - faddp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - faddp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - faddp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - faddp %st, %st(2)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fiadds (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fiaddl (%ecx)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fbld (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fbstp (%eax)
# CHECK-NEXT: - - 1.00 - - - - - - - fchs
# CHECK-NEXT: - - 1.00 1.00 - - - 1.00 1.00 - fnclex
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - - 1.00 - - - - - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmove %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovne %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - - 1.00 - - - - - - fcmovu %st(1), %st
# CHECK-NEXT: - - - - - - - 1.00 - - fcom %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - fcom %st(3)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fcoms (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fcoml (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - fcomp %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - fcomp %st(3)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fcomps (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fcompl (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fcompp
-# CHECK-NEXT: - - 1.00 - - - - - - - fcomi %st(3)
-# CHECK-NEXT: - - 1.00 - - - - - - - fcompi %st(3)
+# CHECK-NEXT: - - 1.00 - - - - - - - fcomi %st(3), %st
+# CHECK-NEXT: - - 1.00 - - - - - - - fcompi %st(3), %st
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fcos
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fdecstp
-# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdiv %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivp %st, %st(2)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fidivs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fidivl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivr %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivrs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fdivrl (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fdivrp %st, %st(2)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fidivrs (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fidivrl (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - ffree %st(0)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - ficoms (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - ficoml (%eax)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - ficomps (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - ficompl (%eax)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - filds (%edx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fildl (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fildll (%eax)
# CHECK-NEXT: - - 0.50 - - - - 0.50 - - fincstp
# CHECK-NEXT: - - 3.00 1.50 - - - 9.00 1.50 - fninit
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fists (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fistl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fistps (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fistpl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fistpll (%eax)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fisttps (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fisttpl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 1.00 - 0.33 fisttpll (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fld %st(0)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - flds (%edx)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - fldl (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - - - - fldt (%eax)
# CHECK-NEXT: - - 1.50 - 0.50 0.50 - 0.50 - - fldcw (%eax)
# CHECK-NEXT: - - 19.25 9.75 4.00 4.00 - 12.25 14.75 - fldenv (%eax)
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fld1
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldl2e
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldl2t
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldlg2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldln2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - fldpi
# CHECK-NEXT: - - 0.50 - - - - 0.50 - - fldz
-# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmul %st(2), %st
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fmuls (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - fmull (%eax)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st(1)
-# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st(2)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - 1.00 - - - - - - - fmulp %st, %st(2)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fimuls (%ecx)
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - fimull (%eax)
# CHECK-NEXT: - - 0.50 - - - - 0.50 - - fnop
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fpatan
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fprem
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fprem1
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fptan
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frndint
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frstor (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fnsave (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fscale
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fsin
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fsincos
# CHECK-NEXT: - 7.00 1.00 - - - - - - - fsqrt
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fst %st(0)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fsts (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstl (%ecx)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fstp %st(0)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpl (%edx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpl (%ecx)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - - 0.33 fstpt (%eax)
# CHECK-NEXT: - - - - 0.33 0.33 1.00 - 1.00 0.33 fnstcw (%eax)
# CHECK-NEXT: - - 27.00 8.50 3.67 3.67 11.00 23.50 19.00 3.67 fnstenv (%eax)
# CHECK-NEXT: - - 1.00 - 0.33 0.33 1.00 - - 0.33 fnstsw (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - frstor (%eax)
# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - wait
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fnsave (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsub %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsub %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsub %st(2), %st
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fsubs (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fsubl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubp %st, %st(2)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fisubs (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fisubl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubr %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubr %st(2), %st
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fsubrs (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - fsubrl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubrp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - fsubrp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - fsubrp %st, %st(2)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fisubrs (%ecx)
# CHECK-NEXT: - - - - 0.50 0.50 - 2.00 - - fisubrl (%eax)
# CHECK-NEXT: - - 1.00 - - - - - - - ftst
# CHECK-NEXT: - - - - - - - 1.00 - - fucom %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - fucom %st(3)
# CHECK-NEXT: - - - - - - - 1.00 - - fucomp %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - fucomp %st(3)
# CHECK-NEXT: - - 1.00 - - - - - - - fucompp
-# CHECK-NEXT: - - 1.00 - - - - - - - fucomi %st(3)
-# CHECK-NEXT: - - 1.00 - - - - - - - fucompi %st(3)
+# CHECK-NEXT: - - 1.00 - - - - - - - fucomi %st(3), %st
+# CHECK-NEXT: - - 1.00 - - - - - - - fucompi %st(3), %st
# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - wait
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxam
# CHECK-NEXT: - - 4.00 2.00 - - - 4.00 5.00 - fxch %st(1)
# CHECK-NEXT: - - 4.00 2.00 - - - 4.00 5.00 - fxch %st(3)
# CHECK-NEXT: - - 17.25 12.25 16.50 16.50 - 12.75 14.75 - fxrstor (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxsave (%eax)
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fxtract
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fyl2x
# CHECK-NEXT: - - 0.25 0.25 - - - 0.25 0.25 - fyl2xp1
Index: vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Znver1/resources-x87.s
===================================================================
--- vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Znver1/resources-x87.s (revision 344165)
+++ vendor/llvm/dist-release_80/test/tools/llvm-mca/X86/Znver1/resources-x87.s (revision 344166)
@@ -1,525 +1,525 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=znver1 -instruction-tables < %s | FileCheck %s
f2xm1
fabs
-fadd %st(0), %st(1)
+fadd %st, %st(1)
fadd %st(2)
fadds (%ecx)
faddl (%ecx)
faddp %st(1)
faddp %st(2)
fiadds (%ecx)
fiaddl (%ecx)
fbld (%ecx)
fbstp (%eax)
fchs
fnclex
-fcmovb %st(1), %st(0)
-fcmovbe %st(1), %st(0)
-fcmove %st(1), %st(0)
-fcmovnb %st(1), %st(0)
-fcmovnbe %st(1), %st(0)
-fcmovne %st(1), %st(0)
-fcmovnu %st(1), %st(0)
-fcmovu %st(1), %st(0)
+fcmovb %st(1), %st
+fcmovbe %st(1), %st
+fcmove %st(1), %st
+fcmovnb %st(1), %st
+fcmovnbe %st(1), %st
+fcmovne %st(1), %st
+fcmovnu %st(1), %st
+fcmovu %st(1), %st
fcom %st(1)
fcom %st(3)
fcoms (%ecx)
fcoml (%eax)
fcomp %st(1)
fcomp %st(3)
fcomps (%ecx)
fcompl (%eax)
fcompp
fcomi %st(3)
fcompi %st(3)
fcos
fdecstp
-fdiv %st(0), %st(1)
+fdiv %st, %st(1)
fdiv %st(2)
fdivs (%ecx)
fdivl (%eax)
fdivp %st(1)
fdivp %st(2)
fidivs (%ecx)
fidivl (%eax)
-fdivr %st(0), %st(1)
+fdivr %st, %st(1)
fdivr %st(2)
fdivrs (%ecx)
fdivrl (%eax)
fdivrp %st(1)
fdivrp %st(2)
fidivrs (%ecx)
fidivrl (%eax)
ffree %st(0)
ficoms (%ecx)
ficoml (%eax)
ficomps (%ecx)
ficompl (%eax)
filds (%edx)
fildl (%ecx)
fildll (%eax)
fincstp
fninit
fists (%edx)
fistl (%ecx)
fistps (%edx)
fistpl (%ecx)
fistpll (%eax)
fisttps (%edx)
fisttpl (%ecx)
fisttpll (%eax)
fld %st(0)
flds (%edx)
fldl (%ecx)
fldt (%eax)
fldcw (%eax)
fldenv (%eax)
fld1
fldl2e
fldl2t
fldlg2
fldln2
fldpi
fldz
-fmul %st(0), %st(1)
+fmul %st, %st(1)
fmul %st(2)
fmuls (%ecx)
fmull (%eax)
fmulp %st(1)
fmulp %st(2)
fimuls (%ecx)
fimull (%eax)
fnop
fpatan
fprem
fprem1
fptan
frndint
frstor (%eax)
fnsave (%eax)
fscale
fsin
fsincos
fsqrt
fst %st(0)
fsts (%edx)
fstl (%ecx)
fstp %st(0)
fstpl (%edx)
fstpl (%ecx)
fstpt (%eax)
fnstcw (%eax)
fnstenv (%eax)
fnstsw (%eax)
frstor (%eax)
fsave (%eax)
-fsub %st(0), %st(1)
+fsub %st, %st(1)
fsub %st(2)
fsubs (%ecx)
fsubl (%eax)
fsubp %st(1)
fsubp %st(2)
fisubs (%ecx)
fisubl (%eax)
-fsubr %st(0), %st(1)
+fsubr %st, %st(1)
fsubr %st(2)
fsubrs (%ecx)
fsubrl (%eax)
fsubrp %st(1)
fsubrp %st(2)
fisubrs (%ecx)
fisubrl (%eax)
ftst
fucom %st(1)
fucom %st(3)
fucomp %st(1)
fucomp %st(3)
fucompp
fucomi %st(3)
fucompi %st(3)
fwait
fxam
fxch %st(1)
fxch %st(3)
fxrstor (%eax)
fxsave (%eax)
fxtract
fyl2x
fyl2xp1
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 100 0.25 U f2xm1
# CHECK-NEXT: 1 2 1.00 U fabs
-# CHECK-NEXT: 1 3 1.00 U fadd %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fadd %st(2)
+# CHECK-NEXT: 1 3 1.00 U fadd %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fadd %st(2), %st
# CHECK-NEXT: 1 10 1.00 * U fadds (%ecx)
# CHECK-NEXT: 1 10 1.00 * U faddl (%ecx)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(1)
-# CHECK-NEXT: 1 3 1.00 U faddp %st(2)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U faddp %st, %st(2)
# CHECK-NEXT: 1 10 1.00 * U fiadds (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fiaddl (%ecx)
# CHECK-NEXT: 1 100 0.25 U fbld (%ecx)
# CHECK-NEXT: 1 100 0.25 U fbstp (%eax)
# CHECK-NEXT: 1 1 1.00 U fchs
# CHECK-NEXT: 1 100 0.25 U fnclex
-# CHECK-NEXT: 1 100 0.25 U fcmovb %st(1), %st(0)
-# CHECK-NEXT: 1 100 0.25 U fcmovbe %st(1), %st(0)
-# CHECK-NEXT: 1 100 0.25 U fcmove %st(1), %st(0)
-# CHECK-NEXT: 1 100 0.25 U fcmovnb %st(1), %st(0)
-# CHECK-NEXT: 1 100 0.25 U fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: 1 100 0.25 U fcmovne %st(1), %st(0)
-# CHECK-NEXT: 1 100 0.25 U fcmovnu %st(1), %st(0)
-# CHECK-NEXT: 1 100 0.25 U fcmovu %st(1), %st(0)
+# CHECK-NEXT: 1 100 0.25 U fcmovb %st(1), %st
+# CHECK-NEXT: 1 100 0.25 U fcmovbe %st(1), %st
+# CHECK-NEXT: 1 100 0.25 U fcmove %st(1), %st
+# CHECK-NEXT: 1 100 0.25 U fcmovnb %st(1), %st
+# CHECK-NEXT: 1 100 0.25 U fcmovnbe %st(1), %st
+# CHECK-NEXT: 1 100 0.25 U fcmovne %st(1), %st
+# CHECK-NEXT: 1 100 0.25 U fcmovnu %st(1), %st
+# CHECK-NEXT: 1 100 0.25 U fcmovu %st(1), %st
# CHECK-NEXT: 1 1 1.00 U fcom %st(1)
# CHECK-NEXT: 1 1 1.00 U fcom %st(3)
# CHECK-NEXT: 1 8 1.00 U fcoms (%ecx)
# CHECK-NEXT: 1 8 1.00 U fcoml (%eax)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fcomp %st(3)
# CHECK-NEXT: 1 8 1.00 U fcomps (%ecx)
# CHECK-NEXT: 1 8 1.00 U fcompl (%eax)
# CHECK-NEXT: 1 1 1.00 U fcompp
-# CHECK-NEXT: 1 9 0.50 U fcomi %st(3)
-# CHECK-NEXT: 1 9 0.50 U fcompi %st(3)
+# CHECK-NEXT: 1 9 0.50 U fcomi %st(3), %st
+# CHECK-NEXT: 1 9 0.50 U fcompi %st(3), %st
# CHECK-NEXT: 1 100 0.25 U fcos
# CHECK-NEXT: 1 11 1.00 U fdecstp
-# CHECK-NEXT: 1 15 1.00 U fdiv %st(0), %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdiv %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdiv %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdiv %st(2), %st
# CHECK-NEXT: 1 22 1.00 * U fdivs (%ecx)
# CHECK-NEXT: 1 22 1.00 * U fdivl (%eax)
-# CHECK-NEXT: 1 15 1.00 U fdivp %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivp %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdivp %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivp %st, %st(2)
# CHECK-NEXT: 1 22 1.00 * U fidivs (%ecx)
# CHECK-NEXT: 1 22 1.00 * U fidivl (%eax)
-# CHECK-NEXT: 1 15 1.00 U fdivr %st(0), %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivr %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdivr %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivr %st(2), %st
# CHECK-NEXT: 1 22 1.00 * U fdivrs (%ecx)
# CHECK-NEXT: 1 22 1.00 * U fdivrl (%eax)
-# CHECK-NEXT: 1 15 1.00 U fdivrp %st(1)
-# CHECK-NEXT: 1 15 1.00 U fdivrp %st(2)
+# CHECK-NEXT: 1 15 1.00 U fdivrp %st, %st(1)
+# CHECK-NEXT: 1 15 1.00 U fdivrp %st, %st(2)
# CHECK-NEXT: 1 22 1.00 * U fidivrs (%ecx)
# CHECK-NEXT: 1 22 1.00 * U fidivrl (%eax)
# CHECK-NEXT: 1 11 1.00 U ffree %st(0)
# CHECK-NEXT: 2 12 1.50 U ficoms (%ecx)
# CHECK-NEXT: 2 12 1.50 U ficoml (%eax)
# CHECK-NEXT: 2 12 1.50 U ficomps (%ecx)
# CHECK-NEXT: 2 12 1.50 U ficompl (%eax)
# CHECK-NEXT: 2 11 1.00 * U filds (%edx)
# CHECK-NEXT: 2 11 1.00 * U fildl (%ecx)
# CHECK-NEXT: 2 11 1.00 * U fildll (%eax)
# CHECK-NEXT: 1 11 1.00 U fincstp
# CHECK-NEXT: 1 100 0.25 U fninit
# CHECK-NEXT: 1 12 0.50 * U fists (%edx)
# CHECK-NEXT: 1 12 0.50 * U fistl (%ecx)
# CHECK-NEXT: 1 12 0.50 * U fistps (%edx)
# CHECK-NEXT: 1 12 0.50 * U fistpl (%ecx)
# CHECK-NEXT: 1 12 0.50 * U fistpll (%eax)
# CHECK-NEXT: 1 12 0.50 * U fisttps (%edx)
# CHECK-NEXT: 1 12 0.50 * U fisttpl (%ecx)
# CHECK-NEXT: 1 12 0.50 * U fisttpll (%eax)
# CHECK-NEXT: 1 1 0.50 U fld %st(0)
# CHECK-NEXT: 1 8 0.50 * U flds (%edx)
# CHECK-NEXT: 1 8 0.50 * U fldl (%ecx)
# CHECK-NEXT: 2 1 0.50 * U fldt (%eax)
# CHECK-NEXT: 1 100 0.25 * U fldcw (%eax)
# CHECK-NEXT: 1 100 0.25 U fldenv (%eax)
# CHECK-NEXT: 1 11 1.00 U fld1
# CHECK-NEXT: 1 11 1.00 U fldl2e
# CHECK-NEXT: 1 11 1.00 U fldl2t
# CHECK-NEXT: 1 11 1.00 U fldlg2
# CHECK-NEXT: 1 11 1.00 U fldln2
# CHECK-NEXT: 1 11 1.00 U fldpi
# CHECK-NEXT: 1 8 0.50 U fldz
-# CHECK-NEXT: 1 3 0.50 U fmul %st(0), %st(1)
-# CHECK-NEXT: 1 3 0.50 U fmul %st(2)
+# CHECK-NEXT: 1 3 0.50 U fmul %st, %st(1)
+# CHECK-NEXT: 1 3 0.50 U fmul %st(2), %st
# CHECK-NEXT: 2 10 0.50 * U fmuls (%ecx)
# CHECK-NEXT: 2 10 0.50 * U fmull (%eax)
-# CHECK-NEXT: 1 3 0.50 U fmulp %st(1)
-# CHECK-NEXT: 1 3 0.50 U fmulp %st(2)
+# CHECK-NEXT: 1 3 0.50 U fmulp %st, %st(1)
+# CHECK-NEXT: 1 3 0.50 U fmulp %st, %st(2)
# CHECK-NEXT: 2 10 0.50 * U fimuls (%ecx)
# CHECK-NEXT: 2 10 0.50 * U fimull (%eax)
# CHECK-NEXT: 1 1 1.00 U fnop
# CHECK-NEXT: 1 100 0.25 U fpatan
# CHECK-NEXT: 1 100 0.25 U fprem
# CHECK-NEXT: 1 100 0.25 U fprem1
# CHECK-NEXT: 1 100 0.25 U fptan
# CHECK-NEXT: 1 100 0.25 U frndint
# CHECK-NEXT: 1 100 0.25 U frstor (%eax)
# CHECK-NEXT: 1 100 0.25 U fnsave (%eax)
# CHECK-NEXT: 1 100 0.25 U fscale
# CHECK-NEXT: 1 100 0.25 U fsin
# CHECK-NEXT: 1 100 0.25 U fsincos
# CHECK-NEXT: 1 20 20.00 U fsqrt
# CHECK-NEXT: 2 5 0.50 U fst %st(0)
# CHECK-NEXT: 1 1 0.50 * U fsts (%edx)
# CHECK-NEXT: 1 1 0.50 * U fstl (%ecx)
# CHECK-NEXT: 2 5 0.50 U fstp %st(0)
# CHECK-NEXT: 1 1 0.50 * U fstpl (%edx)
# CHECK-NEXT: 1 1 0.50 * U fstpl (%ecx)
# CHECK-NEXT: 1 5 0.50 * U fstpt (%eax)
# CHECK-NEXT: 1 100 0.25 * U fnstcw (%eax)
# CHECK-NEXT: 1 100 0.25 U fnstenv (%eax)
# CHECK-NEXT: 1 100 0.25 U fnstsw (%eax)
# CHECK-NEXT: 1 100 0.25 U frstor (%eax)
# CHECK-NEXT: 1 1 1.00 U wait
# CHECK-NEXT: 1 100 0.25 U fnsave (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsub %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsub %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsub %st(2), %st
# CHECK-NEXT: 1 10 1.00 * U fsubs (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fsubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubp %st, %st(2)
# CHECK-NEXT: 1 10 1.00 * U fisubs (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fisubl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(0), %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubr %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubr %st(2), %st
# CHECK-NEXT: 1 10 1.00 * U fsubrs (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fsubrl (%eax)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(1)
-# CHECK-NEXT: 1 3 1.00 U fsubrp %st(2)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(1)
+# CHECK-NEXT: 1 3 1.00 U fsubrp %st, %st(2)
# CHECK-NEXT: 1 10 1.00 * U fisubrs (%ecx)
# CHECK-NEXT: 1 10 1.00 * U fisubrl (%eax)
# CHECK-NEXT: 1 1 1.00 U ftst
# CHECK-NEXT: 1 1 1.00 U fucom %st(1)
# CHECK-NEXT: 1 1 1.00 U fucom %st(3)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(1)
# CHECK-NEXT: 1 1 1.00 U fucomp %st(3)
# CHECK-NEXT: 1 1 1.00 U fucompp
-# CHECK-NEXT: 1 9 0.50 U fucomi %st(3)
-# CHECK-NEXT: 1 9 0.50 U fucompi %st(3)
+# CHECK-NEXT: 1 9 0.50 U fucomi %st(3), %st
+# CHECK-NEXT: 1 9 0.50 U fucompi %st(3), %st
# CHECK-NEXT: 1 1 1.00 U wait
# CHECK-NEXT: 1 1 1.00 U fxam
# CHECK-NEXT: 1 1 0.25 U fxch %st(1)
# CHECK-NEXT: 1 1 0.25 U fxch %st(3)
# CHECK-NEXT: 1 100 0.25 * * U fxrstor (%eax)
# CHECK-NEXT: 1 100 0.25 * * U fxsave (%eax)
# CHECK-NEXT: 1 100 0.25 U fxtract
# CHECK-NEXT: 1 100 0.25 U fyl2x
# CHECK-NEXT: 1 100 0.25 U fyl2xp1
# CHECK: Resources:
# CHECK-NEXT: [0] - ZnAGU0
# CHECK-NEXT: [1] - ZnAGU1
# CHECK-NEXT: [2] - ZnALU0
# CHECK-NEXT: [3] - ZnALU1
# CHECK-NEXT: [4] - ZnALU2
# CHECK-NEXT: [5] - ZnALU3
# CHECK-NEXT: [6] - ZnDivider
# CHECK-NEXT: [7] - ZnFPU0
# CHECK-NEXT: [8] - ZnFPU1
# CHECK-NEXT: [9] - ZnFPU2
# CHECK-NEXT: [10] - ZnFPU3
# CHECK-NEXT: [11] - ZnMultiplier
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
# CHECK-NEXT: 32.50 32.50 - - - - - 54.50 6.00 8.00 64.50 -
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
# CHECK-NEXT: - - - - - - - - - - - - f2xm1
# CHECK-NEXT: - - - - - - - - - - 1.00 - fabs
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fadd %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fadd %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fadd %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fadd %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fadds (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - faddl (%ecx)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - faddp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - faddp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - faddp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - faddp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fiadds (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fiaddl (%ecx)
# CHECK-NEXT: - - - - - - - - - - - - fbld (%ecx)
# CHECK-NEXT: - - - - - - - - - - - - fbstp (%eax)
# CHECK-NEXT: - - - - - - - - - - 1.00 - fchs
# CHECK-NEXT: - - - - - - - - - - - - fnclex
-# CHECK-NEXT: - - - - - - - - - - - - fcmovb %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - - - - - fcmovbe %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - - - - - fcmove %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - - - - - fcmovnb %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - - - - - fcmovnbe %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - - - - - fcmovne %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - - - - - fcmovnu %st(1), %st(0)
-# CHECK-NEXT: - - - - - - - - - - - - fcmovu %st(1), %st(0)
+# CHECK-NEXT: - - - - - - - - - - - - fcmovb %st(1), %st
+# CHECK-NEXT: - - - - - - - - - - - - fcmovbe %st(1), %st
+# CHECK-NEXT: - - - - - - - - - - - - fcmove %st(1), %st
+# CHECK-NEXT: - - - - - - - - - - - - fcmovnb %st(1), %st
+# CHECK-NEXT: - - - - - - - - - - - - fcmovnbe %st(1), %st
+# CHECK-NEXT: - - - - - - - - - - - - fcmovne %st(1), %st
+# CHECK-NEXT: - - - - - - - - - - - - fcmovnu %st(1), %st
+# CHECK-NEXT: - - - - - - - - - - - - fcmovu %st(1), %st
# CHECK-NEXT: - - - - - - - 1.00 - - - - fcom %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fcom %st(3)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fcoms (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fcoml (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fcomp %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fcomp %st(3)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fcomps (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fcompl (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fcompp
-# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 - 0.50 - - fcomi %st(3)
-# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 - 0.50 - - fcompi %st(3)
+# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 - 0.50 - - fcomi %st(3), %st
+# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 - 0.50 - - fcompi %st(3), %st
# CHECK-NEXT: - - - - - - - - - - - - fcos
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fdecstp
-# CHECK-NEXT: - - - - - - - - - - 1.00 - fdiv %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - - - - 1.00 - fdiv %st(2)
+# CHECK-NEXT: - - - - - - - - - - 1.00 - fdiv %st, %st(1)
+# CHECK-NEXT: - - - - - - - - - - 1.00 - fdiv %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fdivs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fdivl (%eax)
-# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivp %st(1)
-# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivp %st(2)
+# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivp %st, %st(1)
+# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fidivs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fidivl (%eax)
-# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivr %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivr %st(2)
+# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivr %st, %st(1)
+# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivr %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fdivrs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fdivrl (%eax)
-# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivrp %st(1)
-# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivrp %st(2)
+# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivrp %st, %st(1)
+# CHECK-NEXT: - - - - - - - - - - 1.00 - fdivrp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fidivrs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fidivrl (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - ffree %st(0)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.50 - - 1.50 - ficoms (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.50 - - 1.50 - ficoml (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.50 - - 1.50 - ficomps (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.50 - - 1.50 - ficompl (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - filds (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fildl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fildll (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fincstp
# CHECK-NEXT: - - - - - - - - - - - - fninit
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fists (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fistl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fistps (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fistpl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fistpll (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fisttps (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fisttpl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fisttpll (%eax)
# CHECK-NEXT: - - - - - - - - 0.50 - 0.50 - fld %st(0)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - flds (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - fldl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 - 0.50 - fldt (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fldcw (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fldenv (%eax)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fld1
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fldl2e
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fldl2t
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fldlg2
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fldln2
# CHECK-NEXT: 0.50 0.50 - - - - - - - - 1.00 - fldpi
# CHECK-NEXT: 0.50 0.50 - - - - - - 0.50 - 0.50 - fldz
-# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - fmul %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - fmul %st(2)
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - fmul %st, %st(1)
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - fmul %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 0.50 - - - fmuls (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 0.50 - - - fmull (%eax)
-# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - fmulp %st(1)
-# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - fmulp %st(2)
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - fmulp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - fmulp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 0.50 - - - fimuls (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 0.50 - - - fimull (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fnop
# CHECK-NEXT: - - - - - - - - - - - - fpatan
# CHECK-NEXT: - - - - - - - - - - - - fprem
# CHECK-NEXT: - - - - - - - - - - - - fprem1
# CHECK-NEXT: - - - - - - - - - - - - fptan
# CHECK-NEXT: - - - - - - - - - - - - frndint
# CHECK-NEXT: - - - - - - - - - - - - frstor (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fnsave (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fscale
# CHECK-NEXT: - - - - - - - - - - - - fsin
# CHECK-NEXT: - - - - - - - - - - - - fsincos
# CHECK-NEXT: - - - - - - - - - - 20.00 - fsqrt
# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fst %st(0)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - fsts (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - fstl (%ecx)
# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fstp %st(0)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - fstpl (%edx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - fstpl (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - - - 0.50 0.50 - fstpt (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fnstcw (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fnstenv (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fnstsw (%eax)
# CHECK-NEXT: - - - - - - - - - - - - frstor (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - - - wait
# CHECK-NEXT: - - - - - - - - - - - - fnsave (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fsub %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fsub %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fsub %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fsub %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fsubs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fsubl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fisubs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fisubl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubr %st(0), %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubr %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubr %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubr %st(2), %st
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fsubrs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fsubrl (%eax)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubrp %st(1)
-# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubrp %st(2)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubrp %st, %st(1)
+# CHECK-NEXT: - - - - - - - 1.00 - - - - fsubrp %st, %st(2)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fisubrs (%ecx)
# CHECK-NEXT: 0.50 0.50 - - - - - 1.00 - - - - fisubrl (%eax)
# CHECK-NEXT: - - - - - - - 1.00 - - - - ftst
# CHECK-NEXT: - - - - - - - 1.00 - - - - fucom %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fucom %st(3)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fucomp %st(1)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fucomp %st(3)
# CHECK-NEXT: - - - - - - - 1.00 - - - - fucompp
-# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 - 0.50 - - fucomi %st(3)
-# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 - 0.50 - - fucompi %st(3)
+# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 - 0.50 - - fucomi %st(3), %st
+# CHECK-NEXT: 0.50 0.50 - - - - - 0.50 - 0.50 - - fucompi %st(3), %st
# CHECK-NEXT: - - - - - - - 1.00 - - - - wait
# CHECK-NEXT: - - - - - - - - - - 1.00 - fxam
# CHECK-NEXT: - - - - - - - 0.25 0.25 0.25 0.25 - fxch %st(1)
# CHECK-NEXT: - - - - - - - 0.25 0.25 0.25 0.25 - fxch %st(3)
# CHECK-NEXT: - - - - - - - - - - - - fxrstor (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fxsave (%eax)
# CHECK-NEXT: - - - - - - - - - - - - fxtract
# CHECK-NEXT: - - - - - - - - - - - - fyl2x
# CHECK-NEXT: - - - - - - - - - - - - fyl2xp1
Index: vendor/llvm/dist-release_80/tools/yaml2obj/yaml2wasm.cpp
===================================================================
--- vendor/llvm/dist-release_80/tools/yaml2obj/yaml2wasm.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/tools/yaml2obj/yaml2wasm.cpp (revision 344166)
@@ -1,607 +1,608 @@
//===- yaml2wasm - Convert YAML to a Wasm object file --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// The Wasm component of yaml2obj.
///
//===----------------------------------------------------------------------===//
//
#include "llvm/Object/Wasm.h"
#include "llvm/ObjectYAML/ObjectYAML.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/LEB128.h"
using namespace llvm;
/// This parses a yaml stream that represents a Wasm object file.
/// See docs/yaml2obj for the yaml schema.
class WasmWriter {
public:
WasmWriter(WasmYAML::Object &Obj) : Obj(Obj) {}
int writeWasm(raw_ostream &OS);
private:
int writeRelocSection(raw_ostream &OS, WasmYAML::Section &Sec,
uint32_t SectionIndex);
int writeSectionContent(raw_ostream &OS, WasmYAML::CustomSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::TypeSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::ImportSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::FunctionSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::TableSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::MemorySection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::GlobalSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::EventSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::ExportSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::StartSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::ElemSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::CodeSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::DataSection &Section);
// Custom section types
int writeSectionContent(raw_ostream &OS, WasmYAML::DylinkSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::NameSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::LinkingSection &Section);
WasmYAML::Object &Obj;
uint32_t NumImportedFunctions = 0;
uint32_t NumImportedGlobals = 0;
uint32_t NumImportedEvents = 0;
};
static int writeUint64(raw_ostream &OS, uint64_t Value) {
char Data[sizeof(Value)];
support::endian::write64le(Data, Value);
OS.write(Data, sizeof(Data));
return 0;
}
static int writeUint32(raw_ostream &OS, uint32_t Value) {
char Data[sizeof(Value)];
support::endian::write32le(Data, Value);
OS.write(Data, sizeof(Data));
return 0;
}
static int writeUint8(raw_ostream &OS, uint8_t Value) {
char Data[sizeof(Value)];
memcpy(Data, &Value, sizeof(Data));
OS.write(Data, sizeof(Data));
return 0;
}
static int writeStringRef(const StringRef &Str, raw_ostream &OS) {
encodeULEB128(Str.size(), OS);
OS << Str;
return 0;
}
static int writeLimits(const WasmYAML::Limits &Lim, raw_ostream &OS) {
writeUint8(OS, Lim.Flags);
encodeULEB128(Lim.Initial, OS);
if (Lim.Flags & wasm::WASM_LIMITS_FLAG_HAS_MAX)
encodeULEB128(Lim.Maximum, OS);
return 0;
}
static int writeInitExpr(const wasm::WasmInitExpr &InitExpr, raw_ostream &OS) {
writeUint8(OS, InitExpr.Opcode);
switch (InitExpr.Opcode) {
case wasm::WASM_OPCODE_I32_CONST:
encodeSLEB128(InitExpr.Value.Int32, OS);
break;
case wasm::WASM_OPCODE_I64_CONST:
encodeSLEB128(InitExpr.Value.Int64, OS);
break;
case wasm::WASM_OPCODE_F32_CONST:
writeUint32(OS, InitExpr.Value.Float32);
break;
case wasm::WASM_OPCODE_F64_CONST:
writeUint64(OS, InitExpr.Value.Float64);
break;
case wasm::WASM_OPCODE_GLOBAL_GET:
encodeULEB128(InitExpr.Value.Global, OS);
break;
default:
errs() << "Unknown opcode in init_expr: " << InitExpr.Opcode << "\n";
return 1;
}
writeUint8(OS, wasm::WASM_OPCODE_END);
return 0;
}
class SubSectionWriter {
raw_ostream &OS;
std::string OutString;
raw_string_ostream StringStream;
public:
SubSectionWriter(raw_ostream &OS) : OS(OS), StringStream(OutString) {}
void Done() {
StringStream.flush();
encodeULEB128(OutString.size(), OS);
OS << OutString;
OutString.clear();
}
raw_ostream &GetStream() { return StringStream; }
};
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::DylinkSection &Section) {
writeStringRef(Section.Name, OS);
encodeULEB128(Section.MemorySize, OS);
encodeULEB128(Section.MemoryAlignment, OS);
encodeULEB128(Section.TableSize, OS);
encodeULEB128(Section.TableAlignment, OS);
encodeULEB128(Section.Needed.size(), OS);
for (StringRef Needed : Section.Needed) {
writeStringRef(Needed, OS);
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::LinkingSection &Section) {
writeStringRef(Section.Name, OS);
encodeULEB128(Section.Version, OS);
SubSectionWriter SubSection(OS);
// SYMBOL_TABLE subsection
if (Section.SymbolTable.size()) {
writeUint8(OS, wasm::WASM_SYMBOL_TABLE);
encodeULEB128(Section.SymbolTable.size(), SubSection.GetStream());
#ifndef NDEBUG
uint32_t SymbolIndex = 0;
#endif
for (const WasmYAML::SymbolInfo &Info : Section.SymbolTable) {
assert(Info.Index == SymbolIndex++);
writeUint8(SubSection.GetStream(), Info.Kind);
encodeULEB128(Info.Flags, SubSection.GetStream());
switch (Info.Kind) {
case wasm::WASM_SYMBOL_TYPE_FUNCTION:
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
case wasm::WASM_SYMBOL_TYPE_EVENT:
encodeULEB128(Info.ElementIndex, SubSection.GetStream());
- if ((Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0)
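+ // The name is written for defined symbols, and for undefined symbols that carry an explicit import name (WASM_SYMBOL_EXPLICIT_NAME).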
+ if ((Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0 ||
+ (Info.Flags & wasm::WASM_SYMBOL_EXPLICIT_NAME) != 0)
writeStringRef(Info.Name, SubSection.GetStream());
break;
case wasm::WASM_SYMBOL_TYPE_DATA:
writeStringRef(Info.Name, SubSection.GetStream());
if ((Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0) {
encodeULEB128(Info.DataRef.Segment, SubSection.GetStream());
encodeULEB128(Info.DataRef.Offset, SubSection.GetStream());
encodeULEB128(Info.DataRef.Size, SubSection.GetStream());
}
break;
case wasm::WASM_SYMBOL_TYPE_SECTION:
encodeULEB128(Info.ElementIndex, SubSection.GetStream());
break;
default:
llvm_unreachable("unexpected kind");
}
}
SubSection.Done();
}
// SEGMENT_INFO subsection
if (Section.SegmentInfos.size()) {
writeUint8(OS, wasm::WASM_SEGMENT_INFO);
encodeULEB128(Section.SegmentInfos.size(), SubSection.GetStream());
for (const WasmYAML::SegmentInfo &SegmentInfo : Section.SegmentInfos) {
writeStringRef(SegmentInfo.Name, SubSection.GetStream());
encodeULEB128(SegmentInfo.Alignment, SubSection.GetStream());
encodeULEB128(SegmentInfo.Flags, SubSection.GetStream());
}
SubSection.Done();
}
// INIT_FUNCS subsection
if (Section.InitFunctions.size()) {
writeUint8(OS, wasm::WASM_INIT_FUNCS);
encodeULEB128(Section.InitFunctions.size(), SubSection.GetStream());
for (const WasmYAML::InitFunction &Func : Section.InitFunctions) {
encodeULEB128(Func.Priority, SubSection.GetStream());
encodeULEB128(Func.Symbol, SubSection.GetStream());
}
SubSection.Done();
}
// COMDAT_INFO subsection
if (Section.Comdats.size()) {
writeUint8(OS, wasm::WASM_COMDAT_INFO);
encodeULEB128(Section.Comdats.size(), SubSection.GetStream());
for (const auto &C : Section.Comdats) {
writeStringRef(C.Name, SubSection.GetStream());
encodeULEB128(0, SubSection.GetStream()); // flags for future use
encodeULEB128(C.Entries.size(), SubSection.GetStream());
for (const WasmYAML::ComdatEntry &Entry : C.Entries) {
writeUint8(SubSection.GetStream(), Entry.Kind);
encodeULEB128(Entry.Index, SubSection.GetStream());
}
}
SubSection.Done();
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::NameSection &Section) {
writeStringRef(Section.Name, OS);
if (Section.FunctionNames.size()) {
writeUint8(OS, wasm::WASM_NAMES_FUNCTION);
SubSectionWriter SubSection(OS);
encodeULEB128(Section.FunctionNames.size(), SubSection.GetStream());
for (const WasmYAML::NameEntry &NameEntry : Section.FunctionNames) {
encodeULEB128(NameEntry.Index, SubSection.GetStream());
writeStringRef(NameEntry.Name, SubSection.GetStream());
}
SubSection.Done();
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::CustomSection &Section) {
if (auto S = dyn_cast<WasmYAML::DylinkSection>(&Section)) {
if (auto Err = writeSectionContent(OS, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::NameSection>(&Section)) {
if (auto Err = writeSectionContent(OS, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::LinkingSection>(&Section)) {
if (auto Err = writeSectionContent(OS, *S))
return Err;
} else {
writeStringRef(Section.Name, OS);
Section.Payload.writeAsBinary(OS);
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::TypeSection &Section) {
encodeULEB128(Section.Signatures.size(), OS);
uint32_t ExpectedIndex = 0;
for (const WasmYAML::Signature &Sig : Section.Signatures) {
if (Sig.Index != ExpectedIndex) {
errs() << "Unexpected type index: " << Sig.Index << "\n";
return 1;
}
++ExpectedIndex;
writeUint8(OS, Sig.Form);
encodeULEB128(Sig.ParamTypes.size(), OS);
for (auto ParamType : Sig.ParamTypes)
writeUint8(OS, ParamType);
if (Sig.ReturnType == wasm::WASM_TYPE_NORESULT) {
encodeULEB128(0, OS);
} else {
encodeULEB128(1, OS);
writeUint8(OS, Sig.ReturnType);
}
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::ImportSection &Section) {
encodeULEB128(Section.Imports.size(), OS);
for (const WasmYAML::Import &Import : Section.Imports) {
writeStringRef(Import.Module, OS);
writeStringRef(Import.Field, OS);
writeUint8(OS, Import.Kind);
switch (Import.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
encodeULEB128(Import.SigIndex, OS);
NumImportedFunctions++;
break;
case wasm::WASM_EXTERNAL_GLOBAL:
writeUint8(OS, Import.GlobalImport.Type);
writeUint8(OS, Import.GlobalImport.Mutable);
NumImportedGlobals++;
break;
case wasm::WASM_EXTERNAL_EVENT:
writeUint32(OS, Import.EventImport.Attribute);
writeUint32(OS, Import.EventImport.SigIndex);
NumImportedEvents++;
break;
case wasm::WASM_EXTERNAL_MEMORY:
writeLimits(Import.Memory, OS);
break;
case wasm::WASM_EXTERNAL_TABLE:
writeUint8(OS, Import.TableImport.ElemType);
writeLimits(Import.TableImport.TableLimits, OS);
break;
default:
errs() << "Unknown import type: " << Import.Kind << "\n";
return 1;
}
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::FunctionSection &Section) {
encodeULEB128(Section.FunctionTypes.size(), OS);
for (uint32_t FuncType : Section.FunctionTypes) {
encodeULEB128(FuncType, OS);
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::ExportSection &Section) {
encodeULEB128(Section.Exports.size(), OS);
for (const WasmYAML::Export &Export : Section.Exports) {
writeStringRef(Export.Name, OS);
writeUint8(OS, Export.Kind);
encodeULEB128(Export.Index, OS);
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::StartSection &Section) {
encodeULEB128(Section.StartFunction, OS);
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::TableSection &Section) {
encodeULEB128(Section.Tables.size(), OS);
for (auto &Table : Section.Tables) {
writeUint8(OS, Table.ElemType);
writeLimits(Table.TableLimits, OS);
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::MemorySection &Section) {
encodeULEB128(Section.Memories.size(), OS);
for (const WasmYAML::Limits &Mem : Section.Memories) {
writeLimits(Mem, OS);
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::GlobalSection &Section) {
encodeULEB128(Section.Globals.size(), OS);
uint32_t ExpectedIndex = NumImportedGlobals;
for (auto &Global : Section.Globals) {
if (Global.Index != ExpectedIndex) {
errs() << "Unexpected global index: " << Global.Index << "\n";
return 1;
}
++ExpectedIndex;
writeUint8(OS, Global.Type);
writeUint8(OS, Global.Mutable);
writeInitExpr(Global.InitExpr, OS);
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::EventSection &Section) {
encodeULEB128(Section.Events.size(), OS);
uint32_t ExpectedIndex = NumImportedEvents;
for (auto &Event : Section.Events) {
if (Event.Index != ExpectedIndex) {
errs() << "Unexpected event index: " << Event.Index << "\n";
return 1;
}
++ExpectedIndex;
encodeULEB128(Event.Attribute, OS);
encodeULEB128(Event.SigIndex, OS);
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::ElemSection &Section) {
encodeULEB128(Section.Segments.size(), OS);
for (auto &Segment : Section.Segments) {
encodeULEB128(Segment.TableIndex, OS);
writeInitExpr(Segment.Offset, OS);
encodeULEB128(Segment.Functions.size(), OS);
for (auto &Function : Segment.Functions) {
encodeULEB128(Function, OS);
}
}
return 0;
}
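// Function bodies are serialized to a temporary buffer first so each body
// can be emitted with its ULEB128 size prefix.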
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::CodeSection &Section) {
encodeULEB128(Section.Functions.size(), OS);
uint32_t ExpectedIndex = NumImportedFunctions;
for (auto &Func : Section.Functions) {
std::string OutString;
raw_string_ostream StringStream(OutString);
if (Func.Index != ExpectedIndex) {
errs() << "Unexpected function index: " << Func.Index << "\n";
return 1;
}
++ExpectedIndex;
encodeULEB128(Func.Locals.size(), StringStream);
for (auto &LocalDecl : Func.Locals) {
encodeULEB128(LocalDecl.Count, StringStream);
writeUint8(StringStream, LocalDecl.Type);
}
Func.Body.writeAsBinary(StringStream);
// Write the section size followed by the content
StringStream.flush();
encodeULEB128(OutString.size(), OS);
OS << OutString;
}
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::DataSection &Section) {
encodeULEB128(Section.Segments.size(), OS);
for (auto &Segment : Section.Segments) {
encodeULEB128(Segment.MemoryIndex, OS);
writeInitExpr(Segment.Offset, OS);
encodeULEB128(Segment.Content.binary_size(), OS);
Segment.Content.writeAsBinary(OS);
}
return 0;
}
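// Relocations go into a trailing custom section named reloc.CODE, reloc.DATA,
// or reloc.<debug section>, holding the target section index and one
// type/offset/index record per relocation (plus an addend for memory- and
// offset-style relocation types).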
int WasmWriter::writeRelocSection(raw_ostream &OS, WasmYAML::Section &Sec,
uint32_t SectionIndex) {
switch (Sec.Type) {
case wasm::WASM_SEC_CODE:
writeStringRef("reloc.CODE", OS);
break;
case wasm::WASM_SEC_DATA:
writeStringRef("reloc.DATA", OS);
break;
case wasm::WASM_SEC_CUSTOM: {
auto CustomSection = dyn_cast<WasmYAML::CustomSection>(&Sec);
if (!CustomSection->Name.startswith(".debug_")) {
llvm_unreachable("not yet implemented (only for debug sections)");
return 1;
}
writeStringRef(("reloc." + CustomSection->Name).str(), OS);
break;
}
default:
llvm_unreachable("not yet implemented");
return 1;
}
encodeULEB128(SectionIndex, OS);
encodeULEB128(Sec.Relocations.size(), OS);
for (auto Reloc : Sec.Relocations) {
writeUint8(OS, Reloc.Type);
encodeULEB128(Reloc.Offset, OS);
encodeULEB128(Reloc.Index, OS);
switch (Reloc.Type) {
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_LEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_SLEB:
case wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32:
case wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32:
case wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32:
encodeULEB128(Reloc.Addend, OS);
}
}
return 0;
}
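// Top-level writer: emits the wasm magic and version, then each section as a
// ULEB128 type followed by a size-prefixed payload (checking the expected
// section order), and finally a reloc.* custom section for every section that
// carries relocations.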
int WasmWriter::writeWasm(raw_ostream &OS) {
// Write headers
OS.write(wasm::WasmMagic, sizeof(wasm::WasmMagic));
writeUint32(OS, Obj.Header.Version);
// Write each section
llvm::object::WasmSectionOrderChecker Checker;
for (const std::unique_ptr<WasmYAML::Section> &Sec : Obj.Sections) {
StringRef SecName = "";
if (auto S = dyn_cast<WasmYAML::CustomSection>(Sec.get()))
SecName = S->Name;
if (!Checker.isValidSectionOrder(Sec->Type, SecName)) {
errs() << "Out of order section type: " << Sec->Type << "\n";
return 1;
}
encodeULEB128(Sec->Type, OS);
std::string OutString;
raw_string_ostream StringStream(OutString);
if (auto S = dyn_cast<WasmYAML::CustomSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::TypeSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::ImportSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::FunctionSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::TableSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::MemorySection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::GlobalSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::EventSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::ExportSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::StartSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::ElemSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::CodeSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else if (auto S = dyn_cast<WasmYAML::DataSection>(Sec.get())) {
if (auto Err = writeSectionContent(StringStream, *S))
return Err;
} else {
errs() << "Unknown section type: " << Sec->Type << "\n";
return 1;
}
StringStream.flush();
// Write the section size followed by the content
encodeULEB128(OutString.size(), OS);
OS << OutString;
}
// write reloc sections for any section that have relocations
uint32_t SectionIndex = 0;
for (const std::unique_ptr<WasmYAML::Section> &Sec : Obj.Sections) {
if (Sec->Relocations.empty()) {
SectionIndex++;
continue;
}
writeUint8(OS, wasm::WASM_SEC_CUSTOM);
std::string OutString;
raw_string_ostream StringStream(OutString);
writeRelocSection(StringStream, *Sec, SectionIndex++);
StringStream.flush();
encodeULEB128(OutString.size(), OS);
OS << OutString;
}
return 0;
}
int yaml2wasm(llvm::WasmYAML::Object &Doc, raw_ostream &Out) {
WasmWriter Writer(Doc);
return Writer.writeWasm(Out);
}
Index: vendor/llvm/dist-release_80/utils/TableGen/X86RecognizableInstr.cpp
===================================================================
--- vendor/llvm/dist-release_80/utils/TableGen/X86RecognizableInstr.cpp (revision 344165)
+++ vendor/llvm/dist-release_80/utils/TableGen/X86RecognizableInstr.cpp (revision 344166)
@@ -1,1172 +1,1174 @@
//===- X86RecognizableInstr.cpp - Disassembler instruction spec --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is part of the X86 Disassembler Emitter.
// It contains the implementation of a single recognizable instruction.
// Documentation for the disassembler emitter in general can be found in
// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
#include "X86RecognizableInstr.h"
#include "X86DisassemblerShared.h"
#include "X86ModRMFilters.h"
#include "llvm/Support/ErrorHandling.h"
#include <string>
using namespace llvm;
using namespace X86Disassembler;
/// byteFromBitsInit - Extracts a value at most 8 bits in width from a BitsInit.
/// Useful for switch statements and the like.
///
/// @param init - A reference to the BitsInit to be decoded.
/// @return - The field, with the first bit in the BitsInit as the lowest
/// order bit.
static uint8_t byteFromBitsInit(BitsInit &init) {
int width = init.getNumBits();
assert(width <= 8 && "Field is too large for uint8_t!");
int index;
uint8_t mask = 0x01;
uint8_t ret = 0;
for (index = 0; index < width; index++) {
if (cast<BitInit>(init.getBit(index))->getValue())
ret |= mask;
mask <<= 1;
}
return ret;
}
/// byteFromRec - Extract a value at most 8 bits in width from a Record given the
/// name of the field.
///
/// @param rec - The record from which to extract the value.
/// @param name - The name of the field in the record.
/// @return - The field, as translated by byteFromBitsInit().
static uint8_t byteFromRec(const Record* rec, const std::string &name) {
BitsInit* bits = rec->getValueAsBitsInit(name);
return byteFromBitsInit(*bits);
}
RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid) {
UID = uid;
Rec = insn.TheDef;
Name = Rec->getName();
Spec = &tables.specForUID(UID);
if (!Rec->isSubClassOf("X86Inst")) {
ShouldBeEmitted = false;
return;
}
OpPrefix = byteFromRec(Rec, "OpPrefixBits");
OpMap = byteFromRec(Rec, "OpMapBits");
Opcode = byteFromRec(Rec, "Opcode");
Form = byteFromRec(Rec, "FormBits");
Encoding = byteFromRec(Rec, "OpEncBits");
OpSize = byteFromRec(Rec, "OpSizeBits");
AdSize = byteFromRec(Rec, "AdSizeBits");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
HasVEX_4V = Rec->getValueAsBit("hasVEX_4V");
VEX_WPrefix = byteFromRec(Rec,"VEX_WPrefix");
IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
HasEVEX_L2Prefix = Rec->getValueAsBit("hasEVEX_L2");
HasEVEX_K = Rec->getValueAsBit("hasEVEX_K");
HasEVEX_KZ = Rec->getValueAsBit("hasEVEX_Z");
HasEVEX_B = Rec->getValueAsBit("hasEVEX_B");
IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
ForceDisassemble = Rec->getValueAsBit("ForceDisassemble");
CD8_Scale = byteFromRec(Rec, "CD8_Scale");
Name = Rec->getName();
Operands = &insn.Operands.OperandList;
HasVEX_LPrefix = Rec->getValueAsBit("hasVEX_L");
EncodeRC = HasEVEX_B &&
(Form == X86Local::MRMDestReg || Form == X86Local::MRMSrcReg);
// Check for 64-bit inst which does not require REX
Is32Bit = false;
Is64Bit = false;
// FIXME: Is there some better way to check for In64BitMode?
std::vector<Record*> Predicates = Rec->getValueAsListOfDefs("Predicates");
for (unsigned i = 0, e = Predicates.size(); i != e; ++i) {
if (Predicates[i]->getName().find("Not64Bit") != Name.npos ||
Predicates[i]->getName().find("In32Bit") != Name.npos) {
Is32Bit = true;
break;
}
if (Predicates[i]->getName().find("In64Bit") != Name.npos) {
Is64Bit = true;
break;
}
}
if (Form == X86Local::Pseudo || (IsCodeGenOnly && !ForceDisassemble)) {
ShouldBeEmitted = false;
return;
}
// Special case since there is no attribute class for 64-bit and VEX
if (Name == "VMASKMOVDQU64") {
ShouldBeEmitted = false;
return;
}
ShouldBeEmitted = true;
}
void RecognizableInstr::processInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid)
{
// Ignore "asm parser only" instructions.
if (insn.TheDef->getValueAsBit("isAsmParserOnly"))
return;
RecognizableInstr recogInstr(tables, insn, uid);
if (recogInstr.shouldBeEmitted()) {
recogInstr.emitInstructionSpecifier();
recogInstr.emitDecodePath(tables);
}
}
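// Selects the EVEX context enumerator variant that matches the instruction's
// K (masking), KZ (zero-masking) and B (broadcast/rounding) attribute bits.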
#define EVEX_KB(n) (HasEVEX_KZ && HasEVEX_B ? n##_KZ_B : \
(HasEVEX_K && HasEVEX_B ? n##_K_B : \
(HasEVEX_KZ ? n##_KZ : \
(HasEVEX_K? n##_K : (HasEVEX_B ? n##_B : n)))))
InstructionContext RecognizableInstr::insnContext() const {
InstructionContext insnContext;
if (Encoding == X86Local::EVEX) {
if (HasVEX_LPrefix && HasEVEX_L2Prefix) {
errs() << "Don't support VEX.L if EVEX_L2 is enabled: " << Name << "\n";
llvm_unreachable("Don't support VEX.L if EVEX_L2 is enabled");
}
// VEX_L & VEX_W
if (!EncodeRC && HasVEX_LPrefix && (VEX_WPrefix == X86Local::VEX_W1 ||
VEX_WPrefix == X86Local::VEX_W1X)) {
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L_W_XD);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (!EncodeRC && HasVEX_LPrefix) {
// VEX_L
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L_XD);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (!EncodeRC && HasEVEX_L2Prefix &&
(VEX_WPrefix == X86Local::VEX_W1 ||
VEX_WPrefix == X86Local::VEX_W1X)) {
// EVEX_L2 & VEX_W
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L2_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L2_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L2_W_XD);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L2_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (!EncodeRC && HasEVEX_L2Prefix) {
// EVEX_L2
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L2_OPSIZE);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L2_XD);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L2_XS);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L2);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
}
else if (VEX_WPrefix == X86Local::VEX_W1 ||
VEX_WPrefix == X86Local::VEX_W1X) {
// VEX_W
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_W_XD);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
}
// No L, no W
else if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_OPSIZE);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_XD);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_XS);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
/// end of EVEX
} else if (Encoding == X86Local::VEX || Encoding == X86Local::XOP) {
if (HasVEX_LPrefix && (VEX_WPrefix == X86Local::VEX_W1 ||
VEX_WPrefix == X86Local::VEX_W1X)) {
if (OpPrefix == X86Local::PD)
insnContext = IC_VEX_L_W_OPSIZE;
else if (OpPrefix == X86Local::XS)
insnContext = IC_VEX_L_W_XS;
else if (OpPrefix == X86Local::XD)
insnContext = IC_VEX_L_W_XD;
else if (OpPrefix == X86Local::PS)
insnContext = IC_VEX_L_W;
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (OpPrefix == X86Local::PD && HasVEX_LPrefix)
insnContext = IC_VEX_L_OPSIZE;
else if (OpPrefix == X86Local::PD && (VEX_WPrefix == X86Local::VEX_W1 ||
VEX_WPrefix == X86Local::VEX_W1X))
insnContext = IC_VEX_W_OPSIZE;
else if (OpPrefix == X86Local::PD)
insnContext = IC_VEX_OPSIZE;
else if (HasVEX_LPrefix && OpPrefix == X86Local::XS)
insnContext = IC_VEX_L_XS;
else if (HasVEX_LPrefix && OpPrefix == X86Local::XD)
insnContext = IC_VEX_L_XD;
else if ((VEX_WPrefix == X86Local::VEX_W1 ||
VEX_WPrefix == X86Local::VEX_W1X) && OpPrefix == X86Local::XS)
insnContext = IC_VEX_W_XS;
else if ((VEX_WPrefix == X86Local::VEX_W1 ||
VEX_WPrefix == X86Local::VEX_W1X) && OpPrefix == X86Local::XD)
insnContext = IC_VEX_W_XD;
else if ((VEX_WPrefix == X86Local::VEX_W1 ||
VEX_WPrefix == X86Local::VEX_W1X) && OpPrefix == X86Local::PS)
insnContext = IC_VEX_W;
else if (HasVEX_LPrefix && OpPrefix == X86Local::PS)
insnContext = IC_VEX_L;
else if (OpPrefix == X86Local::XD)
insnContext = IC_VEX_XD;
else if (OpPrefix == X86Local::XS)
insnContext = IC_VEX_XS;
else if (OpPrefix == X86Local::PS)
insnContext = IC_VEX;
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (Is64Bit || HasREX_WPrefix || AdSize == X86Local::AdSize64) {
if (HasREX_WPrefix && (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD))
insnContext = IC_64BIT_REXW_OPSIZE;
else if (HasREX_WPrefix && AdSize == X86Local::AdSize32)
insnContext = IC_64BIT_REXW_ADSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
insnContext = IC_64BIT_XD_OPSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XS)
insnContext = IC_64BIT_XS_OPSIZE;
else if (AdSize == X86Local::AdSize32 && OpPrefix == X86Local::PD)
insnContext = IC_64BIT_OPSIZE_ADSIZE;
else if (OpSize == X86Local::OpSize16 && AdSize == X86Local::AdSize32)
insnContext = IC_64BIT_OPSIZE_ADSIZE;
else if (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD)
insnContext = IC_64BIT_OPSIZE;
else if (AdSize == X86Local::AdSize32)
insnContext = IC_64BIT_ADSIZE;
else if (HasREX_WPrefix && OpPrefix == X86Local::XS)
insnContext = IC_64BIT_REXW_XS;
else if (HasREX_WPrefix && OpPrefix == X86Local::XD)
insnContext = IC_64BIT_REXW_XD;
else if (OpPrefix == X86Local::XD)
insnContext = IC_64BIT_XD;
else if (OpPrefix == X86Local::XS)
insnContext = IC_64BIT_XS;
else if (HasREX_WPrefix)
insnContext = IC_64BIT_REXW;
else
insnContext = IC_64BIT;
} else {
if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
insnContext = IC_XD_OPSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XS)
insnContext = IC_XS_OPSIZE;
else if (AdSize == X86Local::AdSize16 && OpPrefix == X86Local::XD)
insnContext = IC_XD_ADSIZE;
else if (AdSize == X86Local::AdSize16 && OpPrefix == X86Local::XS)
insnContext = IC_XS_ADSIZE;
else if (AdSize == X86Local::AdSize16 && OpPrefix == X86Local::PD)
insnContext = IC_OPSIZE_ADSIZE;
else if (OpSize == X86Local::OpSize16 && AdSize == X86Local::AdSize16)
insnContext = IC_OPSIZE_ADSIZE;
else if (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD)
insnContext = IC_OPSIZE;
else if (AdSize == X86Local::AdSize16)
insnContext = IC_ADSIZE;
else if (OpPrefix == X86Local::XD)
insnContext = IC_XD;
else if (OpPrefix == X86Local::XS)
insnContext = IC_XS;
else
insnContext = IC;
}
return insnContext;
}
void RecognizableInstr::adjustOperandEncoding(OperandEncoding &encoding) {
// The scaling factor for AVX512 compressed displacement encoding is an
// instruction attribute. Adjust the ModRM encoding type to include the
// scale for compressed displacement.
if ((encoding != ENCODING_RM && encoding != ENCODING_VSIB) || CD8_Scale == 0)
return;
encoding = (OperandEncoding)(encoding + Log2_32(CD8_Scale));
assert(((encoding >= ENCODING_RM && encoding <= ENCODING_RM_CD64) ||
(encoding >= ENCODING_VSIB && encoding <= ENCODING_VSIB_CD64)) &&
"Invalid CDisp scaling");
}
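// Records the encoding and type of the next physical operand. Tied operands
// encountered along the way are marked as duplicates (ENCODING_DUP); optional
// operands are skipped when no physical operand remains.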
void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
unsigned &physicalOperandIndex,
unsigned numPhysicalOperands,
const unsigned *operandMapping,
OperandEncoding (*encodingFromString)
(const std::string&,
uint8_t OpSize)) {
if (optional) {
if (physicalOperandIndex >= numPhysicalOperands)
return;
} else {
assert(physicalOperandIndex < numPhysicalOperands);
}
while (operandMapping[operandIndex] != operandIndex) {
Spec->operands[operandIndex].encoding = ENCODING_DUP;
Spec->operands[operandIndex].type =
(OperandType)(TYPE_DUP0 + operandMapping[operandIndex]);
++operandIndex;
}
StringRef typeName = (*Operands)[operandIndex].Rec->getName();
OperandEncoding encoding = encodingFromString(typeName, OpSize);
// Adjust the encoding type for an operand based on the instruction.
adjustOperandEncoding(encoding);
Spec->operands[operandIndex].encoding = encoding;
Spec->operands[operandIndex].type = typeFromString(typeName,
HasREX_WPrefix, OpSize);
++operandIndex;
++physicalOperandIndex;
}
void RecognizableInstr::emitInstructionSpecifier() {
Spec->name = Name;
Spec->insnContext = insnContext();
const std::vector<CGIOperandList::OperandInfo> &OperandList = *Operands;
unsigned numOperands = OperandList.size();
unsigned numPhysicalOperands = 0;
// operandMapping maps from operands in OperandList to their originals.
// If operandMapping[i] != i, then the entry is a duplicate.
unsigned operandMapping[X86_MAX_OPERANDS];
assert(numOperands <= X86_MAX_OPERANDS && "X86_MAX_OPERANDS is not large enough");
for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
if (!OperandList[operandIndex].Constraints.empty()) {
const CGIOperandList::ConstraintInfo &Constraint =
OperandList[operandIndex].Constraints[0];
if (Constraint.isTied()) {
operandMapping[operandIndex] = operandIndex;
operandMapping[Constraint.getTiedOperand()] = operandIndex;
} else {
++numPhysicalOperands;
operandMapping[operandIndex] = operandIndex;
}
} else {
++numPhysicalOperands;
operandMapping[operandIndex] = operandIndex;
}
}
#define HANDLE_OPERAND(class) \
handleOperand(false, \
operandIndex, \
physicalOperandIndex, \
numPhysicalOperands, \
operandMapping, \
class##EncodingFromString);
#define HANDLE_OPTIONAL(class) \
handleOperand(true, \
operandIndex, \
physicalOperandIndex, \
numPhysicalOperands, \
operandMapping, \
class##EncodingFromString);
// operandIndex should always be < numOperands
unsigned operandIndex = 0;
// physicalOperandIndex should always be < numPhysicalOperands
unsigned physicalOperandIndex = 0;
#ifndef NDEBUG
// Given the set of prefix bits, how many additional operands does the
// instruction have?
unsigned additionalOperands = 0;
if (HasVEX_4V)
++additionalOperands;
if (HasEVEX_K)
++additionalOperands;
#endif
switch (Form) {
default: llvm_unreachable("Unhandled form");
case X86Local::RawFrmSrc:
HANDLE_OPERAND(relocation);
return;
case X86Local::RawFrmDst:
HANDLE_OPERAND(relocation);
return;
case X86Local::RawFrmDstSrc:
HANDLE_OPERAND(relocation);
HANDLE_OPERAND(relocation);
return;
case X86Local::RawFrm:
// Operand 1 (optional) is an address or immediate.
assert(numPhysicalOperands <= 1 &&
"Unexpected number of operands for RawFrm");
HANDLE_OPTIONAL(relocation)
break;
case X86Local::RawFrmMemOffs:
// Operand 1 is an address.
HANDLE_OPERAND(relocation);
break;
case X86Local::AddRegFrm:
// Operand 1 is added to the opcode.
// Operand 2 (optional) is an address.
assert(numPhysicalOperands >= 1 && numPhysicalOperands <= 2 &&
"Unexpected number of operands for AddRegFrm");
HANDLE_OPERAND(opcodeModifier)
HANDLE_OPTIONAL(relocation)
break;
case X86Local::MRMDestReg:
// Operand 1 is a register operand in the R/M field.
// - In AVX512 there may be a mask operand here -
// Operand 2 is a register operand in the Reg/Opcode field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
assert(numPhysicalOperands >= 2 + additionalOperands &&
numPhysicalOperands <= 3 + additionalOperands &&
"Unexpected number of operands for MRMDestRegFrm");
HANDLE_OPERAND(rmRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(roRegister)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMDestMem:
// Operand 1 is a memory operand (possibly SIB-extended)
// Operand 2 is a register operand in the Reg/Opcode field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
assert(numPhysicalOperands >= 2 + additionalOperands &&
numPhysicalOperands <= 3 + additionalOperands &&
"Unexpected number of operands for MRMDestMemFrm with VEX_4V");
HANDLE_OPERAND(memory)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(roRegister)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMSrcReg:
// Operand 1 is a register operand in the Reg/Opcode field.
// Operand 2 is a register operand in the R/M field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
// Operand 4 (optional) is an immediate.
assert(numPhysicalOperands >= 2 + additionalOperands &&
numPhysicalOperands <= 4 + additionalOperands &&
"Unexpected number of operands for MRMSrcRegFrm");
HANDLE_OPERAND(roRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(rmRegister)
HANDLE_OPTIONAL(immediate)
HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
break;
case X86Local::MRMSrcReg4VOp3:
assert(numPhysicalOperands == 3 &&
"Unexpected number of operands for MRMSrcReg4VOp3Frm");
HANDLE_OPERAND(roRegister)
HANDLE_OPERAND(rmRegister)
HANDLE_OPERAND(vvvvRegister)
break;
case X86Local::MRMSrcRegOp4:
assert(numPhysicalOperands >= 4 && numPhysicalOperands <= 5 &&
"Unexpected number of operands for MRMSrcRegOp4Frm");
HANDLE_OPERAND(roRegister)
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(immediate) // Register in imm[7:4]
HANDLE_OPERAND(rmRegister)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMSrcMem:
// Operand 1 is a register operand in the Reg/Opcode field.
// Operand 2 is a memory operand (possibly SIB-extended)
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
assert(numPhysicalOperands >= 2 + additionalOperands &&
numPhysicalOperands <= 4 + additionalOperands &&
"Unexpected number of operands for MRMSrcMemFrm");
HANDLE_OPERAND(roRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(immediate)
HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
break;
case X86Local::MRMSrcMem4VOp3:
assert(numPhysicalOperands == 3 &&
"Unexpected number of operands for MRMSrcMem4VOp3Frm");
HANDLE_OPERAND(roRegister)
HANDLE_OPERAND(memory)
HANDLE_OPERAND(vvvvRegister)
break;
case X86Local::MRMSrcMemOp4:
assert(numPhysicalOperands >= 4 && numPhysicalOperands <= 5 &&
"Unexpected number of operands for MRMSrcMemOp4Frm");
HANDLE_OPERAND(roRegister)
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(immediate) // Register in imm[7:4]
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMXr:
case X86Local::MRM0r:
case X86Local::MRM1r:
case X86Local::MRM2r:
case X86Local::MRM3r:
case X86Local::MRM4r:
case X86Local::MRM5r:
case X86Local::MRM6r:
case X86Local::MRM7r:
// Operand 1 is a register operand in the R/M field.
// Operand 2 (optional) is an immediate or relocation.
// Operand 3 (optional) is an immediate.
assert(numPhysicalOperands >= 0 + additionalOperands &&
numPhysicalOperands <= 3 + additionalOperands &&
"Unexpected number of operands for MRMnr");
if (HasVEX_4V)
HANDLE_OPERAND(vvvvRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
HANDLE_OPTIONAL(rmRegister)
HANDLE_OPTIONAL(relocation)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMXm:
case X86Local::MRM0m:
case X86Local::MRM1m:
case X86Local::MRM2m:
case X86Local::MRM3m:
case X86Local::MRM4m:
case X86Local::MRM5m:
case X86Local::MRM6m:
case X86Local::MRM7m:
// Operand 1 is a memory operand (possibly SIB-extended)
// Operand 2 (optional) is an immediate or relocation.
assert(numPhysicalOperands >= 1 + additionalOperands &&
numPhysicalOperands <= 2 + additionalOperands &&
"Unexpected number of operands for MRMnm");
if (HasVEX_4V)
HANDLE_OPERAND(vvvvRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(relocation)
break;
case X86Local::RawFrmImm8:
// operand 1 is a 16-bit immediate
// operand 2 is an 8-bit immediate
assert(numPhysicalOperands == 2 &&
"Unexpected number of operands for X86Local::RawFrmImm8");
HANDLE_OPERAND(immediate)
HANDLE_OPERAND(immediate)
break;
case X86Local::RawFrmImm16:
// operand 1 is a 16-bit immediate
// operand 2 is a 16-bit immediate
HANDLE_OPERAND(immediate)
HANDLE_OPERAND(immediate)
break;
#define MAP(from, to) case X86Local::MRM_##from:
X86_INSTR_MRM_MAPPING
#undef MAP
HANDLE_OPTIONAL(relocation)
break;
}
#undef HANDLE_OPERAND
#undef HANDLE_OPTIONAL
}
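// Picks the opcode table from the opcode map, builds a ModRM filter for the
// instruction form, and registers this instruction's UID in the disassembler
// tables (one entry per opcode for AddRegFrm, which encodes a register in the
// low three opcode bits).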
void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
// Special cases where the LLVM tables are not complete
#define MAP(from, to) \
case X86Local::MRM_##from:
llvm::Optional<OpcodeType> opcodeType;
switch (OpMap) {
default: llvm_unreachable("Invalid map!");
case X86Local::OB: opcodeType = ONEBYTE; break;
case X86Local::TB: opcodeType = TWOBYTE; break;
case X86Local::T8: opcodeType = THREEBYTE_38; break;
case X86Local::TA: opcodeType = THREEBYTE_3A; break;
case X86Local::XOP8: opcodeType = XOP8_MAP; break;
case X86Local::XOP9: opcodeType = XOP9_MAP; break;
case X86Local::XOPA: opcodeType = XOPA_MAP; break;
case X86Local::ThreeDNow: opcodeType = THREEDNOW_MAP; break;
}
std::unique_ptr<ModRMFilter> filter;
switch (Form) {
default: llvm_unreachable("Invalid form!");
case X86Local::Pseudo: llvm_unreachable("Pseudo should not be emitted!");
case X86Local::RawFrm:
case X86Local::AddRegFrm:
case X86Local::RawFrmMemOffs:
case X86Local::RawFrmSrc:
case X86Local::RawFrmDst:
case X86Local::RawFrmDstSrc:
case X86Local::RawFrmImm8:
case X86Local::RawFrmImm16:
filter = llvm::make_unique<DumbFilter>();
break;
case X86Local::MRMDestReg:
case X86Local::MRMSrcReg:
case X86Local::MRMSrcReg4VOp3:
case X86Local::MRMSrcRegOp4:
case X86Local::MRMXr:
filter = llvm::make_unique<ModFilter>(true);
break;
case X86Local::MRMDestMem:
case X86Local::MRMSrcMem:
case X86Local::MRMSrcMem4VOp3:
case X86Local::MRMSrcMemOp4:
case X86Local::MRMXm:
filter = llvm::make_unique<ModFilter>(false);
break;
case X86Local::MRM0r: case X86Local::MRM1r:
case X86Local::MRM2r: case X86Local::MRM3r:
case X86Local::MRM4r: case X86Local::MRM5r:
case X86Local::MRM6r: case X86Local::MRM7r:
filter = llvm::make_unique<ExtendedFilter>(true, Form - X86Local::MRM0r);
break;
case X86Local::MRM0m: case X86Local::MRM1m:
case X86Local::MRM2m: case X86Local::MRM3m:
case X86Local::MRM4m: case X86Local::MRM5m:
case X86Local::MRM6m: case X86Local::MRM7m:
filter = llvm::make_unique<ExtendedFilter>(false, Form - X86Local::MRM0m);
break;
X86_INSTR_MRM_MAPPING
filter = llvm::make_unique<ExactFilter>(0xC0 + Form - X86Local::MRM_C0);
break;
} // switch (Form)
uint8_t opcodeToSet = Opcode;
unsigned AddressSize = 0;
switch (AdSize) {
case X86Local::AdSize16: AddressSize = 16; break;
case X86Local::AdSize32: AddressSize = 32; break;
case X86Local::AdSize64: AddressSize = 64; break;
}
assert(opcodeType && "Opcode type not set");
assert(filter && "Filter not set");
if (Form == X86Local::AddRegFrm) {
assert(((opcodeToSet & 7) == 0) &&
"ADDREG_FRM opcode not aligned");
uint8_t currentOpcode;
for (currentOpcode = opcodeToSet;
currentOpcode < opcodeToSet + 8;
++currentOpcode)
tables.setTableFields(*opcodeType, insnContext(), currentOpcode, *filter,
UID, Is32Bit, OpPrefix == 0,
IgnoresVEX_L || EncodeRC,
VEX_WPrefix == X86Local::VEX_WIG, AddressSize);
} else {
tables.setTableFields(*opcodeType, insnContext(), opcodeToSet, *filter, UID,
Is32Bit, OpPrefix == 0, IgnoresVEX_L || EncodeRC,
VEX_WPrefix == X86Local::VEX_WIG, AddressSize);
}
#undef MAP
}
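// Maps a TableGen operand class name to a disassembler operand type; the
// REX.W and operand-size prefixes force the wider register/immediate
// interpretations, which are checked first.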
#define TYPE(str, type) if (s == str) return type;
OperandType RecognizableInstr::typeFromString(const std::string &s,
bool hasREX_WPrefix,
uint8_t OpSize) {
if(hasREX_WPrefix) {
// For instructions with a REX_W prefix, a declared 32-bit register encoding
// is special.
TYPE("GR32", TYPE_R32)
}
if(OpSize == X86Local::OpSize16) {
// For OpSize16 instructions, a declared 16-bit register or
// immediate encoding is special.
TYPE("GR16", TYPE_Rv)
} else if(OpSize == X86Local::OpSize32) {
// For OpSize32 instructions, a declared 32-bit register or
// immediate encoding is special.
TYPE("GR32", TYPE_Rv)
}
TYPE("i16mem", TYPE_M)
TYPE("i16imm", TYPE_IMM)
TYPE("i16i8imm", TYPE_IMM)
TYPE("GR16", TYPE_R16)
TYPE("i32mem", TYPE_M)
TYPE("i32imm", TYPE_IMM)
TYPE("i32i8imm", TYPE_IMM)
TYPE("GR32", TYPE_R32)
TYPE("GR32orGR64", TYPE_R32)
TYPE("i64mem", TYPE_M)
TYPE("i64i32imm", TYPE_IMM)
TYPE("i64i8imm", TYPE_IMM)
TYPE("GR64", TYPE_R64)
TYPE("i8mem", TYPE_M)
TYPE("i8imm", TYPE_IMM)
TYPE("u8imm", TYPE_UIMM8)
TYPE("i32u8imm", TYPE_UIMM8)
TYPE("GR8", TYPE_R8)
TYPE("VR128", TYPE_XMM)
TYPE("VR128X", TYPE_XMM)
TYPE("f128mem", TYPE_M)
TYPE("f256mem", TYPE_M)
TYPE("f512mem", TYPE_M)
TYPE("FR128", TYPE_XMM)
TYPE("FR64", TYPE_XMM)
TYPE("FR64X", TYPE_XMM)
TYPE("f64mem", TYPE_M)
TYPE("sdmem", TYPE_M)
TYPE("FR32", TYPE_XMM)
TYPE("FR32X", TYPE_XMM)
TYPE("f32mem", TYPE_M)
TYPE("ssmem", TYPE_M)
TYPE("RST", TYPE_ST)
+ TYPE("RSTi", TYPE_ST)
TYPE("i128mem", TYPE_M)
TYPE("i256mem", TYPE_M)
TYPE("i512mem", TYPE_M)
TYPE("i64i32imm_pcrel", TYPE_REL)
TYPE("i16imm_pcrel", TYPE_REL)
TYPE("i32imm_pcrel", TYPE_REL)
TYPE("SSECC", TYPE_IMM3)
TYPE("XOPCC", TYPE_IMM3)
TYPE("AVXCC", TYPE_IMM5)
TYPE("AVX512ICC", TYPE_AVX512ICC)
TYPE("AVX512RC", TYPE_IMM)
TYPE("brtarget32", TYPE_REL)
TYPE("brtarget16", TYPE_REL)
TYPE("brtarget8", TYPE_REL)
TYPE("f80mem", TYPE_M)
TYPE("lea64_32mem", TYPE_M)
TYPE("lea64mem", TYPE_M)
TYPE("VR64", TYPE_MM64)
TYPE("i64imm", TYPE_IMM)
TYPE("anymem", TYPE_M)
TYPE("opaquemem", TYPE_M)
TYPE("SEGMENT_REG", TYPE_SEGMENTREG)
TYPE("DEBUG_REG", TYPE_DEBUGREG)
TYPE("CONTROL_REG", TYPE_CONTROLREG)
TYPE("srcidx8", TYPE_SRCIDX)
TYPE("srcidx16", TYPE_SRCIDX)
TYPE("srcidx32", TYPE_SRCIDX)
TYPE("srcidx64", TYPE_SRCIDX)
TYPE("dstidx8", TYPE_DSTIDX)
TYPE("dstidx16", TYPE_DSTIDX)
TYPE("dstidx32", TYPE_DSTIDX)
TYPE("dstidx64", TYPE_DSTIDX)
TYPE("offset16_8", TYPE_MOFFS)
TYPE("offset16_16", TYPE_MOFFS)
TYPE("offset16_32", TYPE_MOFFS)
TYPE("offset32_8", TYPE_MOFFS)
TYPE("offset32_16", TYPE_MOFFS)
TYPE("offset32_32", TYPE_MOFFS)
TYPE("offset32_64", TYPE_MOFFS)
TYPE("offset64_8", TYPE_MOFFS)
TYPE("offset64_16", TYPE_MOFFS)
TYPE("offset64_32", TYPE_MOFFS)
TYPE("offset64_64", TYPE_MOFFS)
TYPE("VR256", TYPE_YMM)
TYPE("VR256X", TYPE_YMM)
TYPE("VR512", TYPE_ZMM)
TYPE("VK1", TYPE_VK)
TYPE("VK1WM", TYPE_VK)
TYPE("VK2", TYPE_VK)
TYPE("VK2WM", TYPE_VK)
TYPE("VK4", TYPE_VK)
TYPE("VK4WM", TYPE_VK)
TYPE("VK8", TYPE_VK)
TYPE("VK8WM", TYPE_VK)
TYPE("VK16", TYPE_VK)
TYPE("VK16WM", TYPE_VK)
TYPE("VK32", TYPE_VK)
TYPE("VK32WM", TYPE_VK)
TYPE("VK64", TYPE_VK)
TYPE("VK64WM", TYPE_VK)
TYPE("vx64mem", TYPE_MVSIBX)
TYPE("vx128mem", TYPE_MVSIBX)
TYPE("vx256mem", TYPE_MVSIBX)
TYPE("vy128mem", TYPE_MVSIBY)
TYPE("vy256mem", TYPE_MVSIBY)
TYPE("vx64xmem", TYPE_MVSIBX)
TYPE("vx128xmem", TYPE_MVSIBX)
TYPE("vx256xmem", TYPE_MVSIBX)
TYPE("vy128xmem", TYPE_MVSIBY)
TYPE("vy256xmem", TYPE_MVSIBY)
TYPE("vy512xmem", TYPE_MVSIBY)
TYPE("vz256mem", TYPE_MVSIBZ)
TYPE("vz512mem", TYPE_MVSIBZ)
TYPE("BNDR", TYPE_BNDR)
errs() << "Unhandled type string " << s << "\n";
llvm_unreachable("Unhandled type string");
}
#undef TYPE
#define ENCODING(str, encoding) if (s == str) return encoding;
OperandEncoding
RecognizableInstr::immediateEncodingFromString(const std::string &s,
uint8_t OpSize) {
if(OpSize != X86Local::OpSize16) {
// For instructions without an OpSize prefix, a declared 16-bit register or
// immediate encoding is special.
ENCODING("i16imm", ENCODING_IW)
}
ENCODING("i32i8imm", ENCODING_IB)
ENCODING("SSECC", ENCODING_IB)
ENCODING("XOPCC", ENCODING_IB)
ENCODING("AVXCC", ENCODING_IB)
ENCODING("AVX512ICC", ENCODING_IB)
ENCODING("AVX512RC", ENCODING_IRC)
ENCODING("i16imm", ENCODING_Iv)
ENCODING("i16i8imm", ENCODING_IB)
ENCODING("i32imm", ENCODING_Iv)
ENCODING("i64i32imm", ENCODING_ID)
ENCODING("i64i8imm", ENCODING_IB)
ENCODING("i8imm", ENCODING_IB)
ENCODING("u8imm", ENCODING_IB)
ENCODING("i32u8imm", ENCODING_IB)
// This is not a typo. Instructions like BLENDVPD put
// register IDs in 8-bit immediates nowadays.
ENCODING("FR32", ENCODING_IB)
ENCODING("FR64", ENCODING_IB)
ENCODING("FR128", ENCODING_IB)
ENCODING("VR128", ENCODING_IB)
ENCODING("VR256", ENCODING_IB)
ENCODING("FR32X", ENCODING_IB)
ENCODING("FR64X", ENCODING_IB)
ENCODING("VR128X", ENCODING_IB)
ENCODING("VR256X", ENCODING_IB)
ENCODING("VR512", ENCODING_IB)
errs() << "Unhandled immediate encoding " << s << "\n";
llvm_unreachable("Unhandled immediate encoding");
}
OperandEncoding
RecognizableInstr::rmRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("RST", ENCODING_FP)
+ ENCODING("RSTi", ENCODING_FP)
ENCODING("GR16", ENCODING_RM)
ENCODING("GR32", ENCODING_RM)
ENCODING("GR32orGR64", ENCODING_RM)
ENCODING("GR64", ENCODING_RM)
ENCODING("GR8", ENCODING_RM)
ENCODING("VR128", ENCODING_RM)
ENCODING("VR128X", ENCODING_RM)
ENCODING("FR128", ENCODING_RM)
ENCODING("FR64", ENCODING_RM)
ENCODING("FR32", ENCODING_RM)
ENCODING("FR64X", ENCODING_RM)
ENCODING("FR32X", ENCODING_RM)
ENCODING("VR64", ENCODING_RM)
ENCODING("VR256", ENCODING_RM)
ENCODING("VR256X", ENCODING_RM)
ENCODING("VR512", ENCODING_RM)
ENCODING("VK1", ENCODING_RM)
ENCODING("VK2", ENCODING_RM)
ENCODING("VK4", ENCODING_RM)
ENCODING("VK8", ENCODING_RM)
ENCODING("VK16", ENCODING_RM)
ENCODING("VK32", ENCODING_RM)
ENCODING("VK64", ENCODING_RM)
ENCODING("BNDR", ENCODING_RM)
errs() << "Unhandled R/M register encoding " << s << "\n";
llvm_unreachable("Unhandled R/M register encoding");
}
OperandEncoding
RecognizableInstr::roRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("GR16", ENCODING_REG)
ENCODING("GR32", ENCODING_REG)
ENCODING("GR32orGR64", ENCODING_REG)
ENCODING("GR64", ENCODING_REG)
ENCODING("GR8", ENCODING_REG)
ENCODING("VR128", ENCODING_REG)
ENCODING("FR128", ENCODING_REG)
ENCODING("FR64", ENCODING_REG)
ENCODING("FR32", ENCODING_REG)
ENCODING("VR64", ENCODING_REG)
ENCODING("SEGMENT_REG", ENCODING_REG)
ENCODING("DEBUG_REG", ENCODING_REG)
ENCODING("CONTROL_REG", ENCODING_REG)
ENCODING("VR256", ENCODING_REG)
ENCODING("VR256X", ENCODING_REG)
ENCODING("VR128X", ENCODING_REG)
ENCODING("FR64X", ENCODING_REG)
ENCODING("FR32X", ENCODING_REG)
ENCODING("VR512", ENCODING_REG)
ENCODING("VK1", ENCODING_REG)
ENCODING("VK2", ENCODING_REG)
ENCODING("VK4", ENCODING_REG)
ENCODING("VK8", ENCODING_REG)
ENCODING("VK16", ENCODING_REG)
ENCODING("VK32", ENCODING_REG)
ENCODING("VK64", ENCODING_REG)
ENCODING("VK1WM", ENCODING_REG)
ENCODING("VK2WM", ENCODING_REG)
ENCODING("VK4WM", ENCODING_REG)
ENCODING("VK8WM", ENCODING_REG)
ENCODING("VK16WM", ENCODING_REG)
ENCODING("VK32WM", ENCODING_REG)
ENCODING("VK64WM", ENCODING_REG)
ENCODING("BNDR", ENCODING_REG)
errs() << "Unhandled reg/opcode register encoding " << s << "\n";
llvm_unreachable("Unhandled reg/opcode register encoding");
}
OperandEncoding
RecognizableInstr::vvvvRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("GR32", ENCODING_VVVV)
ENCODING("GR64", ENCODING_VVVV)
ENCODING("FR32", ENCODING_VVVV)
ENCODING("FR128", ENCODING_VVVV)
ENCODING("FR64", ENCODING_VVVV)
ENCODING("VR128", ENCODING_VVVV)
ENCODING("VR256", ENCODING_VVVV)
ENCODING("FR32X", ENCODING_VVVV)
ENCODING("FR64X", ENCODING_VVVV)
ENCODING("VR128X", ENCODING_VVVV)
ENCODING("VR256X", ENCODING_VVVV)
ENCODING("VR512", ENCODING_VVVV)
ENCODING("VK1", ENCODING_VVVV)
ENCODING("VK2", ENCODING_VVVV)
ENCODING("VK4", ENCODING_VVVV)
ENCODING("VK8", ENCODING_VVVV)
ENCODING("VK16", ENCODING_VVVV)
ENCODING("VK32", ENCODING_VVVV)
ENCODING("VK64", ENCODING_VVVV)
errs() << "Unhandled VEX.vvvv register encoding " << s << "\n";
llvm_unreachable("Unhandled VEX.vvvv register encoding");
}
OperandEncoding
RecognizableInstr::writemaskRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("VK1WM", ENCODING_WRITEMASK)
ENCODING("VK2WM", ENCODING_WRITEMASK)
ENCODING("VK4WM", ENCODING_WRITEMASK)
ENCODING("VK8WM", ENCODING_WRITEMASK)
ENCODING("VK16WM", ENCODING_WRITEMASK)
ENCODING("VK32WM", ENCODING_WRITEMASK)
ENCODING("VK64WM", ENCODING_WRITEMASK)
errs() << "Unhandled mask register encoding " << s << "\n";
llvm_unreachable("Unhandled mask register encoding");
}
OperandEncoding
RecognizableInstr::memoryEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("i16mem", ENCODING_RM)
ENCODING("i32mem", ENCODING_RM)
ENCODING("i64mem", ENCODING_RM)
ENCODING("i8mem", ENCODING_RM)
ENCODING("ssmem", ENCODING_RM)
ENCODING("sdmem", ENCODING_RM)
ENCODING("f128mem", ENCODING_RM)
ENCODING("f256mem", ENCODING_RM)
ENCODING("f512mem", ENCODING_RM)
ENCODING("f64mem", ENCODING_RM)
ENCODING("f32mem", ENCODING_RM)
ENCODING("i128mem", ENCODING_RM)
ENCODING("i256mem", ENCODING_RM)
ENCODING("i512mem", ENCODING_RM)
ENCODING("f80mem", ENCODING_RM)
ENCODING("lea64_32mem", ENCODING_RM)
ENCODING("lea64mem", ENCODING_RM)
ENCODING("anymem", ENCODING_RM)
ENCODING("opaquemem", ENCODING_RM)
ENCODING("vx64mem", ENCODING_VSIB)
ENCODING("vx128mem", ENCODING_VSIB)
ENCODING("vx256mem", ENCODING_VSIB)
ENCODING("vy128mem", ENCODING_VSIB)
ENCODING("vy256mem", ENCODING_VSIB)
ENCODING("vx64xmem", ENCODING_VSIB)
ENCODING("vx128xmem", ENCODING_VSIB)
ENCODING("vx256xmem", ENCODING_VSIB)
ENCODING("vy128xmem", ENCODING_VSIB)
ENCODING("vy256xmem", ENCODING_VSIB)
ENCODING("vy512xmem", ENCODING_VSIB)
ENCODING("vz256mem", ENCODING_VSIB)
ENCODING("vz512mem", ENCODING_VSIB)
errs() << "Unhandled memory encoding " << s << "\n";
llvm_unreachable("Unhandled memory encoding");
}
OperandEncoding
RecognizableInstr::relocationEncodingFromString(const std::string &s,
uint8_t OpSize) {
if(OpSize != X86Local::OpSize16) {
// For instructions without an OpSize prefix, a declared 16-bit register or
// immediate encoding is special.
ENCODING("i16imm", ENCODING_IW)
}
ENCODING("i16imm", ENCODING_Iv)
ENCODING("i16i8imm", ENCODING_IB)
ENCODING("i32imm", ENCODING_Iv)
ENCODING("i32i8imm", ENCODING_IB)
ENCODING("i64i32imm", ENCODING_ID)
ENCODING("i64i8imm", ENCODING_IB)
ENCODING("i8imm", ENCODING_IB)
ENCODING("u8imm", ENCODING_IB)
ENCODING("i32u8imm", ENCODING_IB)
ENCODING("i64i32imm_pcrel", ENCODING_ID)
ENCODING("i16imm_pcrel", ENCODING_IW)
ENCODING("i32imm_pcrel", ENCODING_ID)
ENCODING("brtarget32", ENCODING_ID)
ENCODING("brtarget16", ENCODING_IW)
ENCODING("brtarget8", ENCODING_IB)
ENCODING("i64imm", ENCODING_IO)
ENCODING("offset16_8", ENCODING_Ia)
ENCODING("offset16_16", ENCODING_Ia)
ENCODING("offset16_32", ENCODING_Ia)
ENCODING("offset32_8", ENCODING_Ia)
ENCODING("offset32_16", ENCODING_Ia)
ENCODING("offset32_32", ENCODING_Ia)
ENCODING("offset32_64", ENCODING_Ia)
ENCODING("offset64_8", ENCODING_Ia)
ENCODING("offset64_16", ENCODING_Ia)
ENCODING("offset64_32", ENCODING_Ia)
ENCODING("offset64_64", ENCODING_Ia)
ENCODING("srcidx8", ENCODING_SI)
ENCODING("srcidx16", ENCODING_SI)
ENCODING("srcidx32", ENCODING_SI)
ENCODING("srcidx64", ENCODING_SI)
ENCODING("dstidx8", ENCODING_DI)
ENCODING("dstidx16", ENCODING_DI)
ENCODING("dstidx32", ENCODING_DI)
ENCODING("dstidx64", ENCODING_DI)
errs() << "Unhandled relocation encoding " << s << "\n";
llvm_unreachable("Unhandled relocation encoding");
}
OperandEncoding
RecognizableInstr::opcodeModifierEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("GR32", ENCODING_Rv)
ENCODING("GR64", ENCODING_RO)
ENCODING("GR16", ENCODING_Rv)
ENCODING("GR8", ENCODING_RB)
errs() << "Unhandled opcode modifier encoding " << s << "\n";
llvm_unreachable("Unhandled opcode modifier encoding");
}
#undef ENCODING
